@@ -153,6 +153,10 @@ pub struct FuncEnvironment<'module_environment> {
     /// any yield.
     epoch_deadline_var: cranelift_frontend::Variable,
 
+    /// A cached pointer to the epoch interrupt page so we don't have to
+    /// continually dig it out of the `VMStoreContext`.
+    epoch_interrupt_page_ptr_var: cranelift_frontend::Variable,
+
     /// A cached pointer to the per-Engine epoch counter, when
     /// performing epoch-based interruption. Initialized in the
     /// function prologue. We prefer to use a variable here rather
@@ -216,6 +220,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> {
             fuel_var: Variable::reserved_value(),
             epoch_deadline_var: Variable::reserved_value(),
             epoch_ptr_var: Variable::reserved_value(),
+            epoch_interrupt_page_ptr_var: Variable::reserved_value(),
 
             // Start with at least one fuel being consumed because even empty
             // functions should consume at least some fuel.
@@ -589,6 +594,43 @@ impl<'module_environment> FuncEnvironment<'module_environment> {
         self.epoch_check_full(builder, cur_epoch_value, continuation_block);
     }
 
+    /// Codegens what needs to go at the top of a function to support
+    /// `epoch_interruption_via_mmu`.
+    fn epoch_mmu_function_entry(&mut self, builder: &mut FunctionBuilder<'_>) {
+        debug_assert!(self.epoch_interrupt_page_ptr_var.is_reserved_value());
+        self.epoch_interrupt_page_ptr_var = builder.declare_var(self.pointer_type());
+
+        // Cache the pointer to the interrupt page in a local (and hopefully a
+        // register, at the discretion of regalloc) rather than digging it out
+        // of the `VMStoreContext` every time.
+        let vmstore_ctx = self.get_vmstore_context_ptr(builder);
+        let epoch_interrupt_page_ptr = builder.ins().load(
+            self.pointer_type(),
+            ir::MemFlags::trusted(),
+            vmstore_ctx,
+            ir::immediates::Offset32::new(
+                self.offsets.ptr.vmstore_context_epoch_interrupt_page_ptr() as i32,
+            ),
+        );
+        builder.def_var(self.epoch_interrupt_page_ptr_var, epoch_interrupt_page_ptr);
+
+        Self::epoch_mmu_interruption_check(epoch_interrupt_page_ptr, builder);
+    }
+
+    /// Codegens a dead load from the epoch interrupt page, which causes a trap
+    /// if an interrupt is due.
+    fn epoch_mmu_interruption_check(
+        epoch_interrupt_page_ptr: ir::Value,
+        builder: &mut FunctionBuilder<'_>,
+    ) {
+        let _ = builder.ins().load(
+            ir::types::I32, // Arbitrary; pick whatever works on all ISAs and is fastest.
+            ir::MemFlags::new().with_aligned().with_readonly(),
+            epoch_interrupt_page_ptr,
+            ir::immediates::Offset32::new(0),
+        );
+    }
+
     #[cfg(feature = "wmemcheck")]
     fn hook_malloc_exit(&mut self, builder: &mut FunctionBuilder, retvals: &[ir::Value]) {
         let check_malloc = self.builtin_functions.check_malloc(builder.func);
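The dead load above is the whole mechanism: the runtime makes it fault on demand by flipping the protection of the page it targets, and that fault is what delivers the interrupt. As a sketch of what the host side of this could look like (not part of this diff; the helper names and the 4 KiB page size are hypothetical), assuming the interrupt page is a dedicated page-aligned mapping:

use std::io;

// Assumed page size for this sketch; a real implementation would query the OS.
const EPOCH_PAGE_SIZE: usize = 4096;

/// Arm the interrupt: revoke read access so the next dead load faults,
/// letting the engine's fault handler turn it into an epoch yield or trap.
unsafe fn arm_epoch_interrupt(page: *mut libc::c_void) -> io::Result<()> {
    if libc::mprotect(page, EPOCH_PAGE_SIZE, libc::PROT_NONE) != 0 {
        return Err(io::Error::last_os_error());
    }
    Ok(())
}

/// Disarm the interrupt: restore read access so the dead loads succeed again.
unsafe fn disarm_epoch_interrupt(page: *mut libc::c_void) -> io::Result<()> {
    if libc::mprotect(page, EPOCH_PAGE_SIZE, libc::PROT_READ) != 0 {
        return Err(io::Error::last_os_error());
    }
    Ok(())
}

Whether the fault surfaces as a trap or as a yield back to the host is up to the engine's existing signal-handling machinery; the codegen side only guarantees a read of that page at function entry and at each check site.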
@@ -3314,6 +3356,13 @@ impl FuncEnvironment<'_> {
             self.epoch_check(builder);
         }
 
+        // If we're using MMU-based epoch detection, provoke an interrupt if
+        // it's time.
+        if self.tunables.epoch_interruption_via_mmu {
+            let page_ptr = builder.use_var(self.epoch_interrupt_page_ptr_var);
+            Self::epoch_mmu_interruption_check(page_ptr, builder);
+        }
+
         Ok(())
     }
 
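Note the `use_var` at the check site: because `epoch_mmu_function_entry` cached the page pointer in a cranelift-frontend `Variable`, each subsequent check reads an SSA value that register allocation can keep in a register instead of reloading it from the `VMStoreContext`. A self-contained sketch of that declare/def/use pattern, assuming a cranelift-frontend version with the `declare_var(ty) -> Variable` API this diff uses:

use cranelift_codegen::ir::{types, AbiParam, Function, InstBuilder, Signature, UserFuncName};
use cranelift_codegen::isa::CallConv;
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};

fn main() {
    // A trivial fn(i64) -> i64 standing in for a Wasm function prologue.
    let mut sig = Signature::new(CallConv::SystemV);
    sig.params.push(AbiParam::new(types::I64));
    sig.returns.push(AbiParam::new(types::I64));
    let mut func = Function::with_name_signature(UserFuncName::user(0, 0), sig);
    let mut fb_ctx = FunctionBuilderContext::new();
    let mut builder = FunctionBuilder::new(&mut func, &mut fb_ctx);

    let block = builder.create_block();
    builder.append_block_params_for_function_params(block);
    builder.switch_to_block(block);
    builder.seal_block(block);

    // Declare the variable once and define it in the prologue...
    let cached_ptr = builder.declare_var(types::I64);
    let param = builder.block_params(block)[0];
    builder.def_var(cached_ptr, param);

    // ...then `use_var` at every later site; the builder wires up the SSA
    // form so the value can stay in a register across the function body.
    let reuse = builder.use_var(cached_ptr);
    builder.ins().return_(&[reuse]);
    builder.finalize();
    println!("{}", func.display());
}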
@@ -3375,8 +3424,7 @@ impl FuncEnvironment<'_> {
         }
 
         if self.tunables.epoch_interruption_via_mmu {
-            builder.ins().iconst(I32, 33); // a useless constant, hopefully not optimized out
-            // NEXT: Dead-load something from the vmctx instead.
+            self.epoch_mmu_function_entry(builder);
        }
 
         #[cfg(feature = "wmemcheck")]
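A closing note on why the dead load works where the old `iconst` placeholder did not (my reading of Cranelift's semantics, not something the diff states): `MemFlags::new()` leaves `notrap` unset, so the optimizer must assume the check load can trap and therefore cannot delete it even though its result is unused, whereas a pure instruction like `iconst` is fair game for dead-code elimination. In flag terms, using the same API as the diff:

// Sketch of the contrast: `trusted()` also sets `notrap`, which is exactly
// what this check must avoid, since a non-trapping load whose result is
// unused could be dropped by the optimizer.
let check_flags = ir::MemFlags::new().with_aligned().with_readonly();
let trusted_flags = ir::MemFlags::trusted(); // notrap + aligned

The `aligned` and `readonly` bits only license merging or reordering of redundant checks; they do not make the load removable.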