Skip to content

Commit 6e7112d

Browse files
committed
Replace nops with dead loads from the interrupt page. Add MMU-based epoch checks to loop headers as well.
Cache the interrupt page ptr in a local for speed, as we did with the epoch deadline. Here is how I interpret the generated code in epoch-interruption-mmu.wat:

```
;; @001B v2 = load.i64 notrap aligned readonly can_move v0+8  ;; Skip over magic number (4b) and alignment (another 4b).
;; @001B v3 = load.i64 notrap aligned v2+16                   ;; Get interrupt page ptr.
;; @001B v4 = load.i32 aligned readonly v3                    ;; Read from page ptr.
```
1 parent d120988 commit 6e7112d

3 files changed

Lines changed: 63 additions & 5 deletions

File tree

crates/cranelift/src/func_environ.rs

Lines changed: 50 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -153,6 +153,10 @@ pub struct FuncEnvironment<'module_environment> {
153153
/// any yield.
154154
epoch_deadline_var: cranelift_frontend::Variable,
155155

156+
// A cached pointer to the epoch interrupt page so we don't have to
157+
// continually dig it out of the `VMStoreContext`
158+
epoch_interrupt_page_ptr_var: cranelift_frontend::Variable,
159+
156160
/// A cached pointer to the per-Engine epoch counter, when
157161
/// performing epoch-based interruption. Initialized in the
158162
/// function prologue. We prefer to use a variable here rather
@@ -216,6 +220,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> {
216220
fuel_var: Variable::reserved_value(),
217221
epoch_deadline_var: Variable::reserved_value(),
218222
epoch_ptr_var: Variable::reserved_value(),
223+
epoch_interrupt_page_ptr_var: Variable::reserved_value(),
219224

220225
// Start with at least one fuel being consumed because even empty
221226
// functions should consume at least some fuel.
@@ -589,6 +594,43 @@ impl<'module_environment> FuncEnvironment<'module_environment> {
589594
self.epoch_check_full(builder, cur_epoch_value, continuation_block);
590595
}
591596

597+
/// Codegens what needs to go at the top of a function to support
598+
/// epoch_interrupt_via_mmu.
599+
fn epoch_mmu_function_entry(&mut self, builder: &mut FunctionBuilder<'_>) {
600+
debug_assert!(self.epoch_interrupt_page_ptr_var.is_reserved_value());
601+
self.epoch_interrupt_page_ptr_var = builder.declare_var(self.pointer_type());
602+
603+
// Cache ptr to interrupt page in a local (and hopefully a register, at
604+
// the discretion of regalloc), rather than digging it out of the
605+
// `VMStoreContext` every time.
606+
let vmstore_ctx = self.get_vmstore_context_ptr(builder);
607+
let epoch_interrupt_page_ptr = builder.ins().load(
608+
self.pointer_type(),
609+
ir::MemFlags::trusted(),
610+
vmstore_ctx,
611+
ir::immediates::Offset32::new(
612+
self.offsets.ptr.vmstore_context_epoch_interrupt_page_ptr() as i32,
613+
),
614+
);
615+
builder.def_var(self.epoch_interrupt_page_ptr_var, epoch_interrupt_page_ptr);
616+
617+
Self::epoch_mmu_interruption_check(epoch_interrupt_page_ptr, builder);
618+
}
619+
620+
/// Codegens a dead load from the epoch interrupt page, which causes a trap
621+
/// if an interrupt is due.
622+
fn epoch_mmu_interruption_check(
623+
epoch_interrupt_page_ptr: ir::Value,
624+
builder: &mut FunctionBuilder<'_>,
625+
) {
626+
let _ = builder.ins().load(
627+
ir::types::I32, // Arbitrary. Pick whatever works on all ISAs and is fastest.
628+
ir::MemFlags::new().with_aligned().with_readonly(),
629+
epoch_interrupt_page_ptr,
630+
ir::immediates::Offset32::new(0),
631+
);
632+
}
633+
592634
#[cfg(feature = "wmemcheck")]
593635
fn hook_malloc_exit(&mut self, builder: &mut FunctionBuilder, retvals: &[ir::Value]) {
594636
let check_malloc = self.builtin_functions.check_malloc(builder.func);
@@ -3314,6 +3356,13 @@ impl FuncEnvironment<'_> {
33143356
self.epoch_check(builder);
33153357
}
33163358

3359+
// If we're using MMU-based epoch detection, provoke an interrupt if
3360+
// it's time.
3361+
if self.tunables.epoch_interruption_via_mmu {
3362+
let page_ptr = builder.use_var(self.epoch_interrupt_page_ptr_var);
3363+
Self::epoch_mmu_interruption_check(page_ptr, builder);
3364+
}
3365+
33173366
Ok(())
33183367
}
33193368

@@ -3375,8 +3424,7 @@ impl FuncEnvironment<'_> {
33753424
}
33763425

33773426
if self.tunables.epoch_interruption_via_mmu {
3378-
builder.ins().iconst(I32, 33); // a useless constant, hopefully not optimized out
3379-
// NEXT: Dead-load something from the vmctx instead.
3427+
self.epoch_mmu_function_entry(builder);
33803428
}
33813429

33823430
#[cfg(feature = "wmemcheck")]

crates/environ/src/vmoffsets.rs

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -175,10 +175,16 @@ pub trait PtrSize {
175175
self.vmstore_context_fuel_consumed() + 8
176176
}
177177

178+
/// Return the offset of the `epoch_interrupt_page_ptr` field of `VMStoreContext`.
179+
#[inline]
180+
fn vmstore_context_epoch_interrupt_page_ptr(&self) -> u8 {
181+
self.vmstore_context_epoch_deadline() + 8
182+
}
183+
178184
/// Return the offset of the `stack_limit` field of `VMStoreContext`
179185
#[inline]
180186
fn vmstore_context_stack_limit(&self) -> u8 {
181-
self.vmstore_context_epoch_deadline() + 8
187+
self.vmstore_context_epoch_interrupt_page_ptr() + self.size()
182188
}
183189

184190
/// Return the offset of the `gc_heap` field of `VMStoreContext`.

tests/disas/epoch-interruption-mmu.wat

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,15 @@
88
;; function u0:0(i64 vmctx, i64) tail {
99
;; gv0 = vmctx
1010
;; gv1 = load.i64 notrap aligned readonly gv0+8
11-
;; gv2 = load.i64 notrap aligned gv1+16
11+
;; gv2 = load.i64 notrap aligned gv1+24
12+
;; gv3 = vmctx
13+
;; gv4 = load.i64 notrap aligned readonly can_move gv3+8
1214
;; stack_limit = gv2
1315
;;
1416
;; block0(v0: i64, v1: i64):
15-
;; @001b v2 = iconst.i32 33
17+
;; @001b v2 = load.i64 notrap aligned readonly can_move v0+8
18+
;; @001b v3 = load.i64 notrap aligned v2+16
19+
;; @001b v4 = load.i32 aligned readonly v3
1620
;; @001c jump block1
1721
;;
1822
;; block1:

0 commit comments

Comments (0)