Skip to content

Commit 1d94a21

Browse files
committed
Move interrupt page initialization to a reasonable place.
Now it is initialized only if the `epoch-interruption-via-mmu` option is on. And, because the only instantiation of `VMStoreContext` is in the course of instantiating a `StoreOpaque`, a decent place to dispose of it is in `Drop for StoreOpaque`.
1 parent c169958 commit 1d94a21

2 files changed

Lines changed: 54 additions & 29 deletions

File tree

crates/wasmtime/src/runtime/store.rs

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -597,7 +597,11 @@ impl<T> Store<T> {
597597
let inner = StoreOpaque {
598598
_marker: marker::PhantomPinned,
599599
engine: engine.clone(),
600-
vm_store_context: Default::default(),
600+
vm_store_context: if engine.tunables().epoch_interruption_via_mmu {
601+
VMStoreContext::with_interrupt_page()
602+
} else {
603+
VMStoreContext::default()
604+
},
601605
#[cfg(feature = "stack-switching")]
602606
continuations: Vec::new(),
603607
instances: PrimaryMap::new(),
@@ -2519,6 +2523,9 @@ impl Drop for StoreOpaque {
25192523
}
25202524
}
25212525
}
2526+
// VMStoreContext is pod-type, so we dispose of the interrupt page on it
2527+
// here instead, if allocated.
2528+
self.vm_store_context.unmap_interrupt_page();
25222529
}
25232530
}
25242531

crates/wasmtime/src/runtime/vm/vmcontext.rs

Lines changed: 46 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1100,7 +1100,7 @@ pub struct VMStoreContext {
11001100
/// When it is time to switch, the host uses mprotect() to forbid reads. The
11011101
/// fault soon caused by guest code then lands in the signal handler, which
11021102
/// effects a switch and resets the page permissions.
1103-
pub epoch_interrupt_page_ptr: Option<VmPtr<c_void>>, // ptr-sized
1103+
epoch_interrupt_page_ptr: Option<VmPtr<c_void>>, // ptr-sized
11041104

11051105
/// Current stack limit of the wasm module.
11061106
///
@@ -1184,23 +1184,7 @@ impl Default for VMStoreContext {
11841184
VMStoreContext {
11851185
fuel_consumed: UnsafeCell::new(0),
11861186
epoch_deadline: UnsafeCell::new(0),
1187-
// TODO: Allocate this only when epoch_interruption_via_mmu is on.
1188-
// Probably set it to None here and allocate it elsewhere.
1189-
epoch_interrupt_page_ptr: unsafe {
1190-
let page_ptr = mmap_anonymous(
1191-
ptr::null_mut(), // Let the kernel pick location.
1192-
page_size(),
1193-
ProtFlags::READ,
1194-
// Privacy doesn't matter, as we never write to the
1195-
// interrupt page. However, private is the safer choice in
1196-
// case someone starts doing so.
1197-
MapFlags::PRIVATE,
1198-
)
1199-
.expect("an interrupt page should be allocable");
1200-
let non_null_page_ptr = NonNull::new(page_ptr)
1201-
.expect("if mmap returns successfully, its result should not be null");
1202-
Some(non_null_page_ptr.into())
1203-
},
1187+
epoch_interrupt_page_ptr: None,
12041188
stack_limit: UnsafeCell::new(usize::max_value()),
12051189
gc_heap: VMMemoryDefinition {
12061190
base: NonNull::dangling().into(),
@@ -1215,16 +1199,50 @@ impl Default for VMStoreContext {
12151199
}
12161200
}
12171201

1218-
// TODO: Kill this and find somewhere else to munmap it, because VMStoreContext
1219-
// is documented as being pod-type above.
1220-
// impl Drop for VMStoreContext {
1221-
// fn drop(&mut self) {
1222-
// unsafe {
1223-
// munmap(self.epoch_interrupt_page_ptr, page_size())
1224-
// .expect("should be able to unmap interrupt page");
1225-
// }
1226-
// }
1227-
// }
1202+
impl VMStoreContext {
1203+
/// Returns a new instance that has a page of memory allocated for use with
1204+
/// epoch_interruption_via_mmu.
1205+
///
1206+
/// The caller is responsible for calling
1207+
/// [`VMStoreContext::unmap_interrupt_page()`], since this has no
1208+
/// destructor.
1209+
pub fn with_interrupt_page() -> Self {
1210+
let mut ret = Self::default();
1211+
ret.epoch_interrupt_page_ptr = unsafe {
1212+
// mmap_anonymous() is unsafe.
1213+
let page_ptr = mmap_anonymous(
1214+
ptr::null_mut(), // Let the kernel pick location.
1215+
page_size(),
1216+
ProtFlags::READ,
1217+
// Privacy doesn't matter, as we never write to the
1218+
// interrupt page. However, private is the safer choice in
1219+
// case someone starts doing so.
1220+
MapFlags::PRIVATE,
1221+
)
1222+
.expect("an interrupt page should be allocable");
1223+
let non_null_page_ptr = NonNull::new(page_ptr)
1224+
.expect("if mmap returns successfully, its result should not be null");
1225+
Some(non_null_page_ptr.into())
1226+
};
1227+
ret
1228+
}
1229+
1230+
/// Disposes of any allocated epoch interrupt page. This must be called if
1231+
/// [`VMStoreContext::with_interrupt_page()`] was used to construct this
1232+
/// instance, lest we leak a page.
1233+
pub fn unmap_interrupt_page(&mut self) {
1234+
if let Some(interrupt_page) = self.epoch_interrupt_page_ptr {
1235+
// SAFETY: The only origin of the interrupt_page ptr is
1236+
// with_interrupt_page(), which panics unless it succeeds and is
1237+
// non-null.
1238+
unsafe {
1239+
munmap(interrupt_page.as_ptr(), page_size())
1240+
.expect("should be able to unmap interrupt page");
1241+
}
1242+
}
1243+
self.epoch_interrupt_page_ptr = None
1244+
}
1245+
}
12281246

12291247
#[cfg(test)]
12301248
mod test_vmstore_context {

0 commit comments

Comments
 (0)