feat: add a knob for reset stack #4813

Merged
4 changes: 3 additions & 1 deletion crates/fuzzing/src/generators/config.rs
@@ -181,7 +181,8 @@ impl Config {
self.wasmtime.memory_guaranteed_dense_image_size,
))
.allocation_strategy(self.wasmtime.strategy.to_wasmtime())
.generate_address_map(self.wasmtime.generate_address_map);
.generate_address_map(self.wasmtime.generate_address_map)
.async_stack_zeroing(self.wasmtime.async_stack_zeroing);

self.wasmtime.codegen.configure(&mut cfg);

@@ -386,6 +387,7 @@ pub struct WasmtimeConfig {
padding_between_functions: Option<u16>,
generate_address_map: bool,
native_unwind_info: bool,
async_stack_zeroing: bool,
}

impl WasmtimeConfig {
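A note on why adding the field is enough for the fuzzer to exercise the new knob: assuming `WasmtimeConfig` derives `arbitrary::Arbitrary` (the derive itself is outside this hunk), the new bool is filled from fuzz input like every other field and then forwarded to the builder as shown above. A minimal stand-in sketch, requiring the `arbitrary` crate with its derive feature; `DemoConfig` is an invented name:

```rust
use arbitrary::{Arbitrary, Unstructured};

#[derive(Arbitrary)]
struct DemoConfig {
    // Stands in for the real `WasmtimeConfig::async_stack_zeroing` field.
    async_stack_zeroing: bool,
}

fn main() -> arbitrary::Result<()> {
    // Fuzz engines hand the generator raw bytes; `Unstructured` turns them
    // into typed values, including this bool.
    let raw = [0x01, 0x00, 0xff];
    let mut u = Unstructured::new(&raw);
    let cfg = DemoConfig::arbitrary(&mut u)?;
    // The real code then does:
    //     .async_stack_zeroing(self.wasmtime.async_stack_zeroing)
    println!("async_stack_zeroing = {}", cfg.async_stack_zeroing);
    Ok(())
}
```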
62 changes: 56 additions & 6 deletions crates/runtime/src/instance/allocator/pooling.rs
@@ -39,7 +39,7 @@ cfg_if::cfg_if! {
use imp::{commit_memory_pages, commit_table_pages, decommit_memory_pages, decommit_table_pages};

#[cfg(all(feature = "async", unix))]
use imp::{commit_stack_pages, decommit_stack_pages};
use imp::{commit_stack_pages, reset_stack_pages_to_zero};

#[cfg(feature = "async")]
use super::FiberStackError;
@@ -887,11 +887,16 @@ struct StackPool {
max_instances: usize,
page_size: usize,
index_allocator: Mutex<PoolingAllocationState>,
async_stack_zeroing: bool,
}

#[cfg(all(feature = "async", unix))]
impl StackPool {
fn new(instance_limits: &InstanceLimits, stack_size: usize) -> Result<Self> {
fn new(
instance_limits: &InstanceLimits,
stack_size: usize,
async_stack_zeroing: bool,
) -> Result<Self> {
use rustix::mm::{mprotect, MprotectFlags};

let page_size = crate::page_size();
@@ -931,6 +936,7 @@ impl StackPool {
stack_size,
max_instances,
page_size,
async_stack_zeroing,
// We always use a `NextAvailable` strategy for stack
// allocation. We don't want or need an affinity policy
// here: stacks do not benefit from being allocated to the
@@ -997,7 +1003,9 @@ impl StackPool {
let index = (start_of_stack - base) / self.stack_size;
assert!(index < self.max_instances);

decommit_stack_pages(bottom_of_stack as _, stack_size).unwrap();
if self.async_stack_zeroing {
reset_stack_pages_to_zero(bottom_of_stack as _, stack_size).unwrap();
}

self.index_allocator.lock().unwrap().free(SlotId(index));
}
@@ -1024,6 +1032,7 @@ impl PoolingInstanceAllocator {
instance_limits: InstanceLimits,
stack_size: usize,
tunables: &Tunables,
async_stack_zeroing: bool,
) -> Result<Self> {
if instance_limits.count == 0 {
bail!("the instance count limit cannot be zero");
@@ -1032,11 +1041,12 @@
let instances = InstancePool::new(strategy, &instance_limits, tunables)?;

drop(stack_size); // suppress unused warnings w/o async feature
drop(async_stack_zeroing); // suppress unused warnings w/o async feature

Ok(Self {
instances: instances,
#[cfg(all(feature = "async", unix))]
stacks: StackPool::new(&instance_limits, stack_size)?,
stacks: StackPool::new(&instance_limits, stack_size, async_stack_zeroing)?,
#[cfg(all(feature = "async", windows))]
stack_size,
})
@@ -1332,6 +1342,7 @@ mod test {
..Default::default()
},
1,
true,
)?;

let native_page_size = crate::page_size();
@@ -1408,6 +1419,7 @@
},
4096,
&Tunables::default(),
true,
)
.map_err(|e| e.to_string())
.expect_err("expected a failure constructing instance allocator"),
@@ -1430,6 +1442,7 @@
static_memory_bound: 1,
..Tunables::default()
},
true,
)
.map_err(|e| e.to_string())
.expect_err("expected a failure constructing instance allocator"),
@@ -1453,6 +1466,7 @@
static_memory_offset_guard_size: 0,
..Tunables::default()
},
true
)
.map_err(|e| e.to_string())
.expect_err("expected a failure constructing instance allocator"),
@@ -1473,12 +1487,13 @@
memories: 0,
..Default::default()
},
4096,
128,
&Tunables::default(),
true,
)?;

unsafe {
for _ in 0..10 {
for _ in 0..255 {
let stack = allocator.allocate_fiber_stack()?;

// The stack pointer is at the top, so decrement it first
Expand All @@ -1493,4 +1508,39 @@ mod test {

Ok(())
}

#[cfg(all(unix, target_pointer_width = "64", feature = "async"))]
#[test]
fn test_stack_unzeroed() -> Result<()> {
let allocator = PoolingInstanceAllocator::new(
PoolingAllocationStrategy::NextAvailable,
InstanceLimits {
count: 1,
table_elements: 0,
memory_pages: 0,
tables: 0,
memories: 0,
..Default::default()
},
128,
&Tunables::default(),
false,
)?;

unsafe {
for i in 0..255 {
let stack = allocator.allocate_fiber_stack()?;

// The stack pointer is at the top, so decrement it first
let addr = stack.top().unwrap().sub(1);

assert_eq!(*addr, i);
*addr = i + 1;

allocator.deallocate_fiber_stack(&stack);
}
}

Ok(())
}
}
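For reviewers skimming the two tests above, here is a condensed, self-contained model (not Wasmtime's code) of the behavior they pin down: a pooled stack slot is reused across allocations, and only when `async_stack_zeroing` is enabled does deallocation wipe it. `MiniStackPool`, `alloc`, and `free` are invented names for illustration.

```rust
struct MiniStackPool {
    slot: Vec<u8>, // one reusable "stack" slot
    async_stack_zeroing: bool,
}

impl MiniStackPool {
    fn new(size: usize, async_stack_zeroing: bool) -> Self {
        Self { slot: vec![0; size], async_stack_zeroing }
    }

    fn alloc(&mut self) -> &mut [u8] {
        // Hand out the same slot every time, like the pool reusing an index.
        &mut self.slot
    }

    fn free(&mut self) {
        // The PR's knob: reset only when explicitly requested.
        if self.async_stack_zeroing {
            self.slot.fill(0);
        }
    }
}

fn main() {
    let mut zeroing = MiniStackPool::new(128, true);
    zeroing.alloc()[0] = 42;
    zeroing.free();
    assert_eq!(zeroing.alloc()[0], 0); // wiped, as in the zeroed test above

    let mut raw = MiniStackPool::new(128, false);
    raw.alloc()[0] = 42;
    raw.free();
    assert_eq!(raw.alloc()[0], 42); // retained, as in `test_stack_unzeroed`
}
```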
2 changes: 1 addition & 1 deletion crates/runtime/src/instance/allocator/pooling/unix.rs
@@ -77,6 +77,6 @@ pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
}

#[cfg(feature = "async")]
pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
pub fn reset_stack_pages_to_zero(addr: *mut u8, len: usize) -> Result<()> {
decommit(addr, len, false)
}
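The renamed helper still calls the same `decommit` routine, whose implementation is outside this diff. For context, a minimal sketch of the mechanism presumably involved on Linux, written against the raw `libc` crate rather than the `rustix` wrappers the crate itself uses: `madvise(MADV_DONTNEED)` on a private anonymous mapping drops the dirty pages, so the next access is backed by fresh zero pages — which is what makes "decommit" double as "reset to zero".

```rust
use std::ptr;

fn main() {
    let len = 4096usize;
    unsafe {
        // Map one private anonymous page; it starts out zero-filled.
        let addr = libc::mmap(
            ptr::null_mut(),
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1,
            0,
        );
        assert_ne!(addr, libc::MAP_FAILED);

        // Dirty the page, as a fiber running on this stack would.
        *(addr as *mut u8) = 0xAA;
        assert_eq!(*(addr as *const u8), 0xAA);

        // "Reset to zero": hand the dirty page back to the kernel.
        assert_eq!(libc::madvise(addr, len, libc::MADV_DONTNEED), 0);

        // The next read faults in a zero page.
        assert_eq!(*(addr as *const u8), 0);

        libc::munmap(addr, len);
    }
}
```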
32 changes: 32 additions & 0 deletions crates/wasmtime/src/config.rs
@@ -109,6 +109,7 @@ pub struct Config {
pub(crate) memory_init_cow: bool,
pub(crate) memory_guaranteed_dense_image_size: u64,
pub(crate) force_memory_init_memfd: bool,
pub(crate) async_stack_zeroing: bool,
}

/// User-provided configuration for the compiler.
@@ -196,6 +197,7 @@ impl Config {
memory_init_cow: true,
memory_guaranteed_dense_image_size: 16 << 20,
force_memory_init_memfd: false,
async_stack_zeroing: false,
};
#[cfg(compiler)]
{
@@ -341,6 +343,35 @@ impl Config {
self
}

/// Configures whether or not stacks used for async futures are reset to
/// zero after usage.
///
/// When the [`async_support`](Config::async_support) method is enabled for
/// Wasmtime and the [`call_async`] variant of calling WebAssembly is used,
/// Wasmtime creates a separate runtime execution stack for each future
/// produced by [`call_async`]. With the pooling instance allocator
/// ([`InstanceAllocationStrategy::Pooling`]) these stacks are allocated
/// from a pool, and deallocation simply releases the stack back to the
/// pool. By default Wasmtime does not reset the contents of the stack back
/// to zero during deallocation.
///
/// Enabling this option resets each stack back to zero on deallocation as a
/// defense-in-depth mechanism. This is not required for correctness, and it
/// can be a costly operation in highly concurrent environments because
/// modifying the virtual address space requires process-wide
/// synchronization.
///
/// This option defaults to `false`.
///
/// [`call_async`]: crate::TypedFunc::call_async
#[cfg(feature = "async")]
#[cfg_attr(nightlydoc, doc(cfg(feature = "async")))]
pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
self.async_stack_zeroing = enable;
self
}

/// Configures whether DWARF debug information will be emitted during
/// compilation.
///
@@ -1432,6 +1463,7 @@ impl Config {
instance_limits,
stack_size,
&self.tunables,
self.async_stack_zeroing,
)?)),
}
}
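To show the new knob from the embedder's side, a minimal sketch of opting in (not part of the diff). The pooling-allocator setup that makes the option meaningful is elided here because its builder shape has varied across Wasmtime versions; only `async_support` and the new `async_stack_zeroing` call are the point.

```rust
use wasmtime::{Config, Engine};

fn main() -> anyhow::Result<()> {
    let mut config = Config::new();
    config.async_support(true);
    // The knob added in this PR: wipe each pooled fiber stack when it is
    // returned to the pool. Defaults to `false`; enabling it trades extra
    // deallocation cost for defense in depth.
    config.async_stack_zeroing(true);

    let _engine = Engine::new(&config)?;
    Ok(())
}
```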