diff --git a/Cargo.lock b/Cargo.lock index 562e79b75183..65a2d50dfe68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3324,6 +3324,7 @@ dependencies = [ "memoffset", "more-asserts", "psm", + "rand 0.7.3", "region", "thiserror", "wasmtime-environ", diff --git a/crates/environ/src/vmoffsets.rs b/crates/environ/src/vmoffsets.rs index 7f74f46754c1..042f0b9dcfbc 100644 --- a/crates/environ/src/vmoffsets.rs +++ b/crates/environ/src/vmoffsets.rs @@ -50,6 +50,7 @@ fn align(offset: u32, width: u32) -> u32 { /// This class computes offsets to fields within `VMContext` and other /// related structs that JIT code accesses directly. +#[derive(Debug, Clone, Copy)] pub struct VMOffsets { /// The size in bytes of a pointer on the target. pub pointer_size: u8, diff --git a/crates/runtime/Cargo.toml b/crates/runtime/Cargo.toml index 82c0b5fddd9a..dfecbf15e081 100644 --- a/crates/runtime/Cargo.toml +++ b/crates/runtime/Cargo.toml @@ -24,6 +24,7 @@ cfg-if = "1.0" backtrace = "0.3.55" lazy_static = "1.3.0" psm = "0.1.11" +rand = "0.7.3" [target.'cfg(target_os = "windows")'.dependencies] winapi = { version = "0.3.7", features = ["winbase", "memoryapi", "errhandlingapi"] } diff --git a/crates/runtime/src/instance/allocator.rs b/crates/runtime/src/instance/allocator.rs index 17aa76389c5a..4de076e2c531 100644 --- a/crates/runtime/src/instance/allocator.rs +++ b/crates/runtime/src/instance/allocator.rs @@ -26,6 +26,12 @@ use wasmtime_environ::{ ir, Module, ModuleTranslation, ModuleType, OwnedDataInitializer, TableElements, VMOffsets, }; +mod pooling; + +pub use self::pooling::{ + InstanceLimits, ModuleLimits, PoolingAllocationStrategy, PoolingInstanceAllocator, +}; + /// Represents a request for a new runtime instance. pub struct InstanceAllocationRequest<'a> { /// The module being instantiated. @@ -72,11 +78,18 @@ pub enum InstantiationError { /// A trap ocurred during instantiation, after linking. 
#[error("Trap occurred during instantiation")] Trap(Trap), + + /// A limit on how many instances are supported has been reached. + #[error("Limit of {0} concurrent instances has been reached")] + Limit(u32), } /// An error while creating a fiber stack. #[cfg(feature = "async")] #[derive(Error, Debug)] pub enum FiberStackError { + /// Insufficient resources available for the request. + #[error("Insufficient resources: {0}")] + Resource(String), /// An error for when the allocator doesn't support custom fiber stacks. #[error("Custom fiber stacks are not supported by the allocator")] NotSupported, @@ -220,7 +233,7 @@ unsafe fn initialize_vmcontext( globals.len(), ); - // Initialize the defined functions + // Initialize the functions for (index, sig) in instance.module.functions.iter() { let type_index = lookup_shared_signature(*sig); diff --git a/crates/runtime/src/instance/allocator/pooling.rs b/crates/runtime/src/instance/allocator/pooling.rs new file mode 100644 index 000000000000..6b1b4ed813f6 --- /dev/null +++ b/crates/runtime/src/instance/allocator/pooling.rs @@ -0,0 +1,1453 @@ +use super::{ + initialize_vmcontext, FiberStackError, InstanceAllocationRequest, InstanceAllocator, + InstanceHandle, InstantiationError, +}; +use crate::{ + instance::Instance, table::max_table_element_size, Memory, Mmap, OnDemandInstanceAllocator, + Table, VMContext, +}; +use rand::Rng; +use std::cell::RefCell; +use std::cmp::min; +use std::convert::TryFrom; +use std::mem; +use std::sync::{Arc, Mutex}; +use wasmtime_environ::{ + entity::{EntitySet, PrimaryMap}, + MemoryStyle, Module, ModuleTranslation, OwnedDataInitializer, Tunables, VMOffsets, + WASM_PAGE_SIZE, +}; + +#[cfg(target_os = "linux")] +mod linux; +#[cfg(target_os = "linux")] +use linux::{decommit, make_accessible}; + +#[cfg(all(unix, not(target_os = "linux")))] +mod unix; +#[cfg(all(unix, not(target_os = "linux")))] +use unix::{decommit, make_accessible}; + +#[cfg(windows)] +mod windows; +#[cfg(windows)] +use 
windows::{decommit, make_accessible}; + +fn round_up_to_pow2(n: usize, to: usize) -> usize { + debug_assert!(to > 0); + debug_assert!(to.is_power_of_two()); + (n + to - 1) & !(to - 1) +} + +/// Represents the limits placed on a module for compiling with the pooling instance allocator. +#[derive(Debug, Copy, Clone)] +pub struct ModuleLimits { + /// The maximum number of imported functions for a module (default is 1000). + pub imported_functions: u32, + + /// The maximum number of imported tables for a module (default is 0). + pub imported_tables: u32, + + /// The maximum number of imported memories for a module (default is 0). + pub imported_memories: u32, + + /// The maximum number of imported globals for a module (default is 0). + pub imported_globals: u32, + + /// The maximum number of defined types for a module (default is 100). + pub types: u32, + + /// The maximum number of defined functions for a module (default is 10000). + pub functions: u32, + + /// The maximum number of defined tables for a module (default is 1). + pub tables: u32, + + /// The maximum number of defined memories for a module (default is 1). + pub memories: u32, + + /// The maximum number of defined globals for a module (default is 10). + pub globals: u32, + + /// The maximum table elements for any table defined in a module (default is 10000). + /// + /// If a table's minimum element limit is greater than this value, the module will + /// fail to compile. + /// + /// If a table's maximum element limit is unbounded or greater than this value, + /// the maximum will be `table_elements` for the purpose of any `table.grow` instruction. + pub table_elements: u32, + + /// The maximum number of pages for any memory defined in a module (default is 160). + /// + /// The default of 160 means at most 10 MiB of host memory may be committed for each instance. + /// + /// If a memory's minimum page limit is greater than this value, the module will + /// fail to compile. 
+ /// + /// If a memory's maximum page limit is unbounded or greater than this value, + /// the maximum will be `memory_pages` for the purpose of any `memory.grow` instruction. + /// + /// This value cannot exceed any address space limits placed on instances. + pub memory_pages: u32, +} + +impl ModuleLimits { + fn validate_module(&self, module: &Module) -> Result<(), String> { + if module.num_imported_funcs > self.imported_functions as usize { + return Err(format!( + "imported function count of {} exceeds the limit of {}", + module.num_imported_funcs, self.imported_functions + )); + } + + if module.num_imported_tables > self.imported_tables as usize { + return Err(format!( + "imported tables count of {} exceeds the limit of {}", + module.num_imported_tables, self.imported_tables + )); + } + + if module.num_imported_memories > self.imported_memories as usize { + return Err(format!( + "imported memories count of {} exceeds the limit of {}", + module.num_imported_memories, self.imported_memories + )); + } + + if module.num_imported_globals > self.imported_globals as usize { + return Err(format!( + "imported globals count of {} exceeds the limit of {}", + module.num_imported_globals, self.imported_globals + )); + } + + if module.types.len() > self.types as usize { + return Err(format!( + "defined types count of {} exceeds the limit of {}", + module.types.len(), + self.types + )); + } + + let functions = module.functions.len() - module.num_imported_funcs; + if functions > self.functions as usize { + return Err(format!( + "defined functions count of {} exceeds the limit of {}", + functions, self.functions + )); + } + + let tables = module.table_plans.len() - module.num_imported_tables; + if tables > self.tables as usize { + return Err(format!( + "defined tables count of {} exceeds the limit of {}", + tables, self.tables + )); + } + + let memories = module.memory_plans.len() - module.num_imported_memories; + if memories > self.memories as usize { + return Err(format!( + 
"defined memories count of {} exceeds the limit of {}", + memories, self.memories + )); + } + + let globals = module.globals.len() - module.num_imported_globals; + if globals > self.globals as usize { + return Err(format!( + "defined globals count of {} exceeds the limit of {}", + globals, self.globals + )); + } + + for (i, plan) in module.table_plans.values().as_slice()[module.num_imported_tables..] + .iter() + .enumerate() + { + if plan.table.minimum > self.table_elements { + return Err(format!( + "table index {} has a minimum element size of {} which exceeds the limit of {}", + i, plan.table.minimum, self.table_elements + )); + } + } + + for (i, plan) in module.memory_plans.values().as_slice()[module.num_imported_memories..] + .iter() + .enumerate() + { + if plan.memory.minimum > self.memory_pages { + return Err(format!( + "memory index {} has a minimum page size of {} which exceeds the limit of {}", + i, plan.memory.minimum, self.memory_pages + )); + } + + if let MemoryStyle::Dynamic = plan.style { + return Err(format!( + "memory index {} has an unsupported dynamic memory plan style", + i, + )); + } + } + + Ok(()) + } +} + +impl Default for ModuleLimits { + fn default() -> Self { + // See doc comments for `ModuleLimits` for these default values + Self { + imported_functions: 1000, + imported_tables: 0, + imported_memories: 0, + imported_globals: 0, + types: 100, + functions: 10000, + tables: 1, + memories: 1, + globals: 10, + table_elements: 10000, + memory_pages: 160, + } + } +} + +/// Represents the limits placed on instances by the pooling instance allocator. +#[derive(Debug, Copy, Clone)] +pub struct InstanceLimits { + /// The maximum number of concurrent instances supported (default is 1000). + pub count: u32, + + /// The maximum reserved host address space size to use for each instance in bytes. + /// + /// Note: this value has important performance ramifications. + /// + /// On 64-bit platforms, the default for this value will be 6 GiB. 
A value of less than 4 GiB will + /// force runtime bounds checking for memory accesses and thus will negatively impact performance. + /// Any value above 4 GiB will start eliding bounds checks provided the `offset` of the memory access is + /// less than (`address_space_size` - 4 GiB). A value of 8 GiB will completely elide *all* bounds + /// checks; consequently, 8 GiB will be the maximum supported value. The default of 6 GiB reserves + /// less host address space for each instance, but a memory access with an offset above 2 GiB will incur + /// runtime bounds checks. + /// + /// On 32-bit platforms, the default for this value will be 10 MiB. A 32-bit host has very limited address + /// space to reserve for a lot of concurrent instances. As a result, runtime bounds checking will be used + /// for all memory accesses. For better runtime performance, a 64-bit host is recommended. + /// + /// This value will be rounded up by the WebAssembly page size (64 KiB). + pub address_space_size: u64, +} + +impl Default for InstanceLimits { + fn default() -> Self { + // See doc comments for `InstanceLimits` for these default values + Self { + count: 1000, + #[cfg(target_pointer_width = "32")] + address_space_size: 0xA00000, + #[cfg(target_pointer_width = "64")] + address_space_size: 0x180000000, + } + } +} + +/// The allocation strategy to use for the pooling instance allocator. +#[derive(Debug, Clone)] +pub enum PoolingAllocationStrategy { + /// Allocate from the next available instance. + NextAvailable, + /// Allocate from a random available instance. 
+ Random, +} + +impl PoolingAllocationStrategy { + fn next(&self, free_count: usize) -> usize { + debug_assert!(free_count > 0); + + match self { + Self::NextAvailable => free_count - 1, + Self::Random => rand::thread_rng().gen_range(0, free_count), + } + } +} + +impl Default for PoolingAllocationStrategy { + fn default() -> Self { + Self::NextAvailable + } +} + +struct BasePointerIterator { + base: *mut u8, + current: usize, + num: usize, + size: usize, +} + +impl BasePointerIterator { + fn new(base: *mut u8, num: usize, size: usize) -> Self { + Self { + base, + current: 0, + num, + size, + } + } +} + +impl Iterator for BasePointerIterator { + type Item = *mut u8; + + fn next(&mut self) -> Option { + let current = self.current; + if current == self.num { + return None; + } + + self.current += 1; + + Some(unsafe { self.base.add(current * self.size) }) + } + + fn size_hint(&self) -> (usize, Option) { + let remaining = self.num - self.current; + (remaining, Some(remaining)) + } +} + +/// Represents the memory of the pooling instance allocator. +/// +/// The memory is divided into two parts: an array of instances and an array of fiber stacks. +/// +/// An instance consists of space for a maximal `Instance` structure with page-alignment padding, followed by space +/// for the Wasm tables with page-alignment padding, and then followed by the space for the Wasm memories. +/// +/// A free list is maintained so that the next available instance is quickly allocated. +/// +/// The instance layout (for the default limits) looks something like this: +/// +/// ```text +/// 0x00000: +-----------------------+ <-- Start of `Instance` structure +/// 0x00000: | .module | +/// 0x00008: | .offsets | +/// 0x0000X: | ... | +/// 0x000XX: | .vmctx | +/// 0x44XXX: ~ ..alignment padding.. ~ +/// 0x45000: +-----------------------+ <-- Start of Wasm tables +/// 0x45xxx: | | +/// 0x46XXX: | ...table elements... | +/// 0x47XXX: | | +/// 0x58XXX: ~ ..alignment padding.. 
~ +/// 0x59000: +-----------------------+ <-- Start of Wasm memories (with guard pages) +/// 0x59XXX: | | +/// 0x5AXXX: | .... memories .... | +/// 0xYYYYY: | | +/// 0xZZZZZ: +-----------------------+ +/// ``` +/// +/// Immediately following the instances is an array of fiber stacks to support async in Wasmtime. +/// +/// There are as many fiber stacks as there are instances, provided the requested stack size limit is non-zero. +/// +/// Each stack ends (i.e. lowest address) with a guard page to trap for overflow. +/// +/// Stacks are stored separately from the instances because they are allocated via a `Store` in the Wasmtime API +/// and have no relation to an `Instance`. +/// +/// Like instances, a free list of stacks will be maintained for quick stack allocation. +/// +/// On Windows, the native fiber implementation is used and no space for fiber stacks is allocated. +#[derive(Debug)] +struct PoolMemory { + mem: Mmap, + offsets: VMOffsets, + max_instances: usize, + page_size: usize, + tables_offset: usize, + table_size: usize, + memories_offset: usize, + memory_size: usize, + instance_size: usize, + max_table_elements: u32, + max_memory_pages: u32, + stacks_offset: usize, + stack_size: usize, +} + +impl PoolMemory { + fn new( + module_limits: &ModuleLimits, + instance_limits: &InstanceLimits, + stack_size: usize, + ) -> Result { + let page_size = region::page::size(); + + let memory_size = usize::try_from(instance_limits.address_space_size) + .map_err(|_| "address space size exceeds addressible memory".to_string())?; + + debug_assert!(memory_size % page_size == 0); + + // On Windows, don't allocate any fiber stacks as native fibers are always used + // Add a page to the stack size for the guard page when using fiber stacks + let stack_size = if cfg!(windows) || stack_size == 0 { + 0 + } else { + round_up_to_pow2(stack_size, page_size) + .checked_add(page_size) + .ok_or_else(|| "stack size exceeds addressible memory".to_string())? 
+ }; + + // Calculate the maximum size of an Instance structure given the limits + let offsets = VMOffsets { + pointer_size: std::mem::size_of::<*const u8>() as u8, + num_signature_ids: module_limits.types, + num_imported_functions: module_limits.imported_functions, + num_imported_tables: module_limits.imported_tables, + num_imported_memories: module_limits.imported_memories, + num_imported_globals: module_limits.imported_globals, + num_defined_functions: module_limits.functions, + num_defined_tables: module_limits.tables, + num_defined_memories: module_limits.memories, + num_defined_globals: module_limits.globals, + }; + + let tables_offset = round_up_to_pow2( + mem::size_of::() + .checked_add(offsets.size_of_vmctx() as usize) + .ok_or_else(|| "instance size exceeds addressible memory".to_string())?, + page_size, + ); + + let table_size = round_up_to_pow2( + max_table_element_size() + .checked_mul(module_limits.table_elements as usize) + .ok_or_else(|| "instance size exceeds addressible memory".to_string())?, + page_size, + ); + + let total_tables_size = (module_limits.tables as usize) + .checked_mul(table_size) + .ok_or_else(|| "instance size exceeds addressible memory".to_string())?; + + let memories_offset = tables_offset + .checked_add(total_tables_size) + .ok_or_else(|| "instance size exceeds addressible memory".to_string())?; + + let total_memories_size = (module_limits.memories as usize) + .checked_mul(memory_size) + .ok_or_else(|| "instance size exceeds addressible memory".to_string())?; + + let instance_size = memories_offset + .checked_add(total_memories_size) + .ok_or_else(|| "instance size exceeds addressible memory".to_string())?; + + let max_instances = instance_limits.count as usize; + let stacks_offset = max_instances + .checked_mul(instance_size) + .ok_or_else(|| "total instance size exceeds addressible memory".to_string())?; + + // Add the stacks to the allocation size if requested + let allocation_size = + if stack_size == 0 { + stacks_offset + 
} else { + stacks_offset + .checked_add(max_instances.checked_mul(stack_size).ok_or_else(|| { + "total size of stacks exceeds addressible memory".to_string() + })?) + .ok_or_else(|| "memory pool size exceeds addressible memory".to_string())? + }; + + let mem = Self { + mem: Mmap::accessible_reserved(0, allocation_size) + .map_err(|e| format!("failed to allocate pool memory: {}", e))?, + offsets, + max_instances, + page_size, + tables_offset, + table_size, + memories_offset, + memory_size, + instance_size, + max_table_elements: module_limits.table_elements, + max_memory_pages: module_limits.memory_pages, + stacks_offset, + stack_size, + }; + + // Use a default module to initialize the instances to start + let module = Arc::new(Module::default()); + for i in 0..instance_limits.count as usize { + unsafe { + mem.initialize_instance(i, &module)?; + } + } + + Ok(mem) + } + + unsafe fn initialize_instance(&self, index: usize, module: &Arc) -> Result<(), String> { + debug_assert!(index < self.max_instances); + + let instance_ptr = self.mem.as_mut_ptr().add(index * self.instance_size); + + // Make all pages of the maximal `Instance` structure accessible + if !make_accessible(instance_ptr, self.tables_offset) { + return Err("failed to make instance memory accessible".into()); + } + + // Write a default instance with preallocated memory/table map storage to the ptr + std::ptr::write( + instance_ptr as _, + Instance { + module: module.clone(), + offsets: self.offsets, + memories: PrimaryMap::with_capacity(self.offsets.num_defined_memories as usize), + tables: PrimaryMap::with_capacity(self.offsets.num_defined_tables as usize), + dropped_elements: RefCell::new(EntitySet::new()), + dropped_data: RefCell::new(EntitySet::new()), + host_state: Box::new(()), + vmctx: VMContext {}, + }, + ); + + Ok(()) + } + + unsafe fn drop_instance(&self, index: usize) { + debug_assert!(index < self.max_instances); + let instance_ptr = self.mem.as_mut_ptr().add(index * self.instance_size); + 
std::ptr::drop_in_place(instance_ptr as *mut Instance); + } + + unsafe fn get_instance(&self, index: usize) -> PoolInstance { + debug_assert!(index < self.max_instances); + let instance_ptr = self.mem.as_mut_ptr().add(index * self.instance_size); + let tables_base = instance_ptr.add(self.tables_offset); + let memories_base = instance_ptr.add(self.memories_offset); + + PoolInstance { + ptr: instance_ptr as _, + memories: BasePointerIterator::new( + memories_base, + self.offsets.num_defined_memories as usize, + self.memory_size, + ), + max_pages: self.max_memory_pages, + tables: BasePointerIterator::new( + tables_base, + self.offsets.num_defined_tables as usize, + self.table_size, + ), + max_elements: self.max_table_elements, + page_size: self.page_size, + } + } + + unsafe fn get_instance_index(&self, instance: *mut Instance) -> usize { + let instance_addr = instance as usize; + let base = self.mem.as_mut_ptr() as usize; + + debug_assert!(instance_addr >= base && instance_addr < base + self.stacks_offset); + debug_assert!((instance_addr - base) % self.instance_size == 0); + + let index = (instance_addr - base) / self.instance_size; + debug_assert!(index < self.max_instances); + index + } + + #[allow(dead_code)] + unsafe fn get_stack(&self, index: usize) -> Result<*mut u8, String> { + debug_assert!(index < self.max_instances); + + // Remove the guard page from the size + let stack_size = self.stack_size - self.page_size; + let bottom_of_stack = self + .mem + .as_mut_ptr() + .add(self.stacks_offset + (index * self.stack_size) + self.page_size); + + // Make the stack accessible (excluding the guard page) + if !make_accessible(bottom_of_stack, stack_size) { + return Err("failed to make instance memory accessible".into()); + } + + // The top of the stack should be returned + Ok(bottom_of_stack.add(stack_size)) + } + + #[allow(dead_code)] + unsafe fn decommit_stack(&self, top_of_stack: *mut u8) -> usize { + // Remove the guard page from the size + let stack_size = 
self.stack_size - self.page_size; + let bottom_of_stack = top_of_stack.sub(stack_size); + + let base = self.mem.as_mut_ptr() as usize; + let stacks_base = base + self.stacks_offset; + let start_of_stack = (bottom_of_stack as usize) - self.page_size; + + debug_assert!(start_of_stack >= stacks_base && start_of_stack < (base + self.mem.len())); + debug_assert!((start_of_stack - stacks_base) % self.stack_size == 0); + + let index = (start_of_stack - stacks_base) / self.stack_size; + debug_assert!(index < self.max_instances); + + decommit(bottom_of_stack, stack_size); + + index + } +} + +struct PoolInstance { + ptr: *mut Instance, + memories: BasePointerIterator, + max_pages: u32, + tables: BasePointerIterator, + max_elements: u32, + page_size: usize, +} + +impl PoolInstance { + pub unsafe fn try_into_handle( + mut self, + req: InstanceAllocationRequest, + ) -> Result { + debug_assert!(!req.externref_activations_table.is_null()); + debug_assert!(!req.stack_map_registry.is_null()); + + let instance = &mut *self.ptr; + + instance.module = req.module; + instance.offsets = VMOffsets::new( + std::mem::size_of::<*const u8>() as u8, + instance.module.as_ref(), + ); + instance.host_state = req.host_state; + + self.set_instance_memories()?; + self.set_instance_tables()?; + + initialize_vmcontext( + instance, + req.imports.functions, + req.imports.tables, + req.imports.memories, + req.imports.globals, + req.finished_functions, + req.lookup_shared_signature, + req.interrupts, + req.externref_activations_table, + req.stack_map_registry, + &|index| instance.memories[index].vmmemory(), + &|index| instance.tables[index].vmtable(), + ); + + Ok(InstanceHandle::new(self.ptr)) + } + + unsafe fn set_instance_memories(&mut self) -> Result<(), InstantiationError> { + let instance = &mut *self.ptr; + let module = instance.module.as_ref(); + + instance.memories.clear(); + for plan in + (&module.memory_plans.values().as_slice()[module.num_imported_memories..]).iter() + { + 
instance.memories.push( + Memory::new_static( + plan, + self.memories.next().unwrap(), + self.max_pages, + Some(make_accessible), + ) + .map_err(InstantiationError::Resource)?, + ); + } + + let mut dropped_data = instance.dropped_data.borrow_mut(); + dropped_data.clear(); + dropped_data.resize(module.passive_data.len()); + + Ok(()) + } + + unsafe fn set_instance_tables(&mut self) -> Result<(), InstantiationError> { + let instance = &mut *self.ptr; + let module = instance.module.as_ref(); + + instance.tables.clear(); + for plan in (&module.table_plans.values().as_slice()[module.num_imported_tables..]).iter() { + let base = self.tables.next().unwrap(); + + // Make the table data accessible + if !make_accessible(base, self.max_elements as usize * max_table_element_size()) { + return Err(InstantiationError::Resource( + "failed to make instance memory accessible".into(), + )); + } + + instance + .tables + .push(Table::new_static(plan, base, self.max_elements)); + } + + let mut dropped_elements = instance.dropped_elements.borrow_mut(); + dropped_elements.clear(); + dropped_elements.resize(module.passive_elements.len()); + + Ok(()) + } + + unsafe fn decommit(self) { + // Decommit the used Wasm memory + let instance = &mut *self.ptr; + for (mem, base) in instance.memories.values().zip(self.memories) { + let size = (mem.size() * WASM_PAGE_SIZE) as usize; + if size > 0 { + decommit(base, size); + } + } + + // Decommit the used table space + let table_element_size = max_table_element_size(); + for (table, base) in instance.tables.values().zip(self.tables) { + let size = round_up_to_pow2(table.size() as usize * table_element_size, self.page_size); + if size > 0 { + decommit(base, size); + } + } + } +} + +impl Drop for PoolMemory { + fn drop(&mut self) { + unsafe { + for i in 0..self.max_instances { + self.drop_instance(i); + } + } + } +} + +/// Implements the pooling instance allocator. 
+/// +/// This allocator reserves a contiguous region of memory for storing instances, memories, tables, and fiber stacks. +#[derive(Debug)] +pub struct PoolingInstanceAllocator { + strategy: PoolingAllocationStrategy, + module_limits: ModuleLimits, + instance_limits: InstanceLimits, + mem: PoolMemory, + free_instances: Mutex>, + #[cfg(feature = "async")] + free_stacks: Mutex>, +} + +impl PoolingInstanceAllocator { + /// Creates a new pooling instance allocator with the given strategy and limits. + pub fn new( + strategy: PoolingAllocationStrategy, + module_limits: ModuleLimits, + mut instance_limits: InstanceLimits, + stack_size: usize, + ) -> Result { + if instance_limits.count == 0 { + return Err("the instance count limit cannot be zero".into()); + } + + if cfg!(not(feature = "async")) && stack_size != 0 { + return Err("cannot allocate stacks when the async feature is not enabled".into()); + } + + // Round the instance address space size to the nearest Wasm page size + instance_limits.address_space_size = u64::try_from(round_up_to_pow2( + usize::try_from(instance_limits.address_space_size).unwrap(), + WASM_PAGE_SIZE as usize, + )) + .unwrap(); + + // Cap the instance address space size to 8 GiB (maximum 4 GiB address space + 4 GiB of guard region) + instance_limits.address_space_size = min(instance_limits.address_space_size, 0x200000000); + + // The maximum module memory page count cannot exceed 65536 pages + if module_limits.memory_pages > 0x10000 { + return Err(format!( + "module memory page limit of {} exceeds the maximum of 65536", + module_limits.memory_pages + )); + } + + // The maximum module memory page count cannot exceed the instance address space size + if (module_limits.memory_pages * WASM_PAGE_SIZE) as u64 > instance_limits.address_space_size + { + return Err(format!( + "module memory page limit of {} pages exeeds the instance address space size limit of {} bytes", + module_limits.memory_pages, + instance_limits.address_space_size + )); + } + + let 
mem = PoolMemory::new(&module_limits, &instance_limits, stack_size)?; + let free_instances: Vec = (0..mem.max_instances).collect(); + + Ok(Self { + strategy, + module_limits, + instance_limits, + mem, + #[cfg(feature = "async")] + free_stacks: Mutex::new(free_instances.clone()), + free_instances: Mutex::new(free_instances), + }) + } +} + +impl Drop for PoolingInstanceAllocator { + fn drop(&mut self) { + assert_eq!( + self.free_instances.lock().unwrap().len(), + self.mem.max_instances, + "expected all instances to be deallocated" + ); + + #[cfg(feature = "async")] + assert_eq!( + self.free_stacks.lock().unwrap().len(), + self.mem.max_instances, + "expected all stacks to be deallocated" + ); + } +} + +unsafe impl InstanceAllocator for PoolingInstanceAllocator { + fn validate_module(&self, translation: &ModuleTranslation) -> Result<(), String> { + self.module_limits.validate_module(&translation.module) + } + + fn adjust_tunables(&self, tunables: &mut Tunables) { + let address_space_size = self.instance_limits.address_space_size; + + // For address spaces larger than 4 GiB, use a guard region to elide + if address_space_size >= 0x100000000 { + tunables.static_memory_bound = 0x10000; // in Wasm pages + tunables.static_memory_offset_guard_size = address_space_size - 0x100000000; + } else { + tunables.static_memory_bound = + u32::try_from(address_space_size).unwrap() / WASM_PAGE_SIZE; + tunables.static_memory_offset_guard_size = 0; + } + + // Treat the static memory bound as the maximum for unbounded Wasm memories + // Because we guarantee a module cannot compile unless it fits in the limits of + // the pool allocator, this ensures all memories are treated as static (i.e. immovable). 
+ tunables.static_memory_bound_is_maximum = true; + } + + unsafe fn allocate( + &self, + req: InstanceAllocationRequest, + ) -> Result { + let index = { + let mut free_instances = self.free_instances.lock().unwrap(); + if free_instances.is_empty() { + return Err(InstantiationError::Limit(self.instance_limits.count)); + } + let free_index = self.strategy.next(free_instances.len()); + free_instances.swap_remove(free_index) + }; + + self.mem.get_instance(index).try_into_handle(req) + } + + unsafe fn initialize( + &self, + handle: &InstanceHandle, + is_bulk_memory: bool, + data_initializers: &Arc<[OwnedDataInitializer]>, + ) -> Result<(), InstantiationError> { + // TODO: refactor this implementation + + // Check initializer bounds before initializing anything. Only do this + // when bulk memory is disabled, since the bulk memory proposal changes + // instantiation such that the intermediate results of failed + // initializations are visible. + if !is_bulk_memory { + OnDemandInstanceAllocator::check_table_init_bounds(handle.instance())?; + OnDemandInstanceAllocator::check_memory_init_bounds( + handle.instance(), + data_initializers.as_ref(), + )?; + } + + // Apply fallible initializers. Note that this can "leak" state even if + // it fails. 
+ OnDemandInstanceAllocator::initialize_tables(handle.instance())?; + OnDemandInstanceAllocator::initialize_memories( + handle.instance(), + data_initializers.as_ref(), + )?; + + Ok(()) + } + + unsafe fn deallocate(&self, handle: &InstanceHandle) { + let index = self.mem.get_instance_index(handle.instance); + let instance = self.mem.get_instance(index); + + instance.decommit(); + + // Put the instance back on the free list + { + self.free_instances.lock().unwrap().push(index); + } + } + + #[cfg(all(feature = "async", windows))] + fn allocate_fiber_stack(&self) -> Result<*mut u8, FiberStackError> { + // Custom fiber stacks are not supported on Windows + Err(FiberStackError::NotSupported) + } + + #[cfg(all(feature = "async", not(windows)))] + fn allocate_fiber_stack(&self) -> Result<*mut u8, FiberStackError> { + let index = { + let mut free_stacks = self.free_stacks.lock().unwrap(); + if free_stacks.is_empty() { + return Err(FiberStackError::Limit(self.instance_limits.count)); + } + let free_index = self.strategy.next(free_stacks.len()); + free_stacks.swap_remove(free_index) + }; + + unsafe { self.mem.get_stack(index).map_err(FiberStackError::Resource) } + } + + #[cfg(all(feature = "async", windows))] + unsafe fn deallocate_fiber_stack(&self, _stack: *mut u8) { + // This should never be called as `allocate_fiber_stack` never returns success on Windows + unreachable!() + } + + #[cfg(all(feature = "async", not(windows)))] + unsafe fn deallocate_fiber_stack(&self, stack: *mut u8) { + let index = self.mem.decommit_stack(stack); + + { + self.free_stacks.lock().unwrap().push(index); + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use wasmtime_environ::{ + entity::EntityRef, + ir::Type, + wasm::{Global, GlobalInit, Memory, SignatureIndex, Table, TableElementType, WasmType}, + MemoryPlan, ModuleType, TablePlan, TableStyle, + }; + + #[test] + fn test_module_imported_functions_limit() { + let limits = ModuleLimits { + imported_functions: 0, + ..Default::default() + }; 
+ + let mut module = Module::default(); + + module.functions.push(SignatureIndex::new(0)); + assert_eq!(limits.validate_module(&module), Ok(())); + + module.num_imported_funcs = 1; + assert_eq!( + limits.validate_module(&module), + Err("imported function count of 1 exceeds the limit of 0".into()) + ); + } + + #[test] + fn test_module_imported_tables_limit() { + let limits = ModuleLimits { + imported_tables: 0, + ..Default::default() + }; + + let mut module = Module::default(); + + module.table_plans.push(TablePlan { + style: TableStyle::CallerChecksSignature, + table: Table { + wasm_ty: WasmType::FuncRef, + ty: TableElementType::Func, + minimum: 0, + maximum: None, + }, + }); + + assert_eq!(limits.validate_module(&module), Ok(())); + + module.num_imported_tables = 1; + assert_eq!( + limits.validate_module(&module), + Err("imported tables count of 1 exceeds the limit of 0".into()) + ); + } + + #[test] + fn test_module_imported_memories_limit() { + let limits = ModuleLimits { + imported_memories: 0, + ..Default::default() + }; + + let mut module = Module::default(); + + module.memory_plans.push(MemoryPlan { + style: MemoryStyle::Static { bound: 0 }, + memory: Memory { + minimum: 0, + maximum: None, + shared: false, + }, + offset_guard_size: 0, + }); + + assert_eq!(limits.validate_module(&module), Ok(())); + + module.num_imported_memories = 1; + assert_eq!( + limits.validate_module(&module), + Err("imported memories count of 1 exceeds the limit of 0".into()) + ); + } + + #[test] + fn test_module_imported_globals_limit() { + let limits = ModuleLimits { + imported_globals: 0, + ..Default::default() + }; + + let mut module = Module::default(); + + module.globals.push(Global { + wasm_ty: WasmType::I32, + ty: Type::int(32).unwrap(), + mutability: false, + initializer: GlobalInit::I32Const(0), + }); + + assert_eq!(limits.validate_module(&module), Ok(())); + + module.num_imported_globals = 1; + assert_eq!( + limits.validate_module(&module), + Err("imported globals count of 1 
exceeds the limit of 0".into()) + ); + } + + #[test] + fn test_module_defined_types_limit() { + let limits = ModuleLimits { + types: 0, + ..Default::default() + }; + + let mut module = Module::default(); + assert_eq!(limits.validate_module(&module), Ok(())); + + module + .types + .push(ModuleType::Function(SignatureIndex::new(0))); + assert_eq!( + limits.validate_module(&module), + Err("defined types count of 1 exceeds the limit of 0".into()) + ); + } + + #[test] + fn test_module_defined_functions_limit() { + let limits = ModuleLimits { + functions: 0, + ..Default::default() + }; + + let mut module = Module::default(); + assert_eq!(limits.validate_module(&module), Ok(())); + + module.functions.push(SignatureIndex::new(0)); + assert_eq!( + limits.validate_module(&module), + Err("defined functions count of 1 exceeds the limit of 0".into()) + ); + } + + #[test] + fn test_module_defined_tables_limit() { + let limits = ModuleLimits { + tables: 0, + ..Default::default() + }; + + let mut module = Module::default(); + assert_eq!(limits.validate_module(&module), Ok(())); + + module.table_plans.push(TablePlan { + style: TableStyle::CallerChecksSignature, + table: Table { + wasm_ty: WasmType::FuncRef, + ty: TableElementType::Func, + minimum: 0, + maximum: None, + }, + }); + assert_eq!( + limits.validate_module(&module), + Err("defined tables count of 1 exceeds the limit of 0".into()) + ); + } + + #[test] + fn test_module_defined_memories_limit() { + let limits = ModuleLimits { + memories: 0, + ..Default::default() + }; + + let mut module = Module::default(); + assert_eq!(limits.validate_module(&module), Ok(())); + + module.memory_plans.push(MemoryPlan { + style: MemoryStyle::Static { bound: 0 }, + memory: Memory { + minimum: 0, + maximum: None, + shared: false, + }, + offset_guard_size: 0, + }); + assert_eq!( + limits.validate_module(&module), + Err("defined memories count of 1 exceeds the limit of 0".into()) + ); + } + + #[test] + fn test_module_defined_globals_limit() { + 
let limits = ModuleLimits { + globals: 0, + ..Default::default() + }; + + let mut module = Module::default(); + assert_eq!(limits.validate_module(&module), Ok(())); + + module.globals.push(Global { + wasm_ty: WasmType::I32, + ty: Type::int(32).unwrap(), + mutability: false, + initializer: GlobalInit::I32Const(0), + }); + assert_eq!( + limits.validate_module(&module), + Err("defined globals count of 1 exceeds the limit of 0".into()) + ); + } + + #[test] + fn test_module_table_minimum_elements_limit() { + let limits = ModuleLimits { + tables: 1, + table_elements: 10, + ..Default::default() + }; + + let mut module = Module::default(); + module.table_plans.push(TablePlan { + style: TableStyle::CallerChecksSignature, + table: Table { + wasm_ty: WasmType::FuncRef, + ty: TableElementType::Func, + minimum: 11, + maximum: None, + }, + }); + assert_eq!( + limits.validate_module(&module), + Err( + "table index 0 has a minimum element size of 11 which exceeds the limit of 10" + .into() + ) + ); + } + + #[test] + fn test_module_memory_minimum_size_limit() { + let limits = ModuleLimits { + memories: 1, + memory_pages: 5, + ..Default::default() + }; + + let mut module = Module::default(); + module.memory_plans.push(MemoryPlan { + style: MemoryStyle::Static { bound: 0 }, + memory: Memory { + minimum: 6, + maximum: None, + shared: false, + }, + offset_guard_size: 0, + }); + assert_eq!( + limits.validate_module(&module), + Err("memory index 0 has a minimum page size of 6 which exceeds the limit of 5".into()) + ); + } + + #[test] + fn test_module_with_dynamic_memory_style() { + let limits = ModuleLimits { + memories: 1, + memory_pages: 5, + ..Default::default() + }; + + let mut module = Module::default(); + module.memory_plans.push(MemoryPlan { + style: MemoryStyle::Dynamic, + memory: Memory { + minimum: 1, + maximum: None, + shared: false, + }, + offset_guard_size: 0, + }); + assert_eq!( + limits.validate_module(&module), + Err("memory index 0 has an unsupported dynamic memory plan 
style".into()) + ); + } + + #[test] + fn test_next_available_allocation_strategy() { + let strat = PoolingAllocationStrategy::NextAvailable; + assert_eq!(strat.next(10), 9); + assert_eq!(strat.next(5), 4); + assert_eq!(strat.next(1), 0); + } + + #[test] + fn test_random_allocation_strategy() { + let strat = PoolingAllocationStrategy::Random; + assert!(strat.next(100) < 100); + assert_eq!(strat.next(1), 0); + } + + #[test] + fn test_base_pointer_iterator() { + let mut iter = BasePointerIterator::new(std::ptr::null_mut(), 5, 3); + + assert_eq!(iter.next(), Some(0usize as _)); + assert_eq!(iter.next(), Some(3usize as _)); + assert_eq!(iter.next(), Some(6usize as _)); + assert_eq!(iter.next(), Some(9usize as _)); + assert_eq!(iter.next(), Some(12usize as _)); + assert_eq!(iter.next(), None); + + let mut iter = BasePointerIterator::new(std::ptr::null_mut(), 0, 10); + assert_eq!(iter.next(), None); + } + + #[cfg(target_pointer_width = "64")] + #[test] + fn test_pool_layout() -> Result<(), String> { + let module_limits = ModuleLimits::default(); + let instance_limits = InstanceLimits { + count: 1, + address_space_size: WASM_PAGE_SIZE as u64, + }; + + let mem = PoolMemory::new(&module_limits, &instance_limits, 4096)?; + + // TODO: make this test work for platforms with page sizes other than 4 KiB? 
+ assert_eq!(mem.page_size, 4096); + + let expected_instance_size = 69 * mem.page_size; + let expected_tables_size = 20 * mem.page_size; + let expected_memories_size = WASM_PAGE_SIZE as usize; + let expected_stack_size = 2 * mem.page_size; + let expected_instance_entry_size = + expected_instance_size + expected_tables_size + expected_memories_size; + + assert_eq!(mem.tables_offset, expected_instance_size); + assert_eq!( + mem.table_size, + round_up_to_pow2(8 * module_limits.table_elements as usize, mem.page_size) + ); + assert_eq!( + mem.memories_offset, + expected_instance_size + expected_tables_size + ); + assert_eq!(mem.memory_size, expected_memories_size); + assert_eq!(mem.instance_size, expected_instance_entry_size); + assert_eq!(mem.max_table_elements, module_limits.table_elements); + assert_eq!(mem.max_memory_pages, module_limits.memory_pages); + assert_eq!( + mem.mem.len(), + 1 * expected_instance_entry_size + expected_stack_size + ); + + unsafe { + for i in 0..instance_limits.count as usize { + let instance = mem.get_instance(i); + + assert_eq!( + instance.ptr as usize - mem.mem.as_ptr() as usize, + i * expected_instance_entry_size + ); + assert_eq!(instance.max_elements, module_limits.table_elements); + assert_eq!(instance.max_pages, module_limits.memory_pages); + + let table_base = (instance.ptr as *mut u8).add(expected_instance_size); + let memories_base = table_base.add(expected_tables_size); + + let mut count = 0; + for (i, base) in instance.tables.enumerate() { + assert_eq!(base, table_base.add(i * mem.table_size)); + count += 1; + } + + assert_eq!(count, module_limits.tables); + + let mut count = 0; + for (i, base) in instance.memories.enumerate() { + assert_eq!(base, memories_base.add(i * mem.memory_size)); + count += 1; + } + + assert_eq!(count, module_limits.memories); + + assert_eq!(mem.get_instance_index(instance.ptr), i); + + let stack = mem.get_stack(i)?; + assert_eq!( + stack as usize - mem.stacks_offset - mem.mem.as_ptr() as usize, + (i + 
1) * expected_stack_size + ); + } + } + + Ok(()) + } + + #[test] + fn test_pooling_allocator_with_zero_instance_count() { + assert_eq!( + PoolingInstanceAllocator::new( + PoolingAllocationStrategy::Random, + ModuleLimits::default(), + InstanceLimits { + count: 0, + ..Default::default() + }, + 4096 + ) + .expect_err("expected a failure constructing instance allocator"), + "the instance count limit cannot be zero" + ); + } + + #[test] + fn test_pooling_allocator_with_memory_pages_exeeded() { + assert_eq!( + PoolingInstanceAllocator::new( + PoolingAllocationStrategy::Random, + ModuleLimits { + memory_pages: 0x10001, + ..Default::default() + }, + InstanceLimits { + count: 1, + address_space_size: 1, + }, + 4096 + ) + .expect_err("expected a failure constructing instance allocator"), + "module memory page limit of 65537 exceeds the maximum of 65536" + ); + } + + #[test] + fn test_pooling_allocator_with_address_space_exeeded() { + assert_eq!( + PoolingInstanceAllocator::new( + PoolingAllocationStrategy::Random, + ModuleLimits { + memory_pages: 2, + ..Default::default() + }, + InstanceLimits { + count: 1, + address_space_size: 1, + }, + 4096, + ) + .expect_err("expected a failure constructing instance allocator"), + "module memory page limit of 2 pages exeeds the instance address space size limit of 65536 bytes" + ); + } + + #[cfg_attr(target_arch = "aarch64", ignore)] // https://github.com/bytecodealliance/wasmtime/pull/2518#issuecomment-747280133 + #[cfg(unix)] + #[test] + fn test_stack_zeroed() -> Result<(), String> { + let allocator = PoolingInstanceAllocator::new( + PoolingAllocationStrategy::NextAvailable, + ModuleLimits { + imported_functions: 0, + types: 0, + functions: 0, + tables: 0, + memories: 0, + globals: 0, + table_elements: 0, + memory_pages: 0, + ..Default::default() + }, + InstanceLimits { + count: 1, + address_space_size: 1, + }, + 4096, + )?; + + unsafe { + let first = allocator + .allocate_fiber_stack() + .map_err(|e| format!("failed to allocate 
stack: {}", e))?; + // The stack pointer is at the top, so decrement it first + *first.sub(1) = 1; + + allocator.deallocate_fiber_stack(first); + + let second = allocator + .allocate_fiber_stack() + .map_err(|e| format!("failed to allocate stack: {}", e))?; + assert_eq!(first, second); + + // The stack pointer is at the top, so decrement it first + assert_eq!(*second.sub(1), 0); + + allocator.deallocate_fiber_stack(second); + } + + Ok(()) + } +} diff --git a/crates/runtime/src/instance/allocator/pooling/linux.rs b/crates/runtime/src/instance/allocator/pooling/linux.rs new file mode 100644 index 000000000000..d6892f5630bc --- /dev/null +++ b/crates/runtime/src/instance/allocator/pooling/linux.rs @@ -0,0 +1,15 @@ +pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool { + region::protect(addr, len, region::Protection::READ_WRITE).is_ok() +} + +pub unsafe fn decommit(addr: *mut u8, len: usize) { + region::protect(addr, len, region::Protection::NONE).unwrap(); + + // On Linux, this is enough to cause the kernel to initialize the pages to 0 on next access + assert_eq!( + libc::madvise(addr as _, len, libc::MADV_DONTNEED), + 0, + "madvise failed to mark pages as missing: {}", + std::io::Error::last_os_error() + ); +} diff --git a/crates/runtime/src/instance/allocator/pooling/unix.rs b/crates/runtime/src/instance/allocator/pooling/unix.rs new file mode 100644 index 000000000000..a3d3da047508 --- /dev/null +++ b/crates/runtime/src/instance/allocator/pooling/unix.rs @@ -0,0 +1,19 @@ +pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool { + region::protect(addr, len, region::Protection::READ_WRITE).is_ok() +} + +pub unsafe fn decommit(addr: *mut u8, len: usize) { + assert_eq!( + libc::mmap( + addr as _, + len, + libc::PROT_NONE, + libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_FIXED, + -1, + 0, + ) as *mut u8, + addr, + "mmap failed to remap pages: {}", + std::io::Error::last_os_error() + ); +} diff --git 
a/crates/runtime/src/instance/allocator/pooling/windows.rs b/crates/runtime/src/instance/allocator/pooling/windows.rs new file mode 100644 index 000000000000..3eef9043e569 --- /dev/null +++ b/crates/runtime/src/instance/allocator/pooling/windows.rs @@ -0,0 +1,15 @@ +use winapi::um::memoryapi::{VirtualAlloc, VirtualFree}; +use winapi::um::winnt::{MEM_COMMIT, MEM_DECOMMIT, PAGE_READWRITE}; + +pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool { + // This doesn't use the `region` crate because the memory needs to be committed + !VirtualAlloc(addr as _, len, MEM_COMMIT, PAGE_READWRITE).is_null() +} + +pub unsafe fn decommit(addr: *mut u8, len: usize) { + assert!( + VirtualFree(addr as _, len, MEM_DECOMMIT) != 0, + "failed to decommit memory pages: {}", + std::io::Error::last_os_error() + ); +} diff --git a/crates/runtime/src/lib.rs b/crates/runtime/src/lib.rs index 85b3aa01963b..e7c0c278b18a 100644 --- a/crates/runtime/src/lib.rs +++ b/crates/runtime/src/lib.rs @@ -58,6 +58,11 @@ pub use crate::vmcontext::{ #[cfg(feature = "async")] pub use crate::instance::FiberStackError; +#[cfg(any(target_os = "linux", target_os = "macos", target_os = "windows"))] +pub use crate::instance::{ + InstanceLimits, ModuleLimits, PoolingAllocationStrategy, PoolingInstanceAllocator, +}; + /// Version number of this crate. pub const VERSION: &str = env!("CARGO_PKG_VERSION"); diff --git a/crates/runtime/src/memory.rs b/crates/runtime/src/memory.rs index 1b865c0eecda..e71a387ca3be 100644 --- a/crates/runtime/src/memory.rs +++ b/crates/runtime/src/memory.rs @@ -164,7 +164,7 @@ impl RuntimeLinearMemory for MmapMemory { /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code. 
fn vmmemory(&self) -> VMMemoryDefinition { - let mut mmap = self.mmap.borrow_mut(); + let mmap = self.mmap.borrow(); VMMemoryDefinition { base: mmap.alloc.as_mut_ptr(), current_length: mmap.size as usize * WASM_PAGE_SIZE as usize, @@ -177,7 +177,7 @@ enum MemoryStorage { base: *mut u8, size: Cell, maximum: u32, - make_accessible: Option bool>, + make_accessible: Option bool>, }, Dynamic(Box), } @@ -203,11 +203,13 @@ impl Memory { plan: &MemoryPlan, base: *mut u8, maximum: u32, - make_accessible: Option bool>, + make_accessible: Option bool>, ) -> Result { if plan.memory.minimum > 0 { if let Some(make_accessible) = &make_accessible { - if !make_accessible(base, plan.memory.minimum as usize * WASM_PAGE_SIZE as usize) { + if unsafe { + !make_accessible(base, plan.memory.minimum as usize * WASM_PAGE_SIZE as usize) + } { return Err("memory cannot be made accessible".into()); } } @@ -259,7 +261,7 @@ impl Memory { let len = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize; if let Some(make_accessible) = make_accessible { - if !make_accessible(unsafe { base.add(start) }, len) { + if unsafe { !make_accessible(base.add(start), len) } { return None; } } diff --git a/crates/runtime/src/mmap.rs b/crates/runtime/src/mmap.rs index 483ec5be0963..0ae2fb495639 100644 --- a/crates/runtime/src/mmap.rs +++ b/crates/runtime/src/mmap.rs @@ -234,7 +234,7 @@ impl Mmap { } /// Return the allocated memory as a mutable pointer to u8. 
- pub fn as_mut_ptr(&mut self) -> *mut u8 { + pub fn as_mut_ptr(&self) -> *mut u8 { self.ptr as *mut u8 } diff --git a/crates/runtime/src/table.rs b/crates/runtime/src/table.rs index 4a2d0bd4de0e..b45ccc4c6c86 100644 --- a/crates/runtime/src/table.rs +++ b/crates/runtime/src/table.rs @@ -66,6 +66,19 @@ enum TableElements { ExternRefs(Vec>), } +// Ideally this should be static assertion that table elements are pointer-sized +pub(crate) fn max_table_element_size() -> usize { + debug_assert_eq!( + std::mem::size_of::<*mut VMCallerCheckedAnyfunc>(), + std::mem::size_of::<*const ()>() + ); + debug_assert_eq!( + std::mem::size_of::>(), + std::mem::size_of::<*const ()>() + ); + std::mem::size_of::<*const ()>() +} + #[derive(Debug)] enum TableStorage { Static { diff --git a/crates/wasmtime/src/config.rs b/crates/wasmtime/src/config.rs index 19151600b54f..77d5c52be789 100644 --- a/crates/wasmtime/src/config.rs +++ b/crates/wasmtime/src/config.rs @@ -14,7 +14,10 @@ use wasmtime_environ::settings::{self, Configurable, SetError}; use wasmtime_environ::{isa, isa::TargetIsa, Tunables}; use wasmtime_jit::{native, CompilationStrategy, Compiler}; use wasmtime_profiling::{JitDumpAgent, NullProfilerAgent, ProfilingAgent, VTuneAgent}; -use wasmtime_runtime::{InstanceAllocator, OnDemandInstanceAllocator}; +use wasmtime_runtime::{InstanceAllocator, OnDemandInstanceAllocator, PoolingInstanceAllocator}; + +// Re-export the limit structures for the pooling allocator +pub use wasmtime_runtime::{InstanceLimits, ModuleLimits, PoolingAllocationStrategy}; /// Represents the module instance allocation strategy to use. #[derive(Clone)] @@ -26,6 +29,19 @@ pub enum InstanceAllocationStrategy { /// /// This is the default allocation strategy for Wasmtime. OnDemand, + /// The pooling instance allocation strategy. + /// + /// A pool of resources is created in advance and module instantiation reuses resources + /// from the pool. 
Resources are returned to the pool when the `Store` referencing the instance + /// is dropped. + Pooling { + /// The allocation strategy to use. + strategy: PoolingAllocationStrategy, + /// The module limits to use. + module_limits: ModuleLimits, + /// The instance limits to use. + instance_limits: InstanceLimits, + }, } impl Default for InstanceAllocationStrategy { @@ -205,6 +221,9 @@ impl Config { /// on stack overflow, a host function that overflows the stack will /// abort the process. /// + /// `max_wasm_stack` must be set prior to setting an instance allocation + /// strategy. + /// /// By default this option is 1 MiB. pub fn max_wasm_stack(&mut self, size: usize) -> Result<&mut Self> { #[cfg(feature = "async")] @@ -216,6 +235,12 @@ impl Config { bail!("wasm stack size cannot be zero"); } + if self.instance_allocator.is_some() { + bail!( + "wasm stack size cannot be modified after setting an instance allocation strategy" + ); + } + self.max_wasm_stack = size; Ok(self) } @@ -230,12 +255,20 @@ impl Config { /// close to one another; doing so may cause host functions to overflow the /// stack and abort the process. /// + /// `async_stack_size` must be set prior to setting an instance allocation + /// strategy. + /// /// By default this option is 2 MiB. #[cfg(feature = "async")] pub fn async_stack_size(&mut self, size: usize) -> Result<&mut Self> { if size < self.max_wasm_stack { bail!("async stack size cannot be less than the maximum wasm stack size"); } + if self.instance_allocator.is_some() { + bail!( + "async stack size cannot be modified after setting an instance allocation strategy" + ); + } self.async_stack_size = size; Ok(self) } @@ -577,14 +610,35 @@ impl Config { } /// Sets the instance allocation strategy to use. 
- pub fn with_instance_allocation_strategy( + pub fn with_allocation_strategy( &mut self, strategy: InstanceAllocationStrategy, - ) -> &mut Self { + ) -> Result<&mut Self> { self.instance_allocator = match strategy { InstanceAllocationStrategy::OnDemand => None, + InstanceAllocationStrategy::Pooling { + strategy, + module_limits, + instance_limits, + } => { + #[cfg(feature = "async")] + let stack_size = self.async_stack_size; + + #[cfg(not(feature = "async"))] + let stack_size = 0; + + Some(Arc::new( + PoolingInstanceAllocator::new( + strategy, + module_limits, + instance_limits, + stack_size, + ) + .map_err(|e| anyhow::anyhow!(e))?, + )) + } }; - self + Ok(self) } /// Configures the maximum size, in bytes, where a linear memory is diff --git a/tests/all/async_functions.rs b/tests/all/async_functions.rs index 2f83b62bf01a..eb3ea0961199 100644 --- a/tests/all/async_functions.rs +++ b/tests/all/async_functions.rs @@ -364,3 +364,37 @@ fn fuel_eventually_finishes() { let instance = Instance::new_async(&store, &module, &[]); run(instance).unwrap(); } + +#[test] +fn async_with_pooling_stacks() { + let mut config = Config::new(); + config + .with_allocation_strategy(InstanceAllocationStrategy::Pooling { + strategy: PoolingAllocationStrategy::NextAvailable, + module_limits: ModuleLimits { + memory_pages: 1, + table_elements: 0, + ..Default::default() + }, + instance_limits: InstanceLimits { + count: 1, + address_space_size: 1, + }, + }) + .expect("pooling allocator created"); + + let engine = Engine::new(&config); + let store = Store::new_async(&engine); + let func = Func::new_async( + &store, + FuncType::new(None, None), + (), + move |_caller, _state, _params, _results| Box::new(async { Ok(()) }), + ); + run(func.call_async(&[])).unwrap(); + run(func.call_async(&[])).unwrap(); + let future1 = func.call_async(&[]); + let future2 = func.call_async(&[]); + run(future2).unwrap(); + run(future1).unwrap(); +} diff --git a/tests/all/main.rs b/tests/all/main.rs index 
526c5eb4c6b1..d965bbd1accb 100644 --- a/tests/all/main.rs +++ b/tests/all/main.rs @@ -18,6 +18,7 @@ mod module; mod module_linking; mod module_serialize; mod name; +mod pooling_allocator; mod stack_overflow; mod table; mod traps; diff --git a/tests/all/pooling_allocator.rs b/tests/all/pooling_allocator.rs new file mode 100644 index 000000000000..5d4a0fad1d56 --- /dev/null +++ b/tests/all/pooling_allocator.rs @@ -0,0 +1,317 @@ +use anyhow::Result; +use wasmtime::*; + +#[test] +fn successful_instantiation() -> Result<()> { + let mut config = Config::new(); + config.with_allocation_strategy(InstanceAllocationStrategy::Pooling { + strategy: PoolingAllocationStrategy::NextAvailable, + module_limits: ModuleLimits { + memory_pages: 1, + table_elements: 10, + ..Default::default() + }, + instance_limits: InstanceLimits { + count: 1, + address_space_size: 1, + }, + })?; + + let engine = Engine::new(&config); + let module = Module::new(&engine, r#"(module (memory 1) (table 10 funcref))"#)?; + + // Module should instantiate + let store = Store::new(&engine); + Instance::new(&store, &module, &[])?; + + Ok(()) +} + +#[test] +fn memory_limit() -> Result<()> { + let mut config = Config::new(); + config.with_allocation_strategy(InstanceAllocationStrategy::Pooling { + strategy: PoolingAllocationStrategy::NextAvailable, + module_limits: ModuleLimits { + memory_pages: 3, + table_elements: 10, + ..Default::default() + }, + instance_limits: InstanceLimits { + count: 1, + address_space_size: 196608, + }, + })?; + + let engine = Engine::new(&config); + + // Module should fail to validate because the minimum is greater than the configured limit + match Module::new(&engine, r#"(module (memory 4))"#) { + Ok(_) => panic!("module compilation should fail"), + Err(e) => assert_eq!(e.to_string(), "Validation error: memory index 0 has a minimum page size of 4 which exceeds the limit of 3") + } + + let module = Module::new( + &engine, + r#"(module (memory (export "m") 0) (func (export "f") (result 
i32) (memory.grow (i32.const 1))))"#, + )?; + + // Instantiate the module and grow the memory via the `f` function + { + let store = Store::new(&engine); + let instance = Instance::new(&store, &module, &[])?; + let f = instance.get_func("f").unwrap().get0::().unwrap(); + + assert_eq!(f().expect("function should not trap"), 0); + assert_eq!(f().expect("function should not trap"), 1); + assert_eq!(f().expect("function should not trap"), 2); + assert_eq!(f().expect("function should not trap"), -1); + assert_eq!(f().expect("function should not trap"), -1); + } + + // Instantiate the module and grow the memory via the Wasmtime API + let store = Store::new(&engine); + let instance = Instance::new(&store, &module, &[])?; + + let memory = instance.get_memory("m").unwrap(); + assert_eq!(memory.size(), 0); + assert_eq!(memory.grow(1).expect("memory should grow"), 0); + assert_eq!(memory.size(), 1); + assert_eq!(memory.grow(1).expect("memory should grow"), 1); + assert_eq!(memory.size(), 2); + assert_eq!(memory.grow(1).expect("memory should grow"), 2); + assert_eq!(memory.size(), 3); + assert!(memory.grow(1).is_err()); + + Ok(()) +} + +#[test] +#[cfg_attr(target_arch = "aarch64", ignore)] // https://github.com/bytecodealliance/wasmtime/pull/2518#issuecomment-747280133 +fn memory_zeroed() -> Result<()> { + let mut config = Config::new(); + config.with_allocation_strategy(InstanceAllocationStrategy::Pooling { + strategy: PoolingAllocationStrategy::NextAvailable, + module_limits: ModuleLimits { + memory_pages: 1, + table_elements: 0, + ..Default::default() + }, + instance_limits: InstanceLimits { + count: 1, + address_space_size: 1, + }, + })?; + + let engine = Engine::new(&config); + + let module = Module::new(&engine, r#"(module (memory (export "m") 1))"#)?; + + // Instantiate the module and write a value to the entire memory + let ptr = { + let store = Store::new(&engine); + let instance = Instance::new(&store, &module, &[])?; + let memory = instance.get_memory("m").unwrap(); 
+ + assert_eq!(memory.size(), 1); + assert_eq!(memory.data_size(), 65536); + + let ptr = memory.data_ptr(); + + unsafe { + std::ptr::write_bytes(ptr, 0xFE, memory.data_size()); + } + + ptr + }; + + // Instantiate the module again and check that the memory is zero + { + let store = Store::new(&engine); + let instance = Instance::new(&store, &module, &[])?; + let memory = instance.get_memory("m").unwrap(); + + assert_eq!(ptr, memory.data_ptr()); + assert_eq!(memory.size(), 1); + assert_eq!(memory.data_size(), 65536); + + unsafe { + for i in 0..65536 { + assert_eq!(*ptr.offset(i), 0); + } + } + } + + Ok(()) +} + +#[test] +fn table_limit() -> Result<()> { + const TABLE_ELEMENTS: u32 = 10; + let mut config = Config::new(); + config.with_allocation_strategy(InstanceAllocationStrategy::Pooling { + strategy: PoolingAllocationStrategy::NextAvailable, + module_limits: ModuleLimits { + memory_pages: 1, + table_elements: TABLE_ELEMENTS, + ..Default::default() + }, + instance_limits: InstanceLimits { + count: 1, + address_space_size: 1, + }, + })?; + + let engine = Engine::new(&config); + + // Module should fail to validate because the minimum is greater than the configured limit + match Module::new(&engine, r#"(module (table 31 funcref))"#) { + Ok(_) => panic!("module compilation should fail"), + Err(e) => assert_eq!(e.to_string(), "Validation error: table index 0 has a minimum element size of 31 which exceeds the limit of 10") + } + + let module = Module::new( + &engine, + r#"(module (table (export "t") 0 funcref) (func (export "f") (result i32) (table.grow (ref.null func) (i32.const 1))))"#, + )?; + + // Instantiate the module and grow the table via the `f` function + { + let store = Store::new(&engine); + let instance = Instance::new(&store, &module, &[])?; + let f = instance.get_func("f").unwrap().get0::().unwrap(); + + for i in 0..TABLE_ELEMENTS { + assert_eq!(f().expect("function should not trap"), i as i32); + } + + assert_eq!(f().expect("function should not trap"), 
-1); + assert_eq!(f().expect("function should not trap"), -1); + } + + // Instantiate the module and grow the table via the Wasmtime API + let store = Store::new(&engine); + let instance = Instance::new(&store, &module, &[])?; + + let table = instance.get_table("t").unwrap(); + + for i in 0..TABLE_ELEMENTS { + assert_eq!(table.size(), i); + assert_eq!( + table + .grow(1, Val::FuncRef(None)) + .expect("table should grow"), + i + ); + } + + assert_eq!(table.size(), TABLE_ELEMENTS); + assert!(table.grow(1, Val::FuncRef(None)).is_err()); + + Ok(()) +} + +#[test] +#[cfg_attr(target_arch = "aarch64", ignore)] // https://github.com/bytecodealliance/wasmtime/pull/2518#issuecomment-747280133 +fn table_zeroed() -> Result<()> { + let mut config = Config::new(); + config.with_allocation_strategy(InstanceAllocationStrategy::Pooling { + strategy: PoolingAllocationStrategy::NextAvailable, + module_limits: ModuleLimits { + memory_pages: 1, + table_elements: 10, + ..Default::default() + }, + instance_limits: InstanceLimits { + count: 1, + address_space_size: 1, + }, + })?; + + let engine = Engine::new(&config); + + let module = Module::new(&engine, r#"(module (table (export "t") 10 funcref))"#)?; + + // Instantiate the module and fill the table with a reference + { + let store = Store::new(&engine); + let instance = Instance::new(&store, &module, &[])?; + let table = instance.get_table("t").unwrap(); + let f = Func::wrap(&store, || {}); + + assert_eq!(table.size(), 10); + + for i in 0..10 { + table.set(i, Val::FuncRef(Some(f.clone()))).unwrap(); + } + } + + // Instantiate the module again and check that the table has "null" values + { + let store = Store::new(&engine); + let instance = Instance::new(&store, &module, &[])?; + let table = instance.get_table("t").unwrap(); + + assert_eq!(table.size(), 10); + + for i in 0..10 { + match table.get(i).unwrap() { + Val::FuncRef(r) => assert!(r.is_none()), + _ => panic!("expected a funcref"), + } + } + } + + Ok(()) +} + +#[test] +fn 
instantiation_limit() -> Result<()> { + const INSTANCE_LIMIT: u32 = 10; + let mut config = Config::new(); + config.with_allocation_strategy(InstanceAllocationStrategy::Pooling { + strategy: PoolingAllocationStrategy::NextAvailable, + module_limits: ModuleLimits { + memory_pages: 1, + table_elements: 10, + ..Default::default() + }, + instance_limits: InstanceLimits { + count: INSTANCE_LIMIT, + address_space_size: 1, + }, + })?; + + let engine = Engine::new(&config); + let module = Module::new(&engine, r#"(module)"#)?; + + // Instantiate to the limit + { + let store = Store::new(&engine); + + for _ in 0..INSTANCE_LIMIT { + Instance::new(&store, &module, &[])?; + } + + match Instance::new(&store, &module, &[]) { + Ok(_) => panic!("instantiation should fail"), + Err(e) => assert_eq!( + e.to_string(), + format!( + "Limit of {} concurrent instances has been reached", + INSTANCE_LIMIT + ) + ), + } + } + + // With the above store dropped, ensure instantiations can be made + + let store = Store::new(&engine); + + for _ in 0..INSTANCE_LIMIT { + Instance::new(&store, &module, &[])?; + } + + Ok(()) +}