From 455783aa21b06a4fe5a7a902480eb774c7e56788 Mon Sep 17 00:00:00 2001
From: losfair
Date: Sat, 1 Feb 2020 01:12:06 +0800
Subject: [PATCH 1/6] Make full preemption an optional feature.

---
 lib/runtime-core/src/backend.rs           |  15 ++++
 lib/singlepass-backend/src/codegen_x64.rs | 101 ++++++++++++----------
 src/bin/wasmer.rs                         |  11 ++-
 3 files changed, 79 insertions(+), 48 deletions(-)

diff --git a/lib/runtime-core/src/backend.rs b/lib/runtime-core/src/backend.rs
index db6b8ef65f9..a069f511780 100644
--- a/lib/runtime-core/src/backend.rs
+++ b/lib/runtime-core/src/backend.rs
@@ -109,9 +109,24 @@ impl BackendCompilerConfig {
 pub struct CompilerConfig {
     /// Symbol information generated from emscripten; used for more detailed debug messages
     pub symbol_map: Option<HashMap<u32, String>>,
+
+    /// Optionally override the automatically determined memory bound check mode.
     pub memory_bound_check_mode: MemoryBoundCheckMode,
+
+    /// Whether to generate explicit stack checks against a field in `InternalCtx`.
     pub enforce_stack_check: bool,
+
+    /// Whether to enable state tracking. Necessary for managed mode.
     pub track_state: bool,
+
+    /// Whether to enable full preemption checkpoint generation.
+    ///
+    /// This inserts checkpoints at critical locations such as loop backedges and function calls,
+    /// allowing non-cooperative unwinding/task switching.
+    ///
+    /// When enabled there can be a small amount of runtime performance overhead.
+    pub full_preemption: bool,
+
     pub features: Features,
 
     // Target info. Presently only supported by LLVM.
diff --git a/lib/singlepass-backend/src/codegen_x64.rs b/lib/singlepass-backend/src/codegen_x64.rs
index b58b84cedf5..1e1ac78a1e0 100644
--- a/lib/singlepass-backend/src/codegen_x64.rs
+++ b/lib/singlepass-backend/src/codegen_x64.rs
@@ -639,6 +639,7 @@ struct CodegenConfig {
     memory_bound_check_mode: MemoryBoundCheckMode,
     enforce_stack_check: bool,
     track_state: bool,
+    full_preemption: bool,
 }
 
 impl ModuleCodeGenerator
@@ -908,6 +909,7 @@ impl ModuleCodeGenerator
             memory_bound_check_mode: config.memory_bound_check_mode,
             enforce_stack_check: config.enforce_stack_check,
             track_state: config.track_state,
+            full_preemption: config.full_preemption,
         }));
         Ok(())
     }
@@ -2478,28 +2480,31 @@ impl FunctionCodeGenerator for X64FunctionCode {
         // Check interrupt signal without branching
         let activate_offset = a.get_offset().0;
 
-        a.emit_mov(
-            Size::S64,
-            Location::Memory(
-                Machine::get_vmctx_reg(),
-                vm::Ctx::offset_interrupt_signal_mem() as i32,
-            ),
-            Location::GPR(GPR::RAX),
-        );
-        self.fsm.loop_offsets.insert(
-            a.get_offset().0,
-            OffsetInfo {
-                end_offset: a.get_offset().0 + 1,
-                activate_offset,
-                diff_id: state_diff_id,
-            },
-        );
-        self.fsm.wasm_function_header_target_offset = Some(SuspendOffset::Loop(a.get_offset().0));
-        a.emit_mov(
-            Size::S64,
-            Location::Memory(GPR::RAX, 0),
-            Location::GPR(GPR::RAX),
-        );
+        if self.config.full_preemption {
+            a.emit_mov(
+                Size::S64,
+                Location::Memory(
+                    Machine::get_vmctx_reg(),
+                    vm::Ctx::offset_interrupt_signal_mem() as i32,
+                ),
+                Location::GPR(GPR::RAX),
+            );
+            self.fsm.loop_offsets.insert(
+                a.get_offset().0,
+                OffsetInfo {
+                    end_offset: a.get_offset().0 + 1,
+                    activate_offset,
+                    diff_id: state_diff_id,
+                },
+            );
+            self.fsm.wasm_function_header_target_offset =
+                Some(SuspendOffset::Loop(a.get_offset().0));
+            a.emit_mov(
+                Size::S64,
+                Location::Memory(GPR::RAX, 0),
+                Location::GPR(GPR::RAX),
+            );
+        }
 
         if self.machine.state.wasm_inst_offset != usize::MAX {
             return Err(CodegenError {
@@ -6557,31 +6562,33 @@ impl FunctionCodeGenerator for X64FunctionCode {
                 a.emit_label(label);
 
                 // Check interrupt signal without branching
-                a.emit_mov(
-                    Size::S64,
-                    Location::Memory(
-                        Machine::get_vmctx_reg(),
-                        vm::Ctx::offset_interrupt_signal_mem() as i32,
-                    ),
-                    Location::GPR(GPR::RAX),
-                );
-                self.fsm.loop_offsets.insert(
-                    a.get_offset().0,
-                    OffsetInfo {
-                        end_offset: a.get_offset().0 + 1,
-                        activate_offset,
-                        diff_id: state_diff_id,
-                    },
-                );
-                self.fsm.wasm_offset_to_target_offset.insert(
-                    self.machine.state.wasm_inst_offset,
-                    SuspendOffset::Loop(a.get_offset().0),
-                );
-                a.emit_mov(
-                    Size::S64,
-                    Location::Memory(GPR::RAX, 0),
-                    Location::GPR(GPR::RAX),
-                );
+                if self.config.full_preemption {
+                    a.emit_mov(
+                        Size::S64,
+                        Location::Memory(
+                            Machine::get_vmctx_reg(),
+                            vm::Ctx::offset_interrupt_signal_mem() as i32,
+                        ),
+                        Location::GPR(GPR::RAX),
+                    );
+                    self.fsm.loop_offsets.insert(
+                        a.get_offset().0,
+                        OffsetInfo {
+                            end_offset: a.get_offset().0 + 1,
+                            activate_offset,
+                            diff_id: state_diff_id,
+                        },
+                    );
+                    self.fsm.wasm_offset_to_target_offset.insert(
+                        self.machine.state.wasm_inst_offset,
+                        SuspendOffset::Loop(a.get_offset().0),
+                    );
+                    a.emit_mov(
+                        Size::S64,
+                        Location::Memory(GPR::RAX, 0),
+                        Location::GPR(GPR::RAX),
+                    );
+                }
             }
             Operator::Nop => {}
             Operator::MemorySize { reserved } => {
diff --git a/src/bin/wasmer.rs b/src/bin/wasmer.rs
index 5d5bb6e42a6..7974d228537 100644
--- a/src/bin/wasmer.rs
+++ b/src/bin/wasmer.rs
@@ -703,6 +703,10 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
                 symbol_map: em_symbol_map.clone(),
                 memory_bound_check_mode: MemoryBoundCheckMode::Disable,
                 enforce_stack_check: true,
+
+                // Kernel loader does not support explicit preemption checkpoints.
+                full_preemption: false,
+
                 track_state,
                 features: options.features.into_backend_features(),
                 backend_specific_config,
@@ -717,6 +721,11 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
             CompilerConfig {
                 symbol_map: em_symbol_map.clone(),
                 track_state,
+
+                // Enable full preemption if state tracking is enabled.
+                // Preemption only makes sense with state information.
+                full_preemption: track_state,
+
                 features: options.features.into_backend_features(),
                 backend_specific_config,
                 ..Default::default()
@@ -813,7 +822,7 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
         LoaderName::Kernel => Box::new(
             instance
                 .load(::wasmer_kernel_loader::KernelLoader)
-                .map_err(|e| format!("Can't use the local loader: {:?}", e))?,
+                .map_err(|e| format!("Can't use the kernel loader: {:?}", e))?,
         ),
     };
     println!("{:?}", ins.call(index, &args));

From 60c7d1e0fc05f52e3f70247a0b25a3754cdb5aa9 Mon Sep 17 00:00:00 2001
From: losfair
Date: Tue, 4 Feb 2020 01:58:21 +0800
Subject: [PATCH 2/6] Fix register zeroing in emit_compare_and_swap.

---
 lib/singlepass-backend/src/codegen_x64.rs | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lib/singlepass-backend/src/codegen_x64.rs b/lib/singlepass-backend/src/codegen_x64.rs
index 1e1ac78a1e0..7fc2d14842e 100644
--- a/lib/singlepass-backend/src/codegen_x64.rs
+++ b/lib/singlepass-backend/src/codegen_x64.rs
@@ -2105,6 +2105,10 @@ impl X64FunctionCode {
             true,
             value_size,
             |a, m, addr| {
+                // Memory moves with size < 32b do not zero upper bits.
+                if memory_sz != Size::S32 && memory_sz != Size::S64 {
+                    a.emit_xor(Size::S32, Location::GPR(compare), Location::GPR(compare));
+                }
                 a.emit_mov(memory_sz, Location::Memory(addr, 0), Location::GPR(compare));
                 a.emit_mov(stack_sz, Location::GPR(compare), ret);
                 cb(a, m, compare, value);

From fe5ed7b27a3f96a575f7d65faafaac4bea07ce66 Mon Sep 17 00:00:00 2001
From: Heyang Zhou
Date: Wed, 5 Feb 2020 00:35:15 +0800
Subject: [PATCH 3/6] Improve comment for `memory_bound_check_mode`.

Co-Authored-By: nlewycky
---
 lib/runtime-core/src/backend.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/runtime-core/src/backend.rs b/lib/runtime-core/src/backend.rs
index a069f511780..bc21cdcddf4 100644
--- a/lib/runtime-core/src/backend.rs
+++ b/lib/runtime-core/src/backend.rs
@@ -110,7 +110,7 @@ pub struct CompilerConfig {
     /// Symbol information generated from emscripten; used for more detailed debug messages
     pub symbol_map: Option<HashMap<u32, String>>,
 
-    /// Optionally override the automatically determined memory bound check mode.
+    /// How to make the decision whether to emit bounds checks for memory accesses.
     pub memory_bound_check_mode: MemoryBoundCheckMode,
 
     /// Whether to generate explicit stack checks against a field in `InternalCtx`.

From b5a629b065f8edae33b5a5931662a807dd07498b Mon Sep 17 00:00:00 2001
From: Heyang Zhou
Date: Wed, 5 Feb 2020 00:37:40 +0800
Subject: [PATCH 4/6] Use less-than to compare sizes.

Co-Authored-By: nlewycky
---
 lib/singlepass-backend/src/codegen_x64.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/singlepass-backend/src/codegen_x64.rs b/lib/singlepass-backend/src/codegen_x64.rs
index 7fc2d14842e..27f79dc62a9 100644
--- a/lib/singlepass-backend/src/codegen_x64.rs
+++ b/lib/singlepass-backend/src/codegen_x64.rs
@@ -2106,7 +2106,7 @@ impl X64FunctionCode {
             value_size,
             |a, m, addr| {
                 // Memory moves with size < 32b do not zero upper bits.
-                if memory_sz != Size::S32 && memory_sz != Size::S64 {
+                if memory_sz < Size::S32 {
                     a.emit_xor(Size::S32, Location::GPR(compare), Location::GPR(compare));
                 }
                 a.emit_mov(memory_sz, Location::Memory(addr, 0), Location::GPR(compare));

From d3f2cf594adc8aada6340cb0f0820c11b7c46617 Mon Sep 17 00:00:00 2001
From: losfair
Date: Wed, 5 Feb 2020 00:44:59 +0800
Subject: [PATCH 5/6] Fix comments.

---
 lib/runtime-core/src/backend.rs | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/lib/runtime-core/src/backend.rs b/lib/runtime-core/src/backend.rs
index bc21cdcddf4..efede97bb33 100644
--- a/lib/runtime-core/src/backend.rs
+++ b/lib/runtime-core/src/backend.rs
@@ -113,7 +113,11 @@ pub struct CompilerConfig {
     /// How to make the decision whether to emit bounds checks for memory accesses.
     pub memory_bound_check_mode: MemoryBoundCheckMode,
 
-    /// Whether to generate explicit stack checks against a field in `InternalCtx`.
+    /// Whether to generate explicit native stack checks against `stack_lower_bound` in `InternalCtx`.
+    /// 
+    /// Usually it's adequate to use hardware memory protection mechanisms such as `mprotect` on Unix to
+    /// prevent stack overflow. But for low-level environments, e.g. the kernel, faults are generally
+    /// not expected and relying on hardware memory protection would add too much complexity.
     pub enforce_stack_check: bool,
 
     /// Whether to enable state tracking. Necessary for managed mode.
@@ -122,7 +126,7 @@ pub struct CompilerConfig {
     /// Whether to enable full preemption checkpoint generation.
     ///
     /// This inserts checkpoints at critical locations such as loop backedges and function calls,
-    /// allowing non-cooperative unwinding/task switching.
+    /// allowing preemptive unwinding/task switching.
     ///
     /// When enabled there can be a small amount of runtime performance overhead.
     pub full_preemption: bool,

From d2fc5c801396942211f21524cb21f8ab8057becd Mon Sep 17 00:00:00 2001
From: losfair
Date: Wed, 5 Feb 2020 00:45:24 +0800
Subject: [PATCH 6/6] Cargo fmt

---
 lib/runtime-core/src/backend.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/runtime-core/src/backend.rs b/lib/runtime-core/src/backend.rs
index efede97bb33..bdb6628cd5f 100644
--- a/lib/runtime-core/src/backend.rs
+++ b/lib/runtime-core/src/backend.rs
@@ -114,7 +114,7 @@ pub struct CompilerConfig {
     pub memory_bound_check_mode: MemoryBoundCheckMode,
 
     /// Whether to generate explicit native stack checks against `stack_lower_bound` in `InternalCtx`.
-    /// 
+    ///
     /// Usually it's adequate to use hardware memory protection mechanisms such as `mprotect` on Unix to
     /// prevent stack overflow. But for low-level environments, e.g. the kernel, faults are generally
     /// not expected and relying on hardware memory protection would add too much complexity.
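
Usage sketch (illustration only, not part of the patch series): how an embedder might opt in
to the new `full_preemption` flag introduced above. This is a minimal sketch assuming the
`CompilerConfig` import path below and the wasmer 0.x compile-with-config entry point; any
name not appearing in the patches (the helper function, the import path) is an assumption.

    use wasmer_runtime_core::backend::CompilerConfig;

    /// Hypothetical helper: build a config that asks the singlepass backend to
    /// emit preemption checkpoints at loop backedges and function calls. As in
    /// the src/bin/wasmer.rs change in PATCH 1/6, `full_preemption` is paired
    /// with `track_state`, since unwinding at a checkpoint relies on the
    /// tracked state information.
    fn preemptible_config() -> CompilerConfig {
        CompilerConfig {
            track_state: true,
            full_preemption: true,
            ..Default::default()
        }
    }

The resulting config would then be handed to the usual compile-with-config entry point,
exactly as the CLI changes in PATCH 1/6 do; with `full_preemption: false` the backend skips
the checkpoint code entirely and avoids the small runtime overhead.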