Merge #1183
1183: Make full preemption an optional feature. r=syrusakbary a=losfair

Full preemption requires two additional memory loads on every loop backedge and function call. This PR makes full preemption configurable at code-generation time and disables it by default.
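
For context, a minimal sketch of how an embedder opts back in after this change. The `full_preemption` and `track_state` fields match the `CompilerConfig` additions below; the surrounding function is assumed for illustration.

    use wasmer_runtime_core::backend::CompilerConfig;

    fn preemptible_config() -> CompilerConfig {
        CompilerConfig {
            // Re-enable the checkpoints this PR turns off by default; the cost
            // is two extra memory loads per loop backedge and function call.
            full_preemption: true,
            // State tracking is what lets the runtime reconstruct wasm state
            // at a checkpoint, so it must be on for preemption to be useful.
            track_state: true,
            ..Default::default()
        }
    }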

Co-authored-by: losfair <zhy20000919@hotmail.com>
Co-authored-by: Heyang Zhou <zhy20000919@hotmail.com>
bors[bot] and losfair authored Feb 5, 2020
2 parents abbdda7 + d2fc5c8 commit 2c44b70
Showing 3 changed files with 87 additions and 48 deletions.
19 changes: 19 additions & 0 deletions lib/runtime-core/src/backend.rs
@@ -109,9 +109,28 @@ impl BackendCompilerConfig {
pub struct CompilerConfig {
/// Symbol information generated from emscripten; used for more detailed debug messages
pub symbol_map: Option<HashMap<u32, String>>,

/// How to make the decision whether to emit bounds checks for memory accesses.
pub memory_bound_check_mode: MemoryBoundCheckMode,

/// Whether to generate explicit native stack checks against `stack_lower_bound` in `InternalCtx`.
///
/// Hardware memory protection (e.g. `mprotect` on Unix) is usually adequate to catch
/// stack overflow. But in low-level environments such as the kernel, faults are generally
/// not expected, and relying on hardware memory protection adds too much complexity.
pub enforce_stack_check: bool,

/// Whether to enable state tracking. Necessary for managed mode.
pub track_state: bool,

/// Whether to enable full preemption checkpoint generation.
///
/// This inserts checkpoints at critical locations such as loop backedges and function calls,
/// allowing preemptive unwinding/task switching.
///
/// Enabling this adds a small runtime overhead: two extra memory loads on every
/// loop backedge and function call.
pub full_preemption: bool,

pub features: Features,

// Target info. Presently only supported by LLVM.
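The `enforce_stack_check` doc above describes an explicit native stack check. As a hedged sketch of what the backend emits when it is on (the emitter calls mirror the `Emitter` trait in codegen_x64.rs, but `offset_stack_lower_bound` and the exact sequence are assumptions here, not copied from this diff):

    // Compare RSP against InternalCtx::stack_lower_bound and trap before the
    // native stack can silently overflow into unmapped memory.
    a.emit_cmp(
        Size::S64,
        Location::Memory(
            Machine::get_vmctx_reg(),
            vm::Ctx::offset_stack_lower_bound() as i32,
        ),
        Location::GPR(GPR::RSP),
    );
    a.emit_conditional_trap(Condition::Below); // RSP below the bound => overflow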
105 changes: 58 additions & 47 deletions lib/singlepass-backend/src/codegen_x64.rs
@@ -639,6 +639,7 @@ struct CodegenConfig {
memory_bound_check_mode: MemoryBoundCheckMode,
enforce_stack_check: bool,
track_state: bool,
full_preemption: bool,
}

impl ModuleCodeGenerator<X64FunctionCode, X64ExecutionContext, CodegenError>
@@ -908,6 +909,7 @@ impl ModuleCodeGenerator<X64FunctionCode, X64ExecutionContext, CodegenError>
memory_bound_check_mode: config.memory_bound_check_mode,
enforce_stack_check: config.enforce_stack_check,
track_state: config.track_state,
full_preemption: config.full_preemption,
}));
Ok(())
}
@@ -2103,6 +2105,10 @@ impl X64FunctionCode {
true,
value_size,
|a, m, addr| {
// Memory moves narrower than 32 bits do not zero the upper bits of the destination
// register on x86-64, so clear it first (32-bit moves zero-extend).
if memory_sz < Size::S32 {
a.emit_xor(Size::S32, Location::GPR(compare), Location::GPR(compare));
}
a.emit_mov(memory_sz, Location::Memory(addr, 0), Location::GPR(compare));
a.emit_mov(stack_sz, Location::GPR(compare), ret);
cb(a, m, compare, value);
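To illustrate the x86-64 rule the comment above relies on, a small analogue in plain Rust (hypothetical helpers; they model the register masking only, not the emitter):

    // An 8-bit move leaves bits 8..64 of the destination unchanged, so a stale
    // value could survive into the later compare.
    fn narrow_move_no_clear(reg: u64, byte: u8) -> u64 {
        (reg & !0xff) | byte as u64
    }

    // After `xor compare, compare` the register is zero, so the narrow move
    // behaves like a zero-extending load.
    fn narrow_move_after_xor(byte: u8) -> u64 {
        byte as u64
    }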
Expand Down Expand Up @@ -2478,28 +2484,31 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
// Check interrupt signal without branching
let activate_offset = a.get_offset().0;

- a.emit_mov(
-     Size::S64,
-     Location::Memory(
-         Machine::get_vmctx_reg(),
-         vm::Ctx::offset_interrupt_signal_mem() as i32,
-     ),
-     Location::GPR(GPR::RAX),
- );
- self.fsm.loop_offsets.insert(
-     a.get_offset().0,
-     OffsetInfo {
-         end_offset: a.get_offset().0 + 1,
-         activate_offset,
-         diff_id: state_diff_id,
-     },
- );
- self.fsm.wasm_function_header_target_offset = Some(SuspendOffset::Loop(a.get_offset().0));
- a.emit_mov(
-     Size::S64,
-     Location::Memory(GPR::RAX, 0),
-     Location::GPR(GPR::RAX),
- );
+ if self.config.full_preemption {
+     a.emit_mov(
+         Size::S64,
+         Location::Memory(
+             Machine::get_vmctx_reg(),
+             vm::Ctx::offset_interrupt_signal_mem() as i32,
+         ),
+         Location::GPR(GPR::RAX),
+     );
+     self.fsm.loop_offsets.insert(
+         a.get_offset().0,
+         OffsetInfo {
+             end_offset: a.get_offset().0 + 1,
+             activate_offset,
+             diff_id: state_diff_id,
+         },
+     );
+     self.fsm.wasm_function_header_target_offset =
+         Some(SuspendOffset::Loop(a.get_offset().0));
+     a.emit_mov(
+         Size::S64,
+         Location::Memory(GPR::RAX, 0),
+         Location::GPR(GPR::RAX),
+     );
+ }

if self.machine.state.wasm_inst_offset != usize::MAX {
return Err(CodegenError {
@@ -6557,31 +6566,33 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_label(label);

// Check interrupt signal without branching
- a.emit_mov(
-     Size::S64,
-     Location::Memory(
-         Machine::get_vmctx_reg(),
-         vm::Ctx::offset_interrupt_signal_mem() as i32,
-     ),
-     Location::GPR(GPR::RAX),
- );
- self.fsm.loop_offsets.insert(
-     a.get_offset().0,
-     OffsetInfo {
-         end_offset: a.get_offset().0 + 1,
-         activate_offset,
-         diff_id: state_diff_id,
-     },
- );
- self.fsm.wasm_offset_to_target_offset.insert(
-     self.machine.state.wasm_inst_offset,
-     SuspendOffset::Loop(a.get_offset().0),
- );
- a.emit_mov(
-     Size::S64,
-     Location::Memory(GPR::RAX, 0),
-     Location::GPR(GPR::RAX),
- );
+ if self.config.full_preemption {
+     a.emit_mov(
+         Size::S64,
+         Location::Memory(
+             Machine::get_vmctx_reg(),
+             vm::Ctx::offset_interrupt_signal_mem() as i32,
+         ),
+         Location::GPR(GPR::RAX),
+     );
+     self.fsm.loop_offsets.insert(
+         a.get_offset().0,
+         OffsetInfo {
+             end_offset: a.get_offset().0 + 1,
+             activate_offset,
+             diff_id: state_diff_id,
+         },
+     );
+     self.fsm.wasm_offset_to_target_offset.insert(
+         self.machine.state.wasm_inst_offset,
+         SuspendOffset::Loop(a.get_offset().0),
+     );
+     a.emit_mov(
+         Size::S64,
+         Location::Memory(GPR::RAX, 0),
+         Location::GPR(GPR::RAX),
+     );
+ }
}
Operator::Nop => {}
Operator::MemorySize { reserved } => {
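Both hunks above emit the same branch-free checkpoint: the first load fetches the interrupt-signal page pointer from `vm::Ctx`, the second dereferences it. A hedged sketch of the host side, assuming the `libc` crate (`signal_mem` and the page size are illustrative; in this tree the page is managed inside runtime-core):

    // To preempt, revoke read access to the signal page. The second load at the
    // next checkpoint then faults at an offset recorded in `fsm.loop_offsets`,
    // letting the fault handler unwind or task-switch via the state diff.
    unsafe fn request_preemption(signal_mem: *mut u8) -> Result<(), std::io::Error> {
        const PAGE_SIZE: usize = 4096; // assumed; query the OS in real code
        if libc::mprotect(signal_mem as *mut libc::c_void, PAGE_SIZE, libc::PROT_NONE) != 0 {
            return Err(std::io::Error::last_os_error());
        }
        Ok(())
    }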
11 changes: 10 additions & 1 deletion src/bin/wasmer.rs
@@ -703,6 +703,10 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
symbol_map: em_symbol_map.clone(),
memory_bound_check_mode: MemoryBoundCheckMode::Disable,
enforce_stack_check: true,

// Kernel loader does not support explicit preemption checkpoints.
full_preemption: false,

track_state,
features: options.features.into_backend_features(),
backend_specific_config,
@@ -717,6 +721,11 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
CompilerConfig {
symbol_map: em_symbol_map.clone(),
track_state,

// Enable full preemption if state tracking is enabled.
// Preemption only makes sense with state information.
full_preemption: track_state,

features: options.features.into_backend_features(),
backend_specific_config,
..Default::default()
@@ -813,7 +822,7 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
LoaderName::Kernel => Box::new(
instance
.load(::wasmer_kernel_loader::KernelLoader)
.map_err(|e| format!("Can't use the local loader: {:?}", e))?,
.map_err(|e| format!("Can't use the kernel loader: {:?}", e))?,
),
};
println!("{:?}", ins.call(index, &args));
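Condensing the two configuration paths this file now takes, as a sketch (the `kernel_loader` flag is hypothetical; in the real code the choice follows the selected loader and backend):

    use wasmer_runtime_core::backend::CompilerConfig;

    fn make_config(kernel_loader: bool, track_state: bool) -> CompilerConfig {
        if kernel_loader {
            // Kernel path: explicit stack checks on, no preemption checkpoints.
            CompilerConfig {
                enforce_stack_check: true,
                full_preemption: false,
                track_state,
                ..Default::default()
            }
        } else {
            // Normal path: checkpoints are useless without the state maps
            // needed to unwind, so full preemption follows state tracking.
            CompilerConfig {
                full_preemption: track_state,
                track_state,
                ..Default::default()
            }
        }
    }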
