Skip to content

Commit

Permalink
near_vm: change LimitedMemoryPool to create new maps if dry (#10839)
Browse files Browse the repository at this point in the history
Now that the size of the `UniversalArtifact` cache is configurable by
the operator, the `LimitedMemoryPool` would need to account for the
configuration and allocate an appropriate number of pages depending on
that configuration. However, we do not have easy access to that
configuration from the contract runtime, and I don't think I want to
unravel the thread of moving the setup of contract runtime resources
all the way down to the place where this configuration is directly
available.

In the long term we should still keep memory pool limited somehow and
make sure we pre-allocate exactly the required number of maps of the
right size. To do that we would have to refactor the contract runtime
setup to rely less on the global state and do it somewhere near the
setup of the transaction runtime as well. This is also relevant with
regard to the limited replayability work.
  • Loading branch information
nagisa authored Mar 20, 2024
1 parent 86b53cb commit 21fba5b
Show file tree
Hide file tree
Showing 11 changed files with 36 additions and 41 deletions.
3 changes: 1 addition & 2 deletions nearcore/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -299,8 +299,7 @@ pub struct Config {

/// The number of the contracts kept loaded up for execution.
///
/// Each loaded contract will increase the baseline memory use of the node appreciably. This
/// number must not exceed the excess parallelism available in the contract runtime.
/// Each loaded contract will increase the baseline memory use of the node appreciably.
pub max_loaded_contracts: usize,
}

Expand Down
17 changes: 7 additions & 10 deletions runtime/near-vm-runner/src/near_vm_runner.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ use near_parameters::vm::VMKind;
use near_parameters::RuntimeFeesConfig;
use near_vm_compiler_singlepass::Singlepass;
use near_vm_engine::universal::{
LimitedMemoryPool, Universal, UniversalArtifact, UniversalEngine, UniversalExecutable,
MemoryPool, Universal, UniversalArtifact, UniversalEngine, UniversalExecutable,
UniversalExecutableRef,
};
use near_vm_types::{FunctionIndex, InstanceConfig, MemoryType, Pages, WASM_PAGE_SIZE};
Expand Down Expand Up @@ -242,23 +242,20 @@ impl NearVM {
// We only support universal engine at the moment.
assert_eq!(VM_CONFIG.engine, NearVmEngine::Universal);

static CODE_MEMORY_POOL_CELL: OnceLock<LimitedMemoryPool> = OnceLock::new();
static CODE_MEMORY_POOL_CELL: OnceLock<MemoryPool> = OnceLock::new();
let code_memory_pool = CODE_MEMORY_POOL_CELL
.get_or_init(|| {
// FIXME: should have as many code memories as there are possible parallel
// invocations of the runtime… How do we determine that? Should we make it
// configurable for the node operators, perhaps, so that they can make an informed
// choice based on the amount of memory they have and shards they track? Should we
// actually use some sort of semaphore to enforce a parallelism limit?
//
// NB: 64MiB is a best guess as to what the maximum size a loaded artifact can
// NOTE: 8MiB is a best guess as to what the maximum size a loaded artifact can
// plausibly be. This is not necessarily true – there may be WebAssembly
// instructions that expand by more than 4 times in terms of instruction size after
// a conversion to x86_64. In that case a re-allocation will occur and executing
// that particular function call will be slower. Not to mention there isn't a
// strong guarantee on the upper bound of the memory that the contract runtime may
// require.
LimitedMemoryPool::new(256, 1 * 1024 * 1024).unwrap_or_else(|e| {
// NOTE: 128 is not the upper limit on the number of maps that may be allocated at
// once. This number may grow depending on the size of the in-memory VMArtifact
// cache, which is configurable by the operator.
MemoryPool::new(128, 8 * 1024 * 1024).unwrap_or_else(|e| {
panic!("could not pre-allocate resources for the runtime: {e}");
})
})
Expand Down
4 changes: 2 additions & 2 deletions runtime/near-vm/engine/src/universal/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ pub struct Universal {
compiler_config: Option<Box<dyn CompilerConfig>>,
target: Option<Target>,
features: Option<Features>,
pool: Option<super::LimitedMemoryPool>,
pool: Option<super::MemoryPool>,
}

impl Universal {
Expand Down Expand Up @@ -42,7 +42,7 @@ impl Universal {
}

/// Set the pool of reusable code memory
pub fn code_memory_pool(mut self, pool: super::LimitedMemoryPool) -> Self {
pub fn code_memory_pool(mut self, pool: super::MemoryPool) -> Self {
self.pool = Some(pool);
self
}
Expand Down
27 changes: 14 additions & 13 deletions runtime/near-vm/engine/src/universal/code_memory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -235,23 +235,20 @@ impl Drop for CodeMemory {

unsafe impl Send for CodeMemory {}

/// The pool of preallocated memory maps for storing the code.
/// The pool of memory maps for storing the code.
///
/// This pool cannot grow and will only allow up to a number of code mappings that were specified
/// at construction time.
///
/// However it is possible for the mappings inside to grow to accommodate larger code.
/// The memories and the size of the pool may grow towards a high watermark.
#[derive(Clone)]
pub struct LimitedMemoryPool {
pub struct MemoryPool {
pool: Arc<std::sync::Mutex<Vec<CodeMemory>>>,
}

impl LimitedMemoryPool {
/// Create a new pool with `count` mappings initialized to `default_memory_size` each.
pub fn new(count: usize, default_memory_size: usize) -> rustix::io::Result<Self> {
let mut pool = Vec::with_capacity(count);
for _ in 0..count {
pool.push(CodeMemory::create(default_memory_size)?);
impl MemoryPool {
/// Create a new pool with `preallocate_count` mappings initialized to `initial_map_size` each.
pub fn new(preallocate_count: usize, initial_map_size: usize) -> rustix::io::Result<Self> {
let mut pool = Vec::with_capacity(preallocate_count);
for _ in 0..preallocate_count {
pool.push(CodeMemory::create(initial_map_size)?);
}
let pool = Arc::new(std::sync::Mutex::new(pool));
Ok(Self { pool })
Expand All @@ -260,7 +257,11 @@ impl LimitedMemoryPool {
/// Get a memory mapping, at least `size` bytes large.
pub fn get(&self, size: usize) -> rustix::io::Result<CodeMemory> {
let mut guard = self.pool.lock().expect("unreachable due to panic=abort");
let mut memory = guard.pop().ok_or(rustix::io::Errno::NOMEM)?;
let mut memory = match guard.pop() {
Some(m) => m,
// This memory will later return to this pool via the drop of `CodeMemory`.
None => CodeMemory::create(std::cmp::max(size, 1))?,
};
memory.source_pool = Some(Arc::clone(&self.pool));
if memory.size < size {
Ok(memory.resize(size)?)
Expand Down
6 changes: 3 additions & 3 deletions runtime/near-vm/engine/src/universal/engine.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ impl UniversalEngine {
compiler: Box<dyn Compiler>,
target: Target,
features: Features,
memory_allocator: super::LimitedMemoryPool,
memory_allocator: super::MemoryPool,
) -> Self {
Self {
inner: Arc::new(Mutex::new(UniversalEngineInner {
Expand Down Expand Up @@ -68,7 +68,7 @@ impl UniversalEngine {
///
/// Headless engines can't compile or validate any modules,
/// they just take already processed Modules (via `Module::serialize`).
pub fn headless(memory_allocator: super::LimitedMemoryPool) -> Self {
pub fn headless(memory_allocator: super::MemoryPool) -> Self {
Self {
inner: Arc::new(Mutex::new(UniversalEngineInner {
compiler: None,
Expand Down Expand Up @@ -486,7 +486,7 @@ pub struct UniversalEngineInner {
/// The compiler
compiler: Option<Box<dyn Compiler>>,
/// Pool from which code memory can be allocated.
code_memory_pool: super::LimitedMemoryPool,
code_memory_pool: super::MemoryPool,
/// The features to compile the Wasm module with
features: Features,
/// The signature registry is used mainly to operate with trampolines
Expand Down
2 changes: 1 addition & 1 deletion runtime/near-vm/engine/src/universal/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ mod link;

pub use self::artifact::UniversalArtifact;
pub use self::builder::Universal;
pub use self::code_memory::{CodeMemory, LimitedMemoryPool};
pub use self::code_memory::{CodeMemory, MemoryPool};
pub use self::engine::UniversalEngine;
pub use self::executable::{UniversalExecutable, UniversalExecutableRef};
pub use self::link::link_module;
2 changes: 1 addition & 1 deletion runtime/near-vm/test-api/src/sys/store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ impl Default for Store {
fn get_engine(mut config: impl CompilerConfig + 'static) -> UniversalEngine {
cfg_if::cfg_if! {
if #[cfg(feature = "default-universal")] {
let pool = near_vm_engine::universal::LimitedMemoryPool::new(1, 0x10000).unwrap();
let pool = near_vm_engine::universal::MemoryPool::new(1, 0x10000).unwrap();
near_vm_engine::universal::Universal::new(config)
.code_memory_pool(pool)
.engine()
Expand Down
4 changes: 2 additions & 2 deletions runtime/near-vm/tests/compilers/compilation.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
use std::sync::Arc;

use near_vm_compiler::CompileError;
use near_vm_engine::universal::{LimitedMemoryPool, Universal};
use near_vm_engine::universal::{MemoryPool, Universal};
use near_vm_test_api::*;
use near_vm_vm::Artifact;

Expand Down Expand Up @@ -75,7 +75,7 @@ fn profiling() {
"#;
let wasm = wat2wasm(wat.as_bytes()).unwrap();
let compiler = Singlepass::default();
let pool = LimitedMemoryPool::new(1, 0x10000).unwrap();
let pool = MemoryPool::new(1, 0x10000).unwrap();
let engine = Arc::new(Universal::new(compiler).code_memory_pool(pool).engine());
let store = Store::new(Arc::clone(&engine));
match compile_uncached(&store, &engine, &wasm, false) {
Expand Down
4 changes: 1 addition & 3 deletions runtime/near-vm/tests/compilers/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,9 +46,7 @@ impl Config {

pub fn engine(&self, compiler_config: Box<dyn CompilerConfig>) -> UniversalEngine {
let mut engine = near_vm_engine::universal::Universal::new(compiler_config)
.code_memory_pool(
near_vm_engine::universal::LimitedMemoryPool::new(128, 16 * 4096).unwrap(),
);
.code_memory_pool(near_vm_engine::universal::MemoryPool::new(4, 16 * 4096).unwrap());
if let Some(ref features) = self.features {
engine = engine.features(features.clone())
}
Expand Down
4 changes: 2 additions & 2 deletions runtime/near-vm/tests/compilers/deterministic.rs
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
use anyhow::Result;
use near_vm_compiler_singlepass::Singlepass;
use near_vm_engine::universal::{LimitedMemoryPool, Universal};
use near_vm_engine::universal::{MemoryPool, Universal};
use near_vm_test_api::{wat2wasm, BaseTunables};

fn compile_and_compare(wasm: &[u8]) -> Result<()> {
let compiler = Singlepass::default();
let pool = LimitedMemoryPool::new(1, 0x10000).unwrap();
let pool = MemoryPool::new(1, 0x10000).unwrap();
let engine = Universal::new(compiler).code_memory_pool(pool).engine();
let tunables = BaseTunables::for_target(engine.target());

Expand Down
4 changes: 2 additions & 2 deletions runtime/near-vm/tests/compilers/stack_limiter.rs
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
use near_vm_compiler_singlepass::Singlepass;
use near_vm_engine::universal::{LimitedMemoryPool, Universal};
use near_vm_engine::universal::{MemoryPool, Universal};
use near_vm_test_api::*;
use near_vm_types::InstanceConfig;
use near_vm_vm::TrapCode;

fn get_store() -> Store {
let compiler = Singlepass::default();
let pool = LimitedMemoryPool::new(6, 0x100000).expect("foo");
let pool = MemoryPool::new(6, 0x100000).expect("foo");
let store = Store::new(Universal::new(compiler).code_memory_pool(pool).engine().into());
store
}
Expand Down

0 comments on commit 21fba5b

Please sign in to comment.