diff --git a/core/store/src/trie/mem/arena/alloc.rs b/core/store/src/trie/mem/arena/alloc.rs
index 7a485f26e32..f6af8650621 100644
--- a/core/store/src/trie/mem/arena/alloc.rs
+++ b/core/store/src/trie/mem/arena/alloc.rs
@@ -160,13 +160,11 @@ impl Allocator {
         self.freelists[size_class] = pos;
     }
 
-    #[cfg(test)]
-    pub fn num_active_allocs(&self) -> usize {
+    pub(super) fn num_active_allocs(&self) -> usize {
         self.active_allocs_count
     }
 
-    #[cfg(test)]
-    pub fn active_allocs_bytes(&self) -> usize {
+    pub(super) fn active_allocs_bytes(&self) -> usize {
         self.active_allocs_bytes
     }
 }
@@ -175,8 +173,9 @@ impl Allocator {
 mod test {
     use super::MAX_ALLOC_SIZE;
     use crate::trie::mem::arena::alloc::CHUNK_SIZE;
+    use crate::trie::mem::arena::hybrid::HybridArena;
     use crate::trie::mem::arena::single_thread::STArena;
-    use crate::trie::mem::arena::{Arena, ArenaMut, ArenaWithDealloc};
+    use crate::trie::mem::arena::{Arena, ArenaMut, ArenaSliceMut, ArenaWithDealloc};
     use std::mem::size_of;
 
     #[test]
@@ -246,4 +245,16 @@ mod test {
             }
         }
     }
+
+    #[test]
+    #[should_panic(expected = "Cannot deallocate shared memory")]
+    fn test_hybrid_arena_panic_on_dealloc_shared_memory() {
+        let mut arena = STArena::new("test_arena".to_owned());
+        let ArenaSliceMut { pos, len, .. } = arena.alloc(50);
+        let frozen_arena = HybridArena::from(arena).freeze();
+        let mut hybrid_arena = HybridArena::from_frozen("hybrid_arena".to_string(), frozen_arena);
+
+        // The call to deallocate should panic because `pos` points into shared memory.
+        hybrid_arena.dealloc(pos, len)
+    }
 }
diff --git a/core/store/src/trie/mem/arena/frozen.rs b/core/store/src/trie/mem/arena/frozen.rs
new file mode 100644
index 00000000000..dc42d5f7ee2
--- /dev/null
+++ b/core/store/src/trie/mem/arena/frozen.rs
@@ -0,0 +1,41 @@
+use std::sync::Arc;
+
+use super::single_thread::STArenaMemory;
+use super::{Arena, ArenaMemory, ArenaPos};
+
+/// FrozenArenaMemory holds a cloneable, read-only shared memory instance.
+/// This can later be converted to HybridArenaMemory.
+#[derive(Clone)]
+pub struct FrozenArenaMemory {
+    pub(super) shared_memory: Arc<STArenaMemory>,
+}
+
+/// We only implement the ArenaMemory interface for FrozenArena, as it is read-only.
+/// ArenaMemoryMut is not implemented.
+impl ArenaMemory for FrozenArenaMemory {
+    fn raw_slice(&self, pos: ArenaPos, len: usize) -> &[u8] {
+        self.shared_memory.raw_slice(pos, len)
+    }
+}
+
+/// FrozenArena is a read-only arena that is cloneable and can be shared between threads.
+#[derive(Clone)]
+pub struct FrozenArena {
+    /// The memory of the arena.
+    pub(super) memory: FrozenArenaMemory,
+
+    /// active_allocs_bytes and active_allocs_count are used while initializing
+    /// the allocator for HybridArena.
+    pub(super) active_allocs_bytes: usize,
+    pub(super) active_allocs_count: usize,
+}
+
+/// We only implement the Arena interface for FrozenArena, as it is read-only.
+/// ArenaMut and ArenaWithDealloc are not implemented.
+impl Arena for FrozenArena {
+    type Memory = FrozenArenaMemory;
+
+    fn memory(&self) -> &FrozenArenaMemory {
+        &self.memory
+    }
+}
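For reviewers, the freeze-and-share flow that `frozen.rs` enables looks roughly like this. The sketch uses only APIs from this diff; it assumes crate-internal visibility, and `reads_survive_freezing` is a hypothetical name, not part of the change:

```rust
use crate::trie::mem::arena::hybrid::HybridArena;
use crate::trie::mem::arena::single_thread::STArena;
use crate::trie::mem::arena::{Arena, ArenaMemory, ArenaMut};

// Hypothetical helper demonstrating the freeze-and-share flow.
fn reads_survive_freezing() {
    // Allocate in an owned arena and remember where the data lives.
    let mut arena = STArena::new("parent".to_owned());
    let pos = arena.alloc(100).raw_pos();

    // Freeze into a read-only, cloneable form. Every clone shares the same
    // Arc<STArenaMemory>; no chunk data is copied.
    let frozen = HybridArena::from(arena).freeze();
    let (reader1, reader2) = (frozen.clone(), frozen.clone());
    assert_eq!(reader1.memory().raw_slice(pos, 100), reader2.memory().raw_slice(pos, 100));
}
```

Keeping `FrozenArena` strictly read-only is what makes the cheap `Clone` sound: no clone can invalidate another clone's positions.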
diff --git a/core/store/src/trie/mem/arena/hybrid.rs b/core/store/src/trie/mem/arena/hybrid.rs
new file mode 100644
index 00000000000..1394d1b89eb
--- /dev/null
+++ b/core/store/src/trie/mem/arena/hybrid.rs
@@ -0,0 +1,237 @@
+use std::convert::From;
+use std::sync::Arc;
+
+use super::alloc::Allocator;
+use super::frozen::{FrozenArena, FrozenArenaMemory};
+use super::single_thread::{STArena, STArenaMemory};
+use super::{
+    Arena, ArenaMemory, ArenaMemoryMut, ArenaMut, ArenaPos, ArenaSliceMut, ArenaWithDealloc,
+};
+
+/// HybridArenaMemory represents a combination of owned and shared memory.
+///
+/// Access to owned_memory and shared_memory can be thought of as a layered memory model.
+/// On the top layer, we have the owned_memory, which is mutable and can be mutated by the owning arena.
+/// On the bottom layer, we have the shared_memory, which is read-only and can be shared between threads.
+///
+/// Memory positions (ArenaPos { chunk, pos }) that reference the shared memory remain valid.
+/// All new allocations in the owned memory have a `pos` whose chunk is offset by shared_memory.chunks.len().
+/// Since the shared memory is read-only, shared_memory.chunks.len() never changes.
+///
+/// For `pos` with chunk value >= shared_memory.chunks.len(), the memory is read from owned_memory.
+/// For `pos` with chunk value < shared_memory.chunks.len(), the memory is read from shared_memory.
+///
+/// More information about HybridArena is in the section below.
+pub struct HybridArenaMemory {
+    owned_memory: STArenaMemory,
+    shared_memory: Arc<STArenaMemory>,
+}
+
+/// Conversion from FrozenArenaMemory to HybridArenaMemory.
+/// This creates a new instance of owned memory while sharing the shared memory.
+impl From<FrozenArenaMemory> for HybridArenaMemory {
+    fn from(frozen_memory: FrozenArenaMemory) -> Self {
+        Self { owned_memory: Default::default(), shared_memory: frozen_memory.shared_memory }
+    }
+}
+
+impl HybridArenaMemory {
+    #[inline]
+    fn chunks_offset(&self) -> u32 {
+        self.shared_memory.chunks.len() as u32
+    }
+}
+
+impl ArenaMemory for HybridArenaMemory {
+    fn raw_slice(&self, mut pos: ArenaPos, len: usize) -> &[u8] {
+        debug_assert!(!pos.is_invalid());
+        if pos.chunk >= self.chunks_offset() {
+            pos.chunk -= self.chunks_offset();
+            self.owned_memory.raw_slice(pos, len)
+        } else {
+            self.shared_memory.raw_slice(pos, len)
+        }
+    }
+}
+
+impl ArenaMemoryMut for HybridArenaMemory {
+    fn raw_slice_mut(&mut self, mut pos: ArenaPos, len: usize) -> &mut [u8] {
+        debug_assert!(!pos.is_invalid());
+        assert!(pos.chunk >= self.chunks_offset(), "Cannot mutate shared memory");
+        pos.chunk -= self.chunks_offset();
+        self.owned_memory.raw_slice_mut(pos, len)
+    }
+}
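To make the dispatch rule above concrete, here is a sketch of the chunk translation (crate-internal; `chunk_translation_sketch` is a hypothetical helper, and it reuses `new_from_existing_chunks` the same way the tests at the bottom of this file do):

```rust
use crate::trie::mem::arena::hybrid::HybridArena;
use crate::trie::mem::arena::single_thread::STArena;
use crate::trie::mem::arena::{Arena, ArenaMemory, ArenaMut, ArenaPos};

// Hypothetical helper showing how positions are routed between the layers.
fn chunk_translation_sketch() {
    // Two shared chunks, so chunks_offset() == 2 after freezing.
    let chunks = vec![vec![7; 100], vec![7; 100]];
    let st_arena = STArena::new_from_existing_chunks("parent".to_string(), chunks, 0, 0);
    let frozen = HybridArena::from(st_arena).freeze();
    let mut hybrid = HybridArena::from_frozen("child".to_string(), frozen);

    // chunk 1 < offset 2: the read is served untranslated from shared memory.
    assert_eq!(hybrid.memory().raw_slice(ArenaPos { chunk: 1, pos: 0 }, 1)[0], 7u8);

    // A fresh allocation lands in owned memory; its externally visible chunk
    // index is pre-offset, so owned chunk 0 is addressed as chunk 2.
    assert_eq!(hybrid.alloc(8).raw_pos(), ArenaPos { chunk: 2, pos: 0 });
}
```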
+
+/// HybridArena represents an Arena backed by a combination of owned and shared memory.
+/// The shared memory is read-only and can be shared between threads and owned by multiple arenas.
+/// The owned memory is mutable and can be mutated by the owning arena.
+///
+/// Note that while positions into the shared memory remain valid, shared memory cannot be mutated
+/// or deallocated. All allocations and deallocations are done on the owned memory only.
+///
+/// It's possible to represent STArenaMemory as HybridArenaMemory by setting shared memory as empty.
+/// This is useful for converting STArena to HybridArena to unify the interface.
+///
+/// For typical MemTries usage, most of the time shared memory will be empty. The only time we use
+/// shared memory is during resharding, when the child shards need access to the parent shard's memory.
+pub struct HybridArena {
+    memory: HybridArenaMemory,
+    allocator: Allocator,
+}
+
+/// Conversion from STArena to HybridArena. We set shared memory as empty.
+impl From<STArena> for HybridArena {
+    fn from(arena: STArena) -> Self {
+        Self {
+            memory: HybridArenaMemory {
+                owned_memory: arena.memory,
+                shared_memory: Arc::new(Default::default()),
+            },
+            allocator: arena.allocator,
+        }
+    }
+}
+
+impl HybridArena {
+    /// Creates a new HybridArena from an existing instance of shared memory in FrozenArena.
+    #[allow(dead_code)]
+    pub fn from_frozen(name: String, frozen_arena: FrozenArena) -> Self {
+        let allocator = Allocator::new_with_initial_stats(
+            name,
+            frozen_arena.active_allocs_bytes,
+            frozen_arena.active_allocs_count,
+        );
+        Self { memory: frozen_arena.memory.into(), allocator }
+    }
+
+    /// A HybridArena with empty shared memory can be frozen; freezing a HybridArena that already
+    /// has shared memory will panic. Freezing effectively converts the owned_memory in the Arena
+    /// to shared_memory.
+    ///
+    /// Instances of FrozenArena are cloneable and can be used to create new instances of
+    /// HybridArena with shared memory.
+    #[allow(dead_code)]
+    pub fn freeze(self) -> FrozenArena {
+        assert!(!self.has_shared_memory(), "Cannot freeze arena with shared memory");
+        FrozenArena {
+            memory: FrozenArenaMemory { shared_memory: Arc::new(self.memory.owned_memory) },
+            active_allocs_bytes: self.allocator.active_allocs_bytes(),
+            active_allocs_count: self.allocator.num_active_allocs(),
+        }
+    }
+
+    #[inline]
+    pub fn has_shared_memory(&self) -> bool {
+        self.memory.chunks_offset() > 0
+    }
+
+    /// Number of active allocations (alloc calls minus dealloc calls).
+    #[cfg(test)]
+    pub fn num_active_allocs(&self) -> usize {
+        self.allocator.num_active_allocs()
+    }
+
+    #[cfg(test)]
+    pub fn active_allocs_bytes(&self) -> usize {
+        self.allocator.active_allocs_bytes()
+    }
+}
+
+impl Arena for HybridArena {
+    type Memory = HybridArenaMemory;
+
+    fn memory(&self) -> &Self::Memory {
+        &self.memory
+    }
+}
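The subtle part of `from_frozen` above is the allocator-stats handoff via `new_with_initial_stats`: a child arena must keep accounting for the bytes that live in the shared layer. A sketch of the intended lifecycle (crate-internal; `stats_follow_the_freeze` is a hypothetical helper, and `num_active_allocs` is the test-only accessor from this diff):

```rust
use crate::trie::mem::arena::hybrid::HybridArena;
use crate::trie::mem::arena::single_thread::STArena;
use crate::trie::mem::arena::ArenaMut;

// Hypothetical helper walking through STArena -> freeze -> from_frozen.
fn stats_follow_the_freeze() {
    let mut parent = STArena::new("parent".to_owned());
    let _ = parent.alloc(64);

    // freeze() captures active_allocs_bytes/count; from_frozen() seeds the
    // child's Allocator with them via new_with_initial_stats().
    let frozen = HybridArena::from(parent).freeze();
    let child = HybridArena::from_frozen("child".to_owned(), frozen.clone());
    assert_eq!(child.num_active_allocs(), 1);

    // A child already holds shared memory, so freezing it again would trip
    // the "Cannot freeze arena with shared memory" assertion:
    // HybridArena::from_frozen("child2".to_owned(), frozen).freeze(); // panics
}
```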
+
+impl ArenaMut for HybridArena {
+    type MemoryMut = HybridArenaMemory;
+
+    fn memory_mut(&mut self) -> &mut Self::Memory {
+        &mut self.memory
+    }
+
+    fn alloc(&mut self, size: usize) -> ArenaSliceMut<Self::MemoryMut> {
+        let ArenaSliceMut { mut pos, len, .. } =
+            self.allocator.allocate(&mut self.memory.owned_memory, size);
+        pos.chunk = pos.chunk + self.memory.chunks_offset();
+        ArenaSliceMut::new(&mut self.memory, pos, len)
+    }
+}
+
+impl ArenaWithDealloc for HybridArena {
+    fn dealloc(&mut self, mut pos: ArenaPos, len: usize) {
+        assert!(pos.chunk >= self.memory.chunks_offset(), "Cannot deallocate shared memory");
+        pos.chunk = pos.chunk - self.memory.chunks_offset();
+        self.allocator.deallocate(&mut self.memory.owned_memory, pos, len);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::trie::mem::arena::single_thread::STArena;
+    use crate::trie::mem::arena::{Arena, ArenaMemory, ArenaMemoryMut, ArenaMut, ArenaPos};
+
+    use super::HybridArena;
+
+    #[test]
+    fn test_hybrid_arena() {
+        let size = 25;
+        let pos0 = ArenaPos { chunk: 1, pos: 420 };
+
+        // Create and populate an STArena with 2 chunks.
+        let chunks = vec![vec![0; 1000], vec![0; 1000]];
+        let mut st_arena = STArena::new_from_existing_chunks("test".to_string(), chunks, 0, 0);
+        let slice = st_arena.memory_mut().raw_slice_mut(pos0, size);
+        for i in 0..size {
+            slice[i] = i as u8;
+        }
+
+        // Create two HybridArena instances from the frozen arena.
+        let frozen_arena = HybridArena::from(st_arena).freeze();
+        let mut hybrid_arena1 = HybridArena::from_frozen("test1".to_string(), frozen_arena.clone());
+        let mut hybrid_arena2 = HybridArena::from_frozen("test2".to_string(), frozen_arena.clone());
+
+        // Populate both hybrid arenas.
+        hybrid_arena1.alloc(50); // random allocation
+        let mut slice1 = hybrid_arena1.alloc(size);
+        let mut slice2 = hybrid_arena2.alloc(size);
+        for i in 0..size {
+            slice1.raw_slice_mut()[i] = (size + i) as u8;
+            slice2.raw_slice_mut()[i] = (2 * size + i) as u8;
+        }
+
+        // Verify that the newly allocated memory has chunk >= 2.
+        assert_eq!(slice1.raw_pos(), ArenaPos { chunk: 2, pos: 56 });
+        assert_eq!(slice2.raw_pos(), ArenaPos { chunk: 2, pos: 0 });
+
+        // Verify the values written to the frozen arena.
+        for i in 0..size {
+            let val = frozen_arena.memory.raw_slice(pos0, size)[i];
+            assert_eq!(val, i as u8);
+        }
+
+        // Verify the values written to shared and owned memory.
+        let shared_slice1 = hybrid_arena1.memory().raw_slice(pos0, size);
+        let shared_slice2 = hybrid_arena2.memory().raw_slice(pos0, size);
+        let slice1 = hybrid_arena1.memory().raw_slice(ArenaPos { chunk: 2, pos: 56 }, size);
+        let slice2 = hybrid_arena2.memory().raw_slice(ArenaPos { chunk: 2, pos: 0 }, size);
+        for i in 0..size {
+            assert_eq!(shared_slice1[i], i as u8);
+            assert_eq!(shared_slice2[i], i as u8);
+            assert_eq!(slice1[i], (size + i) as u8);
+            assert_eq!(slice2[i], (2 * size + i) as u8);
+        }
+    }
+
+    #[test]
+    #[should_panic(expected = "Cannot mutate shared memory")]
+    fn test_hybrid_arena_panic_on_mut_access_shared_memory() {
+        let chunks = vec![vec![0; 1000], vec![0; 1000]];
+        let st_arena = STArena::new_from_existing_chunks("test".to_string(), chunks, 0, 0);
+        let frozen_arena = HybridArena::from(st_arena).freeze();
+        let mut hybrid_arena = HybridArena::from_frozen("test".to_string(), frozen_arena);
+        let _slice = hybrid_arena.memory_mut().raw_slice_mut(ArenaPos { chunk: 1, pos: 25 }, 50);
+    }
+}
diff --git a/core/store/src/trie/mem/arena/mod.rs b/core/store/src/trie/mem/arena/mod.rs
index 23df3c2aacc..c14b2e11a07 100644
--- a/core/store/src/trie/mem/arena/mod.rs
+++ b/core/store/src/trie/mem/arena/mod.rs
@@ -8,6 +8,8 @@ use super::flexible_data::encoding::BorshFixedSize;
 
 mod alloc;
 pub mod concurrent;
+mod frozen;
+pub mod hybrid;
 mod metrics;
 pub mod single_thread;
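As a complement to the shared-memory dealloc panic test in `alloc.rs` above, the owned-layer happy path round-trips: `dealloc` subtracts `chunks_offset()` back out before returning the position to the allocator. A sketch (crate-internal; `owned_positions_can_be_deallocated` is a hypothetical helper and `num_active_allocs` is test-only):

```rust
use crate::trie::mem::arena::hybrid::HybridArena;
use crate::trie::mem::arena::single_thread::STArena;
use crate::trie::mem::arena::{ArenaMut, ArenaSliceMut, ArenaWithDealloc};

// Hypothetical helper: alloc/dealloc on the owned layer of a hybrid arena.
fn owned_positions_can_be_deallocated() {
    // One shared chunk, so owned allocations are addressed from chunk 1 up.
    let chunks = vec![vec![0; 100]];
    let parent = STArena::new_from_existing_chunks("parent".to_owned(), chunks, 0, 0);
    let frozen = HybridArena::from(parent).freeze();
    let mut child = HybridArena::from_frozen("child".to_owned(), frozen);

    // The returned pos has the offset applied; dealloc() removes it again
    // before handing the position back to the allocator's freelists.
    let ArenaSliceMut { pos, len, .. } = child.alloc(40);
    child.dealloc(pos, len);
    assert_eq!(child.num_active_allocs(), 0);
}
```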
diff --git a/core/store/src/trie/mem/arena/single_thread.rs b/core/store/src/trie/mem/arena/single_thread.rs
index b15bfaa2d7d..b7426ef76d8 100644
--- a/core/store/src/trie/mem/arena/single_thread.rs
+++ b/core/store/src/trie/mem/arena/single_thread.rs
@@ -5,16 +5,11 @@ use super::{
 
 /// `ArenaMemory` implementation for `STArena` (single-threaded arena). Stores the in-memory trie
 /// data as large byte arrays called "chunks".
+#[derive(Default)]
 pub struct STArenaMemory {
     pub(super) chunks: Vec<Vec<u8>>,
 }
 
-impl STArenaMemory {
-    fn new() -> Self {
-        Self { chunks: Vec::new() }
-    }
-}
-
 impl ArenaMemory for STArenaMemory {
     fn raw_slice(&self, pos: ArenaPos, len: usize) -> &[u8] {
         &self.chunks[pos.chunk()][pos.pos()..pos.pos() + len]
@@ -33,8 +28,8 @@ impl ArenaMemoryMut for STArenaMemory {
 /// To allocate, deallocate, or mutate any allocated memory, a mutable
 /// reference to the `STArena` is needed.
 pub struct STArena {
-    memory: STArenaMemory,
-    allocator: Allocator,
+    pub(super) memory: STArenaMemory,
+    pub(super) allocator: Allocator,
 }
 
 impl STArena {
@@ -43,7 +38,7 @@ impl STArena {
     /// can fit into virtual memory (which there are terabytes of). The actual
     /// memory usage will only be as much as is needed.
     pub fn new(name: String) -> Self {
-        Self { memory: STArenaMemory::new(), allocator: Allocator::new(name) }
+        Self { memory: Default::default(), allocator: Allocator::new(name) }
     }
 
     pub(crate) fn new_from_existing_chunks(
@@ -109,7 +104,7 @@ mod tests {
 
     #[test]
     fn test_arena_ptr_and_slice() {
-        let mut arena = STArenaMemory::new();
+        let mut arena = STArenaMemory::default();
         arena.chunks.push(vec![0; 1000]);
         arena.chunks.push(vec![0; 1000]);
diff --git a/core/store/src/trie/mem/iter.rs b/core/store/src/trie/mem/iter.rs
index 82647cbb0f2..5c1f9ccc2f2 100644
--- a/core/store/src/trie/mem/iter.rs
+++ b/core/store/src/trie/mem/iter.rs
@@ -16,7 +16,7 @@ use crate::trie::iterator::TrieItem;
 use crate::trie::OptimizedValueRef;
 use crate::{NibbleSlice, Trie};
 
-use super::arena::single_thread::STArenaMemory;
+use super::arena::hybrid::HybridArenaMemory;
 use super::arena::ArenaMemory;
 use super::node::{MemTrieNodePtr, MemTrieNodeView};
 
@@ -72,7 +72,7 @@ impl<'a, M: ArenaMemory> Crumb<'a, M> {
 /// The trail and the key_nibbles may have different lengths e.g. an extension trie node
 /// will add only a single item to the trail but may add multiple nibbles to the key_nibbles.
-pub type STMemTrieIterator<'a> = MemTrieIterator<'a, STArenaMemory>;
+pub type STMemTrieIterator<'a> = MemTrieIterator<'a, HybridArenaMemory>;
 
 pub struct MemTrieIterator<'a, M: ArenaMemory> {
     root: Option<MemTrieNodePtr<'a, M>>,
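The iterator change above is intentionally just a type-alias edit: `MemTrieIterator` stays generic over `M: ArenaMemory`, so anything written against the trait is unaffected by the memory type swap. A minimal illustration (the `read_byte` helper is hypothetical, not part of the diff):

```rust
use crate::trie::mem::arena::{ArenaMemory, ArenaPos};

// Works identically for STArenaMemory and HybridArenaMemory; only the
// STMemTrieIterator alias needs to name the hybrid memory type.
fn read_byte<M: ArenaMemory>(memory: &M, pos: ArenaPos) -> u8 {
    memory.raw_slice(pos, 1)[0]
}
```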
diff --git a/core/store/src/trie/mem/mem_tries.rs b/core/store/src/trie/mem/mem_tries.rs
index 2dfe16a64a1..f16959a6a4e 100644
--- a/core/store/src/trie/mem/mem_tries.rs
+++ b/core/store/src/trie/mem/mem_tries.rs
@@ -11,7 +11,8 @@ use crate::trie::mem::metrics::MEM_TRIE_NUM_ROOTS;
 use crate::trie::MemTrieChanges;
 use crate::Trie;
 
-use super::arena::single_thread::{STArena, STArenaMemory};
+use super::arena::hybrid::{HybridArena, HybridArenaMemory};
+use super::arena::single_thread::STArena;
 use super::arena::Arena;
 use super::flexible_data::value::ValueView;
 use super::iter::STMemTrieIterator;
@@ -25,7 +26,7 @@ use super::updating::{construct_root_from_changes, MemTrieUpdate};
 /// its children nodes. The `roots` field of this struct logically
 /// holds an Rc of the root of each trie.
 pub struct MemTries {
-    arena: STArena,
+    arena: HybridArena,
     /// Maps a state root to a list of nodes that have the same root hash.
     /// The reason why this is a list is because we do not have a node
     /// deduplication mechanism so we can't guarantee that nodes of the
@@ -44,7 +45,7 @@ pub struct MemTries {
 impl MemTries {
     pub fn new(shard_uid: ShardUId) -> Self {
         Self {
-            arena: STArena::new(shard_uid.to_string()),
+            arena: STArena::new(shard_uid.to_string()).into(),
             roots: HashMap::new(),
             heights: Default::default(),
             shard_uid,
@@ -57,8 +58,12 @@ impl MemTries {
         arena: STArena,
         root: MemTrieNodeId,
     ) -> Self {
-        let mut tries =
-            Self { arena, roots: HashMap::new(), heights: Default::default(), shard_uid };
+        let mut tries = Self {
+            arena: arena.into(),
+            roots: HashMap::new(),
+            heights: Default::default(),
+            shard_uid,
+        };
         tries.insert_root(root.as_ptr(tries.arena.memory()).view().node_hash(), root, block_height);
         tries
     }
@@ -101,7 +106,7 @@ impl MemTries {
     pub(super) fn get_root(
         &self,
         state_root: &CryptoHash,
-    ) -> Result<MemTrieNodePtr<STArenaMemory>, StorageError> {
+    ) -> Result<MemTrieNodePtr<HybridArenaMemory>, StorageError> {
         assert_ne!(state_root, &CryptoHash::default());
         self.roots.get(state_root).map(|ids| ids[0].as_ptr(self.arena.memory())).ok_or_else(|| {
             StorageError::StorageInconsistentState(format!(
@@ -154,7 +159,7 @@ impl MemTries {
         &self,
         root: CryptoHash,
         track_trie_changes: bool,
-    ) -> Result<MemTrieUpdate<STArenaMemory>, StorageError> {
+    ) -> Result<MemTrieUpdate<HybridArenaMemory>, StorageError> {
         let root_id = if root == CryptoHash::default() { None } else { Some(self.get_root(&root)?.id()) };
         Ok(MemTrieUpdate::new(
@@ -188,7 +193,7 @@ impl MemTries {
     }
 
     #[cfg(test)]
-    pub fn arena(&self) -> &STArena {
+    pub fn arena(&self) -> &HybridArena {
         &self.arena
     }
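Taken together, the `MemTries` changes keep the common, non-resharding path equivalent to the old `STArena`-only code: `From<STArena>` wraps the arena with an empty shared layer, so `chunks_offset()` is 0 and every position resolves to owned memory. A closing sketch (crate-internal; the helper name is hypothetical):

```rust
use crate::trie::mem::arena::hybrid::HybridArena;
use crate::trie::mem::arena::single_thread::STArena;

// Hypothetical helper mirroring what MemTries::new() now does internally.
fn default_path_has_no_shared_layer() {
    let arena: HybridArena = STArena::new("shard".to_string()).into();
    assert!(!arena.has_shared_memory());
}
```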