From b06619982f3cee271e453a555f48b613f3eba2f6 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 22 Nov 2023 19:23:37 +0800 Subject: [PATCH] Add missing docs for the rest of the util module (#1026) This PR is a step towards https://github.com/mmtk/mmtk-core/issues/309. * Deny `missing_docs` for the `util` module and the `vm` module. * Change some items from `pub` to `pub(crate)`. * Remove some unused constants. --- src/lib.rs | 2 + src/plan/immix/global.rs | 2 +- src/plan/plan_constraints.rs | 5 +- src/policy/sft_map.rs | 2 +- src/util/address.rs | 17 +- src/util/alloc/allocator.rs | 1 + src/util/alloc/allocators.rs | 36 ++-- src/util/alloc/bumpallocator.rs | 17 +- src/util/alloc/free_list_allocator.rs | 1 + src/util/alloc/immix_allocator.rs | 6 +- src/util/alloc/large_object_allocator.rs | 2 + src/util/alloc/malloc_allocator.rs | 2 + src/util/alloc/markcompact_allocator.rs | 7 +- src/util/alloc/mod.rs | 2 +- src/util/constants.rs | 172 +++++++++--------- src/util/conversions.rs | 39 ++-- src/util/copy/mod.rs | 11 +- src/util/heap/freelistpageresource.rs | 10 +- src/util/heap/layout/map64.rs | 4 +- src/util/heap/monotonepageresource.rs | 8 +- src/util/linear_scan.rs | 5 + src/util/malloc/malloc_ms_util.rs | 8 +- src/util/memory.rs | 24 ++- src/util/metadata/side_metadata/helpers_32.rs | 3 + src/util/mod.rs | 1 + src/util/opaque_pointer.rs | 12 +- src/util/options.rs | 9 +- src/util/raw_memory_freelist.rs | 4 +- src/util/rust_util/rev_group.rs | 2 + src/vm/mod.rs | 8 +- 30 files changed, 247 insertions(+), 175 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index fb04118185..4680f09b41 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -61,7 +61,9 @@ pub mod build_info; pub mod memory_manager; pub mod plan; pub mod scheduler; +#[deny(missing_docs)] pub mod util; +#[deny(missing_docs)] pub mod vm; pub use crate::plan::{ diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 3b1d6dfbd2..513a9c5d82 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -40,7 +40,7 @@ pub const IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { gc_header_bits: 2, gc_header_words: 0, num_specialized_scans: 1, - /// Max immix object size is half of a block. + // Max immix object size is half of a block. max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE, needs_prepare_mutator: false, ..PlanConstraints::default() diff --git a/src/plan/plan_constraints.rs b/src/plan/plan_constraints.rs index 4a2c5948be..970451fd7e 100644 --- a/src/plan/plan_constraints.rs +++ b/src/plan/plan_constraints.rs @@ -51,7 +51,10 @@ impl PlanConstraints { num_specialized_scans: 0, max_non_los_default_alloc_bytes: MAX_INT, max_non_los_copy_bytes: MAX_INT, - needs_linear_scan: SUPPORT_CARD_SCANNING || LAZY_SWEEP, + // As `LAZY_SWEEP` is true, needs_linear_scan is true for all the plans. This is strange. + // https://github.com/mmtk/mmtk-core/issues/1027 trackes the issue. 
+ needs_linear_scan: crate::util::constants::SUPPORT_CARD_SCANNING + || crate::util::constants::LAZY_SWEEP, needs_concurrent_workers: false, generate_gc_trace: false, may_trace_duplicate_edges: false, diff --git a/src/policy/sft_map.rs b/src/policy/sft_map.rs index 37225b1384..c84aba3e19 100644 --- a/src/policy/sft_map.rs +++ b/src/policy/sft_map.rs @@ -410,7 +410,7 @@ mod dense_chunk_map { pub fn new() -> Self { Self { - /// Empty space is at index 0 + // Empty space is at index 0 sft: vec![SFTRefStorage::default()], index_map: HashMap::new(), } diff --git a/src/util/address.rs b/src/util/address.rs index 3c52a854c2..0f860a4436 100644 --- a/src/util/address.rs +++ b/src/util/address.rs @@ -129,7 +129,9 @@ impl Shl for Address { } impl Address { + /// The lowest possible address. pub const ZERO: Self = Address(0); + /// The highest possible address. pub const MAX: Self = Address(usize::max_value()); /// creates Address from a pointer @@ -137,6 +139,7 @@ impl Address { Address(ptr as usize) } + /// creates Address from a Rust reference pub fn from_ref(r: &T) -> Address { Address(r as *const T as usize) } @@ -180,10 +183,12 @@ impl Address { // These const functions are duplicated with the operator traits. But we need them, // as we need them to declare constants. + /// Get the number of bytes between two addresses. The current address needs to be higher than the other address. pub const fn get_extent(self, other: Address) -> ByteSize { self.0 - other.0 } + /// Get the offset from `other` to `self`. The result is negative is `self` is lower than `other`. pub const fn get_offset(self, other: Address) -> ByteOffset { self.0 as isize - other.0 as isize } @@ -192,6 +197,7 @@ impl Address { // The add() function is const fn, and we can use it to declare Address constants. // The Add trait function cannot be const. #[allow(clippy::should_implement_trait)] + /// Add an offset to the address. pub const fn add(self, size: usize) -> Address { Address(self.0 + size) } @@ -200,15 +206,17 @@ impl Address { // The sub() function is const fn, and we can use it to declare Address constants. // The Sub trait function cannot be const. #[allow(clippy::should_implement_trait)] + /// Subtract an offset from the address. pub const fn sub(self, size: usize) -> Address { Address(self.0 - size) } + /// Bitwise 'and' with a mask. pub const fn and(self, mask: usize) -> usize { self.0 & mask } - // Perform a saturating subtract on the Address + /// Perform a saturating subtract on the Address pub const fn saturating_sub(self, size: usize) -> Address { Address(self.0.saturating_sub(size)) } @@ -473,6 +481,7 @@ use crate::vm::VMBinding; pub struct ObjectReference(usize); impl ObjectReference { + /// The null object reference, represented as zero. pub const NULL: ObjectReference = ObjectReference(0); /// Cast the object reference to its raw address. This method is mostly for the convinience of a binding. @@ -511,6 +520,9 @@ impl ObjectReference { VM::VMObjectModel::ref_to_header(self) } + /// Get the start of the allocation address for the object. This method is used by MMTk to get the start of the allocation + /// address originally returned from [`crate::memory_manager::alloc`] for the object. + /// This method is syntactic sugar for [`crate::vm::ObjectModel::ref_to_object_start`]. See comments on [`crate::vm::ObjectModel::ref_to_object_start`]. 
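+ ///
+ /// # Example
+ ///
+ /// A sketch only: `MyVM` is a hypothetical binding whose `ref_to_object_start` subtracts
+ /// an 8-byte header from the object reference.
+ ///
+ /// ```ignore
+ /// // `MyVM` is a hypothetical VM binding type, not part of mmtk-core.
+ /// let start = object.to_object_start::<MyVM>();
+ /// assert_eq!(start, object.to_raw_address() - 8);
+ /// ```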
pub fn to_object_start(self) -> Address { use crate::vm::ObjectModel; let object_start = VM::VMObjectModel::ref_to_object_start(self); @@ -557,6 +569,7 @@ impl ObjectReference { } } + /// Can the object be moved? pub fn is_movable(self) -> bool { unsafe { SFT_MAP.get_unchecked(Address(self.0)) }.is_movable() } @@ -566,10 +579,12 @@ impl ObjectReference { unsafe { SFT_MAP.get_unchecked(Address(self.0)) }.get_forwarded_object(self) } + /// Is the object in any MMTk spaces? pub fn is_in_any_space(self) -> bool { unsafe { SFT_MAP.get_unchecked(Address(self.0)) }.is_in_space(self) } + /// Is the object sane? #[cfg(feature = "sanity")] pub fn is_sane(self) -> bool { unsafe { SFT_MAP.get_unchecked(Address(self.0)) }.is_sane() diff --git a/src/util/alloc/allocator.rs b/src/util/alloc/allocator.rs index eae4eefbcc..a60d26935c 100644 --- a/src/util/alloc/allocator.rs +++ b/src/util/alloc/allocator.rs @@ -83,6 +83,7 @@ pub fn align_allocation_inner( region + delta } +/// Fill the specified region with the alignment value. pub fn fill_alignment_gap(immut_start: Address, end: Address) { let mut start = immut_start; diff --git a/src/util/alloc/allocators.rs b/src/util/alloc/allocators.rs index f35707e660..58b591b1a8 100644 --- a/src/util/alloc/allocators.rs +++ b/src/util/alloc/allocators.rs @@ -167,27 +167,35 @@ impl Allocators { } } -// This type describe which allocator in the allocators set. -// For VM binding implementors, this type is equivalent to the following native types: -// #[repr(C)] -// struct AllocatorSelector { -// tag: AllocatorSelectorTag, -// payload: u8, -// } -// #[repr(u8)] -// enum AllocatorSelectorTag { -// BumpPointer, -// LargeObject, -// } +/// This type describe an allocator in the [`crate::Mutator`]. +/// For some VM bindings, they may need to access this type from native code. This type is equivalent to the following native types: +/// #[repr(C)] +/// struct AllocatorSelector { +/// tag: AllocatorSelectorTag, +/// payload: u8, +/// } +/// #[repr(u8)] +/// enum AllocatorSelectorTag { +/// BumpPointer, +/// LargeObject, +/// ... +/// } #[repr(C, u8)] #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Default)] pub enum AllocatorSelector { + /// Represents a [`crate::util::alloc::bumpallocator::BumpAllocator`]. BumpPointer(u8), + /// Represents a [`crate::util::alloc::large_object_allocator::LargeObjectAllocator`]. LargeObject(u8), + /// Represents a [`crate::util::alloc::malloc_allocator::MallocAllocator`]. Malloc(u8), + /// Represents a [`crate::util::alloc::immix_allocator::ImmixAllocator`]. Immix(u8), + /// Represents a [`crate::util::alloc::markcompact_allocator::MarkCompactAllocator`]. MarkCompact(u8), + /// Represents a [`crate::util::alloc::free_list_allocator::FreeListAllocator`]. FreeList(u8), + /// No allocator found. #[default] None, } @@ -197,11 +205,15 @@ pub enum AllocatorSelector { #[repr(C, u8)] #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Default)] pub enum AllocatorInfo { + /// This allocator uses a [`crate::util::alloc::bumpallocator::BumpPointer`] as its fastpath. BumpPointer { + /// The byte offset from the mutator's pointer to the [`crate::util::alloc::bumpallocator::BumpPointer`]. bump_pointer_offset: usize, }, + /// This allocator uses a fastpath, but we haven't implemented it yet. // FIXME: Add free-list fast-path Unimplemented, + /// This allocator does not have a fastpath. 
#[default] None, } diff --git a/src/util/alloc/bumpallocator.rs b/src/util/alloc/bumpallocator.rs index 98c33fc56d..76cc628c89 100644 --- a/src/util/alloc/bumpallocator.rs +++ b/src/util/alloc/bumpallocator.rs @@ -5,7 +5,7 @@ use crate::util::Address; use crate::util::alloc::Allocator; use crate::policy::space::Space; -use crate::util::conversions::bytes_to_pages; +use crate::util::conversions::bytes_to_pages_up; use crate::util::opaque_pointer::*; use crate::vm::VMBinding; @@ -13,6 +13,8 @@ const BYTES_IN_PAGE: usize = 1 << 12; const BLOCK_SIZE: usize = 8 * BYTES_IN_PAGE; const BLOCK_MASK: usize = BLOCK_SIZE - 1; +/// A bump pointer allocator. It keeps a thread local allocation buffer, +/// and bumps a cursor to allocate from the buffer. #[repr(C)] pub struct BumpAllocator { /// [`VMThread`] associated with this allocator instance @@ -32,11 +34,14 @@ pub struct BumpAllocator { #[repr(C)] #[derive(Copy, Clone)] pub struct BumpPointer { + /// The cursor inside the allocation buffer where the next object will be allocated. pub cursor: Address, + /// The upperbound of the allocation buffer. pub limit: Address, } impl BumpPointer { + /// Reset the cursor and limit to the given values. pub fn reset(&mut self, start: Address, end: Address) { self.cursor = start; self.limit = end; @@ -46,7 +51,7 @@ impl BumpPointer { impl std::default::Default for BumpPointer { /// Defaults to 0,0. In this case, the first /// allocation would naturally fail the check - /// `cursor + size < limit`, and go to the slowpath. + /// `cursor + size < limit`, and go to the slowpath. fn default() -> Self { BumpPointer { cursor: Address::ZERO, @@ -56,16 +61,16 @@ impl std::default::Default for BumpPointer { } impl BumpAllocator { - pub fn set_limit(&mut self, start: Address, limit: Address) { + pub(crate) fn set_limit(&mut self, start: Address, limit: Address) { self.bump_pointer.reset(start, limit); } - pub fn reset(&mut self) { + pub(crate) fn reset(&mut self) { let zero = unsafe { Address::zero() }; self.bump_pointer.reset(zero, zero); } - pub fn rebind(&mut self, space: &'static dyn Space) { + pub(crate) fn rebind(&mut self, space: &'static dyn Space) { self.reset(); self.space = space; } @@ -194,7 +199,7 @@ impl BumpAllocator { } let block_size = (size + BLOCK_MASK) & (!BLOCK_MASK); - let acquired_start = self.space.acquire(self.tls, bytes_to_pages(block_size)); + let acquired_start = self.space.acquire(self.tls, bytes_to_pages_up(block_size)); if acquired_start.is_zero() { trace!("Failed to acquire a new block"); acquired_start diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs index 6d302af98d..50ce36bd27 100644 --- a/src/util/alloc/free_list_allocator.rs +++ b/src/util/alloc/free_list_allocator.rs @@ -15,6 +15,7 @@ use super::allocator::AllocatorContext; /// A MiMalloc free list allocator #[repr(C)] pub struct FreeListAllocator { + /// [`VMThread`] associated with this allocator instance pub tls: VMThread, space: &'static MarkSweepSpace, context: Arc>, diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index 077aaefbd9..abff840937 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -16,7 +16,9 @@ use crate::vm::*; /// Immix allocator #[repr(C)] pub struct ImmixAllocator { + /// [`VMThread`] associated with this allocator instance pub tls: VMThread, + /// The fastpath bump pointer. pub bump_pointer: BumpPointer, /// [`Space`](src/policy/space/Space) instance associated with this allocator instance. 
space: &'static ImmixSpace, @@ -34,7 +36,7 @@ pub struct ImmixAllocator { } impl ImmixAllocator { - pub fn reset(&mut self) { + pub(crate) fn reset(&mut self) { self.bump_pointer.reset(Address::ZERO, Address::ZERO); self.large_bump_pointer.reset(Address::ZERO, Address::ZERO); self.request_for_large = false; @@ -183,7 +185,7 @@ impl ImmixAllocator { } } - pub fn immix_space(&self) -> &'static ImmixSpace { + pub(crate) fn immix_space(&self) -> &'static ImmixSpace { self.space } diff --git a/src/util/alloc/large_object_allocator.rs b/src/util/alloc/large_object_allocator.rs index 9a6bf6cb30..baf0738274 100644 --- a/src/util/alloc/large_object_allocator.rs +++ b/src/util/alloc/large_object_allocator.rs @@ -9,6 +9,8 @@ use crate::vm::VMBinding; use super::allocator::AllocatorContext; +/// An allocator that only allocates at page granularity. +/// This is intended for large objects. #[repr(C)] pub struct LargeObjectAllocator { /// [`VMThread`] associated with this allocator instance diff --git a/src/util/alloc/malloc_allocator.rs b/src/util/alloc/malloc_allocator.rs index f88ec592f7..f096f0384b 100644 --- a/src/util/alloc/malloc_allocator.rs +++ b/src/util/alloc/malloc_allocator.rs @@ -9,6 +9,8 @@ use crate::vm::VMBinding; use super::allocator::AllocatorContext; +/// The allocator that internally uses malloc for all the allocation requests. +/// This allocator is only intended for experimental uses. #[repr(C)] pub struct MallocAllocator { /// [`VMThread`] associated with this allocator instance diff --git a/src/util/alloc/markcompact_allocator.rs b/src/util/alloc/markcompact_allocator.rs index a26d7dda26..01b2326ac9 100644 --- a/src/util/alloc/markcompact_allocator.rs +++ b/src/util/alloc/markcompact_allocator.rs @@ -16,15 +16,15 @@ pub struct MarkCompactAllocator { } impl MarkCompactAllocator { - pub fn set_limit(&mut self, cursor: Address, limit: Address) { + pub(crate) fn set_limit(&mut self, cursor: Address, limit: Address) { self.bump_allocator.set_limit(cursor, limit); } - pub fn reset(&mut self) { + pub(crate) fn reset(&mut self) { self.bump_allocator.reset(); } - pub fn rebind(&mut self, space: &'static dyn Space) { + pub(crate) fn rebind(&mut self, space: &'static dyn Space) { self.bump_allocator.rebind(space); } } @@ -89,6 +89,7 @@ impl Allocator for MarkCompactAllocator { } impl MarkCompactAllocator { + /// The number of bytes that the allocator reserves for its own header. pub const HEADER_RESERVED_IN_BYTES: usize = crate::policy::markcompactspace::MarkCompactSpace::::HEADER_RESERVED_IN_BYTES; pub(crate) fn new( diff --git a/src/util/alloc/mod.rs b/src/util/alloc/mod.rs index 9e4d8f2881..5f6453b6fe 100644 --- a/src/util/alloc/mod.rs +++ b/src/util/alloc/mod.rs @@ -27,7 +27,7 @@ pub use malloc_allocator::MallocAllocator; pub mod immix_allocator; pub use self::immix_allocator::ImmixAllocator; -// Free list allocator based on Mimalloc +/// Free list allocator based on Mimalloc pub mod free_list_allocator; pub use free_list_allocator::FreeListAllocator; diff --git a/src/util/constants.rs b/src/util/constants.rs index fd30fd1830..b82dce94f1 100644 --- a/src/util/constants.rs +++ b/src/util/constants.rs @@ -1,120 +1,124 @@ -use crate::util::alloc::embedded_meta_data::LOG_BYTES_IN_REGION; - -/** - * Modes. 
- */ -pub const INSTANCE_FIELD: usize = 0; -pub const ARRAY_ELEMENT: usize = 1; - -/**************************************************************************** - * - * Generic sizes - */ - +/// log2 of the number of bytes in a byte pub const LOG_BYTES_IN_BYTE: u8 = 0; +/// The number of bytes in a byte pub const BYTES_IN_BYTE: usize = 1; +/// log2 of the number of bits in a byte pub const LOG_BITS_IN_BYTE: u8 = 3; +/// The number of bits in a byte pub const BITS_IN_BYTE: usize = 1 << LOG_BITS_IN_BYTE; +/// log2 of the number of bytes in a gigabyte pub const LOG_BYTES_IN_GBYTE: u8 = 30; +/// The number of bytes in a gigabyte pub const BYTES_IN_GBYTE: usize = 1 << LOG_BYTES_IN_GBYTE; +/// log2 of the number of bytes in a megabyte pub const LOG_BYTES_IN_MBYTE: u8 = 20; +/// The number of bytes in a megabyte pub const BYTES_IN_MBYTE: usize = 1 << LOG_BYTES_IN_MBYTE; +/// log2 of the number of bytes in a kilobyte pub const LOG_BYTES_IN_KBYTE: u8 = 10; +/// The number of bytes in a kilobyte pub const BYTES_IN_KBYTE: usize = 1 << LOG_BYTES_IN_KBYTE; -/**************************************************************************** - * - * Card scanning - */ - -pub const SUPPORT_CARD_SCANNING: bool = false; -pub const LOG_CARD_META_SIZE: usize = 2; // each card consumes four bytes of metadata -pub const LOG_CARD_UNITS: usize = 10; // number of units tracked per card -pub const LOG_CARD_GRAIN: usize = 0; // track at byte grain, save shifting -pub const LOG_CARD_BYTES: usize = LOG_CARD_UNITS + LOG_CARD_GRAIN; -pub const LOG_CARD_META_BYTES: usize = LOG_BYTES_IN_REGION - LOG_CARD_BYTES + LOG_CARD_META_SIZE; -pub const LOG_CARD_META_PAGES: usize = LOG_CARD_META_BYTES - LOG_BYTES_IN_PAGE as usize; -/// FIXME: Card scanning is not supported at the moment. Move this to side-metadata in the future. -pub const CARD_META_PAGES_PER_REGION: usize = if SUPPORT_CARD_SCANNING { - 1 << LOG_CARD_META_PAGES -} else { - 0 -}; -pub const CARD_MASK: usize = (1 << LOG_CARD_BYTES) - 1; - -/** - * Lazy sweeping - controlled from here because PlanConstraints needs to - * tell the VM that we need to support linear scan. 
- */ -pub const LAZY_SWEEP: bool = true; - -/**************************************************************************** - * - * Java-specific sizes currently required by MMTk - * - * TODO MMTk should really become independent of these Java types - */ - -pub const LOG_BYTES_IN_CHAR: u8 = 1; -pub const BYTES_IN_CHAR: usize = 1 << LOG_BYTES_IN_CHAR; -pub const LOG_BITS_IN_CHAR: u8 = LOG_BITS_IN_BYTE + LOG_BYTES_IN_CHAR; -pub const BITS_IN_CHAR: usize = 1 << LOG_BITS_IN_CHAR; - -pub const LOG_BYTES_IN_SHORT: u8 = 1; -pub const BYTES_IN_SHORT: usize = 1 << LOG_BYTES_IN_SHORT; -pub const LOG_BITS_IN_SHORT: u8 = LOG_BITS_IN_BYTE + LOG_BYTES_IN_SHORT; -pub const BITS_IN_SHORT: usize = 1 << LOG_BITS_IN_SHORT; - -pub const LOG_BYTES_IN_INT: u8 = 2; -pub const BYTES_IN_INT: usize = 1 << LOG_BYTES_IN_INT; -pub const LOG_BITS_IN_INT: u8 = LOG_BITS_IN_BYTE + LOG_BYTES_IN_INT; -pub const BITS_IN_INT: usize = 1 << LOG_BITS_IN_INT; - -pub const LOG_BYTES_IN_LONG: u8 = 3; -pub const BYTES_IN_LONG: usize = 1 << LOG_BYTES_IN_LONG; -pub const LOG_BITS_IN_LONG: u8 = LOG_BITS_IN_BYTE + LOG_BYTES_IN_LONG; -pub const BITS_IN_LONG: usize = 1 << LOG_BITS_IN_LONG; - -pub const MAX_INT: usize = i32::max_value() as usize; // 0x7fff_ffff -pub const MIN_INT: usize = i32::min_value() as u32 as usize; // 0x8000_0000 - -/**************************************************************************** - * - * VM-Specific sizes - */ +/// Some card scanning constants ported from Java MMTK. +/// As we haven't implemented card scanning, these are not used at the moment. +mod card_scanning { + use crate::util::alloc::embedded_meta_data::LOG_BYTES_IN_REGION; + + pub const SUPPORT_CARD_SCANNING: bool = false; + /// each card consumes four bytes of metadata + pub const LOG_CARD_META_SIZE: usize = 2; + /// number of units tracked per card + pub const LOG_CARD_UNITS: usize = 10; + /// track at byte grain, save shifting + pub const LOG_CARD_GRAIN: usize = 0; + pub const LOG_CARD_BYTES: usize = LOG_CARD_UNITS + LOG_CARD_GRAIN; + pub const LOG_CARD_META_BYTES: usize = + LOG_BYTES_IN_REGION - LOG_CARD_BYTES + LOG_CARD_META_SIZE; + pub const LOG_CARD_META_PAGES: usize = LOG_CARD_META_BYTES - super::LOG_BYTES_IN_PAGE as usize; + // FIXME: Card scanning is not supported at the moment. Move this to side-metadata in the future. + pub const CARD_META_PAGES_PER_REGION: usize = if SUPPORT_CARD_SCANNING { + 1 << LOG_CARD_META_PAGES + } else { + 0 + }; + pub const CARD_MASK: usize = (1 << LOG_CARD_BYTES) - 1; +} +pub(crate) use card_scanning::*; + +/// Lazy sweeping - controlled from here because PlanConstraints needs to +/// tell the VM that we need to support linear scan. +// FIXME: we are not really using this constant to decide lazy sweep or not. 
+pub(crate) const LAZY_SWEEP: bool = true; + +// Java-specific sizes currently used by MMTk +// TODO: MMTk should really become independent of these Java types: https://github.com/mmtk/mmtk-core/issues/922 +mod java_specific_constants { + use super::LOG_BITS_IN_BYTE; + + pub const LOG_BYTES_IN_CHAR: u8 = 1; + pub const BYTES_IN_CHAR: usize = 1 << LOG_BYTES_IN_CHAR; + pub const LOG_BITS_IN_CHAR: u8 = LOG_BITS_IN_BYTE + LOG_BYTES_IN_CHAR; + pub const BITS_IN_CHAR: usize = 1 << LOG_BITS_IN_CHAR; + + pub const LOG_BYTES_IN_SHORT: u8 = 1; + pub const BYTES_IN_SHORT: usize = 1 << LOG_BYTES_IN_SHORT; + pub const LOG_BITS_IN_SHORT: u8 = LOG_BITS_IN_BYTE + LOG_BYTES_IN_SHORT; + pub const BITS_IN_SHORT: usize = 1 << LOG_BITS_IN_SHORT; + + pub const LOG_BYTES_IN_INT: u8 = 2; + pub const BYTES_IN_INT: usize = 1 << LOG_BYTES_IN_INT; + pub const LOG_BITS_IN_INT: u8 = LOG_BITS_IN_BYTE + LOG_BYTES_IN_INT; + pub const BITS_IN_INT: usize = 1 << LOG_BITS_IN_INT; + + pub const LOG_BYTES_IN_LONG: u8 = 3; + pub const BYTES_IN_LONG: usize = 1 << LOG_BYTES_IN_LONG; + pub const LOG_BITS_IN_LONG: u8 = LOG_BITS_IN_BYTE + LOG_BYTES_IN_LONG; + pub const BITS_IN_LONG: usize = 1 << LOG_BITS_IN_LONG; + + pub const MAX_INT: usize = i32::max_value() as usize; // 0x7fff_ffff + pub const MIN_INT: usize = i32::min_value() as u32 as usize; // 0x8000_0000 +} +pub(crate) use java_specific_constants::*; #[cfg(target_pointer_width = "32")] +/// log2 of the number of bytes in an address pub const LOG_BYTES_IN_ADDRESS: u8 = 2; #[cfg(target_pointer_width = "64")] +/// log2 of the number of bytes in an address pub const LOG_BYTES_IN_ADDRESS: u8 = 3; +/// The number of bytes in an address pub const BYTES_IN_ADDRESS: usize = 1 << LOG_BYTES_IN_ADDRESS; +/// log2 of the number of bits in an address pub const LOG_BITS_IN_ADDRESS: usize = LOG_BITS_IN_BYTE as usize + LOG_BYTES_IN_ADDRESS as usize; +/// The number of bits in an address pub const BITS_IN_ADDRESS: usize = 1 << LOG_BITS_IN_ADDRESS; -// Note that in MMTk we currently define WORD & ADDRESS to be the same size +/// log2 of the number of bytes in a word pub const LOG_BYTES_IN_WORD: u8 = LOG_BYTES_IN_ADDRESS; +/// The number of bytes in a word pub const BYTES_IN_WORD: usize = 1 << LOG_BYTES_IN_WORD; +/// log2 of the number of bits in a word pub const LOG_BITS_IN_WORD: usize = LOG_BITS_IN_BYTE as usize + LOG_BYTES_IN_WORD as usize; +/// The number of bits in a word pub const BITS_IN_WORD: usize = 1 << LOG_BITS_IN_WORD; -pub const LOG_BYTES_IN_PAGE: u8 = 12; // XXX: This is a lie +/// log2 of the number of bytes in a page +pub const LOG_BYTES_IN_PAGE: u8 = 12; +/// The number of bytes in a page pub const BYTES_IN_PAGE: usize = 1 << LOG_BYTES_IN_PAGE; +/// log2 of the number of bits in a page pub const LOG_BITS_IN_PAGE: usize = LOG_BITS_IN_BYTE as usize + LOG_BYTES_IN_PAGE as usize; +/// The number of bits in a page pub const BITS_IN_PAGE: usize = 1 << LOG_BITS_IN_PAGE; -/* Assume byte-addressability */ +/// log2 of the number of bytes in the address space pub const LOG_BYTES_IN_ADDRESS_SPACE: u8 = BITS_IN_ADDRESS as u8; -// TODO: Should this be VM specific? +/// log2 of the minimal object size in bytes. +// TODO: this should be VM specific. 
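+/// For example, on a 64-bit target `LOG_BYTES_IN_WORD` is 3, so the minimal object size is 8 bytes.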
pub const LOG_MIN_OBJECT_SIZE: u8 = LOG_BYTES_IN_WORD; +/// The minimal object size in bytes pub const MIN_OBJECT_SIZE: usize = 1 << LOG_MIN_OBJECT_SIZE; - -/**************************************************************************** - * - * Default options - */ - -pub const DEFAULT_STRESS_FACTOR: usize = usize::max_value(); diff --git a/src/util/conversions.rs b/src/util/conversions.rs index 5121bf05c1..a28b3ac186 100644 --- a/src/util/conversions.rs +++ b/src/util/conversions.rs @@ -2,65 +2,73 @@ use crate::util::constants::*; use crate::util::heap::layout::vm_layout::*; use crate::util::Address; -/* Alignment */ - +/// Is the address aligned to word boundary? pub fn is_address_aligned(addr: Address) -> bool { addr.is_aligned_to(BYTES_IN_ADDRESS) } +/// Align down an address to the nearest page. pub fn page_align_down(address: Address) -> Address { address.align_down(BYTES_IN_PAGE) } +/// Is the address aligned to page boundary? pub fn is_page_aligned(address: Address) -> bool { address.is_aligned_to(BYTES_IN_PAGE) } -// const function cannot have conditional expression +/// Align up an address to the nearest chunk. pub const fn chunk_align_up(addr: Address) -> Address { addr.align_up(BYTES_IN_CHUNK) } -// const function cannot have conditional expression +/// Align down an address to the nearest chunk. pub const fn chunk_align_down(addr: Address) -> Address { addr.align_down(BYTES_IN_CHUNK) } +/// Align up an address to the nearest chunk at which granularity we mmap memory. pub const fn mmap_chunk_align_up(addr: Address) -> Address { addr.align_up(MMAP_CHUNK_BYTES) } +/// Align down an address to the nearest chunk at which granularity we mmap memory. pub const fn mmap_chunk_align_down(addr: Address) -> Address { addr.align_down(MMAP_CHUNK_BYTES) } +/// Convert size in bytes to the number of chunks (aligned up). pub fn bytes_to_chunks_up(bytes: usize) -> usize { (bytes + BYTES_IN_CHUNK - 1) >> LOG_BYTES_IN_CHUNK } +/// Convert an address to the chunk index (aligned down). pub fn address_to_chunk_index(addr: Address) -> usize { addr >> LOG_BYTES_IN_CHUNK } +/// Convert a chunk index to the start address of the chunk. pub fn chunk_index_to_address(chunk: usize) -> Address { unsafe { Address::from_usize(chunk << LOG_BYTES_IN_CHUNK) } } +/// Align up an integer to the given alignment. `align` must be a power of two. pub const fn raw_align_up(val: usize, align: usize) -> usize { // See https://github.com/rust-lang/rust/blob/e620d0f337d0643c757bab791fc7d88d63217704/src/libcore/alloc.rs#L192 val.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1) } +/// Align down an integer to the given alignment. `align` must be a power of two. pub const fn raw_align_down(val: usize, align: usize) -> usize { val & !align.wrapping_sub(1) } +/// Is the integer aligned to the given alignment? `align` must be a power of two. pub const fn raw_is_aligned(val: usize, align: usize) -> bool { val & align.wrapping_sub(1) == 0 } -/* Conversion */ - +/// Convert the number of pages to bytes. 
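+///
+/// For example, with `LOG_BYTES_IN_PAGE = 12` (4 KiB pages):
+///
+/// ```ignore
+/// assert_eq!(pages_to_bytes(1), 4096);
+/// assert_eq!(pages_to_bytes(3), 12288);
+/// ```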
pub fn pages_to_bytes(pages: usize) -> usize { pages << LOG_BYTES_IN_PAGE } @@ -70,25 +78,6 @@ pub fn bytes_to_pages_up(bytes: usize) -> usize { raw_align_up(bytes, BYTES_IN_PAGE) >> LOG_BYTES_IN_PAGE } -pub fn bytes_to_pages(bytes: usize) -> usize { - let pages = bytes_to_pages_up(bytes); - - if cfg!(debug = "true") { - let computed_extent = pages << LOG_BYTES_IN_PAGE; - let bytes_match_pages = computed_extent == bytes; - assert!( - bytes_match_pages, - "ERROR: number of bytes computed from pages must match original byte amount!\ - bytes = {}\ - pages = {}\ - bytes computed from pages = {}", - bytes, pages, computed_extent - ); - } - - pages -} - /// Convert size in bytes to a readable short string, such as 1GB, 2TB, etc. It only keeps the major unit and keeps no fraction. pub fn bytes_to_formatted_string(bytes: usize) -> String { const UNITS: [&str; 6] = ["B", "KiB", "MiB", "GiB", "TiB", "PiB"]; diff --git a/src/util/copy/mod.rs b/src/util/copy/mod.rs index e7e1226a41..7d256f830b 100644 --- a/src/util/copy/mod.rs +++ b/src/util/copy/mod.rs @@ -32,12 +32,12 @@ type CopySpaceMapping = Vec<(CopySelector, &'static dyn Space)>; /// We expect each copying plan to provide a CopyConfig. pub struct CopyConfig { /// Mapping CopySemantics to the actual copying allocators (CopySelector) - pub copy_mapping: EnumMap, + pub(crate) copy_mapping: EnumMap, /// Mapping copying allocators with space - pub space_mapping: CopySpaceMapping, + pub(crate) space_mapping: CopySpaceMapping, /// A reference to the plan constraints. /// GCWorkerCopyContext may have plan-specific behaviors dependson the plan constraints. - pub constraints: &'static PlanConstraints, + pub(crate) constraints: &'static PlanConstraints, } impl Default for CopyConfig { @@ -236,7 +236,7 @@ impl GCWorkerCopyContext { /// This enum may be expanded in the future to describe more semantics. #[derive(Clone, Copy, Enum, Debug)] pub enum CopySemantics { - /// Copy for non generational plans. + /// The default copy behavior. DefaultCopy, /// Copy in nursery generation. Nursery, @@ -247,6 +247,7 @@ pub enum CopySemantics { } impl CopySemantics { + /// Are we copying to a mature space? 
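+ ///
+ /// For example (illustrative only):
+ ///
+ /// ```ignore
+ /// assert!(CopySemantics::Mature.is_mature());
+ /// assert!(CopySemantics::PromoteToMature.is_mature());
+ /// assert!(!CopySemantics::DefaultCopy.is_mature());
+ /// assert!(!CopySemantics::Nursery.is_mature());
+ /// ```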
pub fn is_mature(&self) -> bool { matches!(self, CopySemantics::PromoteToMature | CopySemantics::Mature) } @@ -254,7 +255,7 @@ impl CopySemantics { #[repr(C, u8)] #[derive(Copy, Clone, Debug, Default)] -pub enum CopySelector { +pub(crate) enum CopySelector { CopySpace(u8), Immix(u8), ImmixHybrid(u8), diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs index 756ccfda03..2013ca6d40 100644 --- a/src/util/heap/freelistpageresource.rs +++ b/src/util/heap/freelistpageresource.rs @@ -180,7 +180,7 @@ impl PageResource for FreeListPageResource { impl FreeListPageResource { pub fn new_contiguous(start: Address, bytes: usize, vm_map: &'static dyn VMMap) -> Self { - let pages = conversions::bytes_to_pages(bytes); + let pages = conversions::bytes_to_pages_up(bytes); let common_flpr = { let common_flpr = Box::new(CommonFreeListPageResource { free_list: vm_map.create_parent_freelist(start, pages, PAGES_IN_REGION as _), @@ -317,7 +317,7 @@ impl FreeListPageResource { .grow_discontiguous_space(space_descriptor, required_chunks); if !region.is_zero() { - let region_start = conversions::bytes_to_pages(region - self.start); + let region_start = conversions::bytes_to_pages_up(region - self.start); let region_end = region_start + (required_chunks * PAGES_IN_CHUNK) - 1; self.inner_mut() .free_list @@ -342,7 +342,7 @@ impl FreeListPageResource { unsafe fn free_contiguous_chunk(&self, chunk: Address, sync: &mut FreeListPageResourceSync) { let num_chunks = self.vm_map().get_contiguous_region_chunks(chunk); /* nail down all pages associated with the chunk, so it is no longer on our free list */ - let mut chunk_start = conversions::bytes_to_pages(chunk - self.start); + let mut chunk_start = conversions::bytes_to_pages_up(chunk - self.start); let chunk_end = chunk_start + (num_chunks * PAGES_IN_CHUNK); while chunk_start < chunk_end { self.inner_mut() @@ -364,7 +364,7 @@ impl FreeListPageResource { pub fn release_pages(&self, first: Address) { debug_assert!(conversions::is_page_aligned(first)); - let page_offset = conversions::bytes_to_pages(first - self.start); + let page_offset = conversions::bytes_to_pages_up(first - self.start); let pages = self.free_list.size(page_offset as _); // if (VM.config.ZERO_PAGES_ON_RELEASE) // VM.memory.zero(false, first, Conversions.pagesToBytes(pages)); @@ -392,7 +392,7 @@ impl FreeListPageResource { pages_freed: usize, sync: &mut FreeListPageResourceSync, ) { - let page_offset = conversions::bytes_to_pages(freed_page - self.start); + let page_offset = conversions::bytes_to_pages_up(freed_page - self.start); // may be multiple chunks if pages_freed % PAGES_IN_CHUNK == 0 { diff --git a/src/util/heap/layout/map64.rs b/src/util/heap/layout/map64.rs index 72143b6231..f35aa59c88 100644 --- a/src/util/heap/layout/map64.rs +++ b/src/util/heap/layout/map64.rs @@ -147,8 +147,8 @@ impl VMMap for Map64 { let free_list = self.inner().fl_map[Self::space_index(descriptor.get_start()).unwrap()]; if let Some(mut free_list) = free_list { let free_list = free_list.as_mut(); - free_list.grow_freelist(conversions::bytes_to_pages(extent) as _); - let base_page = conversions::bytes_to_pages(rtn - self.inner().base_address[index]); + free_list.grow_freelist(conversions::bytes_to_pages_up(extent) as _); + let base_page = conversions::bytes_to_pages_up(rtn - self.inner().base_address[index]); for offset in (0..(chunks * PAGES_IN_CHUNK)).step_by(PAGES_IN_CHUNK) { free_list.set_uncoalescable((base_page + offset) as _); /* The 32-bit implementation requires that pages are 
returned allocated to the caller */ diff --git a/src/util/heap/monotonepageresource.rs b/src/util/heap/monotonepageresource.rs index a0c27a6a80..04568e1bd6 100644 --- a/src/util/heap/monotonepageresource.rs +++ b/src/util/heap/monotonepageresource.rs @@ -59,7 +59,7 @@ impl PageResource for MonotonePageResource { fn get_available_physical_pages(&self) -> usize { let sync = self.sync.lock().unwrap(); - let mut rtn = bytes_to_pages(sync.sentinel - sync.cursor); + let mut rtn = bytes_to_pages_up(sync.sentinel - sync.cursor); if !self.common.contiguous { rtn += self.common.vm_map.get_available_discontiguous_chunks() * PAGES_IN_CHUNK; } @@ -264,7 +264,7 @@ impl MonotonePageResource { MonotonePageResourceConditional::Contiguous { start, .. } => start, _ => unreachable!(), }; - let pages = bytes_to_pages(top - space_start); + let pages = bytes_to_pages_up(top - space_start); self.common.accounting.reset(); self.common.accounting.reserve_and_commit(pages); guard.current_chunk = chunk; @@ -297,7 +297,7 @@ impl MonotonePageResource { } chunk_start = next_chunk_start; } - let pages = bytes_to_pages(live_size); + let pages = bytes_to_pages_up(live_size); self.common.accounting.reset(); self.common.accounting.reserve_and_commit(pages); } @@ -370,7 +370,7 @@ impl MonotonePageResource { } fn release_pages_extent(&self, _first: Address, bytes: usize) { - let pages = crate::util::conversions::bytes_to_pages(bytes); + let pages = crate::util::conversions::bytes_to_pages_up(bytes); debug_assert!(bytes == crate::util::conversions::pages_to_bytes(pages)); // FIXME ZERO_PAGES_ON_RELEASE // FIXME Options.protectOnRelease diff --git a/src/util/linear_scan.rs b/src/util/linear_scan.rs index 17a736dcb9..bf391c785d 100644 --- a/src/util/linear_scan.rs +++ b/src/util/linear_scan.rs @@ -64,6 +64,7 @@ impl std /// Describe object size for linear scan. Different policies may have /// different object sizes (e.g. extra metadata, etc) pub trait LinearScanObjectSize { + /// The object size in bytes for the given object. fn size(object: ObjectReference) -> usize; } @@ -78,7 +79,9 @@ impl LinearScanObjectSize for DefaultObjectSize { /// Region represents a memory region with a properly aligned address as its start and a fixed size for the region. /// Region provides a set of utility methods, along with a RegionIterator that linearly scans at the step of a region. pub trait Region: Copy + PartialEq + PartialOrd { + /// log2 of the size in bytes for the region. const LOG_BYTES: usize; + /// The size in bytes for the region. const BYTES: usize = 1 << Self::LOG_BYTES; /// Create a region from an address that is aligned to the region boundary. The method should panic if the address @@ -124,12 +127,14 @@ pub trait Region: Copy + PartialEq + PartialOrd { } } +/// An iterator for contiguous regions. pub struct RegionIterator { current: R, end: R, } impl RegionIterator { + /// Create an iterator from the start region (inclusive) to the end region (exclusive). pub fn new(start: R, end: R) -> Self { Self { current: start, diff --git a/src/util/malloc/malloc_ms_util.rs b/src/util/malloc/malloc_ms_util.rs index ce6ba7642a..a744111557 100644 --- a/src/util/malloc/malloc_ms_util.rs +++ b/src/util/malloc/malloc_ms_util.rs @@ -16,8 +16,9 @@ pub fn align_alloc(size: usize, align: usize) -> Address { address } -// Beside returning the allocation result, -// this will store the malloc result at (result - BYTES_IN_ADDRESS) +/// Allocate with alignment and offset. 
+/// Beside returning the allocation result, this will store the malloc result at (result - BYTES_IN_ADDRESS) +/// so we know the original malloc result. pub fn align_offset_alloc(size: usize, align: usize, offset: usize) -> Address { // we allocate extra `align` bytes here, so we are able to handle offset let actual_size = size + align + BYTES_IN_ADDRESS; @@ -37,13 +38,14 @@ pub fn align_offset_alloc(size: usize, align: usize, offset: usiz result } +/// Get the malloc usable size for an address that is returned by [`crate::util::malloc::malloc_ms_util::align_offset_alloc`]. pub fn offset_malloc_usable_size(address: Address) -> usize { let malloc_res_ptr: *mut usize = (address - BYTES_IN_ADDRESS).to_mut_ptr(); let malloc_res = unsafe { malloc_res_ptr.read_unaligned() } as *mut libc::c_void; unsafe { malloc_usable_size(malloc_res) } } -/// free an address that is allocated with some offset +/// Free an address that is allocated with an offset (returned by [`crate::util::malloc::malloc_ms_util::align_offset_alloc`]). pub fn offset_free(address: Address) { let malloc_res_ptr: *mut usize = (address - BYTES_IN_ADDRESS).to_mut_ptr(); let malloc_res = unsafe { malloc_res_ptr.read_unaligned() } as *mut libc::c_void; diff --git a/src/util/memory.rs b/src/util/memory.rs index f89bd6d6bb..4bd68faef4 100644 --- a/src/util/memory.rs +++ b/src/util/memory.rs @@ -6,17 +6,21 @@ use libc::{PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE}; use std::io::{Error, Result}; use sysinfo::{RefreshKind, System, SystemExt}; -pub fn result_is_mapped(result: Result<()>) -> bool { +/// Check the result from an mmap function in this module. +/// Return true if the mmap has failed due to an existing conflicting mapping. +pub(crate) fn result_is_mapped(result: Result<()>) -> bool { match result { Ok(_) => false, Err(err) => err.raw_os_error().unwrap() == libc::EEXIST, } } +/// Set a range of memory to 0. pub fn zero(start: Address, len: usize) { set(start, 0, len); } +/// Set a range of memory to the given value. Similar to memset. pub fn set(start: Address, val: u8, len: usize) { unsafe { std::ptr::write_bytes::(start.to_mut_ptr(), val, len); @@ -59,7 +63,9 @@ const MMAP_FLAGS: libc::c_int = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_F /// repetition. #[derive(Debug, Copy, Clone)] pub enum MmapStrategy { + /// The default mmap strategy. Normal, + /// Enable transparent huge pages for the pages that are mapped. This option is only for linux. TransparentHugePages, } @@ -89,7 +95,7 @@ pub fn mmap_noreserve(start: Address, size: usize, strategy: MmapStrategy) -> Re mmap_fixed(start, size, prot, flags, strategy) } -pub fn mmap_fixed( +fn mmap_fixed( start: Address, size: usize, prot: libc::c_int, @@ -119,6 +125,7 @@ pub fn mmap_fixed( } } +/// Unmap the given memory (in page granularity). This wraps the unsafe libc munmap call. pub fn munmap(start: Address, size: usize) -> Result<()> { wrap_libc_call(&|| unsafe { libc::munmap(start.to_mut_ptr(), size) }, 0) } @@ -157,10 +164,10 @@ pub fn handle_mmap_error(error: Error, tls: VMThread) -> ! { } /// Checks if the memory has already been mapped. If not, we panic. -// Note that the checking has a side effect that it will map the memory if it was unmapped. So we panic if it was unmapped. -// Be very careful about using this function. +/// Note that the checking has a side effect that it will map the memory if it was unmapped. So we panic if it was unmapped. +/// Be very careful about using this function. 
#[cfg(target_os = "linux")] -pub fn panic_if_unmapped(start: Address, size: usize) { +pub(crate) fn panic_if_unmapped(start: Address, size: usize) { let prot = PROT_READ | PROT_WRITE; let flags = MMAP_FLAGS; match mmap_fixed(start, size, prot, flags, MmapStrategy::Normal) { @@ -175,13 +182,17 @@ pub fn panic_if_unmapped(start: Address, size: usize) { } } +/// Checks if the memory has already been mapped. If not, we panic. +/// This function is currently left empty for non-linux, and should be implemented in the future. +/// As the function is only used for assertions, MMTk will still run even if we never panic. #[cfg(not(target_os = "linux"))] -pub fn panic_if_unmapped(_start: Address, _size: usize) { +pub(crate) fn panic_if_unmapped(_start: Address, _size: usize) { // This is only used for assertions, so MMTk will still run even if we never panic. // TODO: We need a proper implementation for this. As we do not have MAP_FIXED_NOREPLACE, we cannot use the same implementation as Linux. // Possibly we can use posix_mem_offset for both OS/s. } +/// Unprotect the given memory (in page granularity) to allow access (PROT_READ/WRITE/EXEC). pub fn munprotect(start: Address, size: usize) -> Result<()> { wrap_libc_call( &|| unsafe { libc::mprotect(start.to_mut_ptr(), size, PROT_READ | PROT_WRITE | PROT_EXEC) }, @@ -189,6 +200,7 @@ pub fn munprotect(start: Address, size: usize) -> Result<()> { ) } +/// Protect the given memory (in page granularity) to forbid any access (PROT_NONE). pub fn mprotect(start: Address, size: usize) -> Result<()> { wrap_libc_call( &|| unsafe { libc::mprotect(start.to_mut_ptr(), size, PROT_NONE) }, diff --git a/src/util/metadata/side_metadata/helpers_32.rs b/src/util/metadata/side_metadata/helpers_32.rs index d84394ba59..233ca4d36e 100644 --- a/src/util/metadata/side_metadata/helpers_32.rs +++ b/src/util/metadata/side_metadata/helpers_32.rs @@ -129,6 +129,9 @@ pub(super) fn try_map_per_chunk_metadata_space( } else { start.align_down(BYTES_IN_CHUNK) + BYTES_IN_CHUNK }; + // The code that was intended to deal with the failing cases is commented out. + // See the comment below. Suppress the warning for now. + #[allow(clippy::never_loop)] // Failure: munmap what has been mmapped before while munmap_start < aligned_start { // Commented out the following as we do not have unmap in Mmapper. diff --git a/src/util/mod.rs b/src/util/mod.rs index 0d5d3f86f9..9a5ae0bb23 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -39,6 +39,7 @@ pub(crate) mod erase_vm; pub(crate) mod finalizable_processor; /// Heap implementation, including page resource, mmapper, etc. pub mod heap; +/// Checking if an address is an valid MMTk object. #[cfg(feature = "is_mmtk_object")] pub mod is_mmtk_object; /// Logger initialization diff --git a/src/util/opaque_pointer.rs b/src/util/opaque_pointer.rs index 02b3fcd077..460bd8aa16 100644 --- a/src/util/opaque_pointer.rs +++ b/src/util/opaque_pointer.rs @@ -1,10 +1,9 @@ use crate::util::Address; use libc::c_void; -// OpaquePointer does not provide any method for dereferencing, as we should not dereference it in MMTk. -// However, there are occurrences that we may need to dereference tls in the VM binding code. -// In JikesRVM's implementation of ActivePlan, we need to dereference tls to get mutator and collector context. -// This is done by transmute (unsafe). +/// OpaquePointer represents pointers that MMTk needs to know about but will not deferefence it. +/// For example, a pointer to the thread or the thread local storage is an opaque pointer for MMTK. 
+/// The type does not provide any method for dereferencing. #[repr(transparent)] #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct OpaquePointer(*mut c_void); @@ -20,16 +19,20 @@ impl Default for OpaquePointer { } impl OpaquePointer { + /// Represents an uninitialized value for [`OpaquePointer`]. pub const UNINITIALIZED: Self = Self(0 as *mut c_void); + /// Cast an [`Address`] type to an [`OpaquePointer`]. pub fn from_address(addr: Address) -> Self { OpaquePointer(addr.to_mut_ptr::()) } + /// Cast the opaque pointer to an [`Address`] type. pub fn to_address(self) -> Address { Address::from_mut_ptr(self.0) } + /// Is this opaque pointer null? pub fn is_null(self) -> bool { self.0.is_null() } @@ -45,6 +48,7 @@ impl OpaquePointer { pub struct VMThread(pub OpaquePointer); impl VMThread { + /// Represents an uninitialized value for [`VMThread`]. pub const UNINITIALIZED: Self = Self(OpaquePointer::UNINITIALIZED); } diff --git a/src/util/options.rs b/src/util/options.rs index a7e8e054dc..11a1b329ae 100644 --- a/src/util/options.rs +++ b/src/util/options.rs @@ -1,5 +1,4 @@ use crate::scheduler::affinity::{get_total_num_cpus, CoreId}; -use crate::util::constants::DEFAULT_STRESS_FACTOR; use crate::util::constants::LOG_BYTES_IN_MBYTE; use crate::util::Address; use std::default::Default; @@ -9,6 +8,10 @@ use strum_macros::EnumString; use super::heap::vm_layout::vm_layout; +/// The default stress factor. This is set to the max usize, +/// which means we will never trigger a stress GC for the default value. +pub const DEFAULT_STRESS_FACTOR: usize = usize::max_value(); + /// The zeroing approach to use for new object allocations. /// Affects each plan differently. #[derive(Copy, Clone, EnumString, Debug)] @@ -113,9 +116,9 @@ pub const NURSERY_SIZE: usize = 32 << LOG_BYTES_IN_MBYTE; /// only used in the GC trigger check. #[cfg(target_pointer_width = "32")] pub const DEFAULT_MIN_NURSERY: usize = 2 << LOG_BYTES_IN_MBYTE; +const DEFAULT_MAX_NURSERY_32: usize = 32 << LOG_BYTES_IN_MBYTE; /// The default max nursery size. This does not affect the actual space we create as nursery. It is /// only used in the GC trigger check. -pub const DEFAULT_MAX_NURSERY_32: usize = 32 << LOG_BYTES_IN_MBYTE; #[cfg(target_pointer_width = "32")] pub const DEFAULT_MAX_NURSERY: usize = DEFAULT_MAX_NURSERY_32; @@ -784,8 +787,8 @@ options! { #[cfg(test)] mod tests { + use super::DEFAULT_STRESS_FACTOR; use super::*; - use crate::util::constants::DEFAULT_STRESS_FACTOR; use crate::util::options::Options; use crate::util::test_util::{serial_test, with_cleanup}; diff --git a/src/util/raw_memory_freelist.rs b/src/util/raw_memory_freelist.rs index 3c87148d6a..58711c654a 100644 --- a/src/util/raw_memory_freelist.rs +++ b/src/util/raw_memory_freelist.rs @@ -115,8 +115,8 @@ impl RawMemoryFreeList { } fn current_capacity(&self) -> i32 { - let list_blocks = - conversions::bytes_to_pages(self.high_water - self.base) as i32 / self.pages_per_block; + let list_blocks = conversions::bytes_to_pages_up(self.high_water - self.base) as i32 + / self.pages_per_block; self.units_in_first_block() + (list_blocks - 1) * self.units_per_block() } diff --git a/src/util/rust_util/rev_group.rs b/src/util/rust_util/rev_group.rs index b192df6575..d96c5c6b17 100644 --- a/src/util/rust_util/rev_group.rs +++ b/src/util/rust_util/rev_group.rs @@ -219,6 +219,7 @@ mod tests { } #[test] + #[allow(clippy::never_loop)] // We are testing with empty slices. The panic in the loop body should not run. 
fn test_empty_outer_slice() { let slice_of_slices: &[&[i32]] = &[]; for _group in slice_of_slices @@ -233,6 +234,7 @@ mod tests { } #[test] + #[allow(clippy::never_loop)] // We are testing with empty slices. The panic in the loop body should not run. fn test_empty_inner_slice() { let slice_of_slices: &[&[i32]] = &[&[], &[], &[]]; for _group in slice_of_slices diff --git a/src/vm/mod.rs b/src/vm/mod.rs index 27947b3aec..223a02e5cc 100644 --- a/src/vm/mod.rs +++ b/src/vm/mod.rs @@ -15,8 +15,6 @@ //! 2. Make sure that the crate type for a VM binding supports LTO. To our knowledge, `staticlib` and `cdylib` support LTO, and //! `rlib` does *not* support LTO. -use crate::util::constants::*; - mod active_plan; mod collection; /// Allows MMTk to access edges in a VM-defined way. @@ -37,8 +35,10 @@ pub use self::scanning::ObjectTracerContext; pub use self::scanning::RootsWorkFactory; pub use self::scanning::Scanning; -const DEFAULT_LOG_MIN_ALIGNMENT: usize = LOG_BYTES_IN_INT as usize; -const DEFAULT_LOG_MAX_ALIGNMENT: usize = LOG_BYTES_IN_LONG as usize; +/// Default min alignment 4 bytes +const DEFAULT_LOG_MIN_ALIGNMENT: usize = 2; +/// Default max alignment 8 bytes +const DEFAULT_LOG_MAX_ALIGNMENT: usize = 3; /// The `VMBinding` trait associates with each trait, and provides VM-specific constants. pub trait VMBinding