From 9a49d6a938f0c3143750e8415622dfe679b3ddb8 Mon Sep 17 00:00:00 2001 From: Kunal Sareen Date: Fri, 26 Jul 2024 12:56:26 +1000 Subject: [PATCH] Only map with executable permissions when using code space (#1176) This PR closes https://github.com/mmtk/mmtk-core/issues/7. This PR * refactors `MmapStrategy` to include protection flags, and allows each space to pass `MmapStrategy` to the mmapper. * removes exec permission from most spaces. Only code spaces will have exec permission. * adds a feature `exec_permission_on_all_spaces` for bindings that may allocate code into normal spaces. --------- Co-authored-by: Yi Lin --- Cargo.toml | 5 +- .../tutorial/code/mygc_semispace/global.rs | 4 +- src/mmtk.rs | 4 - src/plan/generational/copying/global.rs | 4 +- src/plan/generational/global.rs | 2 +- src/plan/generational/immix/global.rs | 2 +- src/plan/global.rs | 12 +- src/plan/immix/global.rs | 2 +- src/plan/markcompact/global.rs | 8 +- src/plan/marksweep/global.rs | 1 + src/plan/nogc/global.rs | 3 + src/plan/pageprotect/global.rs | 2 +- src/plan/semispace/global.rs | 4 +- src/policy/largeobjectspace.rs | 6 +- src/policy/lockfreeimmortalspace.rs | 9 +- src/policy/space.rs | 37 +++- src/util/heap/freelistpageresource.rs | 20 ++- src/util/heap/layout/byte_map_mmapper.rs | 49 +++--- src/util/heap/layout/fragmented_mapper.rs | 46 ++--- src/util/heap/layout/map64.rs | 2 +- src/util/heap/layout/mmapper.rs | 14 +- src/util/memory.rs | 161 ++++++++++++------ src/util/metadata/side_metadata/global.rs | 5 +- src/util/metadata/side_metadata/helpers.rs | 13 +- src/util/metadata/side_metadata/helpers_32.rs | 12 +- src/util/options.rs | 3 +- src/util/raw_memory_freelist.rs | 2 +- .../mock_test_handle_mmap_conflict.rs | 4 +- .../mock_tests/mock_test_handle_mmap_oom.rs | 2 +- 29 files changed, 280 insertions(+), 158 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e85f0f20e6..d658638c47 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -98,9 +98,12 @@ set_unlog_bits_vm_space = [] # TODO: This is not properly implemented yet. We currently use an immortal space instead, and do not guarantee read-only semantics. ro_space = [] # A code space with execution permission. -# TODO: This is not properly implemented yet. We currently use an immortal space instead, and all our spaces have execution permission at the moment. code_space = [] +# By default, we only allow execution permission for code spaces. With this feature, all the spaces have execution permission. +# Use with care. +exec_permission_on_all_spaces = [] + # Global valid object (VO) bit metadata. # The VO bit is set when an object is allocated, and cleared when the GC determines it is dead.
# See `src/util/metadata/vo_bit/mod.rs` diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs index 8aa745b3e4..98a9f3c934 100644 --- a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs +++ b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs @@ -175,9 +175,9 @@ impl MyGC { let res = MyGC { hi: AtomicBool::new(false), // ANCHOR: copyspace_new - copyspace0: CopySpace::new(plan_args.get_space_args("copyspace0", true, VMRequest::discontiguous()), false), + copyspace0: CopySpace::new(plan_args.get_space_args("copyspace0", true, false, VMRequest::discontiguous()), false), // ANCHOR_END: copyspace_new - copyspace1: CopySpace::new(plan_args.get_space_args("copyspace1", true, VMRequest::discontiguous()), true), + copyspace1: CopySpace::new(plan_args.get_space_args("copyspace1", true, false, VMRequest::discontiguous()), true), common: CommonPlan::new(plan_args), }; diff --git a/src/mmtk.rs b/src/mmtk.rs index eea56e7e45..fa76302c1b 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -203,10 +203,6 @@ impl MMTK { }, ); - if *options.transparent_hugepages { - MMAPPER.set_mmap_strategy(crate::util::memory::MmapStrategy::TransparentHugePages); - } - MMTK { options, state, diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs index 2e5da551ba..2c920fda7d 100644 --- a/src/plan/generational/copying/global.rs +++ b/src/plan/generational/copying/global.rs @@ -209,11 +209,11 @@ impl GenCopy { }; let copyspace0 = CopySpace::new( - plan_args.get_space_args("copyspace0", true, VMRequest::discontiguous()), + plan_args.get_space_args("copyspace0", true, false, VMRequest::discontiguous()), false, ); let copyspace1 = CopySpace::new( - plan_args.get_space_args("copyspace1", true, VMRequest::discontiguous()), + plan_args.get_space_args("copyspace1", true, false, VMRequest::discontiguous()), true, ); diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index 55054c7388..91f6f7e911 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -41,7 +41,7 @@ pub struct CommonGenPlan { impl CommonGenPlan { pub fn new(mut args: CreateSpecificPlanArgs) -> Self { let nursery = CopySpace::new( - args.get_space_args("nursery", true, VMRequest::discontiguous()), + args.get_space_args("nursery", true, false, VMRequest::discontiguous()), true, ); let full_heap_gc_count = args diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index 8a34aadc48..ba2850b3a4 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -247,7 +247,7 @@ impl GenImmix { crate::plan::generational::new_generational_global_metadata_specs::(), }; let immix_space = ImmixSpace::new( - plan_args.get_space_args("immix_mature", true, VMRequest::discontiguous()), + plan_args.get_space_args("immix_mature", true, false, VMRequest::discontiguous()), ImmixSpaceArgs { reset_log_bit_in_major_gc: false, // We don't need to unlog objects at tracing. Instead, we unlog objects at copying. 
diff --git a/src/plan/global.rs b/src/plan/global.rs index 90b7452c40..14695c2d00 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -396,11 +396,13 @@ impl<'a, VM: VMBinding> CreateSpecificPlanArgs<'a, VM> { &mut self, name: &'static str, zeroed: bool, + permission_exec: bool, vmrequest: VMRequest, ) -> PlanCreateSpaceArgs { PlanCreateSpaceArgs { name, zeroed, + permission_exec, vmrequest, global_side_metadata_specs: self.global_side_metadata_specs.clone(), vm_map: self.global_args.vm_map, @@ -409,7 +411,7 @@ impl<'a, VM: VMBinding> CreateSpecificPlanArgs<'a, VM> { constraints: self.constraints, gc_trigger: self.global_args.gc_trigger.clone(), scheduler: self.global_args.scheduler.clone(), - options: &self.global_args.options, + options: self.global_args.options.clone(), global_state: self.global_args.state.clone(), } } @@ -423,24 +425,28 @@ impl BasePlan { code_space: ImmortalSpace::new(args.get_space_args( "code_space", true, + true, VMRequest::discontiguous(), )), #[cfg(feature = "code_space")] code_lo_space: ImmortalSpace::new(args.get_space_args( "code_lo_space", true, + true, VMRequest::discontiguous(), )), #[cfg(feature = "ro_space")] ro_space: ImmortalSpace::new(args.get_space_args( "ro_space", true, + false, VMRequest::discontiguous(), )), #[cfg(feature = "vm_space")] vm_space: VMSpace::new(args.get_space_args( "vm_space", false, + false, // it doesn't matter -- we are not mmapping for VM space. VMRequest::discontiguous(), )), @@ -585,15 +591,17 @@ impl CommonPlan { immortal: ImmortalSpace::new(args.get_space_args( "immortal", true, + false, VMRequest::discontiguous(), )), los: LargeObjectSpace::new( - args.get_space_args("los", true, VMRequest::discontiguous()), + args.get_space_args("los", true, false, VMRequest::discontiguous()), false, ), nonmoving: ImmortalSpace::new(args.get_space_args( "nonmoving", true, + false, VMRequest::discontiguous(), )), base: BasePlan::new(args), diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index d651183a69..9dc6380660 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -150,7 +150,7 @@ impl Immix { ) -> Self { let immix = Immix { immix_space: ImmixSpace::new( - plan_args.get_space_args("immix", true, VMRequest::discontiguous()), + plan_args.get_space_args("immix", true, false, VMRequest::discontiguous()), space_args, ), common: CommonPlan::new(plan_args), diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 7804d9757d..fb68100ddf 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -195,8 +195,12 @@ impl MarkCompact { global_side_metadata_specs, }; - let mc_space = - MarkCompactSpace::new(plan_args.get_space_args("mc", true, VMRequest::discontiguous())); + let mc_space = MarkCompactSpace::new(plan_args.get_space_args( + "mc", + true, + false, + VMRequest::discontiguous(), + )); let res = MarkCompact { mc_space, diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index b814de1f03..fb6c271cab 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -109,6 +109,7 @@ impl MarkSweep { ms: MarkSweepSpace::new(plan_args.get_space_args( "ms", true, + false, VMRequest::discontiguous(), )), common: CommonPlan::new(plan_args), diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs index 8cc8334cba..a05034c1c4 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -98,16 +98,19 @@ impl NoGC { nogc_space: NoGCImmortalSpace::new(plan_args.get_space_args( "nogc_space", cfg!(not(feature = 
"nogc_no_zeroing")), + false, VMRequest::discontiguous(), )), immortal: ImmortalSpace::new(plan_args.get_space_args( "immortal", true, + false, VMRequest::discontiguous(), )), los: ImmortalSpace::new(plan_args.get_space_args( "los", true, + false, VMRequest::discontiguous(), )), base: BasePlan::new(plan_args), diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index cc5efb78b0..65a6b2eab8 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -105,7 +105,7 @@ impl PageProtect { let ret = PageProtect { space: LargeObjectSpace::new( - plan_args.get_space_args("pageprotect", true, VMRequest::discontiguous()), + plan_args.get_space_args("pageprotect", true, false, VMRequest::discontiguous()), true, ), common: CommonPlan::new(plan_args), diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index f1ea3483df..361003ef59 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -143,11 +143,11 @@ impl SemiSpace { let res = SemiSpace { hi: AtomicBool::new(false), copyspace0: CopySpace::new( - plan_args.get_space_args("copyspace0", true, VMRequest::discontiguous()), + plan_args.get_space_args("copyspace0", true, false, VMRequest::discontiguous()), false, ), copyspace1: CopySpace::new( - plan_args.get_space_args("copyspace1", true, VMRequest::discontiguous()), + plan_args.get_space_args("copyspace1", true, false, VMRequest::discontiguous()), true, ), common: CommonPlan::new(plan_args), diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index b64a5371c6..f2b9ec34d1 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -212,7 +212,11 @@ impl LargeObjectSpace { } else { FreeListPageResource::new_contiguous(common.start, common.extent, vm_map) }; - pr.protect_memory_on_release = protect_memory_on_release; + pr.protect_memory_on_release = if protect_memory_on_release { + Some(common.mmap_strategy().prot) + } else { + None + }; LargeObjectSpace { pr, common, diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index 858d07fd5f..80b65e288d 100644 --- a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -232,11 +232,10 @@ impl LockFreeImmortalSpace { }; // Eagerly memory map the entire heap (also zero all the memory) - let strategy = if *args.options.transparent_hugepages { - MmapStrategy::TransparentHugePages - } else { - MmapStrategy::Normal - }; + let strategy = MmapStrategy::new( + *args.options.transparent_hugepages, + crate::util::memory::MmapProtection::ReadWrite, + ); crate::util::memory::dzmmap_noreplace(start, aligned_total_bytes, strategy).unwrap(); if space .metadata diff --git a/src/policy/space.rs b/src/policy/space.rs index c5663dfa01..556e0c6bfe 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -28,7 +28,7 @@ use crate::util::heap::layout::Mmapper; use crate::util::heap::layout::VMMap; use crate::util::heap::space_descriptor::SpaceDescriptor; use crate::util::heap::HeapMeta; -use crate::util::memory; +use crate::util::memory::{self, HugePageSupport, MmapProtection, MmapStrategy}; use crate::vm::VMBinding; use std::marker::PhantomData; @@ -137,13 +137,13 @@ pub trait Space: 'static + SFT + Sync + Downcast { ); let bytes = conversions::pages_to_bytes(res.pages); - let map_sidemetadata = || { + let mmap = || { // Mmap the pages and the side metadata, and handle error. In case of any error, // we will either call back to the VM for OOM, or simply panic. 
if let Err(mmap_error) = self .common() .mmapper - .ensure_mapped(res.start, res.pages) + .ensure_mapped(res.start, res.pages, self.common().mmap_strategy()) .and( self.common() .metadata @@ -160,7 +160,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { // The scope of the lock is important in terms of performance when we have many allocator threads. if SFT_MAP.get_side_metadata().is_some() { // If the SFT map uses side metadata, so we have to initialize side metadata first. - map_sidemetadata(); + mmap(); // then grow space, which will use the side metadata we mapped above grow_space(); // then we can drop the lock after grow_space() @@ -170,7 +170,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { grow_space(); drop(lock); // and map side metadata without holding the lock - map_sidemetadata(); + mmap(); } // TODO: Concurrent zeroing @@ -421,11 +421,13 @@ pub struct CommonSpace { // the copy semantics for the space. pub copy: Option, - immortal: bool, - movable: bool, + pub immortal: bool, + pub movable: bool, pub contiguous: bool, pub zeroed: bool, + pub permission_exec: bool, + pub start: Address, pub extent: usize, @@ -443,6 +445,7 @@ pub struct CommonSpace { pub gc_trigger: Arc>, pub global_state: Arc, + pub options: Arc, p: PhantomData, } @@ -459,6 +462,7 @@ pub struct PolicyCreateSpaceArgs<'a, VM: VMBinding> { pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> { pub name: &'static str, pub zeroed: bool, + pub permission_exec: bool, pub vmrequest: VMRequest, pub global_side_metadata_specs: Vec, pub vm_map: &'static dyn VMMap, @@ -467,7 +471,7 @@ pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> { pub constraints: &'a PlanConstraints, pub gc_trigger: Arc>, pub scheduler: Arc>, - pub options: &'a Options, + pub options: Arc, pub global_state: Arc, } @@ -498,6 +502,7 @@ impl CommonSpace { immortal: args.immortal, movable: args.movable, contiguous: true, + permission_exec: args.plan_args.permission_exec, zeroed: args.plan_args.zeroed, start: unsafe { Address::zero() }, extent: 0, @@ -511,6 +516,7 @@ impl CommonSpace { }, acquire_lock: Mutex::new(()), global_state: args.plan_args.global_state, + options: args.plan_args.options.clone(), p: PhantomData, }; @@ -619,6 +625,21 @@ impl CommonSpace { pub fn vm_map(&self) -> &'static dyn VMMap { self.vm_map } + + pub fn mmap_strategy(&self) -> MmapStrategy { + MmapStrategy { + huge_page: if *self.options.transparent_hugepages { + HugePageSupport::TransparentHugePages + } else { + HugePageSupport::No + }, + prot: if self.permission_exec || cfg!(feature = "exec_permission_on_all_spaces") { + MmapProtection::ReadWriteExec + } else { + MmapProtection::ReadWrite + }, + } + } } fn get_frac_available(frac: f32) -> usize { diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs index 5eeda9745f..aa66fe232d 100644 --- a/src/util/heap/freelistpageresource.rs +++ b/src/util/heap/freelistpageresource.rs @@ -27,7 +27,7 @@ pub struct FreeListPageResource { sync: Mutex, _p: PhantomData, /// Protect memory on release, and unprotect on re-allocate. - pub(crate) protect_memory_on_release: bool, + pub(crate) protect_memory_on_release: Option, } unsafe impl Send for FreeListPageResource {} @@ -111,7 +111,7 @@ impl PageResource for FreeListPageResource { let rtn = sync.start + conversions::pages_to_bytes(page_offset as _); // The meta-data portion of reserved Pages was committed above. 
self.commit_pages(reserved_pages, required_pages, tls); - if self.protect_memory_on_release { + if self.protect_memory_on_release.is_some() { if !new_chunk { // This check is necessary to prevent us from mprotecting an address that is not yet mapped by mmapper. // See https://github.com/mmtk/mmtk-core/issues/400. @@ -168,7 +168,7 @@ impl FreeListPageResource { highwater_mark: UNINITIALIZED_WATER_MARK, }), _p: PhantomData, - protect_memory_on_release: false, + protect_memory_on_release: None, } } @@ -206,7 +206,7 @@ impl FreeListPageResource { highwater_mark: UNINITIALIZED_WATER_MARK, }), _p: PhantomData, - protect_memory_on_release: false, + protect_memory_on_release: None, } } @@ -218,7 +218,7 @@ impl FreeListPageResource { // > the total number of mappings with distinct attributes // > (e.g., read versus read/write protection) exceeding the // > allowed maximum. - assert!(self.protect_memory_on_release); + assert!(self.protect_memory_on_release.is_some()); // We are not using mmapper.protect(). mmapper.protect() protects the whole chunk and // may protect memory that is still in use. if let Err(e) = memory::mprotect(start, conversions::pages_to_bytes(pages)) { @@ -231,8 +231,12 @@ impl FreeListPageResource { /// Unprotect the memory fn munprotect(&self, start: Address, pages: usize) { - assert!(self.protect_memory_on_release); - if let Err(e) = memory::munprotect(start, conversions::pages_to_bytes(pages)) { + assert!(self.protect_memory_on_release.is_some()); + if let Err(e) = memory::munprotect( + start, + conversions::pages_to_bytes(pages), + self.protect_memory_on_release.unwrap(), + ) { panic!( "Failed at unprotecting memory (starting at {}): {:?}", start, e @@ -337,7 +341,7 @@ impl FreeListPageResource { // VM.memory.zero(false, first, Conversions.pagesToBytes(pages)); debug_assert!(pages as usize <= self.common.accounting.get_committed_pages()); - if self.protect_memory_on_release { + if self.protect_memory_on_release.is_some() { self.mprotect(first, pages as _); } diff --git a/src/util/heap/layout/byte_map_mmapper.rs b/src/util/heap/layout/byte_map_mmapper.rs index ed96aaa89f..ff5dc8c8fe 100644 --- a/src/util/heap/layout/byte_map_mmapper.rs +++ b/src/util/heap/layout/byte_map_mmapper.rs @@ -23,7 +23,6 @@ pub const VERBOSE: bool = true; pub struct ByteMapMmapper { lock: Mutex<()>, mapped: [Atomic; MMAP_NUM_CHUNKS], - strategy: Atomic, } impl fmt::Debug for ByteMapMmapper { @@ -33,10 +32,6 @@ impl fmt::Debug for ByteMapMmapper { } impl Mmapper for ByteMapMmapper { - fn set_mmap_strategy(&self, strategy: MmapStrategy) { - self.strategy.store(strategy, Ordering::Relaxed); - } - fn eagerly_mmap_all_spaces(&self, _space_map: &[Address]) { unimplemented!() } @@ -49,7 +44,7 @@ impl Mmapper for ByteMapMmapper { } } - fn ensure_mapped(&self, start: Address, pages: usize) -> Result<()> { + fn ensure_mapped(&self, start: Address, pages: usize, strategy: MmapStrategy) -> Result<()> { let start_chunk = Self::address_to_mmap_chunks_down(start); let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages)); trace!( @@ -67,18 +62,18 @@ impl Mmapper for ByteMapMmapper { let mmap_start = Self::mmap_chunks_to_address(chunk); let _guard = self.lock.lock().unwrap(); - MapState::transition_to_mapped( - &self.mapped[chunk], - mmap_start, - self.strategy.load(Ordering::Relaxed), - ) - .unwrap(); + MapState::transition_to_mapped(&self.mapped[chunk], mmap_start, strategy).unwrap(); } Ok(()) } - fn quarantine_address_range(&self, start: Address, pages: usize) -> Result<()> { + fn 
quarantine_address_range( + &self, + start: Address, + pages: usize, + strategy: MmapStrategy, + ) -> Result<()> { let start_chunk = Self::address_to_mmap_chunks_down(start); let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages)); trace!( @@ -96,12 +91,7 @@ impl Mmapper for ByteMapMmapper { let mmap_start = Self::mmap_chunks_to_address(chunk); let _guard = self.lock.lock().unwrap(); - MapState::transition_to_quarantined( - &self.mapped[chunk], - mmap_start, - self.strategy.load(Ordering::Relaxed), - ) - .unwrap(); + MapState::transition_to_quarantined(&self.mapped[chunk], mmap_start, strategy).unwrap(); } Ok(()) @@ -149,7 +139,6 @@ impl ByteMapMmapper { ByteMapMmapper { lock: Mutex::new(()), mapped: [INITIAL_ENTRY; MMAP_NUM_CHUNKS], - strategy: Atomic::new(MmapStrategy::Normal), } } @@ -190,7 +179,7 @@ mod tests { use crate::util::conversions::pages_to_bytes; use crate::util::heap::layout::mmapper::MapState; use crate::util::heap::layout::vm_layout::MMAP_CHUNK_BYTES; - use crate::util::memory; + use crate::util::memory::{self, MmapStrategy}; use crate::util::test_util::BYTE_MAP_MMAPPER_TEST_REGION; use crate::util::test_util::{serial_test, with_cleanup}; use std::sync::atomic::Ordering; @@ -247,7 +236,9 @@ mod tests { with_cleanup( || { let mmapper = ByteMapMmapper::new(); - mmapper.ensure_mapped(FIXED_ADDRESS, pages).unwrap(); + mmapper + .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST) + .unwrap(); for chunk in start_chunk..end_chunk { assert_eq!( @@ -274,7 +265,9 @@ mod tests { with_cleanup( || { let mmapper = ByteMapMmapper::new(); - mmapper.ensure_mapped(FIXED_ADDRESS, pages).unwrap(); + mmapper + .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST) + .unwrap(); for chunk in start_chunk..end_chunk { assert_eq!( @@ -301,7 +294,9 @@ mod tests { with_cleanup( || { let mmapper = ByteMapMmapper::new(); - mmapper.ensure_mapped(FIXED_ADDRESS, pages).unwrap(); + mmapper + .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST) + .unwrap(); let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS); let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up( @@ -334,7 +329,7 @@ mod tests { // map 2 chunks let mmapper = ByteMapMmapper::new(); mmapper - .ensure_mapped(FIXED_ADDRESS, test_memory_pages) + .ensure_mapped(FIXED_ADDRESS, test_memory_pages, MmapStrategy::TEST) .unwrap(); // protect 1 chunk @@ -369,7 +364,7 @@ mod tests { // map 2 chunks let mmapper = ByteMapMmapper::new(); mmapper - .ensure_mapped(FIXED_ADDRESS, test_memory_pages) + .ensure_mapped(FIXED_ADDRESS, test_memory_pages, MmapStrategy::TEST) .unwrap(); // protect 1 chunk @@ -387,7 +382,7 @@ mod tests { // ensure mapped - this will unprotect the previously protected chunk mmapper - .ensure_mapped(FIXED_ADDRESS, protect_memory_pages_2) + .ensure_mapped(FIXED_ADDRESS, protect_memory_pages_2, MmapStrategy::TEST) .unwrap(); assert_eq!( mmapper.mapped[chunk].load(Ordering::Relaxed), diff --git a/src/util/heap/layout/fragmented_mapper.rs b/src/util/heap/layout/fragmented_mapper.rs index 93bd626a25..53ce26c441 100644 --- a/src/util/heap/layout/fragmented_mapper.rs +++ b/src/util/heap/layout/fragmented_mapper.rs @@ -57,7 +57,6 @@ struct InnerFragmentedMapper { free_slabs: Vec>>, slab_table: Vec>>, slab_map: Vec
, - strategy: Atomic, } impl fmt::Debug for FragmentedMapper { @@ -67,10 +66,6 @@ impl fmt::Debug for FragmentedMapper { } impl Mmapper for FragmentedMapper { - fn set_mmap_strategy(&self, strategy: MmapStrategy) { - self.inner().strategy.store(strategy, Ordering::Relaxed); - } - fn eagerly_mmap_all_spaces(&self, _space_map: &[Address]) {} fn mark_as_mapped(&self, mut start: Address, bytes: usize) { @@ -94,7 +89,12 @@ impl Mmapper for FragmentedMapper { } } - fn quarantine_address_range(&self, mut start: Address, pages: usize) -> Result<()> { + fn quarantine_address_range( + &self, + mut start: Address, + pages: usize, + strategy: MmapStrategy, + ) -> Result<()> { debug_assert!(start.is_aligned_to(BYTES_IN_PAGE)); let end = start + conversions::pages_to_bytes(pages); @@ -139,14 +139,19 @@ impl Mmapper for FragmentedMapper { MapState::bulk_transition_to_quarantined( state_slices.as_slice(), mmap_start, - self.inner().strategy.load(Ordering::Relaxed), + strategy, )?; } Ok(()) } - fn ensure_mapped(&self, mut start: Address, pages: usize) -> Result<()> { + fn ensure_mapped( + &self, + mut start: Address, + pages: usize, + strategy: MmapStrategy, + ) -> Result<()> { let end = start + conversions::pages_to_bytes(pages); // Iterate over the slabs covered while start < end { @@ -171,11 +176,7 @@ impl Mmapper for FragmentedMapper { let mmap_start = Self::chunk_index_to_address(base, chunk); let _guard = self.lock.lock().unwrap(); - MapState::transition_to_mapped( - entry, - mmap_start, - self.inner().strategy.load(Ordering::Relaxed), - )?; + MapState::transition_to_mapped(entry, mmap_start, strategy)?; } start = high; } @@ -235,7 +236,6 @@ impl FragmentedMapper { free_slabs: (0..MAX_SLABS).map(|_| Some(Self::new_slab())).collect(), slab_table: (0..SLAB_TABLE_SIZE).map(|_| None).collect(), slab_map: vec![SENTINEL; SLAB_TABLE_SIZE], - strategy: Atomic::new(MmapStrategy::Normal), }), } } @@ -445,7 +445,9 @@ mod tests { with_cleanup( || { let mmapper = FragmentedMapper::new(); - mmapper.ensure_mapped(FIXED_ADDRESS, pages).unwrap(); + mmapper + .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST) + .unwrap(); let chunks = pages_to_chunks_up(pages); for i in 0..chunks { @@ -471,7 +473,9 @@ mod tests { with_cleanup( || { let mmapper = FragmentedMapper::new(); - mmapper.ensure_mapped(FIXED_ADDRESS, pages).unwrap(); + mmapper + .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST) + .unwrap(); let chunks = pages_to_chunks_up(pages); for i in 0..chunks { @@ -498,7 +502,9 @@ mod tests { with_cleanup( || { let mmapper = FragmentedMapper::new(); - mmapper.ensure_mapped(FIXED_ADDRESS, pages).unwrap(); + mmapper + .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST) + .unwrap(); let chunks = pages_to_chunks_up(pages); for i in 0..chunks { @@ -527,7 +533,7 @@ mod tests { let mmapper = FragmentedMapper::new(); let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; mmapper - .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2) + .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, MmapStrategy::TEST) .unwrap(); // protect 1 chunk @@ -558,7 +564,7 @@ mod tests { let mmapper = FragmentedMapper::new(); let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; mmapper - .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2) + .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, MmapStrategy::TEST) .unwrap(); // protect 1 chunk @@ -575,7 +581,7 @@ mod tests { // ensure mapped - this will unprotect the previously protected chunk mmapper - .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2) + 
.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, MmapStrategy::TEST) .unwrap(); assert_eq!( get_chunk_map_state(&mmapper, FIXED_ADDRESS), diff --git a/src/util/heap/layout/map64.rs b/src/util/heap/layout/map64.rs index 8cf593c936..08de2cde37 100644 --- a/src/util/heap/layout/map64.rs +++ b/src/util/heap/layout/map64.rs @@ -100,7 +100,7 @@ impl VMMap for Map64 { units as _, grain, heads, - MmapStrategy::Normal, + MmapStrategy::INTERNAL_MEMORY, )); /* Adjust the base address and highwater to account for the allocated chunks for the map */ diff --git a/src/util/heap/layout/mmapper.rs b/src/util/heap/layout/mmapper.rs index 898f3a167b..9039bfe261 100644 --- a/src/util/heap/layout/mmapper.rs +++ b/src/util/heap/layout/mmapper.rs @@ -8,9 +8,6 @@ use std::io::Result; /// Generic mmap and protection functionality pub trait Mmapper: Sync { - /// Set mmap strategy - fn set_mmap_strategy(&self, strategy: MmapStrategy); - /// Given an address array describing the regions of virtual memory to be used /// by MMTk, demand zero map all of them if they are not already mapped. /// @@ -35,7 +32,12 @@ pub trait Mmapper: Sync { /// Arguments: /// * `start`: Address of the first page to be quarantined /// * `bytes`: Number of bytes to quarantine from the start - fn quarantine_address_range(&self, start: Address, pages: usize) -> Result<()>; + fn quarantine_address_range( + &self, + start: Address, + pages: usize, + strategy: MmapStrategy, + ) -> Result<()>; /// Ensure that a range of pages is mmapped (or equivalent). If the /// pages are not yet mapped, demand-zero map them. Note that mapping @@ -47,7 +49,7 @@ pub trait Mmapper: Sync { // NOTE: There is a monotonicity assumption so that only updates require lock // acquisition. // TODO: Fix the above to support unmapping. - fn ensure_mapped(&self, start: Address, pages: usize) -> Result<()>; + fn ensure_mapped(&self, start: Address, pages: usize, strategy: MmapStrategy) -> Result<()>; /// Is the page pointed to by this address mapped? Returns true if /// the page at the given address is mapped. @@ -94,7 +96,7 @@ impl MapState { ); let res = match state.load(Ordering::Relaxed) { MapState::Unmapped => dzmmap_noreplace(mmap_start, MMAP_CHUNK_BYTES, strategy), - MapState::Protected => munprotect(mmap_start, MMAP_CHUNK_BYTES), + MapState::Protected => munprotect(mmap_start, MMAP_CHUNK_BYTES, strategy.prot), MapState::Quarantined => unsafe { dzmmap(mmap_start, MMAP_CHUNK_BYTES, strategy) }, // might have become MapState::Mapped here MapState::Mapped => Ok(()), diff --git a/src/util/memory.rs b/src/util/memory.rs index 35950ea4f7..bd550c599a 100644 --- a/src/util/memory.rs +++ b/src/util/memory.rs @@ -8,6 +8,82 @@ use std::io::{Error, Result}; use sysinfo::MemoryRefreshKind; use sysinfo::{RefreshKind, System}; +#[cfg(target_os = "linux")] +// MAP_FIXED_NOREPLACE returns EEXIST if already mapped +const MMAP_FLAGS: libc::c_int = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED_NOREPLACE; +#[cfg(target_os = "macos")] +// MAP_FIXED is used instead of MAP_FIXED_NOREPLACE (which is not available on macOS). We are at the risk of overwriting pre-existing mappings. +const MMAP_FLAGS: libc::c_int = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED; + +/// Strategy for performing mmap +#[derive(Debug, Copy, Clone)] +pub struct MmapStrategy { + /// Do we support huge pages? 
+ pub huge_page: HugePageSupport, + /// The protection flags for mmap + pub prot: MmapProtection, +} + +impl MmapStrategy { + /// Create a new strategy + pub fn new(transparent_hugepages: bool, prot: MmapProtection) -> Self { + Self { + huge_page: if transparent_hugepages { + HugePageSupport::TransparentHugePages + } else { + HugePageSupport::No + }, + prot, + } + } + + /// The strategy for MMTk's own internal memory + pub const INTERNAL_MEMORY: Self = Self { + huge_page: HugePageSupport::No, + prot: MmapProtection::ReadWrite, + }; + + /// The strategy for MMTk side metadata + pub const SIDE_METADATA: Self = Self::INTERNAL_MEMORY; + + /// The strategy for MMTk's test memory + #[cfg(test)] + pub const TEST: Self = Self::INTERNAL_MEMORY; +} + +/// The protection flags for Mmap +#[repr(i32)] +#[derive(Debug, Copy, Clone)] +pub enum MmapProtection { + /// Allow read + write + ReadWrite, + /// Allow read + write + code execution + ReadWriteExec, + /// Do not allow any access + NoAccess, +} + +impl MmapProtection { + /// Turn the protection enum into the native flags + pub fn into_native_flags(self) -> libc::c_int { + match self { + Self::ReadWrite => PROT_READ | PROT_WRITE, + Self::ReadWriteExec => PROT_READ | PROT_WRITE | PROT_EXEC, + Self::NoAccess => PROT_NONE, + } + } +} + +/// Support for huge pages +#[repr(u8)] +#[derive(Debug, Copy, Clone, NoUninit)] +pub enum HugePageSupport { + /// No support for huge page + No, + /// Enable transparent huge pages for the pages that are mapped. This option is only for linux. + TransparentHugePages, +} + /// Check the result from an mmap function in this module. /// Return true if the mmap has failed due to an existing conflicting mapping. pub(crate) fn result_is_mapped(result: Result<()>) -> bool { @@ -40,9 +116,8 @@ pub fn set(start: Address, val: u8, len: usize) { /// may corrupt others' data. #[allow(clippy::let_and_return)] // Zeroing is not neceesary for some OS/s pub unsafe fn dzmmap(start: Address, size: usize, strategy: MmapStrategy) -> Result<()> { - let prot = MMAP_PROT; let flags = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED; - let ret = mmap_fixed(start, size, prot, flags, strategy); + let ret = mmap_fixed(start, size, flags, strategy); // We do not need to explicitly zero for Linux (memory is guaranteed to be zeroed) #[cfg(not(target_os = "linux"))] if ret.is_ok() { @@ -50,42 +125,13 @@ pub unsafe fn dzmmap(start: Address, size: usize, strategy: MmapStrategy) -> Res } ret } - -#[cfg(target_os = "linux")] -// MAP_FIXED_NOREPLACE returns EEXIST if already mapped -const MMAP_FLAGS: libc::c_int = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED_NOREPLACE; -#[cfg(target_os = "macos")] -// MAP_FIXED is used instead of MAP_FIXED_NOREPLACE (which is not available on macOS). We are at the risk of overwriting pre-existing mappings. -const MMAP_FLAGS: libc::c_int = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED; - -#[cfg(target_os = "linux")] -const MMAP_PROT: libc::c_int = PROT_READ | PROT_WRITE | PROT_EXEC; -#[cfg(target_os = "macos")] -// PROT_EXEC cannot be used with PROT_READ on Apple Silicon -const MMAP_PROT: libc::c_int = PROT_READ | PROT_WRITE; - -/// Strategy for performing mmap -/// -/// This currently supports switching between different huge page allocation -/// methods. However, this can later be refactored to reduce other code -/// repetition. -#[repr(u8)] -#[derive(Debug, Copy, Clone, NoUninit)] -pub enum MmapStrategy { - /// The default mmap strategy. 
- Normal, - /// Enable transparent huge pages for the pages that are mapped. This option is only for linux. - TransparentHugePages, -} - /// Demand-zero mmap (no replace): /// This function mmaps the memory and guarantees to zero all mapped memory. /// This function will not overwrite existing memory mapping, and it will result Err if there is an existing mapping. #[allow(clippy::let_and_return)] // Zeroing is not neceesary for some OS/s pub fn dzmmap_noreplace(start: Address, size: usize, strategy: MmapStrategy) -> Result<()> { - let prot = MMAP_PROT; let flags = MMAP_FLAGS; - let ret = mmap_fixed(start, size, prot, flags, strategy); + let ret = mmap_fixed(start, size, flags, strategy); // We do not need to explicitly zero for Linux (memory is guaranteed to be zeroed) #[cfg(not(target_os = "linux"))] if ret.is_ok() { @@ -98,27 +144,27 @@ pub fn dzmmap_noreplace(start: Address, size: usize, strategy: MmapStrategy) -> /// This function does not reserve swap space for this mapping, which means there is no guarantee that writes to the /// mapping can always be successful. In case of out of physical memory, one may get a segfault for writing to the mapping. /// We can use this to reserve the address range, and then later overwrites the mapping with dzmmap(). -pub fn mmap_noreserve(start: Address, size: usize, strategy: MmapStrategy) -> Result<()> { - let prot = PROT_NONE; +pub fn mmap_noreserve(start: Address, size: usize, mut strategy: MmapStrategy) -> Result<()> { + strategy.prot = MmapProtection::NoAccess; let flags = MMAP_FLAGS | libc::MAP_NORESERVE; - mmap_fixed(start, size, prot, flags, strategy) + mmap_fixed(start, size, flags, strategy) } fn mmap_fixed( start: Address, size: usize, - prot: libc::c_int, flags: libc::c_int, strategy: MmapStrategy, ) -> Result<()> { let ptr = start.to_mut_ptr(); + let prot = strategy.prot.into_native_flags(); wrap_libc_call( &|| unsafe { libc::mmap(start.to_mut_ptr(), size, prot, flags, -1, 0) }, ptr, )?; - match strategy { - MmapStrategy::Normal => Ok(()), - MmapStrategy::TransparentHugePages => { + match strategy.huge_page { + HugePageSupport::No => Ok(()), + HugePageSupport::TransparentHugePages => { #[cfg(target_os = "linux")] { wrap_libc_call( @@ -174,6 +220,7 @@ pub fn handle_mmap_error(error: Error, tls: VMThread) -> ! { } _ => {} } + eprintln!("{}", get_process_memory_maps()); panic!("Unexpected mmap failure: {:?}", error) } @@ -182,9 +229,16 @@ pub fn handle_mmap_error(error: Error, tls: VMThread) -> ! { /// Be very careful about using this function. #[cfg(target_os = "linux")] pub(crate) fn panic_if_unmapped(start: Address, size: usize) { - let prot = PROT_READ | PROT_WRITE; let flags = MMAP_FLAGS; - match mmap_fixed(start, size, prot, flags, MmapStrategy::Normal) { + match mmap_fixed( + start, + size, + flags, + MmapStrategy { + huge_page: HugePageSupport::No, + prot: MmapProtection::ReadWrite, + }, + ) { Ok(_) => panic!("{} of size {} is not mapped", start, size), Err(e) => { assert!( @@ -207,9 +261,10 @@ pub(crate) fn panic_if_unmapped(_start: Address, _size: usize) { } /// Unprotect the given memory (in page granularity) to allow access (PROT_READ/WRITE/EXEC). 
-pub fn munprotect(start: Address, size: usize) -> Result<()> { +pub fn munprotect(start: Address, size: usize, prot: MmapProtection) -> Result<()> { + let prot = prot.into_native_flags(); wrap_libc_call( - &|| unsafe { libc::mprotect(start.to_mut_ptr(), size, PROT_READ | PROT_WRITE | PROT_EXEC) }, + &|| unsafe { libc::mprotect(start.to_mut_ptr(), size, prot) }, 0, ) } @@ -285,10 +340,10 @@ mod tests { serial_test(|| { with_cleanup( || { - let res = unsafe { dzmmap(START, BYTES_IN_PAGE, MmapStrategy::Normal) }; + let res = unsafe { dzmmap(START, BYTES_IN_PAGE, MmapStrategy::TEST) }; assert!(res.is_ok()); // We can overwrite with dzmmap - let res = unsafe { dzmmap(START, BYTES_IN_PAGE, MmapStrategy::Normal) }; + let res = unsafe { dzmmap(START, BYTES_IN_PAGE, MmapStrategy::TEST) }; assert!(res.is_ok()); }, || { @@ -303,7 +358,7 @@ mod tests { serial_test(|| { with_cleanup( || { - let res = dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::Normal); + let res = dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::TEST); assert!(res.is_ok()); let res = munmap(START, BYTES_IN_PAGE); assert!(res.is_ok()); @@ -322,10 +377,10 @@ mod tests { with_cleanup( || { // Make sure we mmapped the memory - let res = unsafe { dzmmap(START, BYTES_IN_PAGE, MmapStrategy::Normal) }; + let res = unsafe { dzmmap(START, BYTES_IN_PAGE, MmapStrategy::TEST) }; assert!(res.is_ok()); // Use dzmmap_noreplace will fail - let res = dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::Normal); + let res = dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::TEST); assert!(res.is_err()); }, || { @@ -340,10 +395,10 @@ mod tests { serial_test(|| { with_cleanup( || { - let res = mmap_noreserve(START, BYTES_IN_PAGE, MmapStrategy::Normal); + let res = mmap_noreserve(START, BYTES_IN_PAGE, MmapStrategy::TEST); assert!(res.is_ok()); // Try reserve it - let res = unsafe { dzmmap(START, BYTES_IN_PAGE, MmapStrategy::Normal) }; + let res = unsafe { dzmmap(START, BYTES_IN_PAGE, MmapStrategy::TEST) }; assert!(res.is_ok()); }, || { @@ -375,7 +430,7 @@ mod tests { serial_test(|| { with_cleanup( || { - assert!(dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::Normal).is_ok()); + assert!(dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::TEST).is_ok()); panic_if_unmapped(START, BYTES_IN_PAGE); }, || { @@ -393,7 +448,7 @@ mod tests { with_cleanup( || { // map 1 page from START - assert!(dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::Normal).is_ok()); + assert!(dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::TEST).is_ok()); // check if the next page is mapped - which should panic panic_if_unmapped(START + BYTES_IN_PAGE, BYTES_IN_PAGE); @@ -415,7 +470,7 @@ mod tests { with_cleanup( || { // map 1 page from START - assert!(dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::Normal).is_ok()); + assert!(dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::TEST).is_ok()); // check if the 2 pages from START are mapped. The second page is unmapped, so it should panic. 
panic_if_unmapped(START, BYTES_IN_PAGE * 2); diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs index 123698aa6e..39df588b41 100644 --- a/src/util/metadata/side_metadata/global.rs +++ b/src/util/metadata/side_metadata/global.rs @@ -1588,6 +1588,7 @@ mod tests { use crate::util::heap::layout::vm_layout; use crate::util::test_util::{serial_test, with_cleanup}; + use memory::MmapStrategy; use paste::paste; const TEST_LOG_BYTES_IN_REGION: usize = 12; @@ -1613,7 +1614,9 @@ mod tests { let data_addr = vm_layout::vm_layout().heap_start; // Make sure the address is mapped. - crate::MMAPPER.ensure_mapped(data_addr, 1).unwrap(); + crate::MMAPPER + .ensure_mapped(data_addr, 1, MmapStrategy::TEST) + .unwrap(); let meta_addr = address_to_meta_address(&spec, data_addr); with_cleanup( || { diff --git a/src/util/metadata/side_metadata/helpers.rs b/src/util/metadata/side_metadata/helpers.rs index b9bf197dfb..845ad79666 100644 --- a/src/util/metadata/side_metadata/helpers.rs +++ b/src/util/metadata/side_metadata/helpers.rs @@ -2,6 +2,7 @@ use super::SideMetadataSpec; use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::constants::{BITS_IN_WORD, BYTES_IN_PAGE, LOG_BITS_IN_BYTE}; use crate::util::heap::layout::vm_layout::VMLayout; +use crate::util::memory::MmapStrategy; #[cfg(target_pointer_width = "32")] use crate::util::metadata::side_metadata::address_to_chunked_meta_address; use crate::util::Address; @@ -105,9 +106,17 @@ pub(super) fn try_mmap_contiguous_metadata_space( let mmap_size = (metadata_start + metadata_size).align_up(BYTES_IN_PAGE) - mmap_start; if mmap_size > 0 { if !no_reserve { - MMAPPER.ensure_mapped(mmap_start, mmap_size >> LOG_BYTES_IN_PAGE) + MMAPPER.ensure_mapped( + mmap_start, + mmap_size >> LOG_BYTES_IN_PAGE, + MmapStrategy::SIDE_METADATA, + ) } else { - MMAPPER.quarantine_address_range(mmap_start, mmap_size >> LOG_BYTES_IN_PAGE) + MMAPPER.quarantine_address_range( + mmap_start, + mmap_size >> LOG_BYTES_IN_PAGE, + MmapStrategy::SIDE_METADATA, + ) } .map(|_| mmap_size) } else { diff --git a/src/util/metadata/side_metadata/helpers_32.rs b/src/util/metadata/side_metadata/helpers_32.rs index 233ca4d36e..ef413c0cc6 100644 --- a/src/util/metadata/side_metadata/helpers_32.rs +++ b/src/util/metadata/side_metadata/helpers_32.rs @@ -181,8 +181,16 @@ pub(super) fn try_mmap_metadata_chunk( let pages = crate::util::conversions::bytes_to_pages_up(local_per_chunk); if !no_reserve { // We have reserved the memory - MMAPPER.ensure_mapped(policy_meta_start, pages) + MMAPPER.ensure_mapped( + policy_meta_start, + pages, + memory::MmapStrategy::SIDE_METADATA, + ) } else { - MMAPPER.quarantine_address_range(policy_meta_start, pages) + MMAPPER.quarantine_address_range( + policy_meta_start, + pages, + memory::MmapStrategy::SIDE_METADATA, + ) } } diff --git a/src/util/options.rs b/src/util/options.rs index dd773989bd..cad38c8d5c 100644 --- a/src/util/options.rs +++ b/src/util/options.rs @@ -862,7 +862,8 @@ options! { /// Set the GC trigger. This defines the heap size and how MMTk triggers a GC. /// Default to a fixed heap size of 0.5x physical memory. 
gc_trigger: GCTriggerSelector [env_var: true, command_line: true] [|v: &GCTriggerSelector| v.validate()] = GCTriggerSelector::FixedHeapSize((crate::util::memory::get_system_total_memory() as f64 * 0.5f64) as usize), - /// Enable transparent hugepage support via madvise (only Linux is supported) + /// Enable transparent hugepage support for MMTk spaces via madvise (only Linux is supported) + /// This only affects the memory for MMTk spaces. transparent_hugepages: bool [env_var: true, command_line: true] [|v: &bool| !v || cfg!(target_os = "linux")] = false } diff --git a/src/util/raw_memory_freelist.rs b/src/util/raw_memory_freelist.rs index 58711c654a..0b496ab47e 100644 --- a/src/util/raw_memory_freelist.rs +++ b/src/util/raw_memory_freelist.rs @@ -273,7 +273,7 @@ mod tests { list_size as _, grain, 1, - MmapStrategy::Normal, + MmapStrategy::TEST, ); // Grow the free-list to do the actual memory-mapping. l.grow_freelist(list_size as _); diff --git a/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs b/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs index fa9400e451..b36ccaef7d 100644 --- a/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs +++ b/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs @@ -12,12 +12,12 @@ pub fn test_handle_mmap_conflict() { let start = unsafe { Address::from_usize(0x100_0000) }; let one_megabyte = 1000000; let mmap1_res = - memory::dzmmap_noreplace(start, one_megabyte, memory::MmapStrategy::Normal); + memory::dzmmap_noreplace(start, one_megabyte, memory::MmapStrategy::TEST); assert!(mmap1_res.is_ok()); let panic_res = std::panic::catch_unwind(|| { let mmap2_res = - memory::dzmmap_noreplace(start, one_megabyte, memory::MmapStrategy::Normal); + memory::dzmmap_noreplace(start, one_megabyte, memory::MmapStrategy::TEST); assert!(mmap2_res.is_err()); memory::handle_mmap_error::( mmap2_res.err().unwrap(), diff --git a/src/vm/tests/mock_tests/mock_test_handle_mmap_oom.rs b/src/vm/tests/mock_tests/mock_test_handle_mmap_oom.rs index c2886d745c..357169c78c 100644 --- a/src/vm/tests/mock_tests/mock_test_handle_mmap_oom.rs +++ b/src/vm/tests/mock_tests/mock_test_handle_mmap_oom.rs @@ -19,7 +19,7 @@ pub fn test_handle_mmap_oom() { // mmap 1 terabyte memory - we expect this will fail due to out of memory. // If that's not the case, increase the size we mmap. let mmap_res = - memory::dzmmap_noreplace(start, LARGE_SIZE, memory::MmapStrategy::Normal); + memory::dzmmap_noreplace(start, LARGE_SIZE, memory::MmapStrategy::TEST); memory::handle_mmap_error::( mmap_res.err().unwrap(),
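
For reference, a minimal sketch (not part of the patch) of what a space-level mapping request looks like after this change, assuming crate-internal paths from the diff and a hypothetical helper name `map_space_pages`: each space derives an `MmapStrategy` from its own options and protection needs and passes it with every `ensure_mapped` call, instead of the mmapper carrying a single global strategy.

// Illustrative sketch only -- not part of the diff above. Paths follow
// src/util/memory.rs and src/policy/space.rs; `map_space_pages` is a made-up name.
use crate::util::heap::layout::Mmapper;
use crate::util::memory::{MmapProtection, MmapStrategy};
use crate::util::options::Options;
use crate::util::Address;

fn map_space_pages(options: &Options, start: Address, pages: usize) -> std::io::Result<()> {
    // A data space maps read/write; a code space would pass ReadWriteExec instead
    // (and every space does when `exec_permission_on_all_spaces` is enabled).
    let strategy = MmapStrategy::new(
        *options.transparent_hugepages, // per-MMTk option, no longer a global mmapper setting
        MmapProtection::ReadWrite,
    );
    // The strategy now accompanies every request to the mmapper.
    crate::MMAPPER.ensure_mapped(start, pages, strategy)
}

In the actual space code this derivation lives in CommonSpace::mmap_strategy(), and mmap failures are routed through handle_mmap_error, as the diff to src/policy/space.rs shows.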