From 0e7ad478b8b39397b713b238a6c7904e884729d7 Mon Sep 17 00:00:00 2001
From: Yi Lin
Date: Tue, 4 Jul 2023 01:21:36 +0000
Subject: [PATCH 1/7] Add ExternalPageResource and allow discontiguous VM space

---
 src/memory_manager.rs                 |   8 +-
 src/plan/global.rs                    |   6 +-
 src/plan/mod.rs                       |   2 -
 src/policy/vmspace.rs                 | 266 +++++++++++++-------------
 src/util/heap/externalpageresource.rs |  83 ++++++++
 src/util/heap/mod.rs                  |   1 +
 6 files changed, 230 insertions(+), 136 deletions(-)
 create mode 100644 src/util/heap/externalpageresource.rs

diff --git a/src/memory_manager.rs b/src/memory_manager.rs
index 90fcbb7228..8c7f6b0f12 100644
--- a/src/memory_manager.rs
+++ b/src/memory_manager.rs
@@ -88,9 +88,13 @@ pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {
     Box::new(mmtk)
 }
 
+/// Add an externally mmapped region to the VM space. A VM space can be set through MMTk options (`vm_space_start` and `vm_space_size`),
+/// and can also be set through this function call. A VM space can be discontiguous. This function can be called multiple times,
+/// and all the address ranges passed to this function will be considered part of the VM space.
+/// Currently we do not allow remove regions from VM space.
 #[cfg(feature = "vm_space")]
-pub fn lazy_init_vm_space<VM: VMBinding>(mmtk: &'static mut MMTK<VM>, start: Address, size: usize) {
-    mmtk.plan.base_mut().vm_space.lazy_initialize(start, size);
+pub fn set_vm_space<VM: VMBinding>(mmtk: &'static mut MMTK<VM>, start: Address, size: usize) {
+    mmtk.plan.base_mut().vm_space.set_vm_region(start, size);
 }
 
 /// Request MMTk to create a mutator for the given thread. The ownership
diff --git a/src/plan/global.rs b/src/plan/global.rs
index 0a1dbb5f45..faefbc171b 100644
--- a/src/plan/global.rs
+++ b/src/plan/global.rs
@@ -501,7 +501,11 @@ impl<VM: VMBinding> BasePlan<VM> {
                 VMRequest::discontiguous(),
             )),
             #[cfg(feature = "vm_space")]
-            vm_space: VMSpace::new(&mut args),
+            vm_space: VMSpace::new(args.get_space_args(
+                "vm_space",
+                false,
+                VMRequest::discontiguous(),
+            )),
             initialized: AtomicBool::new(false),
             trigger_gc_when_heap_is_full: AtomicBool::new(true),
diff --git a/src/plan/mod.rs b/src/plan/mod.rs
index 15c16182fa..b2c415464d 100644
--- a/src/plan/mod.rs
+++ b/src/plan/mod.rs
@@ -26,8 +26,6 @@ pub use global::AllocationSemantics;
 pub(crate) use global::GcStatus;
 pub use global::Plan;
 pub(crate) use global::PlanTraceObject;
-#[cfg(feature = "vm_space")] // This is used for creating VM space
-pub(crate) use global::{CreateGeneralPlanArgs, CreateSpecificPlanArgs};
 
 mod mutator_context;
 pub use mutator_context::Mutator;
diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs
index 387e0ef116..f511e2e152 100644
--- a/src/policy/vmspace.rs
+++ b/src/policy/vmspace.rs
@@ -1,52 +1,79 @@
-use crate::plan::{CreateGeneralPlanArgs, CreateSpecificPlanArgs};
+use crate::mmtk::SFT_MAP;
 use crate::plan::{ObjectQueue, VectorObjectQueue};
-use crate::policy::immortalspace::ImmortalSpace;
 use crate::policy::sft::GCWorkerMutRef;
 use crate::policy::sft::SFT;
 use crate::policy::space::{CommonSpace, Space};
 use crate::util::address::Address;
-use crate::util::heap::HeapMeta;
+use crate::util::constants::BYTES_IN_PAGE;
+use crate::util::heap::externalpageresource::{ExternalPageResource, ExternalPages};
+use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
 use crate::util::heap::PageResource;
-use crate::util::heap::VMRequest;
-use crate::util::metadata::side_metadata::SideMetadataContext;
-use crate::util::metadata::side_metadata::SideMetadataSanity;
+use crate::util::metadata::mark_bit::MarkState;
+use crate::util::opaque_pointer::*;
 use crate::util::ObjectReference;
-use crate::vm::VMBinding;
+use crate::vm::{ObjectModel, VMBinding};
 
-use delegate::delegate;
+use std::sync::atomic::Ordering;
 
+/// A special space for VM/Runtime managed memory. The implementation is similar to [`crate::policy::immortalspace::ImmortalSpace`],
+/// except that the VM space does not allocate. Instead, the runtime can add externally managed
+/// and mmapped regions to the space, and objects in those regions can then be traced in the same way
+/// as other objects allocated by MMTk.
 pub struct VMSpace<VM: VMBinding> {
-    inner: Option<ImmortalSpace<VM>>,
-    // Save it
-    args: CreateSpecificPlanArgs<VM>,
+    mark_state: MarkState,
+    common: CommonSpace<VM>,
+    pr: ExternalPageResource<VM>,
 }
 
 impl<VM: VMBinding> SFT for VMSpace<VM> {
-    delegate! {
-        // Delegate every call to the inner space. Given that we have acquired SFT, we can assume there are objects in the space and the space is initialized.
-        to self.space() {
-            fn name(&self) -> &str;
-            fn is_live(&self, object: ObjectReference) -> bool;
-            fn is_reachable(&self, object: ObjectReference) -> bool;
-            #[cfg(feature = "object_pinning")]
-            fn pin_object(&self, object: ObjectReference) -> bool;
-            #[cfg(feature = "object_pinning")]
-            fn unpin_object(&self, object: ObjectReference) -> bool;
-            #[cfg(feature = "object_pinning")]
-            fn is_object_pinned(&self, object: ObjectReference) -> bool;
-            fn is_movable(&self) -> bool;
-            #[cfg(feature = "sanity")]
-            fn is_sane(&self) -> bool;
-            fn initialize_object_metadata(&self, object: ObjectReference, alloc: bool);
-            #[cfg(feature = "is_mmtk_object")]
-            fn is_mmtk_object(&self, addr: Address) -> bool;
-            fn sft_trace_object(
-                &self,
-                queue: &mut VectorObjectQueue,
-                object: ObjectReference,
-                worker: GCWorkerMutRef,
-            ) -> ObjectReference;
+    fn name(&self) -> &str {
+        self.common.name
+    }
+    fn is_live(&self, _object: ObjectReference) -> bool {
+        true
+    }
+    fn is_reachable(&self, object: ObjectReference) -> bool {
+        self.mark_state.is_marked::<VM>(object)
+    }
+    #[cfg(feature = "object_pinning")]
+    fn pin_object(&self, _object: ObjectReference) -> bool {
+        false
+    }
+    #[cfg(feature = "object_pinning")]
+    fn unpin_object(&self, _object: ObjectReference) -> bool {
+        false
+    }
+    #[cfg(feature = "object_pinning")]
+    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
+        true
+    }
+    fn is_movable(&self) -> bool {
+        false
+    }
+    #[cfg(feature = "sanity")]
+    fn is_sane(&self) -> bool {
+        true
+    }
+    fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) {
+        self.mark_state
+            .on_object_metadata_initialization::<VM>(object);
+        if self.common.needs_log_bit {
+            VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::<VM>(object, Ordering::SeqCst);
         }
+        #[cfg(feature = "vo_bit")]
+        crate::util::metadata::vo_bit::set_vo_bit::<VM>(object);
+    }
+    #[cfg(feature = "is_mmtk_object")]
+    fn is_mmtk_object(&self, addr: Address) -> bool {
+        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr).is_some()
+    }
+    fn sft_trace_object(
+        &self,
+        queue: &mut VectorObjectQueue,
+        object: ObjectReference,
+        _worker: GCWorkerMutRef,
+    ) -> ObjectReference {
+        self.trace_object(queue, object)
     }
 }
 
@@ -58,38 +85,29 @@ impl<VM: VMBinding> Space<VM> for VMSpace<VM> {
         self
     }
     fn get_page_resource(&self) -> &dyn PageResource<VM> {
-        self.space().get_page_resource()
+        &self.pr
    }
    fn common(&self) -> &CommonSpace<VM> {
-        self.space().common()
+        &self.common
    }
 
    fn initialize_sft(&self) {
-        if self.inner.is_some() {
-            self.common().initialize_sft(self.as_sft())
-        }
+        // Do nothing here. We always initialize the SFT when external pages are added.
    }
 
    fn release_multiple_pages(&mut self, _start: Address) {
-        panic!("immortalspace only releases pages enmasse")
+        unreachable!()
    }
 
-    fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
-        side_metadata_sanity_checker.verify_metadata_context(
-            std::any::type_name::<Self>(),
-            &SideMetadataContext {
-                global: self.args.global_side_metadata_specs.clone(),
-                local: vec![],
-            },
-        )
+    fn acquire(&self, _tls: VMThread, _pages: usize) -> Address {
+        unreachable!()
    }
 
    fn address_in_space(&self, start: Address) -> bool {
-        if let Some(space) = self.space_maybe() {
-            space.address_in_space(start)
-        } else {
-            false
-        }
+        // The default implementation checks with vm map. But vm map has some assumptions about
+        // the address range for spaces and the VM space breaks those assumptions (as the space is
+        // mmapped by the runtime rather than us). So we use SFT here.
+        SFT_MAP.get_checked(start).name() == self.name()
    }
 }
 
@@ -112,107 +130,87 @@ impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for VMSpace<V
 
 impl<VM: VMBinding> VMSpace<VM> {
-    pub fn new(args: &mut CreateSpecificPlanArgs<VM>) -> Self {
-        let args_clone = CreateSpecificPlanArgs {
-            global_args: CreateGeneralPlanArgs {
-                vm_map: args.global_args.vm_map,
-                mmapper: args.global_args.mmapper,
-                heap: HeapMeta::new(), // we do not use this
-                options: args.global_args.options.clone(),
-                gc_trigger: args.global_args.gc_trigger.clone(),
-                scheduler: args.global_args.scheduler.clone(),
-            },
-            constraints: args.constraints,
-            global_side_metadata_specs: args.global_side_metadata_specs.clone(),
+    pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
+        let (vm_space_start, vm_space_size) =
+            (*args.options.vm_space_start, *args.options.vm_space_size);
+        let space = Self {
+            mark_state: MarkState::new(),
+            pr: ExternalPageResource::new(args.vm_map),
+            common: CommonSpace::new(args.into_policy_args(
+                false,
+                true,
+                crate::util::metadata::extract_side_metadata(&[
+                    *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC,
+                ]),
+            )),
         };
-        // Create the space if the VM space start/size is set. Otherwise, use None.
-        let inner = (!args.global_args.options.vm_space_start.is_zero())
-            .then(|| Self::create_space(args, None));
-        Self {
-            inner,
-            args: args_clone,
+
+        if !vm_space_start.is_zero() {
+            space.add_external_pages(vm_space_start, vm_space_size);
         }
-    }
 
-    pub fn lazy_initialize(&mut self, start: Address, size: usize) {
-        assert!(self.inner.is_none(), "VM space has been initialized");
-        self.inner = Some(Self::create_space(&mut self.args, Some((start, size))));
+        space
+    }
 
-        self.common().initialize_sft(self.as_sft());
+    pub fn set_vm_region(&mut self, start: Address, size: usize) {
+        self.add_external_pages(start, size);
    }
 
-    fn create_space(
-        args: &mut CreateSpecificPlanArgs<VM>,
-        location: Option<(Address, usize)>,
-    ) -> ImmortalSpace<VM> {
-        use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
+    pub fn add_external_pages(&self, start: Address, size: usize) {
+        let start = start.align_down(BYTES_IN_PAGE);
+        let end = (start + size).align_up(BYTES_IN_PAGE);
+        let size = end - start;
 
-        // If the location of the VM space is not supplied, find them in the options.
-        let (vm_space_start, vm_space_bytes) = location.unwrap_or((
-            *args.global_args.options.vm_space_start,
-            *args.global_args.options.vm_space_size,
-        ));
-        // Verify the start and the size is valid
-        assert!(!vm_space_start.is_zero());
-        assert!(vm_space_bytes > 0);
+        assert!(!start.is_zero());
+        assert!(size > 0);
 
-        // We only map on chunk granularity. Align them.
-        let vm_space_start_aligned = vm_space_start.align_down(BYTES_IN_CHUNK);
-        let vm_space_end_aligned = (vm_space_start + vm_space_bytes).align_up(BYTES_IN_CHUNK);
-        let vm_space_bytes_aligned = vm_space_end_aligned - vm_space_start_aligned;
+        let chunk_start = start.align_down(BYTES_IN_CHUNK);
+        let chunk_end = end.align_up(BYTES_IN_CHUNK);
+        let chunk_size = chunk_end - chunk_start;
 
        // For simplicity, VMSpace has to be outside our available heap range.
        // TODO: Allow VMSpace in our available heap range.
        assert!(Address::range_intersection(
-            &(vm_space_start_aligned..vm_space_end_aligned),
+            &(chunk_start..chunk_end),
            &crate::util::heap::layout::available_range()
        )
        .is_empty());
 
        debug!(
            "Align VM space ({}, {}) to chunk ({}, {})",
-            vm_space_start,
-            vm_space_start + vm_space_bytes,
-            vm_space_start_aligned,
-            vm_space_end_aligned
-        );
-
-        let space_args = args.get_space_args(
-            "vm_space",
-            false,
-            VMRequest::fixed(vm_space_start_aligned, vm_space_bytes_aligned),
+            start, end, chunk_start, chunk_end
        );
-        let space =
-            ImmortalSpace::new_vm_space(space_args, vm_space_start_aligned, vm_space_bytes_aligned);
-
-        // The space is mapped externally by the VM. We need to update our mmapper to mark the range as mapped.
-        space.ensure_mapped();
-        space
-    }
-
-    fn space_maybe(&self) -> Option<&ImmortalSpace<VM>> {
-        self.inner.as_ref()
-    }
+        // Mark as mapped in mmapper
+        self.common.mmapper.mark_as_mapped(chunk_start, chunk_size);
+        // Map side metadata
+        self.common
+            .metadata
+            .try_map_metadata_space(chunk_start, chunk_size)
+            .unwrap();
+        // Insert to vm map
+        // self.common.vm_map.insert(chunk_start, chunk_size, self.common.descriptor);
+        // Update SFT
+        assert!(SFT_MAP.has_sft_entry(chunk_start), "The VM space start (aligned to {}) does not have a valid SFT entry. Possibly the address range is not in the address range we use.", chunk_start);
+        unsafe {
+            SFT_MAP.eager_initialize(self.as_sft(), chunk_start, chunk_size);
+        }
 
-    fn space(&self) -> &ImmortalSpace<VM> {
-        self.inner.as_ref().unwrap()
+        self.pr.add_new_external_pages(ExternalPages { start, end });
    }
 
-    // fn space_mut(&mut self) -> &mut ImmortalSpace<VM> {
-    //     self.inner.as_mut().unwrap()
-    // }
-
    pub fn prepare(&mut self) {
-        if let Some(ref mut space) = &mut self.inner {
-            space.prepare()
+        self.mark_state.on_global_prepare::<VM>();
+        for external_pages in self.pr.get_external_pages().iter() {
+            self.mark_state.on_block_reset::<VM>(
+                external_pages.start,
+                external_pages.end - external_pages.start,
+            );
        }
    }
 
    pub fn release(&mut self) {
-        if let Some(ref mut space) = &mut self.inner {
-            space.release()
-        }
+        self.mark_state.on_global_release::<VM>();
    }
 
    pub fn trace_object<Q: ObjectQueue>(
@@ -220,10 +218,16 @@ impl<VM: VMBinding> VMSpace<VM> {
        queue: &mut Q,
        object: ObjectReference,
    ) -> ObjectReference {
-        if let Some(ref space) = &self.inner {
-            space.trace_object(queue, object)
-        } else {
-            panic!("We haven't initialized vm space, but we tried to trace the object {} and thought it was in vm space?", object)
+        #[cfg(feature = "vo_bit")]
+        debug_assert!(
+            crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object),
+            "{:x}: VO bit not set",
+            object
+        );
+        debug_assert!(self.in_space(object));
+        if self.mark_state.test_and_mark::<VM>(object) {
+            queue.enqueue(object);
        }
+        object
    }
 }
diff --git a/src/util/heap/externalpageresource.rs b/src/util/heap/externalpageresource.rs
new file mode 100644
index 0000000000..613042b8ba
--- /dev/null
+++ b/src/util/heap/externalpageresource.rs
@@ -0,0 +1,83 @@
+use super::layout::VMMap;
+use super::pageresource::{PRAllocFail, PRAllocResult};
+use super::PageResource;
+use crate::util::address::Address;
+use crate::util::constants::BYTES_IN_PAGE;
+use crate::util::constants::LOG_BYTES_IN_PAGE;
+use crate::util::heap::pageresource::CommonPageResource;
+use crate::util::heap::space_descriptor::SpaceDescriptor;
+use crate::util::opaque_pointer::*;
+use crate::vm::VMBinding;
+
+use std::marker::PhantomData;
+use std::sync::{Mutex, MutexGuard};
+
+/// A special page resource that records some external pages that are not mmapped by us,
+/// but are used by our space (namely VM space). Unlike other page resources, we cannot
+/// allocate from this page resource.
+pub struct ExternalPageResource<VM: VMBinding> {
+    common: CommonPageResource,
+    ranges: Mutex<Vec<ExternalPages>>,
+    _p: PhantomData<VM>,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct ExternalPages {
+    pub start: Address,
+    pub end: Address,
+}
+
+impl<VM: VMBinding> PageResource<VM> for ExternalPageResource<VM> {
+    fn common(&self) -> &CommonPageResource {
+        &self.common
+    }
+
+    fn common_mut(&mut self) -> &mut CommonPageResource {
+        &mut self.common
+    }
+
+    fn reserve_pages(&self, _pages: usize) -> usize {
+        unreachable!()
+    }
+    fn commit_pages(&self, _reserved_pages: usize, _actual_pages: usize, _tls: VMThread) {
+        unreachable!()
+    }
+
+    fn get_available_physical_pages(&self) -> usize {
+        0
+    }
+
+    fn alloc_pages(
+        &self,
+        _space_descriptor: SpaceDescriptor,
+        _reserved_pages: usize,
+        _required_pages: usize,
+        _tls: VMThread,
+    ) -> Result<PRAllocResult, PRAllocFail> {
+        panic!("Cannot allocate from ExternalPageResource")
+    }
+}
+
+impl<VM: VMBinding> ExternalPageResource<VM> {
+    pub fn new(vm_map: &'static dyn VMMap) -> Self {
+        Self {
+            common: CommonPageResource::new(false, false, vm_map),
+            ranges: Mutex::new(vec![]),
+            _p: PhantomData,
+        }
+    }
+
+    pub fn add_new_external_pages(&self, pages: ExternalPages) {
+        assert!(pages.start.is_aligned_to(BYTES_IN_PAGE));
+        assert!(pages.end.is_aligned_to(BYTES_IN_PAGE));
+
+        let mut lock = self.ranges.lock().unwrap();
+        let n_pages = (pages.end - pages.start) >> LOG_BYTES_IN_PAGE;
+        self.common.accounting.reserve_and_commit(n_pages);
+        lock.push(pages);
+    }
+
+    pub fn get_external_pages(&self) -> MutexGuard<Vec<ExternalPages>> {
+        self.ranges.lock().unwrap()
+    }
+}
diff --git a/src/util/heap/mod.rs b/src/util/heap/mod.rs
index 903d0c8476..9597d71c17 100644
--- a/src/util/heap/mod.rs
+++ b/src/util/heap/mod.rs
@@ -3,6 +3,7 @@ mod accounting;
 pub mod layout;
 pub mod blockpageresource;
 pub mod chunk_map;
+pub mod externalpageresource;
 pub mod freelistpageresource;
 pub mod gc_trigger;
 mod heap_meta;
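Usage sketch (not part of the patch): a binding might drive the new API roughly as below, assuming a hypothetical `DummyVM` binding type and made-up region addresses, with the `vm_space` feature enabled. Bindings typically keep the MMTK instance in a global; a raw pointer stands in for that here so the `&'static mut` that `set_vm_space` expects can be produced.

    use mmtk::memory_manager;
    use mmtk::util::Address;
    use mmtk::{MMTKBuilder, MMTK};

    fn binding_startup(builder: &MMTKBuilder) -> *mut MMTK<DummyVM> {
        // Initialize MMTk; leaking the box models a global MMTK instance.
        let mmtk: *mut MMTK<DummyVM> = Box::into_raw(memory_manager::mmtk_init(builder));

        // The VM space may be discontiguous: each call registers one more
        // externally mmapped region. Addresses and sizes are illustrative only.
        let boot_image = unsafe { Address::from_usize(0x6000_0000_0000) };
        let vm_metadata = unsafe { Address::from_usize(0x6100_0000_0000) };
        unsafe {
            memory_manager::set_vm_space(&mut *mmtk, boot_image, 16 << 20);
            memory_manager::set_vm_space(&mut *mmtk, vm_metadata, 4 << 20);
        }
        mmtk
    }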
From 7fc052e86ddf7ceff15701a22aa7c81fcd2c390e Mon Sep 17 00:00:00 2001
From: Yi Lin
Date: Tue, 4 Jul 2023 05:25:12 +0000
Subject: [PATCH 2/7] Fix version on is_terminal

---
 Cargo.toml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/Cargo.toml b/Cargo.toml
index db4da419d8..dc47df85b7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -28,6 +28,9 @@ delegate = "0.9.0"
 downcast-rs = "1.1.1"
 enum-map = "2.4.2"
 env_logger = "0.10.0"
+# We do not use this crate, but env_logger uses it. env_logger depends on is-terminal 0.4. However, since 0.4.8, is-terminal requires Rust 1.63.
+# So we pin it to 0.4.7 here. Once we bump our MSRV, we can remove this.
+is-terminal = "=0.4.7"
 itertools = "0.10.5"
 jemalloc-sys = { version = "0.5.3", features = ["disable_initial_exec_tls"], optional = true }
 lazy_static = "1.1"
From cb3b1fd2034ba56005097922f16c4e3e6fb0d3d6 Mon Sep 17 00:00:00 2001
From: Yi Lin
Date: Thu, 27 Jul 2023 01:25:59 +0000
Subject: [PATCH 3/7] Update comment

---
 src/policy/vmspace.rs | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs
index f511e2e152..d8757c3919 100644
--- a/src/policy/vmspace.rs
+++ b/src/policy/vmspace.rs
@@ -105,8 +105,22 @@ impl<VM: VMBinding> Space<VM> for VMSpace<VM> {
 
     fn address_in_space(&self, start: Address) -> bool {
         // The default implementation checks with vm map. But vm map has some assumptions about
-        // the address range for spaces and the VM space breaks those assumptions (as the space is
+        // the address range for spaces and the VM space may break those assumptions (as the space is
         // mmapped by the runtime rather than us). So we use SFT here.
+
+        // However, SFT map may not be an ideal solution either on 64 bits. The default
+        // implementation of SFT map on 64 bits is `SFTSpaceMap`, which maps the entire address
+        // space into an index between 0 and 31, and assumes any address with the same index
+        // is in the same space (with the same SFT). MMTk spaces use indices 1-16. We guarantee that
+        // VM space does not overlap with the address range that MMTk spaces may use. So
+        // any region used as VM space will have an index of 0, or 17-31, and all the addresses
+        // that are mapped to the same index will be considered to be in the VM space. That means,
+        // after we map a region as VM space, the nearby addresses will also be considered
+        // to be in the VM space if we use the default `SFTSpaceMap`. We can guarantee the nearby
+        // addresses are not MMTk spaces, but we cannot tell whether they are really in the VM space
+        // or not.
+        // A solution to this is to use `SFTDenseChunkMap` if `vm_space` is enabled on 64 bits.
+        // `SFTDenseChunkMap` has an overhead of a few percent (~3%) compared to `SFTSpaceMap`.
         SFT_MAP.get_checked(start).name() == self.name()
     }
 }
@@ -188,7 +202,7 @@ impl<VM: VMBinding> VMSpace<VM> {
             .metadata
             .try_map_metadata_space(chunk_start, chunk_size)
             .unwrap();
-        // Insert to vm map
+        // Insert to vm map: it would be good if we could make the VM map aware of the region. However, the region may be outside what we can map in our VM map implementation.
         // self.common.vm_map.insert(chunk_start, chunk_size, self.common.descriptor);
         // Update SFT
         assert!(SFT_MAP.has_sft_entry(chunk_start), "The VM space start (aligned to {}) does not have a valid SFT entry. Possibly the address range is not in the address range we use.", chunk_start);
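The aliasing concern in the new comment can be illustrated with a toy model of `SFTSpaceMap`. The shift amount and the addresses below are made up for the illustration; they are not MMTk's actual constants:

    // Toy model: carve a 48-bit address space into 32 coarse entries, so any
    // two addresses that share an entry are assumed to share an SFT.
    fn toy_sft_index(addr: usize) -> usize {
        addr >> 43 // 2^48 / 32 = 2^43 bytes per entry (illustrative)
    }

    fn main() {
        let vm_region = 0x7800_0000_0000_usize; // registered as VM space (made up)
        let neighbour = vm_region + 0x1000_0000; // nearby address, never registered
        // Both fall into the same entry, so a space-granularity SFT map would
        // attribute `neighbour` to the VM space as well.
        assert_eq!(toy_sft_index(vm_region), toy_sft_index(neighbour));
    }

This is the trade-off the comment describes: `SFTDenseChunkMap` avoids the aliasing by keeping chunk-granularity entries, at a small overhead.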
From 86735eaa9cc97d1e084c364c70062029dae552d4 Mon Sep 17 00:00:00 2001
From: Yi Lin
Date: Tue, 15 Aug 2023 06:44:13 +0000
Subject: [PATCH 4/7] Do not overwrite start/size in add_external_pages

---
 src/policy/vmspace.rs | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs
index d8757c3919..aba67f8530 100644
--- a/src/policy/vmspace.rs
+++ b/src/policy/vmspace.rs
@@ -171,12 +171,10 @@ impl<VM: VMBinding> VMSpace<VM> {
     }
 
     pub fn add_external_pages(&self, start: Address, size: usize) {
-        let start = start.align_down(BYTES_IN_PAGE);
-        let end = (start + size).align_up(BYTES_IN_PAGE);
-        let size = end - start;
-
-        assert!(!start.is_zero());
         assert!(size > 0);
+        assert!(!start.is_zero());
+
+        let end = start + size;
 
         let chunk_start = start.align_down(BYTES_IN_CHUNK);
         let chunk_end = end.align_up(BYTES_IN_CHUNK);
@@ -210,7 +208,10 @@ impl<VM: VMBinding> VMSpace<VM> {
             SFT_MAP.eager_initialize(self.as_sft(), chunk_start, chunk_size);
         }
 
-        self.pr.add_new_external_pages(ExternalPages { start, end });
+        self.pr.add_new_external_pages(ExternalPages {
+            start: start.align_down(BYTES_IN_PAGE),
+            end: end.align_up(BYTES_IN_PAGE),
+        });
     }
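To see why overwriting `start`/`size` mattered, here is a worked example with an assumed 4K page size and a made-up unaligned region:

    const PAGE: usize = 0x1000; // assumed page size for the example

    fn align_down(x: usize) -> usize { x & !(PAGE - 1) }
    fn align_up(x: usize) -> usize { (x + PAGE - 1) & !(PAGE - 1) }

    fn main() {
        // The caller registers bytes [0x1000_0800, 0x1000_1800).
        let (start, size) = (0x1000_0800_usize, 0x1000_usize);

        // Old behaviour: start was aligned down *before* end was computed, so
        // the recorded range lost the tail of the region.
        let old = (align_down(start), align_up(align_down(start) + size));
        assert_eq!(old, (0x1000_0000, 0x1000_1000)); // misses [0x1000_1000, 0x1000_1800)

        // New behaviour: end is derived from the caller's start/size first;
        // alignment only happens when the ExternalPages record is stored.
        let new = (align_down(start), align_up(start + size));
        assert_eq!(new, (0x1000_0000, 0x1000_2000)); // covers the whole region
    }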
From 92b07495b6d3d66714c0a8de5da005fdeb13fabb Mon Sep 17 00:00:00 2001
From: Yi Lin
Date: Mon, 28 Aug 2023 14:47:54 +1200
Subject: [PATCH 5/7] Update src/memory_manager.rs

Co-authored-by: Kunal Sareen
---
 src/memory_manager.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/memory_manager.rs b/src/memory_manager.rs
index 6e3242bf45..90a4a8f751 100644
--- a/src/memory_manager.rs
+++ b/src/memory_manager.rs
@@ -89,7 +89,7 @@ pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {
 /// Add an externally mmapped region to the VM space. A VM space can be set through MMTk options (`vm_space_start` and `vm_space_size`),
 /// and can also be set through this function call. A VM space can be discontiguous. This function can be called multiple times,
 /// and all the address ranges passed to this function will be considered part of the VM space.
-/// Currently we do not allow remove regions from VM space.
+/// Currently we do not allow removing regions from VM space.
 #[cfg(feature = "vm_space")]
 pub fn set_vm_space<VM: VMBinding>(mmtk: &'static mut MMTK<VM>, start: Address, size: usize) {
     mmtk.plan.base_mut().vm_space.set_vm_region(start, size);
From 979a119d1d28c9cfa69acaa312496355b65f9834 Mon Sep 17 00:00:00 2001
From: Yi Lin
Date: Tue, 29 Aug 2023 01:18:25 +0000
Subject: [PATCH 6/7] If VM space is set by options, postpone SFT
 initialization until a later stage.

---
 src/policy/vmspace.rs | 41 +++++++++++++++++++++++++++++++++--------
 1 file changed, 33 insertions(+), 8 deletions(-)

diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs
index 27c0609817..e2964c52c9 100644
--- a/src/policy/vmspace.rs
+++ b/src/policy/vmspace.rs
@@ -92,7 +92,20 @@ impl<VM: VMBinding> Space<VM> for VMSpace<VM> {
     }
 
     fn initialize_sft(&self) {
-        // Do nothing here. We always initialize the SFT when external pages are added.
+        // Initialize SFT for the current external pages. This method is called at the end of plan creation.
+        // So we only set SFT for VM regions that are set by options (we skipped SFT initialization for them earlier).
+        for external_pages in self.pr.get_external_pages().iter() {
+            // Chunk align things.
+            let start = external_pages.start.align_down(BYTES_IN_CHUNK);
+            let size = external_pages.end.align_up(BYTES_IN_CHUNK) - start;
+            // The region should be empty in the SFT map -- if it were set before this point, there could be invalid SFT pointers.
+            debug_assert_eq!(
+                SFT_MAP.get_checked(start).name(),
+                crate::policy::sft::EMPTY_SFT_NAME
+            );
+            // Set SFT
+            self.set_sft_for_vm_region(start, size);
+        }
     }
 
     fn release_multiple_pages(&mut self, _start: Address) {
@@ -160,17 +173,18 @@ impl<VM: VMBinding> VMSpace<VM> {
         };
 
         if !vm_space_start.is_zero() {
-            space.add_external_pages(vm_space_start, vm_space_size);
+            // Do not set SFT here, as the space may be moved. We do so for those regions in `initialize_sft`.
+            space.set_vm_region_inner(vm_space_start, vm_space_size, false);
         }
 
         space
     }
 
     pub fn set_vm_region(&mut self, start: Address, size: usize) {
-        self.add_external_pages(start, size);
+        self.set_vm_region_inner(start, size, true);
     }
 
-    pub fn add_external_pages(&self, start: Address, size: usize) {
+    fn set_vm_region_inner(&self, start: Address, size: usize, set_sft: bool) {
         assert!(size > 0);
         assert!(!start.is_zero());
@@ -202,10 +216,9 @@ impl<VM: VMBinding> VMSpace<VM> {
             .unwrap();
         // Insert to vm map: it would be good if we could make the VM map aware of the region. However, the region may be outside what we can map in our VM map implementation.
         // self.common.vm_map.insert(chunk_start, chunk_size, self.common.descriptor);
-        // Update SFT
-        assert!(SFT_MAP.has_sft_entry(chunk_start), "The VM space start (aligned to {}) does not have a valid SFT entry. Possibly the address range is not in the address range we use.", chunk_start);
-        unsafe {
-            SFT_MAP.eager_initialize(self.as_sft(), chunk_start, chunk_size);
+        // Set SFT if we should
+        if set_sft {
+            self.set_sft_for_vm_region(chunk_start, chunk_size);
         }
 
         self.pr.add_new_external_pages(ExternalPages {
             start: start.align_down(BYTES_IN_PAGE),
             end: end.align_up(BYTES_IN_PAGE),
         });
     }
 
+    fn set_sft_for_vm_region(&self, chunk_start: Address, chunk_size: usize) {
+        assert!(chunk_start.is_aligned_to(BYTES_IN_CHUNK));
+        assert!(crate::util::conversions::raw_is_aligned(
+            chunk_size,
+            BYTES_IN_CHUNK
+        ));
+        assert!(SFT_MAP.has_sft_entry(chunk_start), "The VM space start (aligned to {}) does not have a valid SFT entry. Possibly the address range is not in the address range we use.", chunk_start);
+        unsafe {
+            SFT_MAP.eager_initialize(self.as_sft(), chunk_start, chunk_size);
+        }
+    }
+
     pub fn prepare(&mut self) {
         self.mark_state.on_global_prepare::<VM>();
         for external_pages in self.pr.get_external_pages().iter() {
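The two initialization paths after this patch, sketched with the method names from the series:

    // 1. VM space set via options (start/size known at plan creation):
    //      VMSpace::new()
    //        -> set_vm_region_inner(start, size, /* set_sft = */ false)
    //      ...plan creation completes, the space can no longer move...
    //      initialize_sft()
    //        -> set_sft_for_vm_region() for each recorded ExternalPages range
    //
    // 2. VM space set later by the runtime:
    //      memory_manager::set_vm_space()
    //        -> VMSpace::set_vm_region()
    //        -> set_vm_region_inner(start, size, /* set_sft = */ true)
    //        -> set_sft_for_vm_region() immediately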
From 99d2d9ad25b3a36cea52b0f4ffdc24b16e0ee720 Mon Sep 17 00:00:00 2001
From: Yi Lin
Date: Tue, 29 Aug 2023 01:37:49 +0000
Subject: [PATCH 7/7] Fix default value for RUBY_BINDING_REPO

---
 .github/workflows/pr-binding-refs.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/pr-binding-refs.yml b/.github/workflows/pr-binding-refs.yml
index ddb726162f..a4ab694a57 100644
--- a/.github/workflows/pr-binding-refs.yml
+++ b/.github/workflows/pr-binding-refs.yml
@@ -86,7 +86,7 @@ jobs:
       with:
         pull_request: ${{ inputs.pull_request }}
         token: ${{ secrets.GITHUB_TOKEN }}
-        default_env: 'OPENJDK_BINDING_REPO=mmtk/mmtk-openjdk,OPENJDK_BINDING_REF=master,JIKESRVM_BINDING_REPO=mmtk/mmtk-jikesrvm,JIKESRVM_BINDING_REF=master,V8_BINDING_REPO=mmtk/mmtk-v8,V8_BINDING_REF=master,JULIA_BINDING_REPO=mmtk/mmtk-julia,JULIA_BINDING_REF=master,RUBY_BINDING_REPO=mmtk/mmtk-julia,RUBY_BINDING_REF=master'
+        default_env: 'OPENJDK_BINDING_REPO=mmtk/mmtk-openjdk,OPENJDK_BINDING_REF=master,JIKESRVM_BINDING_REPO=mmtk/mmtk-jikesrvm,JIKESRVM_BINDING_REF=master,V8_BINDING_REPO=mmtk/mmtk-v8,V8_BINDING_REF=master,JULIA_BINDING_REPO=mmtk/mmtk-julia,JULIA_BINDING_REF=master,RUBY_BINDING_REPO=mmtk/mmtk-ruby,RUBY_BINDING_REF=master'
     - id: print
       run: |
         echo "::set-output name=openjdk_binding_repo::${{ env.OPENJDK_BINDING_REPO }}"