diff --git a/.github/workflows/micro-bm.yml b/.github/workflows/micro-bm.yml
index afced4fd1d..53563745bc 100644
--- a/.github/workflows/micro-bm.yml
+++ b/.github/workflows/micro-bm.yml
@@ -61,7 +61,7 @@ jobs:
         with:
           repository: mmtk/ci-perf-kit
           token: ${{ secrets.GITHUB_TOKEN }}
-          ref: "0.6.6"
+          ref: "0.6.8"
           path: ci-perf-kit
           submodules: true
       # Use rust-toolchain in the trunk (it doesn't matter much - if the toolchains defined in the trunk and the branch are different, we can't run anyway)
diff --git a/.github/workflows/perf-compare-ci.yml b/.github/workflows/perf-compare-ci.yml
index 3274bd1602..9ee12ca137 100644
--- a/.github/workflows/perf-compare-ci.yml
+++ b/.github/workflows/perf-compare-ci.yml
@@ -101,7 +101,7 @@ jobs:
         with:
           repository: mmtk/ci-perf-kit
           token: ${{ secrets.CI_ACCESS_TOKEN }}
-          ref: "0.6.6"
+          ref: "0.6.8"
           path: ci-perf-kit
           submodules: true
       # setup
@@ -203,7 +203,7 @@ jobs:
         with:
           repository: mmtk/ci-perf-kit
           token: ${{ secrets.CI_ACCESS_TOKEN }}
-          ref: "0.6.6"
+          ref: "0.6.8"
           path: ci-perf-kit
           submodules: true
       # setup
diff --git a/.github/workflows/perf-jikesrvm-baseline.yml b/.github/workflows/perf-jikesrvm-baseline.yml
index b0a62bcfc2..0145ad4952 100644
--- a/.github/workflows/perf-jikesrvm-baseline.yml
+++ b/.github/workflows/perf-jikesrvm-baseline.yml
@@ -25,7 +25,7 @@ jobs:
         with:
           token: ${{ secrets.CI_ACCESS_TOKEN }}
           repository: mmtk/ci-perf-kit
-          ref: "0.6.6"
+          ref: "0.6.8"
           path: ci-perf-kit
           submodules: true
       # setup
diff --git a/.github/workflows/perf-openjdk-baseline.yml b/.github/workflows/perf-openjdk-baseline.yml
index 64f245ae55..f4a97b8127 100644
--- a/.github/workflows/perf-openjdk-baseline.yml
+++ b/.github/workflows/perf-openjdk-baseline.yml
@@ -25,7 +25,7 @@ jobs:
         with:
           token: ${{ secrets.CI_ACCESS_TOKEN }}
           repository: mmtk/ci-perf-kit
-          ref: "0.6.6"
+          ref: "0.6.8"
           path: ci-perf-kit
           submodules: true
       # setup
diff --git a/.github/workflows/perf-regression-ci.yml b/.github/workflows/perf-regression-ci.yml
index ca1f1a7263..dcdafb350f 100644
--- a/.github/workflows/perf-regression-ci.yml
+++ b/.github/workflows/perf-regression-ci.yml
@@ -41,7 +41,7 @@ jobs:
         uses: actions/checkout@v2
         with:
           repository: mmtk/ci-perf-kit
-          ref: "0.6.6"
+          ref: "0.6.8"
           path: ci-perf-kit
           token: ${{ secrets.CI_ACCESS_TOKEN }}
           submodules: true
@@ -101,7 +101,7 @@ jobs:
         uses: actions/checkout@v2
         with:
           repository: mmtk/ci-perf-kit
-          ref: "0.6.6"
+          ref: "0.6.8"
           path: ci-perf-kit
           token: ${{ secrets.CI_ACCESS_TOKEN }}
           submodules: true
@@ -165,7 +165,7 @@ jobs:
         uses: actions/checkout@v2
         with:
           repository: mmtk/ci-perf-kit
-          ref: "0.6.6"
+          ref: "0.6.8"
           path: ci-perf-kit
           token: ${{ secrets.CI_ACCESS_TOKEN }}
           submodules: true
diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs
index ba2ea1558f..e788f689fb 100644
--- a/src/plan/generational/copying/global.rs
+++ b/src/plan/generational/copying/global.rs
@@ -86,7 +86,7 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
     }
 
     fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
-        let is_full_heap = self.request_full_heap_collection();
+        let is_full_heap = self.requires_full_heap_collection();
         self.base().set_collection_kind::<Self>(self);
         self.base().set_gc_status(GcStatus::GcPrepare);
         if is_full_heap {
@@ -127,6 +127,11 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
             self.fromspace().release();
         }
 
+        // TODO: Refactor so that we set next_gc_full_heap in gen.release(); currently we have to fight the Rust borrow checker to do so.
+        // NOTE: We must take care that `Gen::should_next_gc_be_full_heap()` is called _after_
+        // all spaces have been released (including the ones in `gen`), as otherwise we may get
+        // incorrect results: the function uses values such as the available pages, which
+        // change depending on which spaces have been released.
         self.gen
             .set_next_gc_full_heap(Gen::should_next_gc_be_full_heap(self));
     }
@@ -139,7 +144,7 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
         self.gen.get_used_pages() + self.tospace().reserved_pages()
     }
 
-    /// Return the number of pages avilable for allocation. Assuming all future allocations goes to nursery.
+    /// Return the number of pages available for allocation, assuming all future allocations go to the nursery.
     fn get_available_pages(&self) -> usize {
         // super.get_pages_avail() / 2 to reserve pages for copying
         (self
@@ -148,6 +153,10 @@
             >> 1
     }
 
+    fn get_mature_physical_pages_available(&self) -> usize {
+        self.tospace().available_physical_pages()
+    }
+
     fn base(&self) -> &BasePlan<VM> {
         &self.gen.common.base
     }
@@ -222,9 +231,8 @@ impl<VM: VMBinding> GenCopy<VM> {
         res
     }
 
-    fn request_full_heap_collection(&self) -> bool {
-        self.gen
-            .request_full_heap_collection(self.get_total_pages(), self.get_reserved_pages())
+    fn requires_full_heap_collection(&self) -> bool {
+        self.gen.requires_full_heap_collection(self)
     }
 
     pub fn tospace(&self) -> &CopySpace<VM> {
diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs
index d4515ad8a2..63918292d7 100644
--- a/src/plan/generational/global.rs
+++ b/src/plan/generational/global.rs
@@ -14,12 +14,13 @@ use crate::util::heap::VMRequest;
 use crate::util::metadata::side_metadata::SideMetadataSanity;
 use crate::util::metadata::side_metadata::SideMetadataSpec;
 use crate::util::options::Options;
+use crate::util::statistics::counter::EventCounter;
 use crate::util::ObjectReference;
 use crate::util::VMWorkerThread;
-use crate::vm::VMBinding;
+use crate::vm::{ObjectModel, VMBinding};
 use std::sync::atomic::AtomicBool;
 use std::sync::atomic::Ordering;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
 
 use mmtk_macros::PlanTraceObject;
 
@@ -37,6 +38,7 @@ pub struct Gen<VM: VMBinding> {
     pub gc_full_heap: AtomicBool,
     /// Is next GC full heap?
     pub next_gc_full_heap: AtomicBool,
+    pub full_heap_gc_count: Arc<Mutex<EventCounter>>,
 }
 
 impl<VM: VMBinding> Gen<VM> {
@@ -48,27 +50,33 @@
         mmapper: &'static Mmapper,
         options: Arc<Options>,
     ) -> Self {
+        let nursery = CopySpace::new(
+            "nursery",
+            false,
+            true,
+            VMRequest::fixed_extent(options.get_max_nursery(), false),
+            global_metadata_specs.clone(),
+            vm_map,
+            mmapper,
+            &mut heap,
+        );
+        let common = CommonPlan::new(
+            vm_map,
+            mmapper,
+            options,
+            heap,
+            constraints,
+            global_metadata_specs,
+        );
+
+        let full_heap_gc_count = common.base.stats.new_event_counter("majorGC", true, true);
+
         Gen {
-            nursery: CopySpace::new(
-                "nursery",
-                false,
-                true,
-                VMRequest::fixed_extent(crate::util::options::NURSERY_SIZE, false),
-                global_metadata_specs.clone(),
-                vm_map,
-                mmapper,
-                &mut heap,
-            ),
-            common: CommonPlan::new(
-                vm_map,
-                mmapper,
-                options,
-                heap,
-                constraints,
-                global_metadata_specs,
-            ),
+            nursery,
+            common,
             gc_full_heap: AtomicBool::default(),
             next_gc_full_heap: AtomicBool::new(false),
+            full_heap_gc_count,
         }
     }
@@ -88,6 +96,9 @@ impl<VM: VMBinding> Gen<VM> {
     /// Prepare Gen. This should be called by a single thread in GC prepare work.
     pub fn prepare(&mut self, tls: VMWorkerThread) {
         let full_heap = !self.is_current_gc_nursery();
+        if full_heap {
+            self.full_heap_gc_count.lock().unwrap().inc();
+        }
         self.common.prepare(tls, full_heap);
         self.nursery.prepare(true);
         self.nursery
@@ -101,6 +112,18 @@
         self.nursery.release();
     }
 
+    /// Independent of how many pages remain in the page budget (a function of heap size), we must
+    /// ensure we never exhaust virtual memory. Therefore we must never let the nursery grow to the
+    /// extent that it can't be copied into the mature space.
+    ///
+    /// Returns `true` if the nursery has grown to the extent that it may not be able to be copied
+    /// into the mature space.
+    fn virtual_memory_exhausted<P: Plan>(&self, plan: &P) -> bool {
+        ((plan.get_collection_reserved_pages() as f64
+            * VM::VMObjectModel::VM_WORST_CASE_COPY_EXPANSION) as usize)
+            > plan.get_mature_physical_pages_available()
+    }
+
     /// Check if we need a GC based on the nursery space usage. This method may mark
     /// the following GC as a full heap GC.
     pub fn collection_required<P: Plan>(
@@ -110,11 +133,16 @@
         space: Option<&dyn Space<VM>>,
     ) -> bool {
         let nursery_full = self.nursery.reserved_pages()
-            >= (conversions::bytes_to_pages_up(*self.common.base.options.max_nursery));
+            >= (conversions::bytes_to_pages_up(self.common.base.options.get_max_nursery()));
+
         if nursery_full {
             return true;
         }
 
+        if self.virtual_memory_exhausted(plan) {
+            return true;
+        }
+
         // Is the GC triggered by nursery?
         // - if space is none, it is not. Return false immediately.
         // - if space is some, we further check its descriptor.
@@ -139,7 +167,7 @@
 
     /// Check if we should do a full heap GC. It returns true if we should have a full heap GC.
     /// It also sets gc_full_heap based on the result.
-    pub fn request_full_heap_collection(&self, total_pages: usize, reserved_pages: usize) -> bool {
+    pub fn requires_full_heap_collection<P: Plan>(&self, plan: &P) -> bool {
         // Allow the same 'true' block for if-else.
         // The conditions are complex, and it is easier to read if we put them to separate if blocks.
         #[allow(clippy::if_same_then_else)]
@@ -165,8 +193,10 @@
         {
             // Forces full heap collection
             true
+        } else if self.virtual_memory_exhausted(plan) {
+            true
         } else {
-            total_pages <= reserved_pages
+            plan.get_total_pages() <= plan.get_reserved_pages()
         };
 
         self.gc_full_heap.store(is_full_heap, Ordering::SeqCst);
@@ -176,7 +206,7 @@
             if is_full_heap {
                 "Full heap GC"
             } else {
-                "nursery GC"
+                "Nursery GC"
             }
         );
@@ -230,9 +260,13 @@
     }
 
     /// Check a plan to see if the next GC should be a full heap GC.
+    ///
+    /// Note that this function should only be called after all spaces have been released;
+    /// otherwise it may return an incorrect result, since it uses [`get_available_pages`],
+    /// whose value depends on which spaces have been released.
    pub fn should_next_gc_be_full_heap(plan: &dyn Plan<VM = VM>) -> bool {
        plan.get_available_pages()
-            < conversions::bytes_to_pages_up(*plan.base().options.min_nursery)
+            < conversions::bytes_to_pages_up(plan.base().options.get_min_nursery())
    }
 
    /// Set next_gc_full_heap to the given value.
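Editor's note: the virtual-memory check introduced above is plain page arithmetic. Below is a minimal, self-contained sketch of that calculation, not MMTk code; the page counts are hypothetical stand-ins for `get_collection_reserved_pages()` and `get_mature_physical_pages_available()`, and the constant mirrors the `VM_WORST_CASE_COPY_EXPANSION` this diff adds to `ObjectModel`.

```rust
/// Sketch of the `virtual_memory_exhausted` check; all values are
/// hypothetical stand-ins for the real plan queries.
fn virtual_memory_exhausted_sketch() -> bool {
    // Worst-case growth of objects while they are copied (see ObjectModel).
    const VM_WORST_CASE_COPY_EXPANSION: f64 = 1.5;
    let collection_reserved_pages: usize = 10_000; // plan.get_collection_reserved_pages()
    let mature_physical_pages_available: usize = 12_000; // plan.get_mature_physical_pages_available()
    // 10,000 nursery pages may expand to 15,000 pages when copied, but only
    // 12,000 physical pages remain in the mature space, so the next GC must
    // be a full-heap collection.
    ((collection_reserved_pages as f64 * VM_WORST_CASE_COPY_EXPANSION) as usize)
        > mature_physical_pages_available
}

fn main() {
    assert!(virtual_memory_exhausted_sketch());
}
```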
diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs
index 8461bc1d5c..d73a5b1acb 100644
--- a/src/plan/generational/immix/global.rs
+++ b/src/plan/generational/immix/global.rs
@@ -114,7 +114,7 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
     #[allow(clippy::if_same_then_else)]
     #[allow(clippy::branches_sharing_code)]
     fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
-        let is_full_heap = self.request_full_heap_collection();
+        let is_full_heap = self.requires_full_heap_collection();
         self.base().set_collection_kind::<Self>(self);
         self.base().set_gc_status(GcStatus::GcPrepare);
 
@@ -167,6 +167,14 @@
         }
         self.last_gc_was_full_heap
             .store(full_heap, Ordering::Relaxed);
+
+        // TODO: Refactor so that we set next_gc_full_heap in gen.release(); currently we have to fight the Rust borrow checker to do so.
+        // NOTE: We must take care that `Gen::should_next_gc_be_full_heap()` is called _after_
+        // all spaces have been released (including the ones in `gen`), as otherwise we may get
+        // incorrect results: the function uses values such as the available pages, which
+        // change depending on which spaces have been released.
+        self.gen
+            .set_next_gc_full_heap(Gen::should_next_gc_be_full_heap(self));
     }
 
     fn get_collection_reserved_pages(&self) -> usize {
@@ -186,6 +194,10 @@
             >> 1
     }
 
+    fn get_mature_physical_pages_available(&self) -> usize {
+        self.immix.available_physical_pages()
+    }
+
     fn base(&self) -> &BasePlan<VM> {
         &self.gen.common.base
     }
@@ -253,8 +265,7 @@ impl<VM: VMBinding> GenImmix<VM> {
         genimmix
     }
 
-    fn request_full_heap_collection(&self) -> bool {
-        self.gen
-            .request_full_heap_collection(self.get_total_pages(), self.get_reserved_pages())
+    fn requires_full_heap_collection(&self) -> bool {
+        self.gen.requires_full_heap_collection(self)
     }
 }
diff --git a/src/plan/global.rs b/src/plan/global.rs
index 86621cf8ba..f4fab74196 100644
--- a/src/plan/global.rs
+++ b/src/plan/global.rs
@@ -287,7 +287,7 @@ pub trait Plan: 'static + Sync + Downcast {
     /// should always be positive or 0.
     fn get_available_pages(&self) -> usize {
         // It is possible that the reserved pages is larger than the total pages so we are doing
-        // a saturating substraction to make sure we return a non-negative number.
+        // a saturating subtraction to make sure we return a non-negative number.
         // For example,
         // 1. our GC trigger checks if reserved pages is more than total pages.
         // 2. when the heap is almost full of live objects (such as in the case of an OOM) and we are doing a copying GC, it is possible
@@ -298,6 +298,12 @@
             .saturating_sub(self.get_reserved_pages())
     }
 
+    /// Return the number of pages available for allocation into the mature space. Only
+    /// generational plans have to implement this function.
+    fn get_mature_physical_pages_available(&self) -> usize {
+        panic!("This is not a generational plan.")
+    }
+
     /// Get the number of pages that are reserved for collection. By default, we return 0.
     /// For copying plans, they need to override this and calculate required pages to complete
     /// a copying GC.
@@ -566,7 +572,7 @@
     /// The application code has requested a collection.
     pub fn handle_user_collection_request(&self, tls: VMMutatorThread, force: bool) {
-        if force || !*self.options.ignore_system_g_c {
+        if force || !*self.options.ignore_system_gc {
             info!("User triggering collection");
             self.user_triggered_collection
                 .store(true, Ordering::Relaxed);
diff --git a/src/policy/space.rs b/src/policy/space.rs
index eb176ca666..f66bc6ebea 100644
--- a/src/policy/space.rs
+++ b/src/policy/space.rs
@@ -539,8 +539,24 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
         // If this is not a new chunk, the SFT for [start, start + bytes) should already be initialized.
         #[cfg(debug_assertions)]
         if !new_chunk {
-            debug_assert!(SFT_MAP.get(start).name() != EMPTY_SFT_NAME, "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})", start, bytes, new_chunk, start, SFT_MAP.get(start).name());
-            debug_assert!(SFT_MAP.get(start + bytes - 1).name() != EMPTY_SFT_NAME, "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {}", start, bytes, new_chunk, start + bytes - 1, SFT_MAP.get(start + bytes - 1).name());
+            debug_assert!(
+                SFT_MAP.get(start).name() != EMPTY_SFT_NAME,
+                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
+                start,
+                bytes,
+                new_chunk,
+                start,
+                SFT_MAP.get(start).name()
+            );
+            debug_assert!(
+                SFT_MAP.get(start + bytes - 1).name() != EMPTY_SFT_NAME,
+                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
+                start,
+                bytes,
+                new_chunk,
+                start + bytes - 1,
+                SFT_MAP.get(start + bytes - 1).name()
+            );
         }
 
         if new_chunk {
@@ -573,6 +589,11 @@
         data_pages + meta_pages
     }
 
+    /// Return the number of physical pages available.
+    fn available_physical_pages(&self) -> usize {
+        self.get_page_resource().get_available_physical_pages()
+    }
+
     fn get_name(&self) -> &'static str {
         self.common().name
     }
diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs
index 334ce646ef..a1d9bc1c9f 100644
--- a/src/util/heap/freelistpageresource.rs
+++ b/src/util/heap/freelistpageresource.rs
@@ -2,6 +2,7 @@ use std::ops::{Deref, DerefMut};
 use std::sync::{Mutex, MutexGuard};
 
 use super::layout::map::Map;
+use super::layout::vm_layout_constants::{PAGES_IN_CHUNK, PAGES_IN_SPACE64};
 use super::pageresource::{PRAllocFail, PRAllocResult};
 use super::PageResource;
 use crate::util::address::Address;
@@ -75,6 +76,22 @@ impl<VM: VMBinding> PageResource<VM> for FreeListPageResource<VM> {
         &mut self.common
     }
 
+    fn get_available_physical_pages(&self) -> usize {
+        let mut rtn = self.sync.lock().unwrap().pages_currently_on_freelist;
+        if !self.common.contiguous {
+            let chunks: usize = self
+                .common
+                .vm_map
+                .get_available_discontiguous_chunks()
+                .saturating_sub(self.common.vm_map.get_chunk_consumer_count());
+            rtn += chunks * (PAGES_IN_CHUNK - self.meta_data_pages_per_region);
+        } else if self.common.growable && cfg!(target_pointer_width = "64") {
+            rtn = PAGES_IN_SPACE64 - self.reserved_pages();
+        }
+
+        rtn
+    }
+
     fn alloc_pages(
         &self,
         space_descriptor: SpaceDescriptor,
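Editor's note: for a discontiguous free-list space, the method above counts the pages already on the free list plus the pages in still-unassigned chunks, conservatively giving up one chunk per competing chunk consumer. A self-contained sketch with hypothetical inputs follows; `PAGES_IN_CHUNK` assumes 4 MB chunks of 4 KB pages.

```rust
/// Sketch of FreeListPageResource::get_available_physical_pages for the
/// discontiguous case; every input below is a hypothetical stand-in.
fn freelist_available_physical_pages_sketch() -> usize {
    const PAGES_IN_CHUNK: usize = 1024; // 4 MB chunk / 4 KB page
    let pages_currently_on_freelist: usize = 256; // from the free-list sync state
    let available_discontiguous_chunks: usize = 10; // from the VM map
    let chunk_consumer_count: usize = 3; // from the VM map
    let meta_data_pages_per_region: usize = 0; // this resource's metadata overhead
    // Discount one chunk per competing consumer, then convert chunks to pages.
    let chunks = available_discontiguous_chunks.saturating_sub(chunk_consumer_count);
    pages_currently_on_freelist + chunks * (PAGES_IN_CHUNK - meta_data_pages_per_region)
}

fn main() {
    // 256 + 7 * 1024 = 7424 available physical pages.
    assert_eq!(freelist_available_physical_pages_sketch(), 7424);
}
```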
diff --git a/src/util/heap/layout/map.rs b/src/util/heap/layout/map.rs
index 2df96c30b1..459bb629d8 100644
--- a/src/util/heap/layout/map.rs
+++ b/src/util/heap/layout/map.rs
@@ -32,6 +32,14 @@ pub trait Map: Sized {
 
     fn get_contiguous_region_size(&self, start: Address) -> usize;
 
+    /// Return the total number of chunks available (unassigned) within the range of virtual memory
+    /// apportioned to discontiguous spaces.
+    fn get_available_discontiguous_chunks(&self) -> usize;
+
+    /// Return the total number of clients contending for chunks. This is useful when establishing
+    /// conservative bounds on the number of remaining chunks.
+    fn get_chunk_consumer_count(&self) -> usize;
+
     fn free_all_chunks(&self, any_chunk: Address);
 
     fn free_contiguous_chunks(&self, start: Address) -> usize;
diff --git a/src/util/heap/layout/map32.rs b/src/util/heap/layout/map32.rs
index 7916a84012..259ec1ac0b 100644
--- a/src/util/heap/layout/map32.rs
+++ b/src/util/heap/layout/map32.rs
@@ -133,6 +133,14 @@ impl Map for Map32 {
         self.get_contiguous_region_chunks(start) << LOG_BYTES_IN_CHUNK
     }
 
+    fn get_available_discontiguous_chunks(&self) -> usize {
+        self.total_available_discontiguous_chunks
+    }
+
+    fn get_chunk_consumer_count(&self) -> usize {
+        self.shared_discontig_fl_count
+    }
+
     fn free_all_chunks(&self, any_chunk: Address) {
         debug!("free_all_chunks: {}", any_chunk);
         let (_sync, self_mut) = self.mut_self_with_sync();
diff --git a/src/util/heap/layout/map64.rs b/src/util/heap/layout/map64.rs
index b2ba3d7299..43838f7648 100644
--- a/src/util/heap/layout/map64.rs
+++ b/src/util/heap/layout/map64.rs
@@ -152,6 +152,14 @@ impl Map for Map64 {
         unreachable!()
     }
 
+    fn get_available_discontiguous_chunks(&self) -> usize {
+        panic!("We don't use discontiguous chunks for 64-bit!");
+    }
+
+    fn get_chunk_consumer_count(&self) -> usize {
+        panic!("We don't use discontiguous chunks for 64-bit!");
+    }
+
     fn free_all_chunks(&self, _any_chunk: Address) {
         unreachable!()
     }
diff --git a/src/util/heap/monotonepageresource.rs b/src/util/heap/monotonepageresource.rs
index 13fef33d7a..70e504ba0d 100644
--- a/src/util/heap/monotonepageresource.rs
+++ b/src/util/heap/monotonepageresource.rs
@@ -1,4 +1,4 @@
-use super::layout::vm_layout_constants::BYTES_IN_CHUNK;
+use super::layout::vm_layout_constants::{BYTES_IN_CHUNK, PAGES_IN_CHUNK};
 use crate::policy::space::required_chunks;
 use crate::util::address::Address;
 use crate::util::conversions::*;
@@ -59,6 +59,15 @@ impl<VM: VMBinding> PageResource<VM> for MonotonePageResource<VM> {
         pages
     }
 
+    fn get_available_physical_pages(&self) -> usize {
+        let sync = self.sync.lock().unwrap();
+        let mut rtn = bytes_to_pages(sync.sentinel - sync.cursor);
+        if !self.common.contiguous {
+            rtn += self.common.vm_map.get_available_discontiguous_chunks() * PAGES_IN_CHUNK;
+        }
+        rtn
+    }
+
     fn alloc_pages(
         &self,
         space_descriptor: SpaceDescriptor,
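Editor's note: the monotone (bump-pointer) variant is simpler: the pages between the cursor and the sentinel are still available, plus all unassigned chunks when the space is discontiguous. A sketch with hypothetical addresses and chunk counts:

```rust
/// Sketch of MonotonePageResource::get_available_physical_pages;
/// the addresses and chunk count below are hypothetical.
fn monotone_available_physical_pages_sketch() -> usize {
    const BYTES_IN_PAGE: usize = 4096;
    const PAGES_IN_CHUNK: usize = 1024;
    let cursor: usize = 0x5ff0_0000; // current bump pointer
    let sentinel: usize = 0x6000_0000; // end of the current contiguous region
    let available_discontiguous_chunks: usize = 4; // from the VM map
    let untouched_pages = (sentinel - cursor) / BYTES_IN_PAGE; // 1 MB -> 256 pages
    untouched_pages + available_discontiguous_chunks * PAGES_IN_CHUNK
}

fn main() {
    // 256 + 4 * 1024 = 4352 available physical pages.
    assert_eq!(monotone_available_physical_pages_sketch(), 4352);
}
```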
diff --git a/src/util/heap/pageresource.rs b/src/util/heap/pageresource.rs
index ba4a8b11c3..e475280dd7 100644
--- a/src/util/heap/pageresource.rs
+++ b/src/util/heap/pageresource.rs
@@ -91,6 +91,20 @@ pub trait PageResource<VM: VMBinding>: 'static {
         self.common().accounting.get_committed_pages()
     }
 
+    /// Return the number of physical pages available to this resource. This includes all pages
+    /// currently unused by this resource. If the resource is using a discontiguous space, it also
+    /// includes the currently unassigned discontiguous space.
+    ///
+    /// Note: This just considers physical pages (i.e. virtual memory pages allocated for use by
+    /// this resource). This calculation is orthogonal to and does not consider any restrictions on
+    /// the number of pages this resource may actually use at any time (i.e. the number of
+    /// committed and reserved pages).
+    ///
+    /// Note: The calculation is made on the assumption that all space that could be assigned to
+    /// this resource would be assigned to this resource (i.e. the unused discontiguous space could
+    /// just as likely be assigned to another competing resource).
+    fn get_available_physical_pages(&self) -> usize;
+
     fn common(&self) -> &CommonPageResource;
     fn common_mut(&mut self) -> &mut CommonPageResource;
 
     fn vm_map(&self) -> &'static VMMap {
@@ -111,7 +125,7 @@ pub struct CommonPageResource {
     pub contiguous: bool,
     pub growable: bool,
 
-    vm_map: &'static VMMap,
+    pub vm_map: &'static VMMap,
 
     head_discontiguous_region: Mutex<Address>,
 }
diff --git a/src/util/options.rs b/src/util/options.rs
index 5d0cf1349b..212844e639 100644
--- a/src/util/options.rs
+++ b/src/util/options.rs
@@ -70,12 +70,27 @@ impl FromStr for PerfEventOptions {
     }
 }
 
 /// The default nursery space size.
+#[cfg(target_pointer_width = "64")]
+pub const NURSERY_SIZE: usize = (1 << 20) << LOG_BYTES_IN_MBYTE;
+/// The default min nursery size. This does not affect the actual space we create as nursery. It is
+/// only used in the GC trigger check.
+#[cfg(target_pointer_width = "64")]
+pub const DEFAULT_MIN_NURSERY: usize = 2 << LOG_BYTES_IN_MBYTE;
+/// The default max nursery size. This does not affect the actual space we create as nursery. It is
+/// only used in the GC trigger check.
+#[cfg(target_pointer_width = "64")]
+pub const DEFAULT_MAX_NURSERY: usize = (1 << 20) << LOG_BYTES_IN_MBYTE;
+
+/// The default nursery space size.
+#[cfg(target_pointer_width = "32")]
 pub const NURSERY_SIZE: usize = 32 << LOG_BYTES_IN_MBYTE;
-/// The default min nursery size. This can be set through command line options.
-/// This does not affect the actual space we create as nursery. It is only used in GC trigger check.
-pub const DEFAULT_MIN_NURSERY: usize = 32 << LOG_BYTES_IN_MBYTE;
-/// The default max nursery size. This can be set through command line options.
-/// This does not affect the actual space we create as nursery. It is only used in GC trigger check.
+/// The default min nursery size. This does not affect the actual space we create as nursery. It is
+/// only used in the GC trigger check.
+#[cfg(target_pointer_width = "32")]
+pub const DEFAULT_MIN_NURSERY: usize = 2 << LOG_BYTES_IN_MBYTE;
+/// The default max nursery size. This does not affect the actual space we create as nursery. It is
+/// only used in the GC trigger check.
+#[cfg(target_pointer_width = "32")]
 pub const DEFAULT_MAX_NURSERY: usize = 32 << LOG_BYTES_IN_MBYTE;
 
 fn always_valid<T>(_: &T) -> bool {
@@ -109,7 +124,7 @@
         // However, we cannot enable it now. For options like perf events, the validator checks
         // if the perf event feature is enabled. So when the perf event features are not enabled,
         // the validator will fail whatever value we try to set (including the initial value).
-        // Ideally, we conditionally compile options based on the feature. But options! marcro
+        // Ideally, we conditionally compile options based on the feature. But the options! macro
         // does not allow attributes in it, so we cannot conditionally compile options.
         // let is_valid = validator(&value);
         // assert!(
@@ -197,7 +212,7 @@
         }
 
         /// Set an option and run its validator for its value.
-        fn set_inner(&mut self, s: &str, val: &str)->bool {
+        fn set_inner(&mut self, s: &str, val: &str) -> bool {
             match s {
                 // Parse the given value from str (by env vars or by calling process()) to the right type
                 $(stringify!($name) => if let Ok(typed_val) = val.parse::<$type>() {
@@ -207,7 +222,7 @@
                     }
                     is_set
                 } else {
-                    eprintln!("Warn: unable to set {}={:?}. Cant parse value. Default value will be used.", s, val);
+                    eprintln!("Warn: unable to set {}={:?}. Can't parse value. Default value will be used.", s, val);
                     false
                 })*
                 _ => panic!("Invalid Options key: {}", s)
@@ -239,6 +254,83 @@
     ]
 }
 
+#[derive(Copy, Clone, EnumString, Debug)]
+/// Different nursery types.
+pub enum NurseryKind {
+    /// A Bounded nursery has different upper and lower bounds. The size only controls the upper
+    /// bound. Hence, it is considered to be a "variable size" nursery. By default, a Bounded
+    /// nursery has a lower bound of 2 MB and an upper bound of 32 MB for 32-bit systems and 1 TB
+    /// for 64-bit systems.
+    Bounded,
+    /// A Fixed nursery has the same upper and lower bounds. The size controls both the upper and
+    /// lower bounds. Note that this is considered less performant than a Bounded nursery since a
+    /// Fixed nursery size can be too restrictive and cause more GCs.
+    Fixed,
+}
+
+#[derive(Copy, Clone, Debug)]
+/// An option that provides a min/max interface to MMTk and a Bounded/Fixed interface to the
+/// user/VM.
+pub struct NurserySize {
+    /// The nursery type
+    pub kind: NurseryKind,
+    /// Minimum nursery size (in bytes)
+    pub min: usize,
+    /// Maximum nursery size (in bytes)
+    pub max: usize,
+}
+
+impl NurserySize {
+    pub fn new(kind: NurseryKind, value: usize) -> Self {
+        match kind {
+            NurseryKind::Bounded => NurserySize {
+                kind,
+                min: DEFAULT_MIN_NURSERY,
+                max: value,
+            },
+            NurseryKind::Fixed => NurserySize {
+                kind,
+                min: value,
+                max: value,
+            },
+        }
+    }
+
+    /// Returns a `NurserySize`, or a `String` containing an error. Expects the nursery size to be
+    /// formatted as "<NurseryKind>:<size in bytes>". For example, "Fixed:8192" creates a Fixed
+    /// nursery of size 8192 bytes.
+    pub fn parse(s: &str) -> Result<NurserySize, String> {
+        let ns: Vec<&str> = s.split(':').collect();
+        let kind = ns[0].parse::<NurseryKind>().map_err(|_| {
+            String::from("Please specify one of \"Bounded\" or \"Fixed\" nursery type")
+        })?;
+        let value = ns[1]
+            .parse()
+            .map_err(|_| String::from("Failed to parse size"))?;
+        Ok(NurserySize::new(kind, value))
+    }
+}
+
+impl FromStr for NurserySize {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        NurserySize::parse(s)
+    }
+}
+
+impl Options {
+    /// Return the upper bound of the nursery size (in bytes).
+    pub fn get_max_nursery(&self) -> usize {
+        self.nursery.max
+    }
+
+    /// Return the lower bound of the nursery size (in bytes).
+    pub fn get_min_nursery(&self) -> usize {
+        self.nursery.min
+    }
+}
+
 // Currently we allow all the options to be set by env var for the sake of convenience.
 // At some point, we may disallow this and all the options can only be set by command line.
 options! {
@@ -259,11 +351,14 @@
     // Should we eagerly finish sweeping at the start of a collection? (not supported)
     eager_complete_sweep: bool [env_var: true, command_line: true] [always_valid] = false,
     // Should we ignore GCs requested by the user (e.g. java.lang.System.gc)?
-    ignore_system_g_c: bool [env_var: true, command_line: true] [always_valid] = false,
-    // The upper bound of nursery size.
-    max_nursery: usize [env_var: true, command_line: true] [|v: &usize| *v > 0 ] = DEFAULT_MAX_NURSERY,
-    // The lower bound of nusery size.
-    min_nursery: usize [env_var: true, command_line: true] [|v: &usize| *v > 0 ] = DEFAULT_MIN_NURSERY,
+    ignore_system_gc: bool [env_var: true, command_line: true] [always_valid] = false,
+    // FIXME: This is not a good way to have conflicting options -- we should refactor this
+    // The nursery size for generational plans. It can be one of Bounded or Fixed. The size for a
+    // Bounded nursery only controls the upper bound, whereas the size for a Fixed nursery controls
+    // both the upper and lower bounds. The nursery size can be set like "Fixed:8192", for example,
+    // to have a Fixed nursery size of 8192 bytes.
+    nursery: NurserySize [env_var: true, command_line: true] [|v: &NurserySize| v.min > 0 && v.max > 0 && v.max >= v.min]
+        = NurserySize { kind: NurseryKind::Bounded, min: DEFAULT_MIN_NURSERY, max: DEFAULT_MAX_NURSERY },
     // Should a major GC be performed when a system GC is required?
     full_heap_system_gc: bool [env_var: true, command_line: true] [always_valid] = false,
     // Should we shrink/grow the heap to adjust to application working set? (not supported)
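Editor's note: a usage sketch for the two nursery kinds, assuming the `NurserySize`, `NurseryKind`, and `DEFAULT_MIN_NURSERY` items added above are in scope; the sizes are arbitrary.

```rust
fn main() {
    // "Fixed:8192" pins both bounds to the given size.
    let fixed = NurserySize::parse("Fixed:8192").unwrap();
    assert_eq!((fixed.min, fixed.max), (8192, 8192));

    // "Bounded:33554432" keeps the default 2 MB lower bound and only raises
    // the upper bound, here to 32 MB.
    let bounded = NurserySize::parse("Bounded:33554432").unwrap();
    assert_eq!(bounded.min, DEFAULT_MIN_NURSERY);
    assert_eq!(bounded.max, 32 << 20);
}
```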
diff --git a/src/util/statistics/stats.rs b/src/util/statistics/stats.rs
index f89ec538e4..6cd477c005 100644
--- a/src/util/statistics/stats.rs
+++ b/src/util/statistics/stats.rs
@@ -239,7 +239,10 @@ impl Stats {
         self.shared.set_gathering_stats(true);
 
         for c in &(*counters) {
-            c.lock().unwrap().start();
+            let mut ctr = c.lock().unwrap();
+            if ctr.implicitly_start() {
+                ctr.start();
+            }
         }
     }
diff --git a/src/vm/object_model.rs b/src/vm/object_model.rs
index efab7a9581..b20aa958ba 100644
--- a/src/vm/object_model.rs
+++ b/src/vm/object_model.rs
@@ -248,6 +248,11 @@ pub trait ObjectModel<VM: VMBinding> {
     /// * `reference`: The object to be queried.
     fn get_type_descriptor(reference: ObjectReference) -> &'static [i8];
 
+    /// This is the worst case expansion that can occur due to object size increasing while
+    /// copying. This constant is used to calculate whether a nursery has grown larger than the
+    /// mature space for generational plans.
+    const VM_WORST_CASE_COPY_EXPANSION: f64 = 1.5;
+
     /// For our allocation result `[cell, cell + bytes)`, if a binding's
     /// definition of `ObjectReference` may point outside the cell (i.e. `object_ref >= cell + bytes`),
     /// the binding needs to provide a `Some` value for this constant and