Skip to content

Commit

Permalink
Introduce Bounded and Fixed nurseries
Browse files Browse the repository at this point in the history
This commit adds Bounded and Fixed nursery types and changes how the
nursery size is set. A Bounded nursery has a lower bound of 2 MB but a
variable upper bound (set to be 1 TB on 64-bit by default), whereas a
Fixed nursery controls both the upper and lower bound of the nursery and
sets them to be the same value. By default, MMTk uses a Bounded nursery.
The nursery size and type can be set via command line arguments or
environment variables, for example, setting MMTK_NURSERY="Fixed:8192"
will create a Fixed nursery of size 8192 bytes.

This commit also changes how minor and major GCs are triggered to be
more in line with the Java MMTk.

**Note**: VM bindings may want to change the
`ObjectModel::VM_WORST_CASE_COPY_EXPANSION` constant dependent on the
worst case expansion that can occur due to object sizes changing when
copied.
  • Loading branch information
k-sareen committed Jul 27, 2022
1 parent 9a3ebff commit 89078b9
Show file tree
Hide file tree
Showing 14 changed files with 301 additions and 54 deletions.
18 changes: 13 additions & 5 deletions src/plan/generational/copying/global.rs
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
}

fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
let is_full_heap = self.request_full_heap_collection();
let is_full_heap = self.requires_full_heap_collection();
self.base().set_collection_kind::<Self>(self);
self.base().set_gc_status(GcStatus::GcPrepare);
if is_full_heap {
Expand Down Expand Up @@ -127,6 +127,11 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
self.fromspace().release();
}

// TODO: Refactor so that we set the next_gc_full_heap in gen.release(). Currently have to fight with Rust borrow checker
// NOTE: We have to take care that the `Gen::should_next_gc_be_full_heap()` function is
// called _after_ all spaces have been released (including ones in `gen`) as otherwise we
// may get incorrect results since the function uses values such as available pages that
// will change depending on which spaces have been released
self.gen
.set_next_gc_full_heap(Gen::should_next_gc_be_full_heap(self));
}
Expand All @@ -139,7 +144,7 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
self.gen.get_used_pages() + self.tospace().reserved_pages()
}

/// Return the number of pages avilable for allocation. Assuming all future allocations goes to nursery.
/// Return the number of pages available for allocation, assuming all future allocations go to the nursery.
fn get_available_pages(&self) -> usize {
// super.get_pages_avail() / 2 to reserve pages for copying
(self
Expand All @@ -148,6 +153,10 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
>> 1
}

// Physical pages available for allocation into the mature space.
// For GenCopy the mature space is the copy space pair; we report the headroom of
// tospace, the space nursery survivors are evacuated into.
fn get_mature_physical_pages_available(&self) -> usize {
self.tospace().available_physical_pages()
}

fn base(&self) -> &BasePlan<VM> {
&self.gen.common.base
}
Expand Down Expand Up @@ -222,9 +231,8 @@ impl<VM: VMBinding> GenCopy<VM> {
res
}

fn request_full_heap_collection(&self) -> bool {
self.gen
.request_full_heap_collection(self.get_total_pages(), self.get_reserved_pages())
// Decide whether the upcoming GC must be a full-heap collection.
// Delegates to the shared generational logic in `Gen::requires_full_heap_collection`,
// passing `self` so it can query plan-level page accounting.
fn requires_full_heap_collection(&self) -> bool {
self.gen.requires_full_heap_collection(self)
}

pub fn tospace(&self) -> &CopySpace<VM> {
Expand Down
84 changes: 59 additions & 25 deletions src/plan/generational/global.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,13 @@ use crate::util::heap::VMRequest;
use crate::util::metadata::side_metadata::SideMetadataSanity;
use crate::util::metadata::side_metadata::SideMetadataSpec;
use crate::util::options::Options;
use crate::util::statistics::counter::EventCounter;
use crate::util::ObjectReference;
use crate::util::VMWorkerThread;
use crate::vm::VMBinding;
use crate::vm::{ObjectModel, VMBinding};
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::sync::{Arc, Mutex};

use mmtk_macros::PlanTraceObject;

Expand All @@ -37,6 +38,7 @@ pub struct Gen<VM: VMBinding> {
pub gc_full_heap: AtomicBool,
/// Is next GC full heap?
pub next_gc_full_heap: AtomicBool,
pub full_heap_gc_count: Arc<Mutex<EventCounter>>,
}

impl<VM: VMBinding> Gen<VM> {
Expand All @@ -48,27 +50,33 @@ impl<VM: VMBinding> Gen<VM> {
mmapper: &'static Mmapper,
options: Arc<Options>,
) -> Self {
let nursery = CopySpace::new(
"nursery",
false,
true,
VMRequest::fixed_extent(options.get_max_nursery(), false),
global_metadata_specs.clone(),
vm_map,
mmapper,
&mut heap,
);
let common = CommonPlan::new(
vm_map,
mmapper,
options,
heap,
constraints,
global_metadata_specs,
);

let full_heap_gc_count = common.base.stats.new_event_counter("majorGC", true, true);

Gen {
nursery: CopySpace::new(
"nursery",
false,
true,
VMRequest::fixed_extent(crate::util::options::NURSERY_SIZE, false),
global_metadata_specs.clone(),
vm_map,
mmapper,
&mut heap,
),
common: CommonPlan::new(
vm_map,
mmapper,
options,
heap,
constraints,
global_metadata_specs,
),
nursery,
common,
gc_full_heap: AtomicBool::default(),
next_gc_full_heap: AtomicBool::new(false),
full_heap_gc_count,
}
}

Expand All @@ -87,6 +95,9 @@ impl<VM: VMBinding> Gen<VM> {
/// Prepare Gen. This should be called by a single thread in GC prepare work.
pub fn prepare(&mut self, tls: VMWorkerThread) {
let full_heap = !self.is_current_gc_nursery();
if full_heap {
self.full_heap_gc_count.lock().unwrap().inc();
}
self.common.prepare(tls, full_heap);
self.nursery.prepare(true);
self.nursery
Expand All @@ -100,6 +111,18 @@ impl<VM: VMBinding> Gen<VM> {
self.nursery.release();
}

/// Independent of how many pages remain in the page budget (a function of heap size), we must
/// ensure we never exhaust virtual memory. Therefore we must never let the nursery grow to the
/// extent that it can't be copied into the mature space.
///
/// Returns `true` if the nursery has grown to the extent that it may not be able to be copied
/// into the mature space.
fn virtual_memory_exhausted<P: Plan>(&self, plan: &P) -> bool {
// Worst-case space needed to copy the nursery out: the pages reserved for
// collection scaled by the binding-supplied expansion factor (object sizes may
// change when copied -- see `ObjectModel::VM_WORST_CASE_COPY_EXPANSION`).
// If that exceeds what the mature space can physically provide, we must not
// let the nursery grow any further.
((plan.get_collection_reserved_pages() as f64
* VM::VMObjectModel::VM_WORST_CASE_COPY_EXPANSION) as usize)
> plan.get_mature_physical_pages_available()
}

/// Check if we need a GC based on the nursery space usage. This method may mark
/// the following GC as a full heap GC.
pub fn collection_required<P: Plan>(
Expand All @@ -109,11 +132,16 @@ impl<VM: VMBinding> Gen<VM> {
space: Option<&dyn Space<VM>>,
) -> bool {
let nursery_full = self.nursery.reserved_pages()
>= (conversions::bytes_to_pages_up(*self.common.base.options.max_nursery));
>= (conversions::bytes_to_pages_up(self.common.base.options.get_max_nursery()));

if nursery_full {
return true;
}

if self.virtual_memory_exhausted(plan) {
return true;
}

// Is the GC triggered by nursery?
// - if space is none, it is not. Return false immediately.
// - if space is some, we further check its descriptor.
Expand All @@ -138,7 +166,7 @@ impl<VM: VMBinding> Gen<VM> {

/// Check if we should do a full heap GC. It returns true if we should have a full heap GC.
/// It also sets gc_full_heap based on the result.
pub fn request_full_heap_collection(&self, total_pages: usize, reserved_pages: usize) -> bool {
pub fn requires_full_heap_collection<P: Plan>(&self, plan: &P) -> bool {
// Allow the same 'true' block for if-else.
// The conditions are complex, and it is easier to read if we put them to separate if blocks.
#[allow(clippy::if_same_then_else)]
Expand All @@ -164,8 +192,10 @@ impl<VM: VMBinding> Gen<VM> {
{
// Forces full heap collection
true
} else if self.virtual_memory_exhausted(plan) {
true
} else {
total_pages <= reserved_pages
plan.get_total_pages() <= plan.get_reserved_pages()
};

self.gc_full_heap.store(is_full_heap, Ordering::SeqCst);
Expand All @@ -175,7 +205,7 @@ impl<VM: VMBinding> Gen<VM> {
if is_full_heap {
"Full heap GC"
} else {
"nursery GC"
"Nursery GC"
}
);

Expand Down Expand Up @@ -229,9 +259,13 @@ impl<VM: VMBinding> Gen<VM> {
}

/// Check a plan to see if the next GC should be a full heap GC.
///
/// Note that this function should be called after all spaces have been released. This is
/// required as we may get incorrect values since this function uses [`get_available_pages`]
/// whose value depends on which spaces have been released.
pub fn should_next_gc_be_full_heap(plan: &dyn Plan<VM = VM>) -> bool {
plan.get_available_pages()
< conversions::bytes_to_pages_up(*plan.base().options.min_nursery)
< conversions::bytes_to_pages_up(plan.base().options.get_min_nursery())
}

/// Set next_gc_full_heap to the given value.
Expand Down
19 changes: 15 additions & 4 deletions src/plan/generational/immix/global.rs
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
#[allow(clippy::if_same_then_else)]
#[allow(clippy::branches_sharing_code)]
fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<Self::VM>) {
let is_full_heap = self.request_full_heap_collection();
let is_full_heap = self.requires_full_heap_collection();

self.base().set_collection_kind::<Self>(self);
self.base().set_gc_status(GcStatus::GcPrepare);
Expand Down Expand Up @@ -167,6 +167,14 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
}
self.last_gc_was_full_heap
.store(full_heap, Ordering::Relaxed);

// TODO: Refactor so that we set the next_gc_full_heap in gen.release(). Currently have to fight with Rust borrow checker
// NOTE: We have to take care that the `Gen::should_next_gc_be_full_heap()` function is
// called _after_ all spaces have been released (including ones in `gen`) as otherwise we
// may get incorrect results since the function uses values such as available pages that
// will change depending on which spaces have been released
self.gen
.set_next_gc_full_heap(Gen::should_next_gc_be_full_heap(self));
}

fn get_collection_reserved_pages(&self) -> usize {
Expand All @@ -186,6 +194,10 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
>> 1
}

// Physical pages available for allocation into the mature space.
// For GenImmix the mature space is the immix space, so report its headroom.
fn get_mature_physical_pages_available(&self) -> usize {
self.immix.available_physical_pages()
}

fn base(&self) -> &BasePlan<VM> {
&self.gen.common.base
}
Expand Down Expand Up @@ -253,8 +265,7 @@ impl<VM: VMBinding> GenImmix<VM> {
genimmix
}

fn request_full_heap_collection(&self) -> bool {
self.gen
.request_full_heap_collection(self.get_total_pages(), self.get_reserved_pages())
// Decide whether the upcoming GC must be a full-heap collection.
// Delegates to the shared generational logic in `Gen::requires_full_heap_collection`,
// passing `self` so it can query plan-level page accounting.
fn requires_full_heap_collection(&self) -> bool {
self.gen.requires_full_heap_collection(self)
}
}
10 changes: 8 additions & 2 deletions src/plan/global.rs
Original file line number Diff line number Diff line change
Expand Up @@ -280,7 +280,7 @@ pub trait Plan: 'static + Sync + Downcast {
/// should always be positive or 0.
fn get_available_pages(&self) -> usize {
// It is possible that the reserved pages is larger than the total pages so we are doing
// a saturating substraction to make sure we return a non-negative number.
// a saturating subtraction to make sure we return a non-negative number.
// For example,
// 1. our GC trigger checks if reserved pages is more than total pages.
// 2. when the heap is almost full of live objects (such as in the case of an OOM) and we are doing a copying GC, it is possible
Expand All @@ -291,6 +291,12 @@ pub trait Plan: 'static + Sync + Downcast {
.saturating_sub(self.get_reserved_pages())
}

/// Return the number of pages available for allocation into the mature space. Only
/// generational plans have to implement this function.
///
/// # Panics
///
/// The default implementation panics: non-generational plans have no mature space and
/// must never have this called on them.
fn get_mature_physical_pages_available(&self) -> usize {
panic!("This is not a generational plan.")
}

/// Get the number of pages that are reserved for collection. By default, we return 0.
/// For copying plans, they need to override this and calculate required pages to complete
/// a copying GC.
Expand Down Expand Up @@ -564,7 +570,7 @@ impl<VM: VMBinding> BasePlan<VM> {

/// The application code has requested a collection.
pub fn handle_user_collection_request(&self, tls: VMMutatorThread, force: bool) {
if force || !*self.options.ignore_system_g_c {
if force || !*self.options.ignore_system_gc {
info!("User triggering collection");
self.user_triggered_collection
.store(true, Ordering::Relaxed);
Expand Down
25 changes: 23 additions & 2 deletions src/policy/space.rs
Original file line number Diff line number Diff line change
Expand Up @@ -535,8 +535,24 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
// If this is not a new chunk, the SFT for [start, start + bytes) should already be initialized.
#[cfg(debug_assertions)]
if !new_chunk {
debug_assert!(SFT_MAP.get(start).name() != EMPTY_SFT_NAME, "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})", start, bytes, new_chunk, start, SFT_MAP.get(start).name());
debug_assert!(SFT_MAP.get(start + bytes - 1).name() != EMPTY_SFT_NAME, "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {}", start, bytes, new_chunk, start + bytes - 1, SFT_MAP.get(start + bytes - 1).name());
debug_assert!(
SFT_MAP.get(start).name() != EMPTY_SFT_NAME,
"In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
start,
bytes,
new_chunk,
start,
SFT_MAP.get(start).name()
);
debug_assert!(
SFT_MAP.get(start + bytes - 1).name() != EMPTY_SFT_NAME,
"In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
start,
bytes,
new_chunk,
start + bytes - 1,
SFT_MAP.get(start + bytes - 1).name()
);
}

if new_chunk {
Expand Down Expand Up @@ -571,6 +587,11 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
data_pages + meta_pages
}

/// Return the number of physical pages available.
///
/// Delegates to this space's page resource; see
/// `PageResource::get_available_physical_pages` for how the count is derived.
fn available_physical_pages(&self) -> usize {
self.get_page_resource().get_available_physical_pages()
}

fn get_name(&self) -> &'static str {
self.common().name
}
Expand Down
17 changes: 17 additions & 0 deletions src/util/heap/freelistpageresource.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ use std::ops::{Deref, DerefMut};
use std::sync::{Mutex, MutexGuard};

use super::layout::map::Map;
use super::layout::vm_layout_constants::{PAGES_IN_CHUNK, PAGES_IN_SPACE64};
use super::pageresource::{PRAllocFail, PRAllocResult};
use super::PageResource;
use crate::util::address::Address;
Expand Down Expand Up @@ -75,6 +76,22 @@ impl<VM: VMBinding> PageResource<VM> for FreeListPageResource<VM> {
&mut self.common
}

// Estimate the number of physical pages this resource could still hand out.
fn get_available_physical_pages(&self) -> usize {
// Start with pages already sitting on this resource's free list.
let mut rtn = self.sync.lock().unwrap().pages_currently_on_freelist;
if !self.common.contiguous {
// Discontiguous space: it can also grow by taking chunks from the shared
// discontiguous pool. Be conservative: assume each other chunk consumer may
// claim one of the remaining chunks (`saturating_sub` guards against
// underflow), and discount the per-chunk metadata pages from each chunk.
let chunks: usize = self
.common
.vm_map
.get_available_discontiguous_chunks()
.saturating_sub(self.common.vm_map.get_chunk_consumer_count());
rtn += chunks * (PAGES_IN_CHUNK - self.meta_data_pages_per_region);
} else if self.common.growable && cfg!(target_pointer_width = "64") {
// Contiguous but growable space on 64-bit: bounded only by the virtual
// memory apportioned to a space, minus what is already reserved.
rtn = PAGES_IN_SPACE64 - self.reserved_pages();
}

rtn
}

fn alloc_pages(
&self,
space_descriptor: SpaceDescriptor,
Expand Down
8 changes: 8 additions & 0 deletions src/util/heap/layout/map.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,14 @@ pub trait Map: Sized {

fn get_contiguous_region_size(&self, start: Address) -> usize;

/// Return the total number of chunks available (unassigned) within the range of virtual memory
/// apportioned to discontiguous spaces.
fn get_available_discontiguous_chunks(&self) -> usize;

/// Return the total number of clients contending for chunks. This is useful when establishing
/// conservative bounds on the number of remaining chunks.
fn get_chunk_consumer_count(&self) -> usize;

fn free_all_chunks(&self, any_chunk: Address);

fn free_contiguous_chunks(&self, start: Address) -> usize;
Expand Down
8 changes: 8 additions & 0 deletions src/util/heap/layout/map32.rs
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,14 @@ impl Map for Map32 {
self.get_contiguous_region_chunks(start) << LOG_BYTES_IN_CHUNK
}

// Total number of unassigned chunks within the virtual memory range apportioned
// to discontiguous spaces (see the `Map` trait documentation).
fn get_available_discontiguous_chunks(&self) -> usize {
self.total_available_discontiguous_chunks
}

// Number of shared discontiguous free-list clients contending for chunks; used by
// callers to put a conservative bound on the chunks actually obtainable.
fn get_chunk_consumer_count(&self) -> usize {
self.shared_discontig_fl_count
}

fn free_all_chunks(&self, any_chunk: Address) {
debug!("free_all_chunks: {}", any_chunk);
let (_sync, self_mut) = self.mut_self_with_sync();
Expand Down
Loading

0 comments on commit 89078b9

Please sign in to comment.