Introduce Bounded and Fixed nurseries (#630)
This commit adds Bounded and Fixed nursery types and changes how the
nursery size is set. A Bounded nursery has a lower bound of 2 MB but a
variable upper bound (set to be 1 TB on 64-bit by default), whereas a
Fixed nursery sets both the upper and lower bounds of the nursery to the
same value. By default, MMTk uses a Bounded nursery. The nursery size and
type can be set via command-line arguments or environment variables; for
example, setting MMTK_NURSERY="Fixed:8192" creates a Fixed nursery of
8192 bytes.
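
As a rough sketch of the semantics (illustrative only; the actual option lives in `util::options` and is read via `Options::get_min_nursery()` / `Options::get_max_nursery()` further down in this diff, and the type and constant names below are hypothetical):

```rust
/// Hypothetical sketch of the two nursery kinds described above
/// (not the actual util::options type).
const BOUNDED_MIN_NURSERY_BYTES: usize = 2 << 20; // 2 MB lower bound for a Bounded nursery
const DEFAULT_MAX_NURSERY_BYTES: usize = 1 << 40; // ~1 TB default upper bound on 64-bit

enum NurserySize {
    /// Lower bound fixed at 2 MB; upper bound configurable (defaults to ~1 TB on 64-bit).
    Bounded { max: usize },
    /// Upper and lower bounds are the same value, e.g. "Fixed:8192" means 8192 bytes.
    Fixed(usize),
}

impl NurserySize {
    fn min_bytes(&self) -> usize {
        match self {
            NurserySize::Bounded { .. } => BOUNDED_MIN_NURSERY_BYTES,
            NurserySize::Fixed(bytes) => *bytes,
        }
    }

    fn max_bytes(&self) -> usize {
        match self {
            NurserySize::Bounded { max } => *max,
            NurserySize::Fixed(bytes) => *bytes,
        }
    }
}

fn main() {
    let fixed = NurserySize::Fixed(8192);
    assert_eq!(fixed.min_bytes(), fixed.max_bytes()); // Fixed: both bounds are 8192 bytes
    let bounded = NurserySize::Bounded { max: DEFAULT_MAX_NURSERY_BYTES };
    assert_eq!(bounded.min_bytes(), BOUNDED_MIN_NURSERY_BYTES); // Bounded: 2 MB lower bound
}
```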

This commit also changes how minor and major GCs are triggered to be
more in line with the Java MMTk.

**Note**: VM bindings may want to change the
`ObjectModel::VM_WORST_CASE_COPY_EXPANSION` constant depending on the
worst-case expansion that can occur when object sizes change during
copying.
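
For a concrete sense of how this constant is used: the check added as `Gen::virtual_memory_exhausted` later in this diff compares the copy reservation, scaled by the constant, against the physical pages left in the mature space. A minimal sketch with made-up numbers:

```rust
// Mirrors the virtual_memory_exhausted() check added in
// src/plan/generational/global.rs; the numbers below are illustrative only.
fn would_exhaust_virtual_memory(
    collection_reserved_pages: usize,
    worst_case_copy_expansion: f64, // the binding's VM_WORST_CASE_COPY_EXPANSION
    mature_physical_pages_available: usize,
) -> bool {
    ((collection_reserved_pages as f64 * worst_case_copy_expansion) as usize)
        > mature_physical_pages_available
}

fn main() {
    // With a 1.5x worst-case expansion, copying a 10,000-page nursery may need
    // up to 15,000 mature pages; with only 12,000 left, a full-heap GC is forced.
    assert!(would_exhaust_virtual_memory(10_000, 1.5, 12_000));
    assert!(!would_exhaust_virtual_memory(10_000, 1.5, 20_000));
}
```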

This commit updates ci-perf-kit to 0.6.8 (for ignore_system_gc option name change).

Co-authored-by: Yi Lin <qinsoon@gmail.com>
k-sareen and qinsoon authored Aug 17, 2022
1 parent 4904004 commit 8afe96d
Showing 19 changed files with 309 additions and 62 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/micro-bm.yml
@@ -61,7 +61,7 @@ jobs:
with:
repository: mmtk/ci-perf-kit
token: ${{ secrets.GITHUB_TOKEN }}
ref: "0.6.6"
ref: "0.6.8"
path: ci-perf-kit
submodules: true
# Use rust-toolchain in the trunk (it doesnt matter much - if the toolchains defined in the trunk and the branch are different, we cant run anyway)
4 changes: 2 additions & 2 deletions .github/workflows/perf-compare-ci.yml
@@ -101,7 +101,7 @@ jobs:
with:
repository: mmtk/ci-perf-kit
token: ${{ secrets.CI_ACCESS_TOKEN }}
ref: "0.6.6"
ref: "0.6.8"
path: ci-perf-kit
submodules: true
# setup
@@ -203,7 +203,7 @@ jobs:
with:
repository: mmtk/ci-perf-kit
token: ${{ secrets.CI_ACCESS_TOKEN }}
ref: "0.6.6"
ref: "0.6.8"
path: ci-perf-kit
submodules: true
# setup
2 changes: 1 addition & 1 deletion .github/workflows/perf-jikesrvm-baseline.yml
@@ -25,7 +25,7 @@ jobs:
with:
token: ${{ secrets.CI_ACCESS_TOKEN }}
repository: mmtk/ci-perf-kit
ref: "0.6.6"
ref: "0.6.8"
path: ci-perf-kit
submodules: true
# setup
2 changes: 1 addition & 1 deletion .github/workflows/perf-openjdk-baseline.yml
@@ -25,7 +25,7 @@ jobs:
with:
token: ${{ secrets.CI_ACCESS_TOKEN }}
repository: mmtk/ci-perf-kit
ref: "0.6.6"
ref: "0.6.8"
path: ci-perf-kit
submodules: true
# setup
6 changes: 3 additions & 3 deletions .github/workflows/perf-regression-ci.yml
@@ -41,7 +41,7 @@ jobs:
uses: actions/checkout@v2
with:
repository: mmtk/ci-perf-kit
ref: "0.6.6"
ref: "0.6.8"
path: ci-perf-kit
token: ${{ secrets.CI_ACCESS_TOKEN }}
submodules: true
@@ -101,7 +101,7 @@ jobs:
uses: actions/checkout@v2
with:
repository: mmtk/ci-perf-kit
ref: "0.6.6"
ref: "0.6.8"
path: ci-perf-kit
token: ${{ secrets.CI_ACCESS_TOKEN }}
submodules: true
@@ -165,7 +165,7 @@ jobs:
uses: actions/checkout@v2
with:
repository: mmtk/ci-perf-kit
ref: "0.6.6"
ref: "0.6.8"
path: ci-perf-kit
token: ${{ secrets.CI_ACCESS_TOKEN }}
submodules: true
18 changes: 13 additions & 5 deletions src/plan/generational/copying/global.rs
@@ -86,7 +86,7 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
}

fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
let is_full_heap = self.request_full_heap_collection();
let is_full_heap = self.requires_full_heap_collection();
self.base().set_collection_kind::<Self>(self);
self.base().set_gc_status(GcStatus::GcPrepare);
if is_full_heap {
@@ -127,6 +127,11 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
self.fromspace().release();
}

// TODO: Refactor so that we set the next_gc_full_heap in gen.release(). Currently have to fight with Rust borrow checker
// NOTE: We have to take care that the `Gen::should_next_gc_be_full_heap()` function is
// called _after_ all spaces have been released (including ones in `gen`) as otherwise we
// may get incorrect results since the function uses values such as available pages that
// will change depending on which spaces have been released
self.gen
.set_next_gc_full_heap(Gen::should_next_gc_be_full_heap(self));
}
@@ -139,7 +144,7 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
self.gen.get_used_pages() + self.tospace().reserved_pages()
}

/// Return the number of pages avilable for allocation. Assuming all future allocations goes to nursery.
/// Return the number of pages available for allocation. Assuming all future allocations goes to nursery.
fn get_available_pages(&self) -> usize {
// super.get_pages_avail() / 2 to reserve pages for copying
(self
@@ -148,6 +153,10 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
>> 1
}

fn get_mature_physical_pages_available(&self) -> usize {
self.tospace().available_physical_pages()
}

fn base(&self) -> &BasePlan<VM> {
&self.gen.common.base
}
@@ -222,9 +231,8 @@ impl<VM: VMBinding> GenCopy<VM> {
res
}

fn request_full_heap_collection(&self) -> bool {
self.gen
.request_full_heap_collection(self.get_total_pages(), self.get_reserved_pages())
fn requires_full_heap_collection(&self) -> bool {
self.gen.requires_full_heap_collection(self)
}

pub fn tospace(&self) -> &CopySpace<VM> {
84 changes: 59 additions & 25 deletions src/plan/generational/global.rs
@@ -14,12 +14,13 @@ use crate::util::heap::VMRequest;
use crate::util::metadata::side_metadata::SideMetadataSanity;
use crate::util::metadata::side_metadata::SideMetadataSpec;
use crate::util::options::Options;
use crate::util::statistics::counter::EventCounter;
use crate::util::ObjectReference;
use crate::util::VMWorkerThread;
use crate::vm::VMBinding;
use crate::vm::{ObjectModel, VMBinding};
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::sync::{Arc, Mutex};

use mmtk_macros::PlanTraceObject;

@@ -37,6 +38,7 @@ pub struct Gen<VM: VMBinding> {
pub gc_full_heap: AtomicBool,
/// Is next GC full heap?
pub next_gc_full_heap: AtomicBool,
pub full_heap_gc_count: Arc<Mutex<EventCounter>>,
}

impl<VM: VMBinding> Gen<VM> {
@@ -48,27 +50,33 @@ impl<VM: VMBinding> Gen<VM> {
mmapper: &'static Mmapper,
options: Arc<Options>,
) -> Self {
let nursery = CopySpace::new(
"nursery",
false,
true,
VMRequest::fixed_extent(options.get_max_nursery(), false),
global_metadata_specs.clone(),
vm_map,
mmapper,
&mut heap,
);
let common = CommonPlan::new(
vm_map,
mmapper,
options,
heap,
constraints,
global_metadata_specs,
);

let full_heap_gc_count = common.base.stats.new_event_counter("majorGC", true, true);

Gen {
nursery: CopySpace::new(
"nursery",
false,
true,
VMRequest::fixed_extent(crate::util::options::NURSERY_SIZE, false),
global_metadata_specs.clone(),
vm_map,
mmapper,
&mut heap,
),
common: CommonPlan::new(
vm_map,
mmapper,
options,
heap,
constraints,
global_metadata_specs,
),
nursery,
common,
gc_full_heap: AtomicBool::default(),
next_gc_full_heap: AtomicBool::new(false),
full_heap_gc_count,
}
}

@@ -88,6 +96,9 @@ impl<VM: VMBinding> Gen<VM> {
/// Prepare Gen. This should be called by a single thread in GC prepare work.
pub fn prepare(&mut self, tls: VMWorkerThread) {
let full_heap = !self.is_current_gc_nursery();
if full_heap {
self.full_heap_gc_count.lock().unwrap().inc();
}
self.common.prepare(tls, full_heap);
self.nursery.prepare(true);
self.nursery
Expand All @@ -101,6 +112,18 @@ impl<VM: VMBinding> Gen<VM> {
self.nursery.release();
}

/// Independent of how many pages remain in the page budget (a function of heap size), we must
/// ensure we never exhaust virtual memory. Therefore we must never let the nursery grow to the
/// extent that it can't be copied into the mature space.
///
/// Returns `true` if the nursery has grown to the extent that it may not be able to be copied
/// into the mature space.
fn virtual_memory_exhausted<P: Plan>(&self, plan: &P) -> bool {
((plan.get_collection_reserved_pages() as f64
* VM::VMObjectModel::VM_WORST_CASE_COPY_EXPANSION) as usize)
> plan.get_mature_physical_pages_available()
}

/// Check if we need a GC based on the nursery space usage. This method may mark
/// the following GC as a full heap GC.
pub fn collection_required<P: Plan>(
@@ -110,11 +133,16 @@
space: Option<&dyn Space<VM>>,
) -> bool {
let nursery_full = self.nursery.reserved_pages()
>= (conversions::bytes_to_pages_up(*self.common.base.options.max_nursery));
>= (conversions::bytes_to_pages_up(self.common.base.options.get_max_nursery()));

if nursery_full {
return true;
}

if self.virtual_memory_exhausted(plan) {
return true;
}

// Is the GC triggered by nursery?
// - if space is none, it is not. Return false immediately.
// - if space is some, we further check its descriptor.
@@ -139,7 +167,7 @@ impl<VM: VMBinding> Gen<VM> {

/// Check if we should do a full heap GC. It returns true if we should have a full heap GC.
/// It also sets gc_full_heap based on the result.
pub fn request_full_heap_collection(&self, total_pages: usize, reserved_pages: usize) -> bool {
pub fn requires_full_heap_collection<P: Plan>(&self, plan: &P) -> bool {
// Allow the same 'true' block for if-else.
// The conditions are complex, and it is easier to read if we put them to separate if blocks.
#[allow(clippy::if_same_then_else)]
@@ -165,8 +193,10 @@
{
// Forces full heap collection
true
} else if self.virtual_memory_exhausted(plan) {
true
} else {
total_pages <= reserved_pages
plan.get_total_pages() <= plan.get_reserved_pages()
};

self.gc_full_heap.store(is_full_heap, Ordering::SeqCst);
Expand All @@ -176,7 +206,7 @@ impl<VM: VMBinding> Gen<VM> {
if is_full_heap {
"Full heap GC"
} else {
"nursery GC"
"Nursery GC"
}
);

Expand Down Expand Up @@ -230,9 +260,13 @@ impl<VM: VMBinding> Gen<VM> {
}

/// Check a plan to see if the next GC should be a full heap GC.
///
/// Note that this function should be called after all spaces have been released. This is
/// required as we may get incorrect values since this function uses [`get_available_pages`]
/// whose value depends on which spaces have been released.
pub fn should_next_gc_be_full_heap(plan: &dyn Plan<VM = VM>) -> bool {
plan.get_available_pages()
< conversions::bytes_to_pages_up(*plan.base().options.min_nursery)
< conversions::bytes_to_pages_up(plan.base().options.get_min_nursery())
}

/// Set next_gc_full_heap to the given value.
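
A small numeric illustration of the `should_next_gc_be_full_heap` threshold above, assuming 4 KiB pages (the values are made up):

```rust
// Illustrative only: with a 2 MB minimum nursery and 4 KiB pages, fewer than
// 512 available pages after release means the next GC is scheduled as full heap.
fn bytes_to_pages_up(bytes: usize) -> usize {
    (bytes + 4095) / 4096
}

fn main() {
    let min_nursery_pages = bytes_to_pages_up(2 << 20); // 512 pages
    let available_pages_after_release = 300;
    let next_gc_full_heap = available_pages_after_release < min_nursery_pages;
    assert!(next_gc_full_heap);
}
```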
19 changes: 15 additions & 4 deletions src/plan/generational/immix/global.rs
@@ -114,7 +114,7 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
#[allow(clippy::if_same_then_else)]
#[allow(clippy::branches_sharing_code)]
fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<Self::VM>) {
let is_full_heap = self.request_full_heap_collection();
let is_full_heap = self.requires_full_heap_collection();

self.base().set_collection_kind::<Self>(self);
self.base().set_gc_status(GcStatus::GcPrepare);
@@ -167,6 +167,14 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
}
self.last_gc_was_full_heap
.store(full_heap, Ordering::Relaxed);

// TODO: Refactor so that we set the next_gc_full_heap in gen.release(). Currently have to fight with Rust borrow checker
// NOTE: We have to take care that the `Gen::should_next_gc_be_full_heap()` function is
// called _after_ all spaces have been released (including ones in `gen`) as otherwise we
// may get incorrect results since the function uses values such as available pages that
// will change depending on which spaces have been released
self.gen
.set_next_gc_full_heap(Gen::should_next_gc_be_full_heap(self));
}

fn get_collection_reserved_pages(&self) -> usize {
@@ -186,6 +194,10 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
>> 1
}

fn get_mature_physical_pages_available(&self) -> usize {
self.immix.available_physical_pages()
}

fn base(&self) -> &BasePlan<VM> {
&self.gen.common.base
}
@@ -253,8 +265,7 @@ impl<VM: VMBinding> GenImmix<VM> {
genimmix
}

fn request_full_heap_collection(&self) -> bool {
self.gen
.request_full_heap_collection(self.get_total_pages(), self.get_reserved_pages())
fn requires_full_heap_collection(&self) -> bool {
self.gen.requires_full_heap_collection(self)
}
}
10 changes: 8 additions & 2 deletions src/plan/global.rs
@@ -287,7 +287,7 @@ pub trait Plan: 'static + Sync + Downcast {
/// should always be positive or 0.
fn get_available_pages(&self) -> usize {
// It is possible that the reserved pages is larger than the total pages so we are doing
// a saturating substraction to make sure we return a non-negative number.
// a saturating subtraction to make sure we return a non-negative number.
// For example,
// 1. our GC trigger checks if reserved pages is more than total pages.
// 2. when the heap is almost full of live objects (such as in the case of an OOM) and we are doing a copying GC, it is possible
@@ -298,6 +298,12 @@ pub trait Plan: 'static + Sync + Downcast {
.saturating_sub(self.get_reserved_pages())
}
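
A tiny illustration of the saturating subtraction described above (made-up numbers; `saturating_sub` is from the Rust standard library):

```rust
fn main() {
    // During a copying GC near OOM, reserved pages can exceed total pages;
    // saturating_sub keeps get_available_pages() at 0 instead of underflowing.
    let (total_pages, reserved_pages) = (1_000usize, 1_100usize);
    assert_eq!(total_pages.saturating_sub(reserved_pages), 0);
}
```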

/// Return the number of pages available for allocation into the mature space. Only
/// generational plans have to implement this function.
fn get_mature_physical_pages_available(&self) -> usize {
panic!("This is not a generational plan.")
}

/// Get the number of pages that are reserved for collection. By default, we return 0.
/// For copying plans, they need to override this and calculate required pages to complete
/// a copying GC.
@@ -566,7 +572,7 @@ impl<VM: VMBinding> BasePlan<VM> {

/// The application code has requested a collection.
pub fn handle_user_collection_request(&self, tls: VMMutatorThread, force: bool) {
if force || !*self.options.ignore_system_g_c {
if force || !*self.options.ignore_system_gc {
info!("User triggering collection");
self.user_triggered_collection
.store(true, Ordering::Relaxed);
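
For reference, the renamed `ignore_system_gc` option feeds the condition shown above; a standalone sketch of that decision (the helper name is hypothetical, the logic mirrors `force || !ignore_system_gc`):

```rust
// Illustrative only: the decision made inside handle_user_collection_request.
// A user-triggered request (e.g. Java's System.gc()) proceeds unless the
// ignore_system_gc option is set and the request is not forced.
fn should_honour_user_gc_request(force: bool, ignore_system_gc: bool) -> bool {
    force || !ignore_system_gc
}

fn main() {
    assert!(should_honour_user_gc_request(false, false)); // normal request honoured
    assert!(!should_honour_user_gc_request(false, true)); // dropped: ignore_system_gc set
    assert!(should_honour_user_gc_request(true, true));   // forced requests always honoured
}
```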