Notify GC trigger about pending allocation (mmtk#755)
This PR reverts a change from mmtk#712 about where `pr.clear_request()` is
called in `Space.acquire()`; that change caused issues like mmtk#734.
We now clear the reserved pages before GC (the same as before, and the
same as Java MMTk), and additionally we inform the GC trigger about the
pending allocation pages. This closes mmtk#734.
qinsoon authored and wenyuzhao committed Mar 20, 2023
1 parent 68d353c commit b77b42d
Showing 3 changed files with 40 additions and 6 deletions.
16 changes: 14 additions & 2 deletions src/policy/space.rs
@@ -68,8 +68,14 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
         if should_poll && self.get_gc_trigger().poll(false, Some(self.as_space())) {
             debug!("Collection required");
             assert!(allow_gc, "GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
+
+            // Clear the request, and inform GC trigger about the pending allocation.
+            pr.clear_request(pages_reserved);
+            self.get_gc_trigger()
+                .policy
+                .on_pending_allocation(pages_reserved);
+
             VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator
-            pr.clear_request(pages_reserved); // clear the pages after GC. We need those reserved pages so we can compute new heap size properly.
             unsafe { Address::zero() }
         } else {
             debug!("Collection not required");
@@ -175,8 +181,14 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
 
             let gc_performed = self.get_gc_trigger().poll(true, Some(self.as_space()));
             debug_assert!(gc_performed, "GC not performed when forced.");
+
+            // Clear the request, and inform GC trigger about the pending allocation.
+            pr.clear_request(pages_reserved);
+            self.get_gc_trigger()
+                .policy
+                .on_pending_allocation(pages_reserved);
+
             VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We asserted that this is mutator.
-            pr.clear_request(pages_reserved); // clear the pages after GC. We need those reserved pages so we can compute new heap size properly.
             unsafe { Address::zero() }
         }
     }
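To see the restored ordering in one place, here is a minimal, self-contained sketch of the slow path above. The types `PageResource`, `TriggerPolicy`, and `block_for_gc` are toy stand-ins for illustration, not the real mmtk-core API:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Toy stand-in for the page resource's request accounting.
struct PageResource {
    reserved: AtomicUsize,
}

impl PageResource {
    fn clear_request(&self, pages: usize) {
        self.reserved.fetch_sub(pages, Ordering::Relaxed);
    }
}

/// Toy stand-in for a GC trigger policy that tracks pending allocation.
struct TriggerPolicy {
    pending_pages: AtomicUsize,
}

impl TriggerPolicy {
    fn on_pending_allocation(&self, pages: usize) {
        self.pending_pages.fetch_add(pages, Ordering::SeqCst);
    }
}

/// Stand-in for VM::VMCollection::block_for_gc: the mutator waits here until GC finishes.
fn block_for_gc() {}

/// The slow path after this commit: clear the reserved pages *before* blocking
/// for GC (as before mmtk#712), and additionally report the pending allocation
/// so a dynamic heap-size policy can grow the heap enough to satisfy it.
fn allocation_slow_path(pr: &PageResource, policy: &TriggerPolicy, pages_reserved: usize) {
    pr.clear_request(pages_reserved);
    policy.on_pending_allocation(pages_reserved);
    block_for_gc();
}
```

Compared to the reverted approach, the request is no longer kept reserved across the GC; the trigger instead learns about the pending pages explicitly, which is what lets it compute a heap size that can accommodate the failed allocation.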
10 changes: 7 additions & 3 deletions src/util/heap/accounting.rs
@@ -36,7 +36,8 @@ impl PageAccounting {
     /// the allocation cannot be satisfied. We can call this to clear the number of reserved pages,
     /// so later we can reserve and attempt again.
     pub fn clear_reserved(&self, pages: usize) {
-        self.reserved.fetch_sub(pages, Ordering::Relaxed);
+        let _prev = self.reserved.fetch_sub(pages, Ordering::Relaxed);
+        debug_assert!(_prev >= pages);
     }
 
     /// Inform of successfully committing a certain number of pages. This is used after we have reserved
@@ -48,8 +49,11 @@
     /// Inform of releasing a certain number of pages. The number of pages will be deducted from
     /// both reserved and committed pages.
     pub fn release(&self, pages: usize) {
-        self.reserved.fetch_sub(pages, Ordering::Relaxed);
-        self.committed.fetch_sub(pages, Ordering::Relaxed);
+        let _prev_reserved = self.reserved.fetch_sub(pages, Ordering::Relaxed);
+        debug_assert!(_prev_reserved >= pages);
+
+        let _prev_committed = self.committed.fetch_sub(pages, Ordering::Relaxed);
+        debug_assert!(_prev_committed >= pages);
     }
 
     /// Set both reserved and committed pages to zero. This is only used when we completely clear a space.
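The new assertions rely on `fetch_sub` returning the value before the subtraction. A minimal sketch of the pattern, using a hypothetical standalone counter rather than the real `PageAccounting`:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

struct PageCounter {
    reserved: AtomicUsize,
}

impl PageCounter {
    fn clear_reserved(&self, pages: usize) {
        // fetch_sub returns the previous value, so in debug builds we can
        // check that the counter never underflows; an unsigned underflow
        // would wrap around and silently corrupt the page accounting.
        let _prev = self.reserved.fetch_sub(pages, Ordering::Relaxed);
        debug_assert!(
            _prev >= pages,
            "clearing {} pages but only {} were reserved",
            pages,
            _prev
        );
    }
}

fn main() {
    let counter = PageCounter { reserved: AtomicUsize::new(16) };
    counter.clear_reserved(16); // fine: 16 pages were reserved
    // counter.clear_reserved(1); // would trip the debug_assert (and wrap silently in release builds)
}
```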
20 changes: 19 additions & 1 deletion src/util/heap/gc_trigger.rs
@@ -79,6 +79,11 @@ impl<VM: VMBinding> GCTrigger<VM> {
 /// GC start/end so they can collect some statistics about GC and allocation. The policy needs to
 /// decide the (current) heap limit and decide whether a GC should be performed.
 pub trait GCTriggerPolicy<VM: VMBinding>: Sync + Send {
+    /// Inform the triggering policy that we have a pending allocation.
+    /// Any GC trigger policy with a dynamic heap size should take this into account when calculating a new heap size.
+    /// Failing to do so may result in unnecessary GCs, or in an infinite loop if the new heap size
+    /// can never accommodate the pending allocation.
+    fn on_pending_allocation(&self, _pages: usize) {}
     /// Inform the triggering policy that a GC starts.
     fn on_gc_start(&self, _mmtk: &'static MMTK<VM>) {}
     /// Inform the triggering policy that a GC is about to start the release work. This is called
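Because `on_pending_allocation` has a default empty body, existing fixed-size triggers compile unchanged; only policies with a dynamic heap size need to override it. A hypothetical minimal dynamic policy (not part of mmtk-core) might use it like this:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Hypothetical dynamic trigger that grows its heap target by whatever
/// allocation is still pending when a GC is triggered.
struct SimpleDynamicTrigger {
    heap_pages: AtomicUsize,
    pending_pages: AtomicUsize,
}

impl SimpleDynamicTrigger {
    /// Record the failed allocation so the next heap-size computation sees it.
    fn on_pending_allocation(&self, pages: usize) {
        self.pending_pages.fetch_add(pages, Ordering::SeqCst);
    }

    /// At the end of GC, fold the pending pages into the new heap size and
    /// reset them, mirroring what MemBalancerTrigger does below. Without the
    /// pending term, a heap sized only from live data could stay too small
    /// for the waiting allocation and re-trigger GC forever.
    fn on_gc_end(&self, live_pages: usize) {
        let pending = self.pending_pages.swap(0, Ordering::SeqCst);
        let new_heap = live_pages + live_pages / 2 + pending; // toy 1.5x sizing rule
        self.heap_pages.store(new_heap, Ordering::SeqCst);
    }
}
```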
@@ -147,6 +152,9 @@ pub struct MemBalancerTrigger {
     max_heap_pages: usize,
     /// The current heap size
     current_heap_pages: AtomicUsize,
+    /// The number of pending allocation pages. The allocation requests for them have failed, and a GC is triggered.
+    /// We will need to take them into consideration so that the new heap size can accommodate those allocations.
+    pending_pages: AtomicUsize,
     /// Statistics
     stats: AtomicRefCell<MemBalancerStats>,
 }
@@ -291,6 +299,10 @@ impl MemBalancerStats {
 }
 
 impl<VM: VMBinding> GCTriggerPolicy<VM> for MemBalancerTrigger {
+    fn on_pending_allocation(&self, pages: usize) {
+        self.pending_pages.fetch_add(pages, Ordering::SeqCst);
+    }
+
     fn on_gc_start(&self, mmtk: &'static MMTK<VM>) {
         trace!("=== on_gc_start ===");
         self.access_stats(|stats| {
@@ -352,6 +364,8 @@ impl<VM: VMBinding> GCTriggerPolicy<VM> for MemBalancerTrigger {
                 );
             }
         });
+        // Clear the pending allocation pages at the end of GC, whether we used them or not.
+        self.pending_pages.store(0, Ordering::SeqCst);
     }
 
     fn is_gc_required(
@@ -382,6 +396,7 @@ impl MemBalancerTrigger {
         Self {
             min_heap_pages,
             max_heap_pages,
+            pending_pages: AtomicUsize::new(0),
             // start with min heap
             current_heap_pages: AtomicUsize::new(min_heap_pages),
             stats: AtomicRefCell::new(Default::default()),
@@ -467,8 +482,11 @@ impl MemBalancerTrigger {
             (live as f64 * 4096f64).sqrt()
         };
 
+        // Get pending allocations
+        let pending_pages = self.pending_pages.load(Ordering::SeqCst);
+
         // This is the optimal heap limit due to mem balancer. We will need to clamp the value to the defined min/max range.
-        let optimal_heap = live + e as usize + extra_reserve;
+        let optimal_heap = live + e as usize + extra_reserve + pending_pages;
         trace!(
             "optimal = live {} + sqrt(live) {} + extra {}",
             live,
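Concretely, in the fallback branch shown above: with `live = 10_000` pages, `e = sqrt(10_000 * 4096) = 6_400`, so 1,000 pending pages lift the pre-clamp target from `16_400 + extra_reserve` to `17_400 + extra_reserve`. A sketch of that computation (the function name and signature are assumed for illustration; the real method also folds in allocation and collection speeds when statistics are available):

```rust
/// Sketch of the mem-balancer fallback sizing rule with the new pending term.
/// All quantities are in pages; 4096 is the assumed page size in bytes.
fn optimal_heap_pages(live: usize, extra_reserve: usize, pending_pages: usize) -> usize {
    // Fallback branch mirrored from the diff above: e = sqrt(live * 4096).
    let e = (live as f64 * 4096f64).sqrt();
    // New in this commit: add the pending allocation so the resized heap
    // can actually satisfy the request that triggered the GC.
    live + e as usize + extra_reserve + pending_pages
}

fn main() {
    assert_eq!(optimal_heap_pages(10_000, 0, 0), 16_400);
    assert_eq!(optimal_heap_pages(10_000, 0, 1_000), 17_400);
}
```

Since `pending_pages` is reset to zero at the end of every GC (the `store(0, ...)` hunk above), the term only influences the resize performed by the GC that the failed allocation itself triggered.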
