Skip to content

Commit

Permalink
Keep pending allocation pages in reserved_pages. Use reserved_pages as
Browse files Browse the repository at this point in the history
live for mem balancer
  • Loading branch information
qinsoon committed Dec 16, 2022
1 parent fad2e77 commit 90bd3e5
Show file tree
Hide file tree
Showing 2 changed files with 9 additions and 3 deletions.
4 changes: 2 additions & 2 deletions src/policy/space.rs
Original file line number Diff line number Diff line change
Expand Up @@ -69,8 +69,8 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
if should_poll && self.get_gc_trigger().poll(false, Some(self.as_space())) {
debug!("Collection required");
assert!(allow_gc, "GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
pr.clear_request(pages_reserved);
VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator
pr.clear_request(pages_reserved); // clear the pages after GC. We need those reserved pages so we can compute new heap size properly.
unsafe { Address::zero() }
} else {
debug!("Collection not required");
Expand Down Expand Up @@ -176,8 +176,8 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {

let gc_performed = self.get_gc_trigger().poll(true, Some(self.as_space()));
debug_assert!(gc_performed, "GC not performed when forced.");
pr.clear_request(pages_reserved);
VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We asserted that this is mutator.
pr.clear_request(pages_reserved); // clear the pages after GC. We need those reserved pages so we can compute new heap size properly.
unsafe { Address::zero() }
}
}
Expand Down
8 changes: 7 additions & 1 deletion src/util/heap/gc_trigger.rs
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,13 @@ pub struct MemBalancerTrigger {
impl<VM: VMBinding> GCTriggerPolicy<VM> for MemBalancerTrigger {
fn on_gc_end(&self, mmtk: &'static MMTK<VM>) {
// live memory after a GC
let live = mmtk.plan.get_used_pages() as f64;
// Use reserved pages here: reserved pages include the pending allocation requests that haven't been completed. Using
// reserved pages makes sure that the new heap size can accommodate those pending allocations.
// Otherwise, we may get into a stuck state where our computed heap size does not accommodate the next allocation,
// and a GC is triggered. But the GC cannot collect anything, thus live bytes do not change, and the heap size
// does not update. And we still cannot accommodate the next allocation. We have to avoid this, and make sure
// our computed heap size works for the currently pending allocations.
let live = mmtk.plan.get_reserved_pages() as f64;
// We use a simplified version of mem balancer. Instead of collecting allocation/collection speed and a constant c,
// we use a fixed constant 4096.
let optimal_heap = (live + (live * 4096f64).sqrt()) as usize;
Expand Down

0 comments on commit 90bd3e5

Please sign in to comment.