Implement get_gc_trigger() for LockFreeImmortalSpace #1003

Merged · 3 commits · Oct 31, 2023
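This change gives `LockFreeImmortalSpace` a real implementation of `Space::get_gc_trigger()` by storing the `Arc<GCTrigger<VM>>` passed in through `PlanCreateSpaceArgs`. The constructor now rounds the fixed heap size up to a chunk boundary, reserves its address range through `args.heap.reserve()` instead of assuming it starts at `vm_layout().available_start()`, and the `extent` field is renamed to `total_bytes`. A `nogc_lock_free` feature and a matching allocation test are added to the dummy VM binding.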
47 changes: 31 additions & 16 deletions src/policy/lockfreeimmortalspace.rs
@@ -1,16 +1,18 @@
use atomic::Atomic;

use std::marker::PhantomData;
use std::sync::atomic::Ordering;
use std::sync::Arc;

use crate::policy::sft::GCWorkerMutRef;
use crate::policy::sft::SFT;
use crate::policy::space::{CommonSpace, Space};
use crate::util::address::Address;

use crate::util::conversions;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::vm_layout::vm_layout;
use crate::util::heap::PageResource;
use crate::util::heap::VMRequest;
use crate::util::memory::MmapStrategy;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::util::metadata::side_metadata::SideMetadataSanity;
@@ -34,11 +36,11 @@ pub struct LockFreeImmortalSpace<VM: VMBinding> {
    /// start of this space
    start: Address,
    /// Total bytes for the space
    extent: usize,
    total_bytes: usize,
    /// Zero memory after slow-path allocation
    slow_path_zeroing: bool,
    metadata: SideMetadataContext,
    phantom: PhantomData<VM>,
    gc_trigger: Arc<GCTrigger<VM>>,
}

impl<VM: VMBinding> SFT for LockFreeImmortalSpace<VM> {
@@ -99,12 +101,16 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
        unimplemented!()
    }

    fn get_gc_trigger(&self) -> &GCTrigger<VM> {
        &self.gc_trigger
    }

    fn release_multiple_pages(&mut self, _start: Address) {
        panic!("immortalspace only releases pages enmasse")
    }

    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
        unsafe { sft_map.eager_initialize(self.as_sft(), self.start, self.extent) };
        unsafe { sft_map.eager_initialize(self.as_sft(), self.start, self.total_bytes) };
    }

    fn reserved_pages(&self) -> usize {
@@ -115,6 +121,7 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
    }

    fn acquire(&self, _tls: VMThread, pages: usize) -> Address {
        trace!("LockFreeImmortalSpace::acquire");
        let bytes = conversions::pages_to_bytes(pages);
        let start = self
            .cursor
@@ -170,8 +177,8 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
    #[allow(dead_code)] // Only used with certain features.
    pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
        let slow_path_zeroing = args.zeroed;
        // FIXME: This space assumes that it can use the entire heap range, which is definitely wrong.
        // https://github.com/mmtk/mmtk-core/issues/314

        // Get the total bytes for the heap.
        let total_bytes = match *args.options.gc_trigger {
            crate::util::options::GCTriggerSelector::FixedHeapSize(bytes) => bytes,
            _ => unimplemented!(),
@@ -182,21 +189,30 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
            total_bytes,
            vm_layout().available_bytes()
        );
        // Align up to chunks
        let aligned_total_bytes = crate::util::conversions::raw_align_up(
            total_bytes,
            crate::util::heap::vm_layout::BYTES_IN_CHUNK,
        );

        // Create a VM request of fixed size
        let vmrequest = VMRequest::fixed_size(aligned_total_bytes);
        // Reserve the space
        let VMRequest::Extent{ extent, top } = vmrequest else { unreachable!() };
        let start = args.heap.reserve(extent, top);

        // FIXME: This space assumes that it can use the entire heap range, which is definitely wrong.
        // https://github.com/mmtk/mmtk-core/issues/314
        let space = Self {
            name: args.name,
            cursor: Atomic::new(vm_layout().available_start()),
            limit: vm_layout().available_start() + total_bytes,
            start: vm_layout().available_start(),
            extent: total_bytes,
            cursor: Atomic::new(start),
            limit: start + aligned_total_bytes,
            start,
            total_bytes: aligned_total_bytes,
            slow_path_zeroing,
            metadata: SideMetadataContext {
                global: args.global_side_metadata_specs,
                local: vec![],
            },
            phantom: PhantomData,
            gc_trigger: args.gc_trigger,
        };

        // Eagerly memory map the entire heap (also zero all the memory)
@@ -205,11 +221,10 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
        } else {
            MmapStrategy::Normal
        };
        crate::util::memory::dzmmap_noreplace(vm_layout().available_start(), total_bytes, strategy)
            .unwrap();
        crate::util::memory::dzmmap_noreplace(start, aligned_total_bytes, strategy).unwrap();
        if space
            .metadata
            .try_map_metadata_space(vm_layout().available_start(), total_bytes)
            .try_map_metadata_space(start, aligned_total_bytes)
            .is_err()
        {
            // TODO(Javad): handle meta space allocation failure
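Two details of the new constructor are worth spelling out: the space shares ownership of the GC trigger through an `Arc`, and `get_gc_trigger()` can hand out a plain `&GCTrigger<VM>` because dereferencing the `Arc` borrows from the field for as long as `&self` lives; the requested heap size is also rounded up to a chunk boundary before the range is reserved. The sketch below illustrates both points with simplified stand-in types and an assumed 4 MiB chunk size (the real `GCTrigger`, `Space`, and `raw_align_up` definitions live in mmtk-core and are more involved):

```rust
use std::sync::Arc;

// Stand-in for mmtk-core's GCTrigger<VM> (assumption: only the ownership
// pattern matters here, not the real fields).
struct GcTrigger {
    heap_size: usize,
}

// Stand-in for LockFreeImmortalSpace<VM>.
struct DemoSpace {
    // The plan and the space share the trigger via reference counting.
    gc_trigger: Arc<GcTrigger>,
}

impl DemoSpace {
    // `&self.gc_trigger` deref-coerces from `&Arc<GcTrigger>` to `&GcTrigger`,
    // so the returned reference is tied to `&self`, not to the Arc value itself.
    fn get_gc_trigger(&self) -> &GcTrigger {
        &self.gc_trigger
    }
}

// The usual power-of-two round-up; assumed to behave like
// crate::util::conversions::raw_align_up in mmtk-core.
fn raw_align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}

fn main() {
    let trigger = Arc::new(GcTrigger { heap_size: 10 << 20 }); // 10 MiB fixed heap
    let space = DemoSpace { gc_trigger: Arc::clone(&trigger) };
    assert_eq!(space.get_gc_trigger().heap_size, 10 << 20);

    // Assumed chunk size for the demo; mmtk-core defines its own BYTES_IN_CHUNK.
    const BYTES_IN_CHUNK: usize = 4 << 20;
    let aligned = raw_align_up(10 << 20, BYTES_IN_CHUNK);
    assert_eq!(aligned, 12 << 20); // 10 MiB rounds up to 12 MiB (3 chunks)
}
```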
1 change: 1 addition & 0 deletions vmbindings/dummyvm/Cargo.toml
@@ -36,6 +36,7 @@ malloc_counted_size = ["mmtk/malloc_counted_size"]
malloc_mark_sweep = ["mmtk/malloc_mark_sweep"]
vo_bit = ["mmtk/vo_bit"]
extreme_assertions = ["mmtk/extreme_assertions"]
nogc_lock_free=["mmtk/nogc_lock_free"]

# Feature to control which benchmarks to run. See benches/main.rs
bench_sft = []
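The new `nogc_lock_free` line in the dummy VM's `Cargo.toml` is ordinary Cargo feature forwarding: enabling `nogc_lock_free` on the binding crate also enables the `nogc_lock_free` feature of the `mmtk` dependency (which, presumably, selects the lock-free variant of the NoGC plan), and the same binding-level feature gates the new test module below.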
2 changes: 2 additions & 0 deletions vmbindings/dummyvm/src/tests/mod.rs
@@ -25,6 +25,8 @@ mod malloc_api;
#[cfg(feature = "malloc_counted_size")]
mod malloc_counted;
mod malloc_ms;
#[cfg(feature = "nogc_lock_free")]
mod nogc_lock_free;
#[cfg(target_pointer_width = "64")]
mod vm_layout_compressed_pointer_64;
mod vm_layout_default;
35 changes: 35 additions & 0 deletions vmbindings/dummyvm/src/tests/nogc_lock_free.rs
@@ -0,0 +1,35 @@
// GITHUB-CI: MMTK_PLAN=NoGC
// GITHUB-CI: FEATURES=nogc_lock_free

use crate::api;
use crate::test_fixtures::{MutatorFixture, SerialFixture};
use crate::DummyVM;
use log::info;
use mmtk::plan::AllocationSemantics;
use mmtk::vm::VMBinding;

lazy_static! {
    static ref MUTATOR: SerialFixture<MutatorFixture> = SerialFixture::new();
}

#[test]
pub fn nogc_lock_free_allocate() {
    MUTATOR.with_fixture(|fixture| {
        let min = DummyVM::MIN_ALIGNMENT;
        let max = DummyVM::MAX_ALIGNMENT;
        info!("Allowed alignment between {} and {}", min, max);
        let mut align = min;
        while align <= max {
            info!("Test allocation with alignment {}", align);
            let addr = api::mmtk_alloc(fixture.mutator, 8, align, 0, AllocationSemantics::Default);
            info!("addr = {}", addr);
            assert!(
                addr.is_aligned_to(align),
                "Expected allocation alignment {}, returned address is {:?}",
                align,
                addr
            );
            align *= 2;
        }
    })
}
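The test walks every power-of-two alignment from `DummyVM::MIN_ALIGNMENT` to `DummyVM::MAX_ALIGNMENT` and checks that each address returned by `mmtk_alloc` is aligned accordingly. The `// GITHUB-CI:` header suggests it is meant to run with the NoGC plan selected and the feature enabled, e.g. something along the lines of `MMTK_PLAN=NoGC cargo test --features nogc_lock_free` from the dummy VM crate (the exact invocation depends on the CI scripts).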