Introduce VMSpace, and allow VMSpace to be set lazily #802

Merged
merged 25 commits into from
May 8, 2023
Changes from 17 commits
Commits
25 commits
a368846
Add MarkState. Use MarkState for ImmortalSpace
qinsoon Apr 27, 2023
2115150
Merge branch 'master' into immortal-space-mark-state
qinsoon Apr 27, 2023
13e7b9b
Change Fn to FnMut
qinsoon Apr 28, 2023
0ee7bbb
Merge branch 'master' into immortal-space-mark-state
qinsoon Apr 28, 2023
80dc955
Duplicate old ImmortalSpace as VMSpace
qinsoon May 1, 2023
b0c71b5
Improve comments
qinsoon May 1, 2023
52d79b5
Revert changes about VMSpace. Properly reset mark bit if an immortal
qinsoon May 2, 2023
76c6c1e
WIP
qinsoon May 2, 2023
cecab2f
Add VMSpace, allow lazy set VM space
qinsoon May 3, 2023
a58027f
Work around an assertion
qinsoon May 3, 2023
21a0c97
Merge branch 'master' into lazy-set-vm-space
qinsoon May 3, 2023
e41b90b
Fix build/style
qinsoon May 3, 2023
c1e55fd
Add base_mut for tutorial code
qinsoon May 3, 2023
df90f22
Move our heap range for 32bits so it won't overlap with JikesRVM's VM
qinsoon May 4, 2023
d0c6314
Merge branch 'master' into lazy-set-vm-space
qinsoon May 4, 2023
a113b54
cargo fmt
qinsoon May 4, 2023
aa03a5b
Fix overflow in 32bits
qinsoon May 4, 2023
cf36256
Use Range<Address>
qinsoon May 5, 2023
9455b23
Use wrapping_sub_unsigned in align_allocation
qinsoon May 7, 2023
18cc140
Use delegate! for VMSpace
qinsoon May 7, 2023
8bad6c5
Merge branch 'master' into lazy-set-vm-space
qinsoon May 7, 2023
f451887
Revert "Use wrapping_sub_unsigned in align_allocation"
qinsoon May 7, 2023
5504fab
Add comments about wrapping_sub_unsigned
qinsoon May 7, 2023
4993816
Cargo fmt
qinsoon May 7, 2023
052c69b
Use bool.then() in VMSpace::new()
qinsoon May 8, 2023
4 changes: 4 additions & 0 deletions docs/tutorial/code/mygc_semispace/global.rs
@@ -154,6 +154,10 @@ impl<VM: VMBinding> Plan for MyGC<VM> {
fn base(&self) -> &BasePlan<VM> {
&self.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.common.base
}
// ANCHOR_END: plan_base

// Add
5 changes: 5 additions & 0 deletions src/memory_manager.rs
@@ -85,6 +85,11 @@ pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {
Box::new(mmtk)
}

#[cfg(feature = "vm_space")]
pub fn lazy_init_vm_space<VM: VMBinding>(mmtk: &'static mut MMTK<VM>, start: Address, size: usize) {
mmtk.plan.base_mut().vm_space.lazy_initialize(start, size);
}

/// Request MMTk to create a mutator for the given thread. The ownership
/// of returned boxed mutator is transferred to the binding, and the binding needs to take care of its
/// lifetime. For performance reasons, A VM should store the returned mutator in a thread local storage
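For context, here is a minimal sketch of how a binding might drive this new entry point: build MMTk first, then report the VM space range once it is known. It assumes the `vm_space` feature is enabled; the binding-side function names (`start_mmtk`, `on_boot_image_mapped`) and the builder setup are illustrative, not part of this PR.

```rust
use mmtk::memory_manager;
use mmtk::util::Address;
use mmtk::vm::VMBinding;
use mmtk::{MMTKBuilder, MMTK};

// Build MMTk before the runtime knows where its VM space (e.g. a boot image) lives.
fn start_mmtk<VM: VMBinding>() -> &'static mut MMTK<VM> {
    let builder = MMTKBuilder::new();
    // How the binding stores the MMTK instance is up to the binding; leaking the box
    // is just the simplest way to get the `&'static mut` that lazy_init_vm_space wants.
    Box::leak(memory_manager::mmtk_init::<VM>(&builder))
}

// Called later, once the runtime has mapped its boot image and knows the range.
fn on_boot_image_mapped<VM: VMBinding>(mmtk: &'static mut MMTK<VM>, start: Address, size: usize) {
    memory_manager::lazy_init_vm_space::<VM>(mmtk, start, size);
}
```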
4 changes: 4 additions & 0 deletions src/plan/generational/copying/global.rs
@@ -148,6 +148,10 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
&self.gen.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.gen.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.gen.common
}
4 changes: 4 additions & 0 deletions src/plan/generational/immix/global.rs
@@ -178,6 +178,10 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
&self.gen.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.gen.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.gen.common
}
29 changes: 5 additions & 24 deletions src/plan/global.rs
@@ -8,6 +8,8 @@ use crate::plan::Mutator;
use crate::policy::immortalspace::ImmortalSpace;
use crate::policy::largeobjectspace::LargeObjectSpace;
use crate::policy::space::{PlanCreateSpaceArgs, Space};
#[cfg(feature = "vm_space")]
use crate::policy::vmspace::VMSpace;
use crate::scheduler::*;
use crate::util::alloc::allocators::AllocatorSelector;
#[cfg(feature = "analysis")]
@@ -176,6 +178,7 @@ pub trait Plan: 'static + Sync + Downcast {
}

fn base(&self) -> &BasePlan<Self::VM>;
fn base_mut(&mut self) -> &mut BasePlan<Self::VM>;
fn schedule_collection(&'static self, _scheduler: &GCWorkScheduler<Self::VM>);
fn common(&self) -> &CommonPlan<Self::VM> {
panic!("Common Plan not handled!")
@@ -423,29 +426,7 @@ pub struct BasePlan<VM: VMBinding> {
/// the VM space.
#[cfg(feature = "vm_space")]
#[trace]
pub vm_space: ImmortalSpace<VM>,
}

#[cfg(feature = "vm_space")]
pub fn create_vm_space<VM: VMBinding>(args: &mut CreateSpecificPlanArgs<VM>) -> ImmortalSpace<VM> {
use crate::util::constants::LOG_BYTES_IN_MBYTE;
let boot_segment_bytes = *args.global_args.options.vm_space_size;
debug_assert!(boot_segment_bytes > 0);

use crate::util::conversions::raw_align_up;
use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
let boot_segment_mb = raw_align_up(boot_segment_bytes, BYTES_IN_CHUNK) >> LOG_BYTES_IN_MBYTE;

let space = ImmortalSpace::new_vm_space(args.get_space_args(
"boot",
false,
VMRequest::fixed_size(boot_segment_mb),
));

// The space is mapped externally by the VM. We need to update our mmapper to mark the range as mapped.
space.ensure_mapped();

space
pub vm_space: VMSpace<VM>,
}

/// Args needed for creating any plan. This includes a set of contexts from MMTK or global. This
@@ -520,7 +501,7 @@ impl<VM: VMBinding> BasePlan<VM> {
VMRequest::discontiguous(),
)),
#[cfg(feature = "vm_space")]
vm_space: create_vm_space(&mut args),
vm_space: VMSpace::new(&mut args),

initialized: AtomicBool::new(false),
trigger_gc_when_heap_is_full: AtomicBool::new(true),
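The new `src/policy/vmspace.rs` itself is not included in this excerpt. Purely as an illustration of the lazy-initialization pattern implied by the call sites above (`VMSpace::new(&mut args)` at plan creation, `lazy_initialize(start, size)` later from the binding), here is a self-contained toy version with made-up types; it is not the PR's actual implementation.

```rust
/// Toy stand-in for a space that manages one fixed address region.
struct Region {
    start: usize,
    size: usize,
}

/// Toy lazy wrapper: it can be created before its region is known,
/// and the region is supplied exactly once later.
struct LazySpace {
    region: Option<Region>,
}

impl LazySpace {
    /// If the range is already known (e.g. from command-line options), set it up eagerly.
    fn new(known: Option<(usize, usize)>) -> Self {
        LazySpace {
            region: known.map(|(start, size)| Region { start, size }),
        }
    }

    /// Supply the region after the fact (mirrors `lazy_init_vm_space`).
    fn lazy_initialize(&mut self, start: usize, size: usize) {
        assert!(self.region.is_none(), "region already initialized");
        self.region = Some(Region { start, size });
    }

    fn contains(&self, addr: usize) -> bool {
        self.region
            .as_ref()
            .map_or(false, |r| addr >= r.start && addr < r.start + r.size)
    }
}

fn main() {
    let mut space = LazySpace::new(None);
    assert!(!space.contains(0x6000_1000));
    space.lazy_initialize(0x6000_0000, 4 << 20);
    assert!(space.contains(0x6000_1000));
}
```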
4 changes: 4 additions & 0 deletions src/plan/immix/global.rs
@@ -117,6 +117,10 @@ impl<VM: VMBinding> Plan for Immix<VM> {
&self.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.common
}
4 changes: 4 additions & 0 deletions src/plan/markcompact/global.rs
@@ -61,6 +61,10 @@ impl<VM: VMBinding> Plan for MarkCompact<VM> {
&self.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.common
}
4 changes: 4 additions & 0 deletions src/plan/marksweep/global.rs
@@ -87,6 +87,10 @@ impl<VM: VMBinding> Plan for MarkSweep<VM> {
&self.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.common
}
2 changes: 2 additions & 0 deletions src/plan/mod.rs
@@ -26,6 +26,8 @@ pub use global::AllocationSemantics;
pub(crate) use global::GcStatus;
pub use global::Plan;
pub(crate) use global::PlanTraceObject;
#[cfg(feature = "vm_space")] // This is used for creating VM space
pub(crate) use global::{CreateGeneralPlanArgs, CreateSpecificPlanArgs};

mod mutator_context;
pub use mutator_context::Mutator;
4 changes: 4 additions & 0 deletions src/plan/nogc/global.rs
@@ -53,6 +53,10 @@ impl<VM: VMBinding> Plan for NoGC<VM> {
&self.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.base
}

fn prepare(&mut self, _tls: VMWorkerThread) {
unreachable!()
}
4 changes: 4 additions & 0 deletions src/plan/pageprotect/global.rs
@@ -77,6 +77,10 @@ impl<VM: VMBinding> Plan for PageProtect<VM> {
&self.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.common
}
4 changes: 4 additions & 0 deletions src/plan/semispace/global.rs
@@ -129,6 +129,10 @@ impl<VM: VMBinding> Plan for SemiSpace<VM> {
&self.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.common
}
4 changes: 4 additions & 0 deletions src/plan/sticky/immix/global.rs
@@ -67,6 +67,10 @@ impl<VM: VMBinding> Plan for StickyImmix<VM> {
self.immix.base()
}

fn base_mut(&mut self) -> &mut crate::plan::global::BasePlan<Self::VM> {
self.immix.base_mut()
}

fn generational(
&self,
) -> Option<&dyn crate::plan::generational::global::GenerationalPlan<VM = Self::VM>> {
30 changes: 20 additions & 10 deletions src/policy/immortalspace.rs
@@ -120,15 +120,6 @@ impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for ImmortalSp

impl<VM: VMBinding> ImmortalSpace<VM> {
pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
Self::new_inner(args, false)
}

#[cfg(feature = "vm_space")]
pub fn new_vm_space(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
Self::new_inner(args, true)
}

pub fn new_inner(args: crate::policy::space::PlanCreateSpaceArgs<VM>, vm_space: bool) -> Self {
let vm_map = args.vm_map;
let is_discontiguous = args.vmrequest.is_discontiguous();
let common = CommonSpace::new(args.into_policy_args(
@@ -144,7 +135,26 @@ impl<VM: VMBinding> ImmortalSpace<VM> {
MonotonePageResource::new_contiguous(common.start, common.extent, vm_map)
},
common,
vm_space,
vm_space: false,
}
}

#[cfg(feature = "vm_space")]
pub fn new_vm_space(
args: crate::policy::space::PlanCreateSpaceArgs<VM>,
start: Address,
size: usize,
) -> Self {
assert!(!args.vmrequest.is_discontiguous());
ImmortalSpace {
mark_state: MarkState::new(),
pr: MonotonePageResource::new_contiguous(start, size, args.vm_map),
common: CommonSpace::new(args.into_policy_args(
false,
true,
metadata::extract_side_metadata(&[*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC]),
)),
vm_space: true,
}
}

2 changes: 2 additions & 0 deletions src/policy/mod.rs
@@ -27,3 +27,5 @@ pub mod largeobjectspace;
pub mod lockfreeimmortalspace;
pub mod markcompactspace;
pub mod marksweepspace;
#[cfg(feature = "vm_space")]
pub mod vmspace;
12 changes: 9 additions & 3 deletions src/policy/sft_map.rs
@@ -120,8 +120,13 @@ mod space_map {
assert!(old.name() == EMPTY_SFT_NAME || old.name() == space.name());
// Make sure the range is in the space
let space_start = Self::index_to_space_start(index);
assert!(start >= space_start);
assert!(start + bytes <= space_start + MAX_SPACE_EXTENT);
// FIXME: Currently we skip the check for the last space. The check works fine for MMTk's internal spaces,
// but the VM space is an exception. Based on our indexing function, any address after the last space is
// considered to be in the last space, so we cannot assume the end of the region falls within the last space (i.e. within MAX_SPACE_EXTENT).
if index != Self::TABLE_SIZE - 1 {
assert!(start >= space_start);
assert!(start + bytes <= space_start + MAX_SPACE_EXTENT);
}
}
*mut_self.sft.get_unchecked_mut(index) = space;
}
@@ -512,8 +517,9 @@ mod sparse_chunk_map {
// in which case, we still set SFT map again.
debug_assert!(
old == EMPTY_SFT_NAME || new == EMPTY_SFT_NAME || old == new,
"attempt to overwrite a non-empty chunk {} in SFT map (from {} to {})",
"attempt to overwrite a non-empty chunk {} ({}) in SFT map (from {} to {})",
chunk,
crate::util::conversions::chunk_index_to_address(chunk),
old,
new
);
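The FIXME above hinges on the indexing function mapping every address past the last space to the last table slot. A self-contained toy model of that behaviour (the constants and the function name here are made up for illustration, not the crate's real values):

```rust
// Toy model of a space-indexed table; the constants are illustrative only.
const LOG_SLOT_EXTENT: u64 = 41; // each slot nominally covers 2^41 bytes
const TABLE_SIZE: u64 = 16;

fn address_to_index(addr: u64) -> u64 {
    // Any address beyond the table's nominal end clamps to the last slot,
    // so the last slot can "contain" regions larger than its nominal extent.
    (addr >> LOG_SLOT_EXTENT).min(TABLE_SIZE - 1)
}

fn main() {
    let last_slot_start = (TABLE_SIZE - 1) << LOG_SLOT_EXTENT;
    let way_past_nominal_end = last_slot_start + (10 << LOG_SLOT_EXTENT);
    // Both map to the last slot, which is why the start/extent assertion
    // is skipped for index TABLE_SIZE - 1.
    assert_eq!(address_to_index(last_slot_start), TABLE_SIZE - 1);
    assert_eq!(address_to_index(way_past_nominal_end), TABLE_SIZE - 1);
}
```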
33 changes: 28 additions & 5 deletions src/policy/space.rs
@@ -490,10 +490,8 @@ impl<VM: VMBinding> CommonSpace<VM> {
top: _top,
} => (_extent, _top),
VMRequest::Fixed {
extent: _extent,
top: _top,
..
} => (_extent, _top),
extent: _extent, ..
} => (_extent, false),
_ => unreachable!(),
};

@@ -524,7 +522,32 @@ impl<VM: VMBinding> CommonSpace<VM> {
// FIXME
rtn.descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent);
// VM.memory.setHeapRange(index, start, start.plus(extent));
args.plan_args.vm_map.insert(start, extent, rtn.descriptor);

// We only initialize our vm map if the range of the space is within our available heap range. Normal spaces
// are always within our heap range, but for the VM space, a runtime could give us an arbitrary range. We only
// insert into our vm map if the range overlaps with our heap.
{
use crate::util::heap::layout;
if layout::range_overlaps_available_range(start, extent) {
// Crop the range to our available heap range. We only need to set the VM map for the part of the range that is in our heap.
let start_in_range = if start > layout::vm_layout_constants::AVAILABLE_START {
start
} else {
layout::vm_layout_constants::AVAILABLE_START
};
let end_in_range = if start + extent < layout::vm_layout_constants::AVAILABLE_END {
start + extent
} else {
layout::vm_layout_constants::AVAILABLE_END
};

args.plan_args.vm_map.insert(
start_in_range,
end_in_range - start_in_range,
rtn.descriptor,
);
}
}

// For contiguous space, we know its address range so we reserve metadata memory for its range.
if rtn
Expand Down
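The cropping above amounts to clamping [start, start + extent) into [AVAILABLE_START, AVAILABLE_END) and skipping the vm_map insertion when nothing is left. A self-contained sketch of that clamp, using plain integers and placeholder bounds rather than the crate's Address type and real constants:

```rust
// Sketch of the crop-to-available-heap logic above; the bounds are
// placeholders standing in for AVAILABLE_START / AVAILABLE_END.
const AVAILABLE_START: u64 = 0x0000_2000_0000_0000;
const AVAILABLE_END: u64 = 0x0000_2200_0000_0000;

/// Returns the part of [start, start + extent) that overlaps the available
/// heap range, or None if there is no overlap (so no vm_map insertion).
fn crop_to_available(start: u64, extent: u64) -> Option<(u64, u64)> {
    let end = start + extent;
    let cropped_start = start.max(AVAILABLE_START);
    let cropped_end = end.min(AVAILABLE_END);
    (cropped_start < cropped_end).then(|| (cropped_start, cropped_end - cropped_start))
}

fn main() {
    // A VM space that starts below the available range is cropped to it.
    let (start, extent) = crop_to_available(0x0000_1f00_0000_0000, 0x0200_0000_0000).unwrap();
    assert_eq!(start, AVAILABLE_START);
    assert_eq!(extent, 0x0100_0000_0000);
    // A range entirely outside the heap is skipped.
    assert!(crop_to_available(0x0000_0100_0000_0000, 0x1000).is_none());
}
```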