Add ExternalPageResource and allow discontiguous VM space #864

Merged 13 commits on Aug 31, 2023
8 changes: 6 additions & 2 deletions src/memory_manager.rs
@@ -88,9 +88,13 @@ pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {
Box::new(mmtk)
}

/// Add an externally mmapped region to the VM space. A VM space can be set through MMTk options (`vm_space_start` and `vm_space_size`),
/// and can also be set through this function call. A VM space can be discontiguous: this function can be called multiple times,
/// and every address range passed to it will be considered part of the VM space.
/// Currently we do not allow removing regions from the VM space.
#[cfg(feature = "vm_space")]
pub fn lazy_init_vm_space<VM: VMBinding>(mmtk: &'static mut MMTK<VM>, start: Address, size: usize) {
mmtk.plan.base_mut().vm_space.lazy_initialize(start, size);
pub fn set_vm_space<VM: VMBinding>(mmtk: &'static mut MMTK<VM>, start: Address, size: usize) {
mmtk.plan.base_mut().vm_space.set_vm_region(start, size);
}
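As a rough, standalone illustration of the contract described in the doc comment above (each call adds another page-aligned range, ranges may be discontiguous, and nothing is ever removed), the sketch below models the VM space as a growing list of ranges. The names `VmRegionRegistry` and `PAGE_SIZE` are hypothetical; this is not MMTk's actual implementation.

```rust
// Hypothetical sketch: a registry that accumulates externally mmapped
// regions, mirroring the "call multiple times, never remove" contract.
const PAGE_SIZE: usize = 4096; // assumption: 4 KiB pages

fn align_down(x: usize, align: usize) -> usize {
    x & !(align - 1)
}

fn align_up(x: usize, align: usize) -> usize {
    (x + align - 1) & !(align - 1)
}

#[derive(Default)]
struct VmRegionRegistry {
    regions: Vec<(usize, usize)>, // (start, end), page aligned
}

impl VmRegionRegistry {
    /// Add one externally managed range. Ranges may be discontiguous.
    fn add(&mut self, start: usize, size: usize) {
        let start = align_down(start, PAGE_SIZE);
        let end = align_up(start + size, PAGE_SIZE);
        self.regions.push((start, end));
    }

    /// An address is "in the VM space" if any registered range contains it.
    fn contains(&self, addr: usize) -> bool {
        self.regions.iter().any(|&(s, e)| addr >= s && addr < e)
    }
}

fn main() {
    let mut vm_space = VmRegionRegistry::default();
    // Two discontiguous regions added by separate calls.
    vm_space.add(0x2_0000_0000, 8 * PAGE_SIZE);
    vm_space.add(0x7_0000_0000, 4 * PAGE_SIZE);
    assert!(vm_space.contains(0x2_0000_0100));
    assert!(!vm_space.contains(0x3_0000_0000));
}
```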

/// Request MMTk to create a mutator for the given thread. The ownership
6 changes: 5 additions & 1 deletion src/plan/global.rs
@@ -524,7 +524,11 @@ impl<VM: VMBinding> BasePlan<VM> {
VMRequest::discontiguous(),
)),
#[cfg(feature = "vm_space")]
vm_space: VMSpace::new(&mut args),
vm_space: VMSpace::new(args.get_space_args(
"vm_space",
false,
VMRequest::discontiguous(),
)),

initialized: AtomicBool::new(false),
trigger_gc_when_heap_is_full: AtomicBool::new(true),
2 changes: 0 additions & 2 deletions src/plan/mod.rs
@@ -26,8 +26,6 @@ pub use global::AllocationSemantics;
pub(crate) use global::GcStatus;
pub use global::Plan;
pub(crate) use global::PlanTraceObject;
#[cfg(feature = "vm_space")] // This is used for creating VM space
pub(crate) use global::{CreateGeneralPlanArgs, CreateSpecificPlanArgs};

mod mutator_context;
pub use mutator_context::Mutator;
280 changes: 149 additions & 131 deletions src/policy/vmspace.rs
@@ -1,52 +1,79 @@
use crate::plan::{CreateGeneralPlanArgs, CreateSpecificPlanArgs};
use crate::mmtk::SFT_MAP;
use crate::plan::{ObjectQueue, VectorObjectQueue};
use crate::policy::immortalspace::ImmortalSpace;
use crate::policy::sft::GCWorkerMutRef;
use crate::policy::sft::SFT;
use crate::policy::space::{CommonSpace, Space};
use crate::util::address::Address;
use crate::util::heap::HeapMeta;
use crate::util::constants::BYTES_IN_PAGE;
use crate::util::heap::externalpageresource::{ExternalPageResource, ExternalPages};
use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
use crate::util::heap::PageResource;
use crate::util::heap::VMRequest;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::util::metadata::side_metadata::SideMetadataSanity;
use crate::util::metadata::mark_bit::MarkState;
use crate::util::opaque_pointer::*;
use crate::util::ObjectReference;
use crate::vm::VMBinding;
use crate::vm::{ObjectModel, VMBinding};

use delegate::delegate;
use std::sync::atomic::Ordering;

/// A special space for VM/Runtime managed memory. The implementation is similar to [`crate::policy::immortalspace::ImmortalSpace`],
/// except that the VM space does not allocate. Instead, the runtime can add externally managed
/// and mmapped regions to the space, and objects in those regions can be traced in the same way
/// as objects allocated by MMTk.
pub struct VMSpace<VM: VMBinding> {
inner: Option<ImmortalSpace<VM>>,
// Save it
args: CreateSpecificPlanArgs<VM>,
mark_state: MarkState,
common: CommonSpace<VM>,
pr: ExternalPageResource<VM>,
}

impl<VM: VMBinding> SFT for VMSpace<VM> {
delegate! {
// Delegate every call to the inner space. Given that we have acquired SFT, we can assume there are objects in the space and the space is initialized.
to self.space() {
fn name(&self) -> &str;
fn is_live(&self, object: ObjectReference) -> bool;
fn is_reachable(&self, object: ObjectReference) -> bool;
#[cfg(feature = "object_pinning")]
fn pin_object(&self, object: ObjectReference) -> bool;
#[cfg(feature = "object_pinning")]
fn unpin_object(&self, object: ObjectReference) -> bool;
#[cfg(feature = "object_pinning")]
fn is_object_pinned(&self, object: ObjectReference) -> bool;
fn is_movable(&self) -> bool;
#[cfg(feature = "sanity")]
fn is_sane(&self) -> bool;
fn initialize_object_metadata(&self, object: ObjectReference, alloc: bool);
#[cfg(feature = "is_mmtk_object")]
fn is_mmtk_object(&self, addr: Address) -> bool;
fn sft_trace_object(
&self,
queue: &mut VectorObjectQueue,
object: ObjectReference,
worker: GCWorkerMutRef,
) -> ObjectReference;
fn name(&self) -> &str {
self.common.name
}
fn is_live(&self, _object: ObjectReference) -> bool {
true
}
fn is_reachable(&self, object: ObjectReference) -> bool {
self.mark_state.is_marked::<VM>(object)
}
#[cfg(feature = "object_pinning")]
fn pin_object(&self, _object: ObjectReference) -> bool {
false
}
#[cfg(feature = "object_pinning")]
fn unpin_object(&self, _object: ObjectReference) -> bool {
false
}
#[cfg(feature = "object_pinning")]
fn is_object_pinned(&self, _object: ObjectReference) -> bool {
true
}
fn is_movable(&self) -> bool {
false
}
#[cfg(feature = "sanity")]
fn is_sane(&self) -> bool {
true
}
fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) {
self.mark_state
.on_object_metadata_initialization::<VM>(object);
if self.common.needs_log_bit {
VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::<VM>(object, Ordering::SeqCst);
}
#[cfg(feature = "vo_bit")]
crate::util::metadata::vo_bit::set_vo_bit::<VM>(object);
}
#[cfg(feature = "is_mmtk_object")]
fn is_mmtk_object(&self, addr: Address) -> bool {
crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr).is_some()
}
fn sft_trace_object(
&self,
queue: &mut VectorObjectQueue,
object: ObjectReference,
_worker: GCWorkerMutRef,
) -> ObjectReference {
self.trace_object(queue, object)
}
}

@@ -58,38 +85,43 @@ impl<VM: VMBinding> Space<VM> for VMSpace<VM> {
self
}
fn get_page_resource(&self) -> &dyn PageResource<VM> {
self.space().get_page_resource()
&self.pr
}
fn common(&self) -> &CommonSpace<VM> {
self.space().common()
&self.common
}

fn initialize_sft(&self) {
if self.inner.is_some() {
self.common().initialize_sft(self.as_sft())
}
// Do nothing here. We initialize the SFT whenever external pages are added to the space.
}

fn release_multiple_pages(&mut self, _start: Address) {
panic!("immortalspace only releases pages enmasse")
unreachable!()
}

fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
side_metadata_sanity_checker.verify_metadata_context(
std::any::type_name::<Self>(),
&SideMetadataContext {
global: self.args.global_side_metadata_specs.clone(),
local: vec![],
},
)
fn acquire(&self, _tls: VMThread, _pages: usize) -> Address {
unreachable!()
}

fn address_in_space(&self, start: Address) -> bool {
if let Some(space) = self.space_maybe() {
space.address_in_space(start)
} else {
false
}
// The default implementation checks with the VM map. But the VM map makes some assumptions about
// the address ranges of spaces, and the VM space may break those assumptions (as the space is
// mmapped by the runtime rather than by us). So we use the SFT here.

// However, the SFT map may not be an ideal solution either on 64 bits. The default
// implementation of the SFT map on 64 bits is `SFTSpaceMap`, which maps the entire address
// space into an index between 0 and 31, and assumes any address with the same index
// is in the same space (with the same SFT). MMTk spaces use indices 1-16. We guarantee that
// the VM space does not overlap with the address range that MMTk spaces may use, so
// any region used as the VM space will have an index of 0, or 17-31, and all the addresses
// that map to the same index will be considered as in the VM space. That means, after
// we map a region as the VM space, nearby addresses will also be considered as in the
// VM space if we use the default `SFTSpaceMap`. We can guarantee the nearby addresses
// are not in MMTk spaces, but we cannot tell whether they are really in the VM space or not.
// A solution to this is to use `SFTDenseChunkMap` if `vm_space` is enabled on 64 bits.
// `SFTDenseChunkMap` has an overhead of a few percent (~3%) compared to `SFTSpaceMap`.
SFT_MAP.get_checked(start).name() == self.name()
}
}
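To make the granularity concern in the comment above concrete, here is a toy model of an `SFTSpaceMap`-style lookup. The constants (a 47-bit address space split into 32 equal regions) are illustrative assumptions, not MMTk's actual layout.

```rust
// Toy model: map an address to one of 32 coarse indices, the way a
// space-map style lookup does. Constants are illustrative assumptions.
const LOG_ADDRESS_SPACE: usize = 47; // assume a 47-bit usable address space
const LOG_REGIONS: usize = 5; // 32 coarse regions
const LOG_REGION_SIZE: usize = LOG_ADDRESS_SPACE - LOG_REGIONS;

fn coarse_index(addr: usize) -> usize {
    addr >> LOG_REGION_SIZE
}

fn main() {
    let vm_region_start = 0x6800_0000_0000usize;
    let nearby_but_outside = vm_region_start + 0x1000_0000; // never registered
    // Both addresses collapse to the same coarse index, so a space-map
    // lookup alone cannot distinguish "in the VM space" from "merely nearby".
    assert_eq!(coarse_index(vm_region_start), coarse_index(nearby_but_outside));
}
```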

@@ -112,118 +144,104 @@ impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for VMSpace<VM
}

impl<VM: VMBinding> VMSpace<VM> {
pub fn new(args: &mut CreateSpecificPlanArgs<VM>) -> Self {
let args_clone = CreateSpecificPlanArgs {
global_args: CreateGeneralPlanArgs {
vm_map: args.global_args.vm_map,
mmapper: args.global_args.mmapper,
heap: HeapMeta::new(), // we do not use this
options: args.global_args.options.clone(),
gc_trigger: args.global_args.gc_trigger.clone(),
scheduler: args.global_args.scheduler.clone(),
},
constraints: args.constraints,
global_side_metadata_specs: args.global_side_metadata_specs.clone(),
pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
let (vm_space_start, vm_space_size) =
(*args.options.vm_space_start, *args.options.vm_space_size);
let space = Self {
mark_state: MarkState::new(),
pr: ExternalPageResource::new(args.vm_map),
common: CommonSpace::new(args.into_policy_args(
false,
true,
crate::util::metadata::extract_side_metadata(&[
*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC,
]),
)),
};
// Create the space if the VM space start/size is set. Otherwise, use None.
let inner = (!args.global_args.options.vm_space_start.is_zero())
.then(|| Self::create_space(args, None));
Self {
inner,
args: args_clone,

if !vm_space_start.is_zero() {
space.add_external_pages(vm_space_start, vm_space_size);
}
}

pub fn lazy_initialize(&mut self, start: Address, size: usize) {
assert!(self.inner.is_none(), "VM space has been initialized");
self.inner = Some(Self::create_space(&mut self.args, Some((start, size))));
space
}

self.common().initialize_sft(self.as_sft());
pub fn set_vm_region(&mut self, start: Address, size: usize) {
self.add_external_pages(start, size);
}

fn create_space(
args: &mut CreateSpecificPlanArgs<VM>,
location: Option<(Address, usize)>,
) -> ImmortalSpace<VM> {
use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
pub fn add_external_pages(&self, start: Address, size: usize) {
let start = start.align_down(BYTES_IN_PAGE);
let end = (start + size).align_up(BYTES_IN_PAGE);
let size = end - start;

// If the location of the VM space is not supplied, find them in the options.
let (vm_space_start, vm_space_bytes) = location.unwrap_or((
*args.global_args.options.vm_space_start,
*args.global_args.options.vm_space_size,
));
// Verify that the start and the size are valid
assert!(!vm_space_start.is_zero());
assert!(vm_space_bytes > 0);
assert!(!start.is_zero());
assert!(size > 0);

// We only map on chunk granularity. Align them.
let vm_space_start_aligned = vm_space_start.align_down(BYTES_IN_CHUNK);
let vm_space_end_aligned = (vm_space_start + vm_space_bytes).align_up(BYTES_IN_CHUNK);
let vm_space_bytes_aligned = vm_space_end_aligned - vm_space_start_aligned;
let chunk_start = start.align_down(BYTES_IN_CHUNK);
let chunk_end = end.align_up(BYTES_IN_CHUNK);
let chunk_size = chunk_end - chunk_start;

// For simplicity, VMSpace has to be outside our available heap range.
// TODO: Allow VMSpace in our available heap range.
assert!(Address::range_intersection(
&(vm_space_start_aligned..vm_space_end_aligned),
&(chunk_start..chunk_end),
&crate::util::heap::layout::available_range()
)
.is_empty());

debug!(
"Align VM space ({}, {}) to chunk ({}, {})",
vm_space_start,
vm_space_start + vm_space_bytes,
vm_space_start_aligned,
vm_space_end_aligned
);

let space_args = args.get_space_args(
"vm_space",
false,
VMRequest::fixed(vm_space_start_aligned, vm_space_bytes_aligned),
start, end, chunk_start, chunk_end
);
let space =
ImmortalSpace::new_vm_space(space_args, vm_space_start_aligned, vm_space_bytes_aligned);

// The space is mapped externally by the VM. We need to update our mmapper to mark the range as mapped.
space.ensure_mapped();

space
}

fn space_maybe(&self) -> Option<&ImmortalSpace<VM>> {
self.inner.as_ref()
}
// Mark as mapped in mmapper
self.common.mmapper.mark_as_mapped(chunk_start, chunk_size);
// Map side metadata
self.common
.metadata
.try_map_metadata_space(chunk_start, chunk_size)
.unwrap();
// Insert into the VM map: it would be good if we could make the VM map aware of the region. However, the region may be outside what we can map in our VM map implementation.
// self.common.vm_map.insert(chunk_start, chunk_size, self.common.descriptor);
// Update SFT
assert!(SFT_MAP.has_sft_entry(chunk_start), "The VM space start (aligned to {}) does not have a valid SFT entry. Possibly the address range is not in the address range we use.", chunk_start);
unsafe {
SFT_MAP.eager_initialize(self.as_sft(), chunk_start, chunk_size);
}

fn space(&self) -> &ImmortalSpace<VM> {
self.inner.as_ref().unwrap()
self.pr.add_new_external_pages(ExternalPages { start, end });
}
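The alignment in `add_external_pages` is plain power-of-two arithmetic. The standalone sketch below (assuming 4 KiB pages and 4 MiB chunks, which may differ from the actual constants) shows how a raw `(start, size)` region expands to the page-aligned range that gets registered and the chunk-aligned range whose mmapper, metadata, and SFT entries are prepared.

```rust
// Standalone sketch of the alignment performed in add_external_pages.
// Page/chunk sizes are assumptions for illustration.
const BYTES_IN_PAGE: usize = 1 << 12; // assumed 4 KiB
const BYTES_IN_CHUNK: usize = 1 << 22; // assumed 4 MiB

fn align_down(x: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    x & !(align - 1)
}

fn align_up(x: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (x + align - 1) & !(align - 1)
}

fn main() {
    let (raw_start, size) = (0x2_0000_1234usize, 10_000usize);

    // The range recorded as external pages is page aligned.
    let start = align_down(raw_start, BYTES_IN_PAGE);
    let end = align_up(start + size, BYTES_IN_PAGE);

    // Mmapper, side-metadata, and SFT bookkeeping happen at chunk granularity.
    let chunk_start = align_down(start, BYTES_IN_CHUNK);
    let chunk_end = align_up(end, BYTES_IN_CHUNK);

    println!(
        "pages: {:#x}..{:#x}, chunks: {:#x}..{:#x} ({} bytes)",
        start,
        end,
        chunk_start,
        chunk_end,
        chunk_end - chunk_start
    );
}
```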

// fn space_mut(&mut self) -> &mut ImmortalSpace<VM> {
// self.inner.as_mut().unwrap()
// }

pub fn prepare(&mut self) {
if let Some(ref mut space) = &mut self.inner {
space.prepare()
self.mark_state.on_global_prepare::<VM>();
for external_pages in self.pr.get_external_pages().iter() {
self.mark_state.on_block_reset::<VM>(
external_pages.start,
external_pages.end - external_pages.start,
);
}
}

pub fn release(&mut self) {
if let Some(ref mut space) = &mut self.inner {
space.release()
}
self.mark_state.on_global_release::<VM>();
}

pub fn trace_object<Q: ObjectQueue>(
&self,
queue: &mut Q,
object: ObjectReference,
) -> ObjectReference {
if let Some(ref space) = &self.inner {
space.trace_object(queue, object)
} else {
panic!("We haven't initialized vm space, but we tried to trace the object {} and thought it was in vm space?", object)
#[cfg(feature = "vo_bit")]
debug_assert!(
crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object),
"{:x}: VO bit not set",
object
);
debug_assert!(self.in_space(object));
if self.mark_state.test_and_mark::<VM>(object) {
queue.enqueue(object);
}
object
}
}
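The `prepare`/`trace_object`/`release` methods above amount to a non-moving, mark-only policy: prepare resets the mark state for every registered region, and tracing test-and-marks an object, enqueuing it only on its first visit. Below is a minimal standalone sketch of that idea, with a `HashSet` standing in for MMTk's side-metadata mark bits.

```rust
use std::collections::HashSet;

// Minimal sketch of a non-moving, mark-only trace, standing in for
// VMSpace::trace_object. A HashSet replaces the side-metadata mark bits.
type ObjRef = usize;

#[derive(Default)]
struct MarkState {
    marked: HashSet<ObjRef>,
}

impl MarkState {
    /// Clear all marks (what `prepare` does for each external region).
    fn on_prepare(&mut self) {
        self.marked.clear();
    }

    /// Returns true only the first time an object is marked.
    fn test_and_mark(&mut self, obj: ObjRef) -> bool {
        self.marked.insert(obj)
    }
}

fn trace_object(state: &mut MarkState, queue: &mut Vec<ObjRef>, obj: ObjRef) -> ObjRef {
    // Objects are never moved, so the reference is returned unchanged.
    if state.test_and_mark(obj) {
        queue.push(obj); // scan this object's children later
    }
    obj
}

fn main() {
    let mut state = MarkState::default();
    let mut queue = Vec::new();
    state.on_prepare();
    trace_object(&mut state, &mut queue, 0x1000);
    trace_object(&mut state, &mut queue, 0x1000); // second visit: not re-enqueued
    assert_eq!(queue, vec![0x1000]);
}
```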