vmspace.rs
use atomic::Ordering;
use crate::policy::sft::SFT;
use crate::policy::space::{CommonSpace, Space};
use crate::util::address::Address;
use crate::util::heap::{MonotonePageResource, PageResource};
use crate::util::{metadata, ObjectReference};
use crate::plan::{ObjectQueue, VectorObjectQueue};
use crate::policy::sft::GCWorkerMutRef;
use crate::vm::{ObjectModel, VMBinding};

/// This type implements the VM space, a space managed by the runtime. The space lets us trace and inspect
/// objects in the same way as other MMTk spaces, rather than treating them as special cases.
// We used ImmortalSpace as the VM space up to commit 43e8a92b507ce9b8f771f31d2dbef7eee93f3cc2, and only
// JikesRVM was using the VM space at that point. Java MMTk does the same thing for JikesRVM (it uses immortal
// space as the boot space), and our ImmortalSpace at 43e8a92b507ce9b8f771f31d2dbef7eee93f3cc2 was implemented
// exactly like the immortal space in JikesRVM's Java MMTk.
// However, we introduced changes after 43e8a92b507ce9b8f771f31d2dbef7eee93f3cc2, and ImmortalSpace started to use MarkState.
// MarkState abstracts how we flip, reset, and check the mark bit, depending on where the mark bit lives
// (on the side or in the header). With that change, our ImmortalSpace is no longer the same as Java MMTk's.
// JikesRVM makes assumptions about the space during boot image generation. For example, if we change the initial
// mark state from 0 to 1, JikesRVM simply hangs in the mutator phase. I suspect that either JikesRVM hard-coded
// some assumptions in its boot image generation, or our port is incomplete and some calls to Java MMTk did not get
// forwarded to Rust MMTk properly. Either way, the new immortal space can no longer be used as the VM space for
// JikesRVM. To temporarily work around the issue, I duplicated ImmortalSpace from commit
// 43e8a92b507ce9b8f771f31d2dbef7eee93f3cc2 into this VMSpace, so the change to ImmortalSpace does not break
// JikesRVM's VM space.
// TODO: We will provide a new implementation of this VMSpace to accommodate JikesRVM and other VMs.
pub struct VMSpace<VM: VMBinding> {
    mark_state: u8,
    common: CommonSpace<VM>,
    pr: MonotonePageResource<VM>,
}
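
/// The mask for the single mark bit used by this space. Only the lowest bit of the
/// local mark-bit metadata is used.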
const GC_MARK_BIT_MASK: u8 = 1;

impl<VM: VMBinding> SFT for VMSpace<VM> {
    fn name(&self) -> &str {
        self.get_name()
    }
    fn is_live(&self, _object: ObjectReference) -> bool {
        true
    }
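    // An object in the VM space is considered reachable in the current GC if its mark bit
    // matches the current mark state.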
    fn is_reachable(&self, object: ObjectReference) -> bool {
        let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::<VM, u8>(
            object,
            None,
            Ordering::SeqCst,
        );
        old_value == self.mark_state
    }
    #[cfg(feature = "object_pinning")]
    fn pin_object(&self, _object: ObjectReference) -> bool {
        false
    }
    #[cfg(feature = "object_pinning")]
    fn unpin_object(&self, _object: ObjectReference) -> bool {
        false
    }
    #[cfg(feature = "object_pinning")]
    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
        true
    }
    fn is_movable(&self) -> bool {
        false
    }
    #[cfg(feature = "sanity")]
    fn is_sane(&self) -> bool {
        true
    }
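    // Initialize the object's mark bit based on the current mark state. Also mark the object as
    // unlogged if the plan needs the log bit, and set the VO bit when that feature is enabled.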
    fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) {
        let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::<VM, u8>(
            object,
            None,
            Ordering::SeqCst,
        );
        let new_value = (old_value & GC_MARK_BIT_MASK) | self.mark_state;
        VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.store_atomic::<VM, u8>(
            object,
            new_value,
            None,
            Ordering::SeqCst,
        );
        if self.common.needs_log_bit {
            VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::<VM>(object, Ordering::SeqCst);
        }
        #[cfg(feature = "vo_bit")]
        crate::util::metadata::vo_bit::set_vo_bit::<VM>(object);
    }
    #[cfg(feature = "is_mmtk_object")]
    fn is_mmtk_object(&self, addr: Address) -> bool {
        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr).is_some()
    }
    fn sft_trace_object(
        &self,
        queue: &mut VectorObjectQueue,
        object: ObjectReference,
        _worker: GCWorkerMutRef,
    ) -> ObjectReference {
        self.trace_object(queue, object)
    }
}

impl<VM: VMBinding> Space<VM> for VMSpace<VM> {
    fn as_space(&self) -> &dyn Space<VM> {
        self
    }
    fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
        self
    }
    fn get_page_resource(&self) -> &dyn PageResource<VM> {
        &self.pr
    }
    fn common(&self) -> &CommonSpace<VM> {
        &self.common
    }
    fn initialize_sft(&self) {
        self.common().initialize_sft(self.as_sft())
    }
    fn release_multiple_pages(&mut self, _start: Address) {
        panic!("VMSpace only releases pages en masse")
    }
}
use crate::scheduler::GCWorker;
use crate::util::copy::CopySemantics;
impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for VMSpace<VM> {
    fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
        _copy: Option<CopySemantics>,
        _worker: &mut GCWorker<VM>,
    ) -> ObjectReference {
        self.trace_object(queue, object)
    }
    fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
        false
    }
}
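
// A rough sketch of how a plan is expected to drive this space over one GC cycle. This is a
// hypothetical call sequence for illustration only, not code taken from any particular plan:
//
//     vm_space.prepare();                        // flip the expected mark state
//     // for each reference reaching into the VM space during the transitive closure:
//     vm_space.trace_object(&mut queue, object); // mark and enqueue if newly marked
//     vm_space.release();                        // nothing to reclaim; the space is immortal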
impl<VM: VMBinding> VMSpace<VM> {
    pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
        let vm_map = args.vm_map;
        let is_discontiguous = args.vmrequest.is_discontiguous();
        let common = CommonSpace::new(args.into_policy_args(
            false,
            true,
            metadata::extract_side_metadata(&[*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC]),
        ));
        VMSpace {
            mark_state: 0,
            pr: if is_discontiguous {
                MonotonePageResource::new_discontiguous(vm_map)
            } else {
                MonotonePageResource::new_contiguous(common.start, common.extent, vm_map)
            },
            common,
        }
    }
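    /// Atomically mark an object against `value` (the current mark state). Returns `false` if the
    /// object's mark bit already equals `value`; otherwise flips the mark bit with a compare-exchange
    /// and returns `true`, meaning this call marked the object.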
    fn test_and_mark(object: ObjectReference, value: u8) -> bool {
        loop {
            let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::<VM, u8>(
                object,
                None,
                Ordering::SeqCst,
            );
            if old_value == value {
                return false;
            }
            if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
                .compare_exchange_metadata::<VM, u8>(
                    object,
                    old_value,
                    old_value ^ GC_MARK_BIT_MASK,
                    None,
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                )
                .is_ok()
            {
                break;
            }
        }
        true
    }
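    /// Flip the expected mark state at the start of a GC, so that every object marked in the
    /// previous cycle appears unmarked for the new cycle.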
    pub fn prepare(&mut self) {
        self.mark_state = GC_MARK_BIT_MASK - self.mark_state;
    }
    pub fn release(&mut self) {}
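    /// Trace an object in the VM space: mark it against the current mark state and enqueue it
    /// for scanning if this is the first time it is reached in this GC. Objects in this space
    /// are never moved, so the original reference is returned unchanged.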
    pub fn trace_object<Q: ObjectQueue>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
    ) -> ObjectReference {
        #[cfg(feature = "vo_bit")]
        debug_assert!(
            crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object),
            "{:x}: VO bit not set",
            object
        );
        if VMSpace::<VM>::test_and_mark(object, self.mark_state) {
            queue.enqueue(object);
        }
        object
    }
}