Skip to content

Commit

Permalink
Add USDT tracepoints for key GC activities
Browse files Browse the repository at this point in the history
Co-authored-by: Claire Huang <claire.x.huang@gmail.com>
  • Loading branch information
caizixian and clairexhuang committed Aug 3, 2023
1 parent 5d22c2b commit c80ea2b
Show file tree
Hide file tree
Showing 13 changed files with 65 additions and 9 deletions.
2 changes: 2 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,8 @@ mmtk-macros = { version = "0.18.0", path = "macros/" }
num_cpus = "1.8"
num-traits = "0.2"
pfm = { version = "0.1.0-beta.3", optional = true }
# Pin to <0.4.0 until we have MSRV >= 1.66, then we can bump to 0.5 (0.4 forces lazy evaluation https://github.com/cuviper/probe-rs/issues/19)
probe = "0.3"
regex = "1.7.0"
spin = "0.9.5"
static_assertions = "1.1.0"
Expand Down
2 changes: 2 additions & 0 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@ extern crate num_cpus;
extern crate downcast_rs;
#[macro_use]
extern crate static_assertions;
#[macro_use]
extern crate probe;

mod mmtk;
pub use mmtk::MMTKBuilder;
Expand Down
1 change: 1 addition & 0 deletions src/memory_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -474,6 +474,7 @@ pub fn initialize_collection<VM: VMBinding>(mmtk: &'static MMTK<VM>, tls: VMThre
);
mmtk.scheduler.spawn_gc_threads(mmtk, tls);
mmtk.plan.base().initialized.store(true, Ordering::SeqCst);
probe!(mmtk, collection_initialized);
}

/// Allow MMTk to trigger garbage collection when heap is full. This should only be used in pair with disable_collection().
Expand Down
4 changes: 3 additions & 1 deletion src/mmtk.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
///! MMTk instance.
//! MMTk instance.
use crate::plan::Plan;
use crate::policy::sft_map::{create_sft_map, SFTMap};
use crate::scheduler::GCWorkScheduler;
Expand Down Expand Up @@ -140,6 +140,7 @@ impl<VM: VMBinding> MMTK<VM> {
}

pub fn harness_begin(&self, tls: VMMutatorThread) {
probe!(mmtk, harness_begin);
self.plan.handle_user_collection_request(tls, true, true);
self.inside_harness.store(true, Ordering::SeqCst);
self.plan.base().stats.start_all();
Expand All @@ -149,6 +150,7 @@ impl<VM: VMBinding> MMTK<VM> {
/// End the benchmark harness window: stop all statistics collection,
/// clear the `inside_harness` flag, and finally fire the `harness_end`
/// USDT tracepoint so external tools can align their measurements with
/// the harness window.
pub fn harness_end(&'static self) {
self.plan.base().stats.stop_all(self);
self.inside_harness.store(false, Ordering::SeqCst);
// Probe fires last so the tracepoint marks the point where all
// harness bookkeeping has completed.
probe!(mmtk, harness_end);
}

pub fn get_plan(&self) -> &dyn Plan<VM = VM> {
Expand Down
3 changes: 2 additions & 1 deletion src/plan/generational/mod.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
//! Generational plans
use enum_map::EnumMap;

///! Generational plans
use crate::plan::barriers::BarrierSelector;
use crate::plan::mutator_context::create_allocator_mapping;
use crate::plan::AllocationSemantics;
Expand Down
12 changes: 10 additions & 2 deletions src/scheduler/controller.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ impl<VM: VMBinding> GCController<VM> {
}

pub fn run(&mut self, tls: VMWorkerThread) {
probe!(mmtk, gccontroller_run);
// Initialize the GC worker for coordinator. We are not using the run() method from
// GCWorker so we manually initialize the worker here.
self.coordinator_worker.tls = tls;
Expand All @@ -51,7 +52,7 @@ impl<VM: VMBinding> GCController<VM> {
self.requester.wait_for_request();
debug!("[STWController: Request recieved.]");

self.do_gc_until_completion();
self.do_gc_until_completion_traced();
debug!("[STWController: Worker threads complete!]");
}
}
Expand All @@ -76,8 +77,15 @@ impl<VM: VMBinding> GCController<VM> {
false
}

/// A wrapper method for [`do_gc_until_completion`](GCController::do_gc_until_completion) to insert USDT tracepoints.
///
/// `gc_start` fires immediately before the collection starts and `gc_end`
/// immediately after it completes, so external tracing tools can measure
/// the duration of each GC.
fn do_gc_until_completion_traced(&mut self) {
probe!(mmtk, gc_start);
self.do_gc_until_completion();
probe!(mmtk, gc_end);
}

/// Coordinate workers to perform GC in response to a GC request.
pub fn do_gc_until_completion(&mut self) {
fn do_gc_until_completion(&mut self) {
let gc_start = std::time::Instant::now();

debug_assert!(
Expand Down
5 changes: 5 additions & 0 deletions src/scheduler/gc_work.rs
Original file line number Diff line number Diff line change
Expand Up @@ -545,6 +545,10 @@ impl<VM: VMBinding> ProcessEdgesBase<VM> {
pub fn pop_nodes(&mut self) -> Vec<ObjectReference> {
self.nodes.take()
}

/// Get the value of the `roots` flag, i.e. whether this packet was
/// created to process root edges (used e.g. by the `process_edges`
/// tracepoint to distinguish root packets).
pub fn is_roots(&self) -> bool {
self.roots
}
}

/// A short-hand for `<E::VM as VMBinding>::VMEdge`.
Expand Down Expand Up @@ -631,6 +635,7 @@ pub trait ProcessEdgesWork:
}

fn process_edges(&mut self) {
probe!(mmtk, process_edges, self.edges.len(), self.is_roots());
for i in 0..self.edges.len() {
self.process_edge(self.edges[i])
}
Expand Down
1 change: 1 addition & 0 deletions src/scheduler/scheduler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -245,6 +245,7 @@ impl<VM: VMBinding> GCWorkScheduler<VM> {
let bucket_opened = bucket.update(self);
buckets_updated = buckets_updated || bucket_opened;
if bucket_opened {
probe!(mmtk, bucket_opened, id);
new_packets = new_packets || !bucket.is_drained();
if new_packets {
// Quit the loop. There are already new packets in the newly opened buckets.
Expand Down
5 changes: 5 additions & 0 deletions src/scheduler/work.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,11 @@ pub trait GCWork<VM: VMBinding>: 'static + Send {
stat.end_of_work(&mut worker_stat);
}
}

/// Get the compile-time static type name for the work packet.
///
/// The default implementation returns [`std::any::type_name`] of `Self`;
/// the worker loop passes this string to the `work` USDT tracepoint so
/// tracing tools can identify which packet type is being executed.
fn get_type_name(&self) -> &'static str {
std::any::type_name::<Self>()
}
}

use super::gc_work::ProcessEdgesWork;
Expand Down
14 changes: 14 additions & 0 deletions src/scheduler/worker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -345,12 +345,26 @@ impl<VM: VMBinding> GCWorker<VM> {
/// Entry of the worker thread. Resolve thread affinity, if it has been specified by the user.
/// Each worker will keep polling and executing work packets in a loop.
pub fn run(&mut self, tls: VMWorkerThread, mmtk: &'static MMTK<VM>) {
probe!(mmtk, gcworker_run);
WORKER_ORDINAL.with(|x| x.store(Some(self.ordinal), Ordering::SeqCst));
self.scheduler.resolve_affinity(self.ordinal);
self.tls = tls;
self.copy = crate::plan::create_gc_worker_context(tls, mmtk);
loop {
// Instead of having work_start and work_end tracepoints, we have
// one tracepoint before polling for more work and one tracepoint
// before executing the work.
// This allows measuring the distribution of both the time needed
// to poll work (between work_poll and work), and the time needed
// to execute work (between work and the next work_poll).
// If we had work_start and work_end instead, we could not measure
// the first poll.
probe!(mmtk, work_poll);
let mut work = self.poll();
// probe! expands to an empty block on unsupported platforms
#[allow(unused_variables)]
let typename = work.get_type_name();
// The &str is passed to the tracepoint as a (pointer, length) pair,
// since USDT probe arguments are plain scalar values.
probe!(mmtk, work, typename.as_ptr(), typename.len());
work.do_work_with_stat(self, mmtk);
}
}
Expand Down
19 changes: 17 additions & 2 deletions src/util/alloc/allocator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -219,7 +219,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
} else {
// If we are not doing precise stress GC, just call the normal alloc_slow_once().
// Normal stress test only checks for stress GC in the slowpath.
self.alloc_slow_once(size, align, offset)
self.alloc_slow_once_traced(size, align, offset)
};

if !is_mutator {
Expand Down Expand Up @@ -318,6 +318,21 @@ pub trait Allocator<VM: VMBinding>: Downcast {
/// * `offset` the required offset in bytes.
fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address;

/// A wrapper method for [`alloc_slow_once`](Allocator::alloc_slow_once) to insert USDT tracepoints.
///
/// Arguments:
/// * `size`: the allocation size in bytes.
/// * `align`: the required alignment in bytes.
/// * `offset`: the required offset in bytes.
fn alloc_slow_once_traced(&mut self, size: usize, align: usize, offset: usize) -> Address {
probe!(mmtk, alloc_slow_once_start);
// probe! expands to an empty block on unsupported platforms, in which
// case the binding would otherwise trigger clippy::let_and_return.
#[allow(clippy::let_and_return)]
let ret = self.alloc_slow_once(size, align, offset);
probe!(mmtk, alloc_slow_once_end);
ret
}

/// Single slowpath allocation attempt for stress test. When the stress factor is set (e.g. to
/// N), we would expect for every N bytes allocated, we will trigger a stress GC. However, for
/// allocators that do thread local allocation, they may allocate from their thread local
Expand Down Expand Up @@ -358,7 +373,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
if self.does_thread_local_allocation() && need_poll {
warn!("{} does not support stress GC (An allocator that does thread local allocation needs to implement allow_slow_once_stress_test()).", std::any::type_name::<Self>());
}
self.alloc_slow_once(size, align, offset)
self.alloc_slow_once_traced(size, align, offset)
}

/// The [`crate::plan::Mutator`] that includes this allocator is going to be destroyed. Some allocators
Expand Down
2 changes: 1 addition & 1 deletion src/util/alloc/mod.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
///! Various allocators implementation.
//! Various allocators implementation.
/// The allocator trait and allocation-related functions.
pub(crate) mod allocator;
Expand Down
4 changes: 2 additions & 2 deletions src/util/options.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ impl PerfEventOptions {
.split(';')
.filter(|e| !e.is_empty())
.map(|e| {
let e: Vec<&str> = e.split(',').into_iter().collect();
let e: Vec<&str> = e.split(',').collect();
if e.len() != 3 {
Err("Please supply (event name, pid, cpu)".into())
} else {
Expand Down Expand Up @@ -397,7 +397,7 @@ impl NurserySize {
/// "<NurseryKind>:<size in bytes>". For example, "Fixed:8192" creates a Fixed nursery of size
/// 8192 bytes.
pub fn parse(s: &str) -> Result<NurserySize, String> {
let ns: Vec<&str> = s.split(':').into_iter().collect();
let ns: Vec<&str> = s.split(':').collect();
let kind = ns[0].parse::<NurseryKind>().map_err(|_| {
String::from("Please specify one of \"Bounded\" or \"Fixed\" nursery type")
})?;
Expand Down

0 comments on commit c80ea2b

Please sign in to comment.