Skip to content

Commit

Permalink
Rollup merge of rust-lang#58309 - wesleywiser:add_more_profiler_event…
Browse files Browse the repository at this point in the history
…s, r=michaelwoerister

Add more profiler events

- Adds Start/Stop events for time spent loading incremental query results from disk.

- Adds Start/Stop events for time spent blocked waiting for queries to complete (when parallel queries are enabled).

r? @michaelwoerister
  • Loading branch information
Centril authored Feb 14, 2019
2 parents c67d474 + e9ebc2e commit f9c9512
Show file tree
Hide file tree
Showing 2 changed files with 72 additions and 20 deletions.
17 changes: 15 additions & 2 deletions src/librustc/ty/query/plumbing.rs
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,15 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
let job = match lock.active.entry((*key).clone()) {
Entry::Occupied(entry) => {
match *entry.get() {
QueryResult::Started(ref job) => job.clone(),
QueryResult::Started(ref job) => {
//For parallel queries, we'll block and wait until the query running
//in another thread has completed. Record how long we wait in the
//self-profiler
#[cfg(parallel_compiler)]
tcx.sess.profiler(|p| p.query_blocked_start(Q::NAME, Q::CATEGORY));

job.clone()
},
QueryResult::Poisoned => FatalError.raise(),
}
}
Expand Down Expand Up @@ -160,7 +168,10 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
// thread
#[cfg(parallel_compiler)]
{
if let Err(cycle) = job.r#await(tcx, span) {
let result = job.r#await(tcx, span);
tcx.sess.profiler(|p| p.query_blocked_end(Q::NAME, Q::CATEGORY));

if let Err(cycle) = result {
return TryGetJob::JobCompleted(Err(cycle));
}
}
Expand Down Expand Up @@ -441,7 +452,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
// First we try to load the result from the on-disk cache
let result = if Q::cache_on_disk(self.global_tcx(), key.clone()) &&
self.sess.opts.debugging_opts.incremental_queries {
self.sess.profiler(|p| p.incremental_load_result_start(Q::NAME));
let result = Q::try_load_from_disk(self.global_tcx(), prev_dep_node_index);
self.sess.profiler(|p| p.incremental_load_result_end(Q::NAME));

// We always expect to find a cached result for things that
// can be forced from DepNode.
Expand Down
75 changes: 57 additions & 18 deletions src/librustc/util/profiling.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,16 +25,28 @@ pub enum ProfilerEvent {
GenericActivityEnd { category: ProfileCategory, time: Instant },
QueryCacheHit { query_name: &'static str, category: ProfileCategory },
QueryCount { query_name: &'static str, category: ProfileCategory, count: usize },
IncrementalLoadResultStart { query_name: &'static str, time: Instant },
IncrementalLoadResultEnd { query_name: &'static str, time: Instant },
QueryBlockedStart { query_name: &'static str, category: ProfileCategory, time: Instant },
QueryBlockedEnd { query_name: &'static str, category: ProfileCategory, time: Instant },
}

impl ProfilerEvent {
    /// Returns `true` for events that open a timed interval (each is paired
    /// with a matching `*End` event), and `false` for interval-closing and
    /// point-in-time events (`QueryCacheHit`, `QueryCount`).
    ///
    /// NOTE(review): the original span contained both the pre-change and
    /// post-change match arms from a diff, leaving duplicate (unreachable)
    /// arms; only the post-change arms are kept here.
    fn is_start_event(&self) -> bool {
        use self::ProfilerEvent::*;

        match self {
            QueryStart { .. } |
            GenericActivityStart { .. } |
            IncrementalLoadResultStart { .. } |
            QueryBlockedStart { .. } => true,

            QueryEnd { .. } |
            GenericActivityEnd { .. } |
            QueryCacheHit { .. } |
            QueryCount { .. } |
            IncrementalLoadResultEnd { .. } |
            QueryBlockedEnd { .. } => false,
        }
    }
}
Expand All @@ -57,12 +69,7 @@ impl CategoryResultData {
}

fn total_time(&self) -> u64 {
let mut total = 0;
for (_, time) in &self.query_times {
total += time;
}

total
self.query_times.iter().map(|(_, time)| time).sum()
}

fn total_cache_data(&self) -> (u64, u64) {
Expand Down Expand Up @@ -121,13 +128,7 @@ impl CalculatedResults {
}

fn total_time(&self) -> u64 {
let mut total = 0;

for (_, data) in &self.categories {
total += data.total_time();
}

total
self.categories.iter().map(|(_, data)| data.total_time()).sum()
}

fn with_options(mut self, opts: &Options) -> CalculatedResults {
Expand Down Expand Up @@ -225,6 +226,40 @@ impl SelfProfiler {
})
}

/// Records that loading of `query_name`'s cached result from the
/// incremental on-disk cache has begun, timestamped at the call site.
#[inline]
pub fn incremental_load_result_start(&mut self, query_name: &'static str) {
    let started_at = Instant::now();
    let event = ProfilerEvent::IncrementalLoadResultStart { query_name, time: started_at };
    self.record(event)
}

/// Records that loading of `query_name`'s cached result from the
/// incremental on-disk cache has finished, timestamped at the call site.
#[inline]
pub fn incremental_load_result_end(&mut self, query_name: &'static str) {
    let finished_at = Instant::now();
    let event = ProfilerEvent::IncrementalLoadResultEnd { query_name, time: finished_at };
    self.record(event)
}

/// Records the moment this thread started blocking on `query_name`
/// (presumably waiting for another thread to finish computing it —
/// see the parallel-query call site).
#[inline]
pub fn query_blocked_start(&mut self, query_name: &'static str, category: ProfileCategory) {
    let blocked_at = Instant::now();
    let event = ProfilerEvent::QueryBlockedStart { query_name, category, time: blocked_at };
    self.record(event)
}

/// Records the moment this thread stopped blocking on `query_name`,
/// closing the interval opened by `query_blocked_start`.
#[inline]
pub fn query_blocked_end(&mut self, query_name: &'static str, category: ProfileCategory) {
    let unblocked_at = Instant::now();
    let event = ProfilerEvent::QueryBlockedEnd { query_name, category, time: unblocked_at };
    self.record(event)
}

#[inline]
fn record(&mut self, event: ProfilerEvent) {
let thread_id = std::thread::current().id();
Expand Down Expand Up @@ -317,6 +352,10 @@ impl SelfProfiler {
result_data.query_cache_stats.entry(query_name).or_insert((0, 0));
*totals += *count as u64;
},
//we don't summarize incremental load result events in the simple output mode
IncrementalLoadResultStart { .. } | IncrementalLoadResultEnd { .. } => { },
//we don't summarize parallel query blocking in the simple output mode
QueryBlockedStart { .. } | QueryBlockedEnd { .. } => { },
}
}

Expand Down Expand Up @@ -361,9 +400,9 @@ impl SelfProfiler {
.unwrap();

let mut categories: Vec<_> = results.categories.iter().collect();
categories.sort_by(|(_, data1), (_, data2)| data2.total_time().cmp(&data1.total_time()));
categories.sort_by_cached_key(|(_, d)| d.total_time());

for (category, data) in categories {
for (category, data) in categories.iter().rev() {
let (category_hits, category_total) = data.total_cache_data();
let category_hit_percent = calculate_percent(category_hits, category_total);

Expand Down

0 comments on commit f9c9512

Please sign in to comment.