diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs
index f63fbd79825db..f1408400dee28 100644
--- a/src/librustc/ty/query/plumbing.rs
+++ b/src/librustc/ty/query/plumbing.rs
@@ -124,7 +124,15 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
         let job = match lock.active.entry((*key).clone()) {
             Entry::Occupied(entry) => {
                 match *entry.get() {
-                    QueryResult::Started(ref job) => job.clone(),
+                    QueryResult::Started(ref job) => {
+                        // For parallel queries, we'll block and wait until the query running
+                        // in another thread has completed. Record how long we wait in the
+                        // self-profiler.
+                        #[cfg(parallel_compiler)]
+                        tcx.sess.profiler(|p| p.query_blocked_start(Q::NAME, Q::CATEGORY));
+
+                        job.clone()
+                    },
                     QueryResult::Poisoned => FatalError.raise(),
                 }
             }
@@ -160,7 +168,10 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
         // thread
         #[cfg(parallel_compiler)]
         {
-            if let Err(cycle) = job.r#await(tcx, span) {
+            let result = job.r#await(tcx, span);
+            tcx.sess.profiler(|p| p.query_blocked_end(Q::NAME, Q::CATEGORY));
+
+            if let Err(cycle) = result {
                 return TryGetJob::JobCompleted(Err(cycle));
             }
         }
@@ -436,7 +447,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
         // First we try to load the result from the on-disk cache
         let result = if Q::cache_on_disk(self.global_tcx(), key.clone()) &&
                         self.sess.opts.debugging_opts.incremental_queries {
+            self.sess.profiler(|p| p.incremental_load_result_start(Q::NAME));
             let result = Q::try_load_from_disk(self.global_tcx(), prev_dep_node_index);
+            self.sess.profiler(|p| p.incremental_load_result_end(Q::NAME));
 
             // We always expect to find a cached result for things that
             // can be forced from DepNode.
diff --git a/src/librustc/util/profiling.rs b/src/librustc/util/profiling.rs
index f8fa01b639500..c90bd12a3100f 100644
--- a/src/librustc/util/profiling.rs
+++ b/src/librustc/util/profiling.rs
@@ -25,6 +25,10 @@ pub enum ProfilerEvent {
     GenericActivityEnd { category: ProfileCategory, time: Instant },
     QueryCacheHit { query_name: &'static str, category: ProfileCategory },
     QueryCount { query_name: &'static str, category: ProfileCategory, count: usize },
+    IncrementalLoadResultStart { query_name: &'static str, time: Instant },
+    IncrementalLoadResultEnd { query_name: &'static str, time: Instant },
+    QueryBlockedStart { query_name: &'static str, category: ProfileCategory, time: Instant },
+    QueryBlockedEnd { query_name: &'static str, category: ProfileCategory, time: Instant },
 }
 
 impl ProfilerEvent {
@@ -32,9 +36,17 @@ impl ProfilerEvent {
         use self::ProfilerEvent::*;
 
         match self {
-            QueryStart { .. } | GenericActivityStart { .. } => true,
-            QueryEnd { .. } | GenericActivityEnd { .. } |
-            QueryCacheHit { .. } | QueryCount { .. } => false,
+            QueryStart { .. } |
+            GenericActivityStart { .. } |
+            IncrementalLoadResultStart { .. } |
+            QueryBlockedStart { .. } => true,
+
+            QueryEnd { .. } |
+            GenericActivityEnd { .. } |
+            QueryCacheHit { .. } |
+            QueryCount { .. } |
+            IncrementalLoadResultEnd { .. } |
+            QueryBlockedEnd { .. } => false,
         }
     }
 }
@@ -57,12 +69,7 @@ impl CategoryResultData {
     }
 
     fn total_time(&self) -> u64 {
-        let mut total = 0;
-        for (_, time) in &self.query_times {
-            total += time;
-        }
-
-        total
+        self.query_times.iter().map(|(_, time)| time).sum()
     }
 
     fn total_cache_data(&self) -> (u64, u64) {
@@ -121,13 +128,7 @@ impl CalculatedResults {
     }
 
     fn total_time(&self) -> u64 {
-        let mut total = 0;
-
-        for (_, data) in &self.categories {
-            total += data.total_time();
-        }
-
-        total
+        self.categories.iter().map(|(_, data)| data.total_time()).sum()
     }
 
     fn with_options(mut self, opts: &Options) -> CalculatedResults {
@@ -225,6 +226,40 @@ impl SelfProfiler {
         })
     }
 
+    #[inline]
+    pub fn incremental_load_result_start(&mut self, query_name: &'static str) {
+        self.record(ProfilerEvent::IncrementalLoadResultStart {
+            query_name,
+            time: Instant::now(),
+        })
+    }
+
+    #[inline]
+    pub fn incremental_load_result_end(&mut self, query_name: &'static str) {
+        self.record(ProfilerEvent::IncrementalLoadResultEnd {
+            query_name,
+            time: Instant::now(),
+        })
+    }
+
+    #[inline]
+    pub fn query_blocked_start(&mut self, query_name: &'static str, category: ProfileCategory) {
+        self.record(ProfilerEvent::QueryBlockedStart {
+            query_name,
+            category,
+            time: Instant::now(),
+        })
+    }
+
+    #[inline]
+    pub fn query_blocked_end(&mut self, query_name: &'static str, category: ProfileCategory) {
+        self.record(ProfilerEvent::QueryBlockedEnd {
+            query_name,
+            category,
+            time: Instant::now(),
+        })
+    }
+
     #[inline]
     fn record(&mut self, event: ProfilerEvent) {
         let thread_id = std::thread::current().id();
@@ -317,6 +352,10 @@ impl SelfProfiler {
                     result_data.query_cache_stats.entry(query_name).or_insert((0, 0));
                 *totals += *count as u64;
             },
+            // We don't summarize incremental load result events in the simple output mode.
+            IncrementalLoadResultStart { .. } | IncrementalLoadResultEnd { .. } => { },
+            // We don't summarize parallel query blocking in the simple output mode.
+            QueryBlockedStart { .. } | QueryBlockedEnd { .. } => { },
         }
     }
 
@@ -361,9 +400,9 @@ impl SelfProfiler {
             .unwrap();
 
         let mut categories: Vec<_> = results.categories.iter().collect();
-        categories.sort_by(|(_, data1), (_, data2)| data2.total_time().cmp(&data1.total_time()));
+        categories.sort_by_cached_key(|(_, d)| d.total_time());
 
-        for (category, data) in categories {
+        for (category, data) in categories.iter().rev() {
             let (category_hits, category_total) = data.total_cache_data();
             let category_hit_percent = calculate_percent(category_hits, category_total);
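The new `IncrementalLoadResultStart`/`End` and `QueryBlockedStart`/`End` events are recorded as raw timestamped pairs and, as the match arms above note, are not summarized in the simple output mode; a consumer of the event stream has to pair each end event with its start to recover a duration. Below is a minimal standalone sketch of that pairing, assuming a simplified `Event` type and a `total_blocked_time` helper; both are illustrative only and are not part of this patch or of rustc's `SelfProfiler` API.

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

// Illustrative stand-in for the two blocking-related ProfilerEvent variants
// added in the patch above; this is NOT rustc's ProfilerEvent type.
enum Event {
    QueryBlockedStart { query_name: &'static str, time: Instant },
    QueryBlockedEnd { query_name: &'static str, time: Instant },
}

// Fold matched start/end pairs into a total blocked duration per query,
// pairing each end event with the most recent unmatched start of the same name.
fn total_blocked_time(events: &[Event]) -> HashMap<&'static str, Duration> {
    let mut open: HashMap<&'static str, Vec<Instant>> = HashMap::new();
    let mut totals: HashMap<&'static str, Duration> = HashMap::new();

    for event in events {
        match *event {
            Event::QueryBlockedStart { query_name, time } => {
                open.entry(query_name).or_default().push(time);
            }
            Event::QueryBlockedEnd { query_name, time } => {
                if let Some(start) = open.get_mut(query_name).and_then(|stack| stack.pop()) {
                    *totals.entry(query_name).or_default() += time.duration_since(start);
                }
            }
        }
    }

    totals
}

fn main() {
    // Fabricated timestamps and query name, purely for demonstration.
    let start = Instant::now();
    let end = start + Duration::from_millis(5);
    let events = vec![
        Event::QueryBlockedStart { query_name: "typeck_tables_of", time: start },
        Event::QueryBlockedEnd { query_name: "typeck_tables_of", time: end },
    ];

    for (query, blocked) in total_blocked_time(&events) {
        println!("{} blocked for {:?}", query, blocked);
    }
}
```

The per-name stack of open start times keeps the pairing LIFO, so the sketch tolerates nested blocked spans for the same query within one event stream.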