
Commit 981135a

Streamline try_start code
This shifts some branches around and avoids interleaving parallel and non-parallel versions of the function too much.
1 parent 1c2c6b6 commit 981135a

4 files changed: +111 −101 lines changed
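To make the plumbing.rs restructuring below easier to follow, here is a small, self-contained sketch of the shape try_start moves to: one match on the shard's active-job entry, with the parallel and non-parallel handling in separate cfg-gated match arms rather than interleaved after the match. The types here are toy stand-ins, not rustc's, and in this standalone file the parallel_compiler cfg is simply never set, so the non-parallel arm is the one that compiles.

use std::collections::hash_map::{Entry, HashMap};

#[allow(dead_code)]
enum JobState {
    Started,
    Poisoned,
}

#[allow(dead_code)]
enum TryGetJob {
    NotYetStarted,
    Cycle,
    JobCompleted,
}

fn try_start(active: &mut HashMap<String, JobState>, key: String) -> TryGetJob {
    match active.entry(key) {
        Entry::Vacant(entry) => {
            // No one is running this query yet: claim it and let the caller execute it.
            entry.insert(JobState::Started);
            TryGetJob::NotYetStarted
        }
        Entry::Occupied(entry) => match entry.get() {
            // Single-threaded build: an already-started entry can only mean a cycle.
            #[cfg(not(parallel_compiler))]
            JobState::Started => TryGetJob::Cycle,
            // Parallel build: the real code waits on the other thread's latch and
            // then returns its cached result; the toy just reports completion.
            #[cfg(parallel_compiler)]
            JobState::Started => TryGetJob::JobCompleted,
            JobState::Poisoned => panic!("query panicked in another job"),
        },
    }
}

fn main() {
    let mut active = HashMap::new();
    assert!(matches!(try_start(&mut active, "layout_of".into()), TryGetJob::NotYetStarted));
    assert!(matches!(try_start(&mut active, "layout_of".into()), TryGetJob::Cycle));
}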


compiler/rustc_query_system/src/query/caches.rs

+2 −2

@@ -14,7 +14,7 @@ pub trait CacheSelector<K, V> {
     type Cache;
 }
 
-pub trait QueryStorage: Default {
+pub trait QueryStorage {
    type Value: Debug;
    type Stored: Clone;
 
@@ -23,7 +23,7 @@ pub trait QueryStorage: Default {
     fn store_nocache(&self, value: Self::Value) -> Self::Stored;
 }
 
-pub trait QueryCache: QueryStorage {
+pub trait QueryCache: QueryStorage + Sized {
     type Key: Hash + Eq + Clone + Debug;
     type Sharded: Default;
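A plausible reason for this pair of trait changes (my inference; the commit message does not spell it out): the new mk_cycle helper in plumbing.rs below takes the cache as &dyn QueryStorage<...>, and a trait with Default as a supertrait cannot be used as a trait object, because Default itself requires Sized. So Default comes off QueryStorage to keep it object-safe, QueryCache picks up an explicit Sized bound, and the one impl that still needs Default (the Default impl for QueryCacheStore) now asks for QueryCache + Default. A minimal standalone illustration of the object-safety rule, with toy trait names:

use std::fmt::Debug;

// Without a `Default` supertrait this trait stays object-safe, so it can be
// used behind `&dyn`. Adding `: Default` would forbid that, because
// `Default: Sized`.
trait Storage {
    type Value: Debug;
    fn store(&self, value: Self::Value);
}

// The `Sized`/`Default` requirements live on the narrower trait instead.
#[allow(dead_code)]
trait Cache: Storage + Sized + Default {}

#[derive(Default)]
struct PrintStorage;

impl Storage for PrintStorage {
    type Value = u32;
    fn store(&self, value: u32) {
        println!("storing {value:?}");
    }
}

// Dynamic dispatch over the object-safe trait, the way mk_cycle takes its cache.
fn store_dyn(storage: &dyn Storage<Value = u32>, value: u32) {
    storage.store(value);
}

fn main() {
    store_dyn(&PrintStorage, 7);
}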

compiler/rustc_query_system/src/query/config.rs

−4

@@ -52,10 +52,6 @@ impl<CTX: QueryContext, K, V> QueryVtable<CTX, K, V> {
         (self.hash_result)(hcx, value)
     }
 
-    pub(crate) fn handle_cycle_error(&self, tcx: CTX, diag: DiagnosticBuilder<'_>) -> V {
-        (self.handle_cycle_error)(tcx, diag)
-    }
-
     pub(crate) fn cache_on_disk(&self, tcx: CTX, key: &K, value: Option<&V>) -> bool {
         (self.cache_on_disk)(tcx, key, value)
     }
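With this convenience wrapper gone, the callers in plumbing.rs below reach for the handle_cycle_error function-pointer field directly, either passing it along (query.handle_cycle_error) or calling it as (query.handle_cycle_error)(tcx, cycle). The parentheses matter in Rust: they tell the compiler to call the value stored in the field rather than look up a method of that name. A tiny standalone illustration with toy types:

struct QueryVtable {
    // A plain function pointer stored as a field, like the real vtable's
    // handle_cycle_error (toy signature, not rustc's).
    handle_cycle_error: fn(&str) -> String,
}

fn report(msg: &str) -> String {
    format!("cycle error: {msg}")
}

fn main() {
    let vtable = QueryVtable { handle_cycle_error: report };

    // Parentheses around the field access mean "call the stored function",
    // not "call a method named handle_cycle_error".
    let rendered = (vtable.handle_cycle_error)("while computing layout");
    println!("{rendered}");
}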

compiler/rustc_query_system/src/query/job.rs

+3 −18

@@ -9,7 +9,6 @@ use rustc_span::Span;
 
 use std::convert::TryFrom;
 use std::hash::Hash;
-use std::marker::PhantomData;
 use std::num::NonZeroU32;
 
 #[cfg(parallel_compiler)]
@@ -100,8 +99,6 @@ pub struct QueryJob<D> {
     /// The latch that is used to wait on this job.
     #[cfg(parallel_compiler)]
     latch: Option<QueryLatch<D>>,
-
-    dummy: PhantomData<QueryLatch<D>>,
 }
 
 impl<D> QueryJob<D>
@@ -116,23 +113,17 @@ where
             parent,
             #[cfg(parallel_compiler)]
             latch: None,
-            dummy: PhantomData,
         }
     }
 
     #[cfg(parallel_compiler)]
-    pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D> {
+    pub(super) fn latch(&mut self) -> QueryLatch<D> {
         if self.latch.is_none() {
             self.latch = Some(QueryLatch::new());
         }
         self.latch.as_ref().unwrap().clone()
     }
 
-    #[cfg(not(parallel_compiler))]
-    pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D> {
-        QueryLatch { id }
-    }
-
     /// Signals to waiters that the query is complete.
     ///
     /// This does nothing for single threaded rustc,
@@ -148,13 +139,7 @@ where
 }
 
 #[cfg(not(parallel_compiler))]
-#[derive(Clone)]
-pub(super) struct QueryLatch<D> {
-    id: QueryJobId<D>,
-}
-
-#[cfg(not(parallel_compiler))]
-impl<D> QueryLatch<D>
+impl<D> QueryJobId<D>
 where
     D: Copy + Clone + Eq + Hash,
 {
@@ -172,7 +157,7 @@ where
             let info = query_map.get(&job).unwrap();
             cycle.push(info.info.clone());
 
-            if job == self.id {
+            if job == *self {
                 cycle.reverse();
 
                 // This is the end of the cycle
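In the non-parallel build the old QueryLatch was nothing but a wrapper holding a QueryJobId, so the cycle search now lives directly on QueryJobId<D> and self.id becomes *self. The search itself is unchanged: walk parent links through the active-query map, collecting frames until we arrive back at the job we are blocked on. A runnable toy of that walk, with simplified stand-in types (not the rustc ones):

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct JobId(u32);

struct JobInfo {
    query: &'static str,
    parent: Option<JobId>,
}

impl JobId {
    // Starting from the currently running job, follow parent links and collect
    // query names until we reach the job id we are blocked on; that closes the cycle.
    fn find_cycle_in_stack(
        self,
        jobs: &HashMap<JobId, JobInfo>,
        current: Option<JobId>,
    ) -> Vec<&'static str> {
        let mut cycle = Vec::new();
        let mut job = current;
        while let Some(id) = job {
            let info = &jobs[&id];
            cycle.push(info.query);
            if id == self {
                cycle.reverse();
                return cycle;
            }
            job = info.parent;
        }
        panic!("no cycle found in the active-job stack");
    }
}

fn main() {
    let mut jobs = HashMap::new();
    jobs.insert(JobId(1), JobInfo { query: "type_of", parent: None });
    jobs.insert(JobId(2), JobInfo { query: "layout_of", parent: Some(JobId(1)) });

    // Blocked on job 1 while job 2 is the current query: report the cycle.
    let cycle = JobId(1).find_cycle_in_stack(&jobs, Some(JobId(2)));
    assert_eq!(cycle, vec!["type_of", "layout_of"]);
}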

compiler/rustc_query_system/src/query/plumbing.rs

+106 −77

@@ -11,13 +11,13 @@ use crate::query::job::{
 };
 use crate::query::{QueryContext, QueryMap, QueryStackFrame};
 
-#[cfg(not(parallel_compiler))]
-use rustc_data_structures::cold_path;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::{FxHashMap, FxHasher};
 use rustc_data_structures::sharded::{get_shard_index_by_hash, Sharded};
 use rustc_data_structures::sync::{Lock, LockGuard};
 use rustc_data_structures::thin_vec::ThinVec;
+#[cfg(not(parallel_compiler))]
+use rustc_errors::DiagnosticBuilder;
 use rustc_errors::{Diagnostic, FatalError};
 use rustc_span::Span;
 use std::collections::hash_map::Entry;
@@ -36,7 +36,7 @@ pub struct QueryCacheStore<C: QueryCache> {
     pub cache_hits: AtomicUsize,
 }
 
-impl<C: QueryCache> Default for QueryCacheStore<C> {
+impl<C: QueryCache + Default> Default for QueryCacheStore<C> {
     fn default() -> Self {
         Self {
             cache: C::default(),
@@ -161,6 +161,31 @@
     id: QueryJobId<D>,
 }
 
+#[cold]
+#[inline(never)]
+#[cfg(not(parallel_compiler))]
+fn mk_cycle<CTX, V, R>(
+    tcx: CTX,
+    root: QueryJobId<CTX::DepKind>,
+    span: Span,
+    handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V,
+    cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
+) -> R
+where
+    CTX: QueryContext,
+    V: std::fmt::Debug,
+    R: Clone,
+{
+    let error: CycleError = root.find_cycle_in_stack(
+        tcx.try_collect_active_jobs().unwrap(),
+        &tcx.current_query_job(),
+        span,
+    );
+    let error = report_cycle(tcx.dep_context().sess(), error);
+    let value = handle_cycle_error(tcx, error);
+    cache.store_nocache(value)
+}
+
 impl<'tcx, D, C> JobOwner<'tcx, D, C>
 where
     D: Copy + Clone + Eq + Hash,
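mk_cycle takes over from the cold_path(|| ...) closure that the old code pulled in from rustc_data_structures: marking the function itself #[cold] and #[inline(never)] tells the compiler the cycle-error path is not expected to run, so it stays out of the hot body of try_start. A minimal standalone sketch of that pattern (toy error handling, not the rustc diagnostics):

// Slow path: only runs when a cycle is actually detected. The attributes keep
// it un-inlined and hint to the optimizer that calls to it are unlikely.
#[cold]
#[inline(never)]
fn report_cycle_error(query: &str) -> String {
    format!("cycle detected while computing `{query}`")
}

fn try_get(cached: Option<u32>, query: &str) -> Result<u32, String> {
    match cached {
        // Hot path: a plain cache hit.
        Some(value) => Ok(value),
        None => Err(report_cycle_error(query)),
    }
}

fn main() {
    assert_eq!(try_get(Some(3), "layout_of"), Ok(3));
    assert!(try_get(None, "layout_of").is_err());
}
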
@@ -180,7 +205,7 @@ where
         state: &'b QueryState<CTX::DepKind, C::Key>,
         cache: &'b QueryCacheStore<C>,
         span: Span,
-        key: &C::Key,
+        key: C::Key,
         lookup: QueryLookup,
         query: &QueryVtable<CTX, C::Key, C::Value>,
     ) -> TryGetJob<'b, CTX::DepKind, C>
@@ -191,94 +216,86 @@
         let mut state_lock = state.shards.get_shard_by_index(shard).lock();
         let lock = &mut *state_lock;
 
-        let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
-            Entry::Occupied(mut entry) => {
-                match entry.get_mut() {
-                    QueryResult::Started(job) => {
-                        // For parallel queries, we'll block and wait until the query running
-                        // in another thread has completed. Record how long we wait in the
-                        // self-profiler.
-                        let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
-                            Some(tcx.dep_context().profiler().query_blocked())
-                        } else {
-                            None
-                        };
-
-                        // Create the id of the job we're waiting for
-                        let id = QueryJobId::new(job.id, shard, query.dep_kind);
-
-                        (job.latch(id), _query_blocked_prof_timer)
-                    }
-                    QueryResult::Poisoned => FatalError.raise(),
-                }
-            }
+        match lock.active.entry(key) {
             Entry::Vacant(entry) => {
-                // No job entry for this query. Return a new one to be started later.
-
                 // Generate an id unique within this shard.
                 let id = lock.jobs.checked_add(1).unwrap();
                 lock.jobs = id;
                 let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
 
-                let global_id = QueryJobId::new(id, shard, query.dep_kind);
-
                 let job = tcx.current_query_job();
                 let job = QueryJob::new(id, span, job);
 
+                let key = entry.key().clone();
                 entry.insert(QueryResult::Started(job));
 
-                let owner = JobOwner { state, cache, id: global_id, key: (*key).clone() };
+                let global_id = QueryJobId::new(id, shard, query.dep_kind);
+                let owner = JobOwner { state, cache, id: global_id, key };
                 return TryGetJob::NotYetStarted(owner);
             }
-        };
-        mem::drop(state_lock);
-
-        // If we are single-threaded we know that we have cycle error,
-        // so we just return the error.
-        #[cfg(not(parallel_compiler))]
-        return TryGetJob::Cycle(cold_path(|| {
-            let error: CycleError = latch.find_cycle_in_stack(
-                tcx.try_collect_active_jobs().unwrap(),
-                &tcx.current_query_job(),
-                span,
-            );
-            let error = report_cycle(tcx.dep_context().sess(), error);
-            let value = query.handle_cycle_error(tcx, error);
-            cache.cache.store_nocache(value)
-        }));
-
-        // With parallel queries we might just have to wait on some other
-        // thread.
-        #[cfg(parallel_compiler)]
-        {
-            let result = latch.wait_on(tcx.current_query_job(), span);
-
-            if let Err(cycle) = result {
-                let cycle = report_cycle(tcx.dep_context().sess(), cycle);
-                let value = query.handle_cycle_error(tcx, cycle);
-                let value = cache.cache.store_nocache(value);
-                return TryGetJob::Cycle(value);
-            }
+            Entry::Occupied(mut entry) => {
+                match entry.get_mut() {
+                    #[cfg(not(parallel_compiler))]
+                    QueryResult::Started(job) => {
+                        let id = QueryJobId::new(job.id, shard, query.dep_kind);
 
-            let cached = cache
-                .cache
-                .lookup(cache, &key, |value, index| {
-                    if unlikely!(tcx.dep_context().profiler().enabled()) {
-                        tcx.dep_context().profiler().query_cache_hit(index.into());
+                        drop(state_lock);
+
+                        // If we are single-threaded we know that we have cycle error,
+                        // so we just return the error.
+                        return TryGetJob::Cycle(mk_cycle(
+                            tcx,
+                            id,
+                            span,
+                            query.handle_cycle_error,
+                            &cache.cache,
+                        ));
                     }
-                    #[cfg(debug_assertions)]
-                    {
-                        cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+                    #[cfg(parallel_compiler)]
+                    QueryResult::Started(job) => {
+                        // For parallel queries, we'll block and wait until the query running
+                        // in another thread has completed. Record how long we wait in the
+                        // self-profiler.
+                        let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();
+
+                        // Get the latch out
+                        let latch = job.latch();
+                        let key = entry.key().clone();
+
+                        drop(state_lock);
+
+                        // With parallel queries we might just have to wait on some other
+                        // thread.
+                        let result = latch.wait_on(tcx.current_query_job(), span);
+
+                        if let Err(cycle) = result {
+                            let cycle = report_cycle(tcx.dep_context().sess(), cycle);
+                            let value = (query.handle_cycle_error)(tcx, cycle);
+                            let value = cache.cache.store_nocache(value);
+                            return TryGetJob::Cycle(value);
+                        }
+
+                        let cached = cache
+                            .cache
+                            .lookup(cache, &key, |value, index| {
+                                if unlikely!(tcx.dep_context().profiler().enabled()) {
+                                    tcx.dep_context().profiler().query_cache_hit(index.into());
+                                }
+                                #[cfg(debug_assertions)]
+                                {
+                                    cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+                                }
+                                (value.clone(), index)
+                            })
+                            .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
+
+                        query_blocked_prof_timer.finish_with_query_invocation_id(cached.1.into());
+
+                        return TryGetJob::JobCompleted(cached);
                     }
-                    (value.clone(), index)
-                })
-                .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
-
-            if let Some(prof_timer) = _query_blocked_prof_timer.take() {
-                prof_timer.finish_with_query_invocation_id(cached.1.into());
+                    QueryResult::Poisoned => FatalError.raise(),
+                }
             }
-
-            return TryGetJob::JobCompleted(cached);
         }
     }

@@ -421,7 +438,13 @@
     CTX: QueryContext,
 {
     let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
-        tcx, state, cache, span, &key, lookup, query,
+        tcx,
+        state,
+        cache,
+        span,
+        key.clone(),
+        lookup,
+        query,
     ) {
         TryGetJob::NotYetStarted(job) => job,
         TryGetJob::Cycle(result) => return result,
@@ -744,7 +767,13 @@ fn force_query_impl<CTX, C>(
     };
 
     let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
-        tcx, state, cache, span, &key, lookup, query,
+        tcx,
+        state,
+        cache,
+        span,
+        key.clone(),
+        lookup,
+        query,
     ) {
         TryGetJob::NotYetStarted(job) => job,
         TryGetJob::Cycle(_) => return,
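The only change at these two call sites is that try_start now takes key: C::Key by value instead of &C::Key, so callers pass key.clone(). Inside try_start the owned key is moved straight into lock.active.entry(key), and when an owned copy is still needed it is taken back out with entry.key().clone(). A small standalone illustration of that Entry API pattern (toy map and values):

use std::collections::hash_map::{Entry, HashMap};

fn main() {
    let mut active: HashMap<String, u32> = HashMap::new();

    // Passing the key by value moves it into the entry; the lookup itself
    // needs no clone.
    match active.entry("typeck".to_string()) {
        Entry::Vacant(entry) => {
            // The entry still owns the key, so an owned copy can be taken
            // back out just before it is moved into the map.
            let key_copy = entry.key().clone();
            entry.insert(1);
            println!("started job for {key_copy}");
        }
        Entry::Occupied(entry) => {
            println!("{} is already running", entry.key());
        }
    }
}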
