Commit 777bb2f

Auto merge of rust-lang#84806 - Mark-Simulacrum:try-start-entry, r=cjgillot
Streamline try_start code

This shifts some branches around and avoids interleaving parallel and non-parallel versions of the function too much.
2 parents 676ee14 + 981135a commit 777bb2f
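
The restructuring leans on `#[cfg(...)]` being allowed on individual match arms: the single-threaded and parallel handling of an already-started job can then sit side by side in one `match` on the map entry, rather than in two `#[cfg]`-gated blocks after it. A minimal sketch of that shape, with made-up types standing in for the real query machinery (the `parallel_compiler` cfg here mirrors rustc's build flag):

    use std::collections::hash_map::Entry;
    use std::collections::HashMap;

    enum JobState {
        Started,
        Poisoned,
    }

    fn try_start(active: &mut HashMap<u32, JobState>, key: u32) {
        match active.entry(key) {
            Entry::Vacant(entry) => {
                // Nobody is computing this query yet: claim it.
                entry.insert(JobState::Started);
            }
            Entry::Occupied(entry) => match entry.get() {
                #[cfg(not(parallel_compiler))]
                JobState::Started => {
                    // Single-threaded: a started job on our own stack means a cycle.
                    println!("cycle detected for key {key}");
                }
                #[cfg(parallel_compiler)]
                JobState::Started => {
                    // Parallel: another thread owns the job; we would block on its latch.
                    println!("waiting on another thread for key {key}");
                }
                JobState::Poisoned => panic!("query job poisoned"),
            },
        }
    }

    fn main() {
        let mut active = HashMap::new();
        try_start(&mut active, 1); // first call claims the job
        try_start(&mut active, 1); // second call hits the Occupied arm
    }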

4 files changed: +111 -101 lines changed

compiler/rustc_query_system/src/query/caches.rs

+2 -2

@@ -14,7 +14,7 @@ pub trait CacheSelector<K, V> {
     type Cache;
 }
 
-pub trait QueryStorage: Default {
+pub trait QueryStorage {
     type Value: Debug;
     type Stored: Clone;
 
@@ -23,7 +23,7 @@ pub trait QueryStorage: Default {
     fn store_nocache(&self, value: Self::Value) -> Self::Stored;
 }
 
-pub trait QueryCache: QueryStorage {
+pub trait QueryCache: QueryStorage + Sized {
     type Key: Hash + Eq + Clone + Debug;
     type Sharded: Default;
 
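A plausible reading of this change, given that the new mk_cycle in plumbing.rs takes `&dyn QueryStorage<...>`: `Default` carries an implicit `Self: Sized` requirement, so a trait that lists it as a supertrait cannot be used as a trait object. Dropping it leaves `QueryStorage` object-safe, the `Default` requirement moves to the impl in plumbing.rs, and `QueryCache` adds `Sized` back explicitly. A small, self-contained illustration with simplified types:

    use std::fmt::Debug;

    pub trait QueryStorage {
        type Value: Debug;
        type Stored: Clone;
        fn store_nocache(&self, value: Self::Value) -> Self::Stored;
    }

    struct SimpleStorage;

    impl QueryStorage for SimpleStorage {
        type Value = u32;
        type Stored = u32;
        fn store_nocache(&self, value: u32) -> u32 {
            value
        }
    }

    // This trait object is only allowed because `QueryStorage` no longer has a
    // `Default` (and therefore `Sized`) supertrait.
    fn store_erased(cache: &dyn QueryStorage<Value = u32, Stored = u32>, v: u32) -> u32 {
        cache.store_nocache(v)
    }

    fn main() {
        assert_eq!(store_erased(&SimpleStorage, 7), 7);
    }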

compiler/rustc_query_system/src/query/config.rs

-4

@@ -52,10 +52,6 @@ impl<CTX: QueryContext, K, V> QueryVtable<CTX, K, V> {
         (self.hash_result)(hcx, value)
     }
 
-    pub(crate) fn handle_cycle_error(&self, tcx: CTX, diag: DiagnosticBuilder<'_>) -> V {
-        (self.handle_cycle_error)(tcx, diag)
-    }
-
     pub(crate) fn cache_on_disk(&self, tcx: CTX, key: &K, value: Option<&V>) -> bool {
         (self.cache_on_disk)(tcx, key, value)
     }
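
With the wrapper method gone, `handle_cycle_error` is used as a plain function-pointer field: call sites in plumbing.rs either invoke it directly, which needs parentheses around the field access, or pass the pointer itself to `mk_cycle`. A tiny standalone example of both forms:

    struct Vtable {
        double: fn(u32) -> u32,
    }

    fn double(x: u32) -> u32 {
        x * 2
    }

    fn apply(f: fn(u32) -> u32, x: u32) -> u32 {
        // Receiving the bare `fn` pointer, as `mk_cycle` does with `handle_cycle_error`.
        f(x)
    }

    fn main() {
        let vtable = Vtable { double };
        // Without the parentheses, `vtable.double(3)` would look for a method
        // named `double` on `Vtable`.
        assert_eq!((vtable.double)(3), 6);
        assert_eq!(apply(vtable.double, 5), 10);
    }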

compiler/rustc_query_system/src/query/job.rs

+3 -18

@@ -9,7 +9,6 @@ use rustc_span::Span;
 
 use std::convert::TryFrom;
 use std::hash::Hash;
-use std::marker::PhantomData;
 use std::num::NonZeroU32;
 
 #[cfg(parallel_compiler)]
@@ -100,8 +99,6 @@ pub struct QueryJob<D> {
     /// The latch that is used to wait on this job.
     #[cfg(parallel_compiler)]
     latch: Option<QueryLatch<D>>,
-
-    dummy: PhantomData<QueryLatch<D>>,
 }
 
 impl<D> QueryJob<D>
@@ -116,23 +113,17 @@ where
             parent,
             #[cfg(parallel_compiler)]
             latch: None,
-            dummy: PhantomData,
         }
     }
 
     #[cfg(parallel_compiler)]
-    pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D> {
+    pub(super) fn latch(&mut self) -> QueryLatch<D> {
         if self.latch.is_none() {
             self.latch = Some(QueryLatch::new());
         }
         self.latch.as_ref().unwrap().clone()
     }
 
-    #[cfg(not(parallel_compiler))]
-    pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D> {
-        QueryLatch { id }
-    }
-
     /// Signals to waiters that the query is complete.
     ///
     /// This does nothing for single threaded rustc,
@@ -148,13 +139,7 @@ where
 }
 
 #[cfg(not(parallel_compiler))]
-#[derive(Clone)]
-pub(super) struct QueryLatch<D> {
-    id: QueryJobId<D>,
-}
-
-#[cfg(not(parallel_compiler))]
-impl<D> QueryLatch<D>
+impl<D> QueryJobId<D>
 where
     D: Copy + Clone + Eq + Hash,
 {
@@ -172,7 +157,7 @@ where
             let info = query_map.get(&job).unwrap();
             cycle.push(info.info.clone());
 
-            if job == self.id {
+            if job == *self {
                 cycle.reverse();
 
                 // This is the end of the cycle
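
The non-parallel `QueryLatch` was only a wrapper around a `QueryJobId`, so its cycle-walking method can live on the id type itself, which is what the new `if job == *self` comparison reflects. A simplified sketch of that shape, with made-up names rather than the rustc ones:

    use std::collections::HashMap;

    #[derive(Copy, Clone, PartialEq, Eq, Hash)]
    struct JobId(u32);

    struct JobInfo {
        parent: Option<JobId>,
    }

    impl JobId {
        // Walk parent links from `start` and report whether we reach `self`,
        // mirroring the `if job == *self` check in `find_cycle_in_stack`.
        fn is_ancestor_of(&self, jobs: &HashMap<JobId, JobInfo>, start: JobId) -> bool {
            let mut current = Some(start);
            while let Some(job) = current {
                if job == *self {
                    return true;
                }
                current = jobs[&job].parent;
            }
            false
        }
    }

    fn main() {
        let mut jobs = HashMap::new();
        jobs.insert(JobId(1), JobInfo { parent: None });
        jobs.insert(JobId(2), JobInfo { parent: Some(JobId(1)) });
        assert!(JobId(1).is_ancestor_of(&jobs, JobId(2)));
        assert!(!JobId(2).is_ancestor_of(&jobs, JobId(1)));
    }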

compiler/rustc_query_system/src/query/plumbing.rs

+106 -77

@@ -11,13 +11,13 @@ use crate::query::job::{
 };
 use crate::query::{QueryContext, QueryMap, QueryStackFrame};
 
-#[cfg(not(parallel_compiler))]
-use rustc_data_structures::cold_path;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::{FxHashMap, FxHasher};
 use rustc_data_structures::sharded::{get_shard_index_by_hash, Sharded};
 use rustc_data_structures::sync::{Lock, LockGuard};
 use rustc_data_structures::thin_vec::ThinVec;
+#[cfg(not(parallel_compiler))]
+use rustc_errors::DiagnosticBuilder;
 use rustc_errors::{Diagnostic, FatalError};
 use rustc_span::Span;
 use std::collections::hash_map::Entry;
@@ -36,7 +36,7 @@ pub struct QueryCacheStore<C: QueryCache> {
     pub cache_hits: AtomicUsize,
 }
 
-impl<C: QueryCache> Default for QueryCacheStore<C> {
+impl<C: QueryCache + Default> Default for QueryCacheStore<C> {
     fn default() -> Self {
         Self {
             cache: C::default(),
@@ -158,6 +158,31 @@ where
     id: QueryJobId<D>,
 }
 
+#[cold]
+#[inline(never)]
+#[cfg(not(parallel_compiler))]
+fn mk_cycle<CTX, V, R>(
+    tcx: CTX,
+    root: QueryJobId<CTX::DepKind>,
+    span: Span,
+    handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V,
+    cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
+) -> R
+where
+    CTX: QueryContext,
+    V: std::fmt::Debug,
+    R: Clone,
+{
+    let error: CycleError = root.find_cycle_in_stack(
+        tcx.try_collect_active_jobs().unwrap(),
+        &tcx.current_query_job(),
+        span,
+    );
+    let error = report_cycle(tcx.dep_context().sess(), error);
+    let value = handle_cycle_error(tcx, error);
+    cache.store_nocache(value)
+}
+
 impl<'tcx, D, C> JobOwner<'tcx, D, C>
 where
     D: Copy + Clone + Eq + Hash,
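
The cycle path is now outlined into a `#[cold]`, `#[inline(never)]` function rather than the old `cold_path(|| ...)` closure, which keeps the hot `try_start` body small while still marking the branch as unlikely for the optimizer. The same pattern in miniature, on an unrelated toy function:

    #[cold]
    #[inline(never)]
    fn report_overflow(len: usize, cap: usize) -> ! {
        panic!("length {len} exceeds capacity {cap}");
    }

    fn push_checked(buf: &mut Vec<u8>, cap: usize, byte: u8) {
        if buf.len() >= cap {
            // Rare branch: only a call here; the cold body lives out of line.
            report_overflow(buf.len(), cap);
        }
        buf.push(byte);
    }

    fn main() {
        let mut buf = Vec::new();
        push_checked(&mut buf, 4, b'a');
        assert_eq!(buf, vec![b'a']);
    }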
@@ -177,7 +202,7 @@ where
         state: &'b QueryState<CTX::DepKind, C::Key>,
         cache: &'b QueryCacheStore<C>,
         span: Span,
-        key: &C::Key,
+        key: C::Key,
         lookup: QueryLookup,
         query: &QueryVtable<CTX, C::Key, C::Value>,
     ) -> TryGetJob<'b, CTX::DepKind, C>
@@ -188,94 +213,86 @@ where
         let mut state_lock = state.shards.get_shard_by_index(shard).lock();
         let lock = &mut *state_lock;
 
-        let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
-            Entry::Occupied(mut entry) => {
-                match entry.get_mut() {
-                    QueryResult::Started(job) => {
-                        // For parallel queries, we'll block and wait until the query running
-                        // in another thread has completed. Record how long we wait in the
-                        // self-profiler.
-                        let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
-                            Some(tcx.dep_context().profiler().query_blocked())
-                        } else {
-                            None
-                        };
-
-                        // Create the id of the job we're waiting for
-                        let id = QueryJobId::new(job.id, shard, query.dep_kind);
-
-                        (job.latch(id), _query_blocked_prof_timer)
-                    }
-                    QueryResult::Poisoned => FatalError.raise(),
-                }
-            }
+        match lock.active.entry(key) {
             Entry::Vacant(entry) => {
-                // No job entry for this query. Return a new one to be started later.
-
                 // Generate an id unique within this shard.
                 let id = lock.jobs.checked_add(1).unwrap();
                 lock.jobs = id;
                 let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
 
-                let global_id = QueryJobId::new(id, shard, query.dep_kind);
-
                 let job = tcx.current_query_job();
                 let job = QueryJob::new(id, span, job);
 
+                let key = entry.key().clone();
                 entry.insert(QueryResult::Started(job));
 
-                let owner = JobOwner { state, cache, id: global_id, key: (*key).clone() };
+                let global_id = QueryJobId::new(id, shard, query.dep_kind);
+                let owner = JobOwner { state, cache, id: global_id, key };
                 return TryGetJob::NotYetStarted(owner);
             }
-        };
-        mem::drop(state_lock);
-
-        // If we are single-threaded we know that we have cycle error,
-        // so we just return the error.
-        #[cfg(not(parallel_compiler))]
-        return TryGetJob::Cycle(cold_path(|| {
-            let error: CycleError = latch.find_cycle_in_stack(
-                tcx.try_collect_active_jobs().unwrap(),
-                &tcx.current_query_job(),
-                span,
-            );
-            let error = report_cycle(tcx.dep_context().sess(), error);
-            let value = query.handle_cycle_error(tcx, error);
-            cache.cache.store_nocache(value)
-        }));
-
-        // With parallel queries we might just have to wait on some other
-        // thread.
-        #[cfg(parallel_compiler)]
-        {
-            let result = latch.wait_on(tcx.current_query_job(), span);
-
-            if let Err(cycle) = result {
-                let cycle = report_cycle(tcx.dep_context().sess(), cycle);
-                let value = query.handle_cycle_error(tcx, cycle);
-                let value = cache.cache.store_nocache(value);
-                return TryGetJob::Cycle(value);
-            }
+            Entry::Occupied(mut entry) => {
+                match entry.get_mut() {
+                    #[cfg(not(parallel_compiler))]
+                    QueryResult::Started(job) => {
+                        let id = QueryJobId::new(job.id, shard, query.dep_kind);
 
-            let cached = cache
-                .cache
-                .lookup(cache, &key, |value, index| {
-                    if unlikely!(tcx.dep_context().profiler().enabled()) {
-                        tcx.dep_context().profiler().query_cache_hit(index.into());
+                        drop(state_lock);
+
+                        // If we are single-threaded we know that we have cycle error,
+                        // so we just return the error.
+                        return TryGetJob::Cycle(mk_cycle(
+                            tcx,
+                            id,
+                            span,
+                            query.handle_cycle_error,
+                            &cache.cache,
+                        ));
                     }
-                    #[cfg(debug_assertions)]
-                    {
-                        cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+                    #[cfg(parallel_compiler)]
+                    QueryResult::Started(job) => {
+                        // For parallel queries, we'll block and wait until the query running
+                        // in another thread has completed. Record how long we wait in the
+                        // self-profiler.
+                        let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();
+
+                        // Get the latch out
+                        let latch = job.latch();
+                        let key = entry.key().clone();
+
+                        drop(state_lock);
+
+                        // With parallel queries we might just have to wait on some other
+                        // thread.
+                        let result = latch.wait_on(tcx.current_query_job(), span);
+
+                        if let Err(cycle) = result {
+                            let cycle = report_cycle(tcx.dep_context().sess(), cycle);
+                            let value = (query.handle_cycle_error)(tcx, cycle);
+                            let value = cache.cache.store_nocache(value);
+                            return TryGetJob::Cycle(value);
+                        }
+
+                        let cached = cache
+                            .cache
+                            .lookup(cache, &key, |value, index| {
+                                if unlikely!(tcx.dep_context().profiler().enabled()) {
+                                    tcx.dep_context().profiler().query_cache_hit(index.into());
+                                }
+                                #[cfg(debug_assertions)]
+                                {
+                                    cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+                                }
+                                (value.clone(), index)
+                            })
+                            .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
+
+                        query_blocked_prof_timer.finish_with_query_invocation_id(cached.1.into());
+
+                        return TryGetJob::JobCompleted(cached);
                     }
-                    (value.clone(), index)
-                })
-                .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
-
-            if let Some(prof_timer) = _query_blocked_prof_timer.take() {
-                prof_timer.finish_with_query_invocation_id(cached.1.into());
+                    QueryResult::Poisoned => FatalError.raise(),
+                }
             }
-
-            return TryGetJob::JobCompleted(cached);
         }
     }
 
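The core of the rewrite is that `HashMap::entry` takes its key by value: `try_start` now moves `key` into `entry()` and clones it back out of `entry.key()` only on the paths that still need an owned copy, instead of cloning unconditionally before the match as the old code did. In miniature:

    use std::collections::hash_map::Entry;
    use std::collections::HashMap;

    fn record_start(active: &mut HashMap<String, &'static str>, key: String) -> String {
        match active.entry(key) {
            Entry::Vacant(entry) => {
                // The original `key` was moved into `entry()`; clone it back
                // from the entry itself, as the new Vacant arm does.
                let owned = entry.key().clone();
                entry.insert("started");
                owned
            }
            Entry::Occupied(entry) => entry.key().clone(),
        }
    }

    fn main() {
        let mut active = HashMap::new();
        assert_eq!(record_start(&mut active, "typeck".to_string()), "typeck");
        assert_eq!(active["typeck"], "started");
    }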

@@ -418,7 +435,13 @@ where
     CTX: QueryContext,
 {
     let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
-        tcx, state, cache, span, &key, lookup, query,
+        tcx,
+        state,
+        cache,
+        span,
+        key.clone(),
+        lookup,
+        query,
     ) {
         TryGetJob::NotYetStarted(job) => job,
         TryGetJob::Cycle(result) => return result,
@@ -741,7 +764,13 @@ fn force_query_impl<CTX, C>(
     };
 
     let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
-        tcx, state, cache, span, &key, lookup, query,
+        tcx,
+        state,
+        cache,
+        span,
+        key.clone(),
+        lookup,
+        query,
     ) {
         TryGetJob::NotYetStarted(job) => job,
         TryGetJob::Cycle(_) => return,
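
Because `try_start` now consumes the key, both call sites pass `key.clone()`, presumably because the surrounding functions still need the key afterwards. The same move-versus-clone distinction in isolation:

    fn consume(key: String) -> usize {
        key.len()
    }

    fn main() {
        let key = String::from("layout_of");
        // Clone at the call site so `key` stays usable after the call.
        let len = consume(key.clone());
        assert_eq!(len, key.len());
    }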
