@@ -11,13 +11,13 @@ use crate::query::job::{
 };
 use crate::query::{QueryContext, QueryMap, QueryStackFrame};

-#[cfg(not(parallel_compiler))]
-use rustc_data_structures::cold_path;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::{FxHashMap, FxHasher};
 use rustc_data_structures::sharded::{get_shard_index_by_hash, Sharded};
 use rustc_data_structures::sync::{Lock, LockGuard};
 use rustc_data_structures::thin_vec::ThinVec;
+#[cfg(not(parallel_compiler))]
+use rustc_errors::DiagnosticBuilder;
 use rustc_errors::{Diagnostic, FatalError};
 use rustc_span::Span;
 use std::collections::hash_map::Entry;
@@ -36,7 +36,7 @@ pub struct QueryCacheStore<C: QueryCache> {
     pub cache_hits: AtomicUsize,
 }

-impl<C: QueryCache> Default for QueryCacheStore<C> {
+impl<C: QueryCache + Default> Default for QueryCacheStore<C> {
     fn default() -> Self {
         Self {
             cache: C::default(),
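Note on the `+ Default` bound above: `Self { cache: C::default(), .. }` can only be written when `C: Default` is in scope, presumably because `QueryCache` itself no longer guarantees `Default` after this change, so the blanket impl has to require it explicitly. A minimal, self-contained sketch of the same pattern (the `Cache` trait, `Store` struct, and `VecCache` type below are made up for illustration, not the real rustc definitions):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Made-up stand-ins for QueryCache / QueryCacheStore, just to show the bound.
trait Cache {
    type Key;
    type Value;
}

struct Store<C: Cache> {
    cache: C,
    hits: AtomicUsize,
}

// `C::default()` is only callable with an explicit `C: Default` bound here,
// mirroring the `QueryCache + Default` bound in the hunk above.
impl<C: Cache + Default> Default for Store<C> {
    fn default() -> Self {
        Store { cache: C::default(), hits: AtomicUsize::new(0) }
    }
}

#[derive(Default)]
struct VecCache(Vec<(u32, String)>);

impl Cache for VecCache {
    type Key = u32;
    type Value = String;
}

fn main() {
    let store: Store<VecCache> = Store::default();
    assert!(store.cache.0.is_empty());
    assert_eq!(store.hits.load(Ordering::Relaxed), 0);
}
```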
@@ -161,6 +161,31 @@ where
     id: QueryJobId<D>,
 }

+#[cold]
+#[inline(never)]
+#[cfg(not(parallel_compiler))]
+fn mk_cycle<CTX, V, R>(
+    tcx: CTX,
+    root: QueryJobId<CTX::DepKind>,
+    span: Span,
+    handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V,
+    cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
+) -> R
+where
+    CTX: QueryContext,
+    V: std::fmt::Debug,
+    R: Clone,
+{
+    let error: CycleError = root.find_cycle_in_stack(
+        tcx.try_collect_active_jobs().unwrap(),
+        &tcx.current_query_job(),
+        span,
+    );
+    let error = report_cycle(tcx.dep_context().sess(), error);
+    let value = handle_cycle_error(tcx, error);
+    cache.store_nocache(value)
+}
+
 impl<'tcx, D, C> JobOwner<'tcx, D, C>
 where
     D: Copy + Clone + Eq + Hash,
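The new `mk_cycle` helper carries `#[cold]` and `#[inline(never)]`, taking over the role of the removed `rustc_data_structures::cold_path` closure: the cycle-error path is outlined into its own function so the fast path of `try_start` stays compact. A minimal sketch of that attribute pattern under made-up names (nothing below is the real query code):

```rust
// The rarely-taken failure path is outlined and marked cold, so the caller's
// hot path stays small and the optimizer treats the branch as unlikely.
#[cold]
#[inline(never)]
fn report_overflow(depth: usize) -> ! {
    panic!("recursion limit reached at depth {depth}");
}

fn check_depth(depth: usize, limit: usize) {
    if depth > limit {
        report_overflow(depth);
    }
}

fn main() {
    check_depth(3, 128);
    println!("ok");
}
```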
@@ -180,7 +205,7 @@ where
         state: &'b QueryState<CTX::DepKind, C::Key>,
         cache: &'b QueryCacheStore<C>,
         span: Span,
-        key: &C::Key,
+        key: C::Key,
         lookup: QueryLookup,
         query: &QueryVtable<CTX, C::Key, C::Value>,
     ) -> TryGetJob<'b, CTX::DepKind, C>
@@ -191,94 +216,86 @@ where
         let mut state_lock = state.shards.get_shard_by_index(shard).lock();
         let lock = &mut *state_lock;

-        let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
-            Entry::Occupied(mut entry) => {
-                match entry.get_mut() {
-                    QueryResult::Started(job) => {
-                        // For parallel queries, we'll block and wait until the query running
-                        // in another thread has completed. Record how long we wait in the
-                        // self-profiler.
-                        let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
-                            Some(tcx.dep_context().profiler().query_blocked())
-                        } else {
-                            None
-                        };
-
-                        // Create the id of the job we're waiting for
-                        let id = QueryJobId::new(job.id, shard, query.dep_kind);
-
-                        (job.latch(id), _query_blocked_prof_timer)
-                    }
-                    QueryResult::Poisoned => FatalError.raise(),
-                }
-            }
+        match lock.active.entry(key) {
             Entry::Vacant(entry) => {
-                // No job entry for this query. Return a new one to be started later.
-
                 // Generate an id unique within this shard.
                 let id = lock.jobs.checked_add(1).unwrap();
                 lock.jobs = id;
                 let id = QueryShardJobId(NonZeroU32::new(id).unwrap());

-                let global_id = QueryJobId::new(id, shard, query.dep_kind);
-
                 let job = tcx.current_query_job();
                 let job = QueryJob::new(id, span, job);

+                let key = entry.key().clone();
                 entry.insert(QueryResult::Started(job));

-                let owner = JobOwner { state, cache, id: global_id, key: (*key).clone() };
+                let global_id = QueryJobId::new(id, shard, query.dep_kind);
+                let owner = JobOwner { state, cache, id: global_id, key };
                 return TryGetJob::NotYetStarted(owner);
             }
-        };
-        mem::drop(state_lock);
-
-        // If we are single-threaded we know that we have cycle error,
-        // so we just return the error.
-        #[cfg(not(parallel_compiler))]
-        return TryGetJob::Cycle(cold_path(|| {
-            let error: CycleError = latch.find_cycle_in_stack(
-                tcx.try_collect_active_jobs().unwrap(),
-                &tcx.current_query_job(),
-                span,
-            );
-            let error = report_cycle(tcx.dep_context().sess(), error);
-            let value = query.handle_cycle_error(tcx, error);
-            cache.cache.store_nocache(value)
-        }));
-
-        // With parallel queries we might just have to wait on some other
-        // thread.
-        #[cfg(parallel_compiler)]
-        {
-            let result = latch.wait_on(tcx.current_query_job(), span);
-
-            if let Err(cycle) = result {
-                let cycle = report_cycle(tcx.dep_context().sess(), cycle);
-                let value = query.handle_cycle_error(tcx, cycle);
-                let value = cache.cache.store_nocache(value);
-                return TryGetJob::Cycle(value);
-            }
-
-            let cached = cache
-                .cache
-                .lookup(cache, &key, |value, index| {
-                    if unlikely!(tcx.dep_context().profiler().enabled()) {
-                        tcx.dep_context().profiler().query_cache_hit(index.into());
-                    }
-                    #[cfg(debug_assertions)]
-                    {
-                        cache.cache_hits.fetch_add(1, Ordering::Relaxed);
-                    }
-                    (value.clone(), index)
-                })
-                .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
-
-            if let Some(prof_timer) = _query_blocked_prof_timer.take() {
-                prof_timer.finish_with_query_invocation_id(cached.1.into());
-            }
-
-            return TryGetJob::JobCompleted(cached);
+            Entry::Occupied(mut entry) => {
+                match entry.get_mut() {
+                    #[cfg(not(parallel_compiler))]
+                    QueryResult::Started(job) => {
+                        let id = QueryJobId::new(job.id, shard, query.dep_kind);
+
+                        drop(state_lock);
+
+                        // If we are single-threaded we know that we have cycle error,
+                        // so we just return the error.
+                        return TryGetJob::Cycle(mk_cycle(
+                            tcx,
+                            id,
+                            span,
+                            query.handle_cycle_error,
+                            &cache.cache,
+                        ));
+                    }
+                    #[cfg(parallel_compiler)]
+                    QueryResult::Started(job) => {
+                        // For parallel queries, we'll block and wait until the query running
+                        // in another thread has completed. Record how long we wait in the
+                        // self-profiler.
+                        let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();
+
+                        // Get the latch out
+                        let latch = job.latch();
+                        let key = entry.key().clone();
+
+                        drop(state_lock);
+
+                        // With parallel queries we might just have to wait on some other
+                        // thread.
+                        let result = latch.wait_on(tcx.current_query_job(), span);
+
+                        if let Err(cycle) = result {
+                            let cycle = report_cycle(tcx.dep_context().sess(), cycle);
+                            let value = (query.handle_cycle_error)(tcx, cycle);
+                            let value = cache.cache.store_nocache(value);
+                            return TryGetJob::Cycle(value);
+                        }
+
+                        let cached = cache
+                            .cache
+                            .lookup(cache, &key, |value, index| {
+                                if unlikely!(tcx.dep_context().profiler().enabled()) {
+                                    tcx.dep_context().profiler().query_cache_hit(index.into());
+                                }
+                                #[cfg(debug_assertions)]
+                                {
+                                    cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+                                }
+                                (value.clone(), index)
+                            })
+                            .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
+
+                        query_blocked_prof_timer.finish_with_query_invocation_id(cached.1.into());
+
+                        return TryGetJob::JobCompleted(cached);
+                    }
+                    QueryResult::Poisoned => FatalError.raise(),
+                }
+            }
         }
     }

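A side effect of the rewrite above is how the key flows: `try_start` now takes `C::Key` by value, moves it into `entry()`, and clones it back out of `entry.key()` only on the branches that actually need another owned copy, instead of cloning eagerly with `(*key).clone()` before the lookup. A standalone sketch of that `Entry` pattern using `std::collections::HashMap` (the `JobState` and `start_job` names are invented for the example):

```rust
use std::collections::hash_map::{Entry, HashMap};

// Illustrative only: a toy "active jobs" map mirroring the Vacant/Occupied
// split in try_start above.
#[derive(Debug)]
enum JobState {
    Started(u32),
}

fn start_job(active: &mut HashMap<String, JobState>, key: String, id: u32) -> Option<String> {
    // The key is moved into `entry()`; no clone happens for the lookup itself.
    match active.entry(key) {
        Entry::Vacant(entry) => {
            // Clone the key only on the path that needs a second owned copy
            // (the diff does this to build the JobOwner that is returned).
            let owner_key = entry.key().clone();
            entry.insert(JobState::Started(id));
            Some(owner_key)
        }
        Entry::Occupied(entry) => {
            // Someone already started this job; nothing to clone here.
            println!("already running: {:?}", entry.get());
            None
        }
    }
}

fn main() {
    let mut active = HashMap::new();
    assert_eq!(start_job(&mut active, "typeck(foo)".to_string(), 1).as_deref(), Some("typeck(foo)"));
    assert_eq!(start_job(&mut active, "typeck(foo)".to_string(), 2), None);
}
```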
@@ -421,7 +438,13 @@ where
     CTX: QueryContext,
 {
     let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
-        tcx, state, cache, span, &key, lookup, query,
+        tcx,
+        state,
+        cache,
+        span,
+        key.clone(),
+        lookup,
+        query,
     ) {
         TryGetJob::NotYetStarted(job) => job,
         TryGetJob::Cycle(result) => return result,
@@ -744,7 +767,13 @@ fn force_query_impl<CTX, C>(
     };

     let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
-        tcx, state, cache, span, &key, lookup, query,
+        tcx,
+        state,
+        cache,
+        span,
+        key.clone(),
+        lookup,
+        query,
     ) {
         TryGetJob::NotYetStarted(job) => job,
         TryGetJob::Cycle(_) => return,