@@ -11,13 +11,13 @@ use crate::query::job::{
};
use crate::query::{QueryContext, QueryMap, QueryStackFrame};

-#[cfg(not(parallel_compiler))]
-use rustc_data_structures::cold_path;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHasher};
use rustc_data_structures::sharded::{get_shard_index_by_hash, Sharded};
use rustc_data_structures::sync::{Lock, LockGuard};
use rustc_data_structures::thin_vec::ThinVec;
+#[cfg(not(parallel_compiler))]
+use rustc_errors::DiagnosticBuilder;
use rustc_errors::{Diagnostic, FatalError};
use rustc_span::Span;
use std::collections::hash_map::Entry;
@@ -36,7 +36,7 @@ pub struct QueryCacheStore<C: QueryCache> {
    pub cache_hits: AtomicUsize,
}

-impl<C: QueryCache> Default for QueryCacheStore<C> {
+impl<C: QueryCache + Default> Default for QueryCacheStore<C> {
    fn default() -> Self {
        Self {
            cache: C::default(),
@@ -158,6 +158,31 @@ where
    id: QueryJobId<D>,
}

+#[cold]
+#[inline(never)]
+#[cfg(not(parallel_compiler))]
+fn mk_cycle<CTX, V, R>(
+    tcx: CTX,
+    root: QueryJobId<CTX::DepKind>,
+    span: Span,
+    handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V,
+    cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
+) -> R
+where
+    CTX: QueryContext,
+    V: std::fmt::Debug,
+    R: Clone,
+{
+    let error: CycleError = root.find_cycle_in_stack(
+        tcx.try_collect_active_jobs().unwrap(),
+        &tcx.current_query_job(),
+        span,
+    );
+    let error = report_cycle(tcx.dep_context().sess(), error);
+    let value = handle_cycle_error(tcx, error);
+    cache.store_nocache(value)
+}
+
impl<'tcx, D, C> JobOwner<'tcx, D, C>
where
    D: Copy + Clone + Eq + Hash,
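The new `mk_cycle` helper replaces the removed `cold_path` closure with a dedicated `#[cold]` `#[inline(never)]` function, keeping the rarely-taken cycle-error path out of line from the hot `try_start` body. A minimal, self-contained sketch of the same pattern, with hypothetical names and not part of the patch itself:

```rust
use std::collections::HashMap;

// Rarely taken error path, kept out of line so the hot caller stays small,
// in the spirit of `mk_cycle` above.
#[cold]
#[inline(never)]
fn report_missing(key: u64) -> String {
    format!("no entry for key {}", key)
}

fn lookup(table: &HashMap<u64, String>, key: u64) -> String {
    match table.get(&key) {
        Some(v) => v.clone(),        // hot path
        None => report_missing(key), // cold, out-of-line path
    }
}

fn main() {
    let mut table = HashMap::new();
    table.insert(1, "one".to_string());
    assert_eq!(lookup(&table, 1), "one");
    assert_eq!(lookup(&table, 2), "no entry for key 2");
}
```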
@@ -177,7 +202,7 @@ where
        state: &'b QueryState<CTX::DepKind, C::Key>,
        cache: &'b QueryCacheStore<C>,
        span: Span,
-        key: &C::Key,
+        key: C::Key,
        lookup: QueryLookup,
        query: &QueryVtable<CTX, C::Key, C::Value>,
    ) -> TryGetJob<'b, CTX::DepKind, C>
@@ -188,94 +213,86 @@ where
        let mut state_lock = state.shards.get_shard_by_index(shard).lock();
        let lock = &mut *state_lock;

-        let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
-            Entry::Occupied(mut entry) => {
-                match entry.get_mut() {
-                    QueryResult::Started(job) => {
-                        // For parallel queries, we'll block and wait until the query running
-                        // in another thread has completed. Record how long we wait in the
-                        // self-profiler.
-                        let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
-                            Some(tcx.dep_context().profiler().query_blocked())
-                        } else {
-                            None
-                        };
-
-                        // Create the id of the job we're waiting for
-                        let id = QueryJobId::new(job.id, shard, query.dep_kind);
-
-                        (job.latch(id), _query_blocked_prof_timer)
-                    }
-                    QueryResult::Poisoned => FatalError.raise(),
-                }
-            }
+        match lock.active.entry(key) {
            Entry::Vacant(entry) => {
-                // No job entry for this query. Return a new one to be started later.
-
                // Generate an id unique within this shard.
                let id = lock.jobs.checked_add(1).unwrap();
                lock.jobs = id;
                let id = QueryShardJobId(NonZeroU32::new(id).unwrap());

-                let global_id = QueryJobId::new(id, shard, query.dep_kind);
-
                let job = tcx.current_query_job();
                let job = QueryJob::new(id, span, job);

+                let key = entry.key().clone();
                entry.insert(QueryResult::Started(job));

-                let owner = JobOwner { state, cache, id: global_id, key: (*key).clone() };
+                let global_id = QueryJobId::new(id, shard, query.dep_kind);
+                let owner = JobOwner { state, cache, id: global_id, key };
                return TryGetJob::NotYetStarted(owner);
            }
-        };
-        mem::drop(state_lock);
-
-        // If we are single-threaded we know that we have cycle error,
-        // so we just return the error.
-        #[cfg(not(parallel_compiler))]
-        return TryGetJob::Cycle(cold_path(|| {
-            let error: CycleError = latch.find_cycle_in_stack(
-                tcx.try_collect_active_jobs().unwrap(),
-                &tcx.current_query_job(),
-                span,
-            );
-            let error = report_cycle(tcx.dep_context().sess(), error);
-            let value = query.handle_cycle_error(tcx, error);
-            cache.cache.store_nocache(value)
-        }));
-
-        // With parallel queries we might just have to wait on some other
-        // thread.
-        #[cfg(parallel_compiler)]
-        {
-            let result = latch.wait_on(tcx.current_query_job(), span);
-
-            if let Err(cycle) = result {
-                let cycle = report_cycle(tcx.dep_context().sess(), cycle);
-                let value = query.handle_cycle_error(tcx, cycle);
-                let value = cache.cache.store_nocache(value);
-                return TryGetJob::Cycle(value);
-            }
+            Entry::Occupied(mut entry) => {
+                match entry.get_mut() {
+                    #[cfg(not(parallel_compiler))]
+                    QueryResult::Started(job) => {
+                        let id = QueryJobId::new(job.id, shard, query.dep_kind);

-            let cached = cache
-                .cache
-                .lookup(cache, &key, |value, index| {
-                    if unlikely!(tcx.dep_context().profiler().enabled()) {
-                        tcx.dep_context().profiler().query_cache_hit(index.into());
+                        drop(state_lock);
+
+                        // If we are single-threaded we know that we have cycle error,
+                        // so we just return the error.
+                        return TryGetJob::Cycle(mk_cycle(
+                            tcx,
+                            id,
+                            span,
+                            query.handle_cycle_error,
+                            &cache.cache,
+                        ));
                    }
-                    #[cfg(debug_assertions)]
-                    {
-                        cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+                    #[cfg(parallel_compiler)]
+                    QueryResult::Started(job) => {
+                        // For parallel queries, we'll block and wait until the query running
+                        // in another thread has completed. Record how long we wait in the
+                        // self-profiler.
+                        let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();
+
+                        // Get the latch out
+                        let latch = job.latch();
+                        let key = entry.key().clone();
+
+                        drop(state_lock);
+
+                        // With parallel queries we might just have to wait on some other
+                        // thread.
+                        let result = latch.wait_on(tcx.current_query_job(), span);
+
+                        if let Err(cycle) = result {
+                            let cycle = report_cycle(tcx.dep_context().sess(), cycle);
+                            let value = (query.handle_cycle_error)(tcx, cycle);
+                            let value = cache.cache.store_nocache(value);
+                            return TryGetJob::Cycle(value);
+                        }
+
+                        let cached = cache
+                            .cache
+                            .lookup(cache, &key, |value, index| {
+                                if unlikely!(tcx.dep_context().profiler().enabled()) {
+                                    tcx.dep_context().profiler().query_cache_hit(index.into());
+                                }
+                                #[cfg(debug_assertions)]
+                                {
+                                    cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+                                }
+                                (value.clone(), index)
+                            })
+                            .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
+
+                        query_blocked_prof_timer.finish_with_query_invocation_id(cached.1.into());
+
+                        return TryGetJob::JobCompleted(cached);
                    }
-                    (value.clone(), index)
-                })
-                .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
-
-            if let Some(prof_timer) = _query_blocked_prof_timer.take() {
-                prof_timer.finish_with_query_invocation_id(cached.1.into());
+                    QueryResult::Poisoned => FatalError.raise(),
+                }
            }
-
-            return TryGetJob::JobCompleted(cached);
        }
    }

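The rewritten `try_start` above takes the key by value and matches on `lock.active.entry(key)`, cloning the key back out of `entry.key()` only on the paths that actually need a second copy, instead of unconditionally calling `(*key).clone()` before the lookup. A minimal sketch of that `Entry` pattern over a plain `HashMap`, with hypothetical types and not part of the patch itself:

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;

#[derive(Debug)]
enum JobState {
    Started(u32),
}

// Take the key by value; the map keeps it on the vacant path, and a second
// copy is cloned from `entry.key()` only when it is actually needed.
fn start_or_inspect(active: &mut HashMap<String, JobState>, key: String, next_id: u32) {
    match active.entry(key) {
        Entry::Vacant(entry) => {
            let key_copy = entry.key().clone(); // clone only on this path
            entry.insert(JobState::Started(next_id));
            println!("started job {} for {}", next_id, key_copy);
        }
        Entry::Occupied(entry) => {
            // No clone at all on this path.
            println!("{} already active: {:?}", entry.key(), entry.get());
        }
    }
}

fn main() {
    let mut active = HashMap::new();
    start_or_inspect(&mut active, "typeck".to_string(), 1);
    start_or_inspect(&mut active, "typeck".to_string(), 2);
}
```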
@@ -418,7 +435,13 @@ where
    CTX: QueryContext,
{
    let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
-        tcx, state, cache, span, &key, lookup, query,
+        tcx,
+        state,
+        cache,
+        span,
+        key.clone(),
+        lookup,
+        query,
    ) {
        TryGetJob::NotYetStarted(job) => job,
        TryGetJob::Cycle(result) => return result,
@@ -741,7 +764,13 @@ fn force_query_impl<CTX, C>(
    };

    let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
-        tcx, state, cache, span, &key, lookup, query,
+        tcx,
+        state,
+        cache,
+        span,
+        key.clone(),
+        lookup,
+        query,
    ) {
        TryGetJob::NotYetStarted(job) => job,
        TryGetJob::Cycle(_) => return,