diff --git a/compiler/rustc_data_structures/src/fx.rs b/compiler/rustc_data_structures/src/fx.rs index 80e72250470c0..b732811da9898 100644 --- a/compiler/rustc_data_structures/src/fx.rs +++ b/compiler/rustc_data_structures/src/fx.rs @@ -1,5 +1,6 @@ use std::hash::BuildHasherDefault; +pub use indexmap::map::RawEntryApiV1 as IndexRawEntryApiV1; pub use rustc_hash::{FxHashMap, FxHashSet, FxHasher}; pub type StdEntry<'a, K, V> = std::collections::hash_map::Entry<'a, K, V>; diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index cd7725d2d70df..455e4bd39c649 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -1389,6 +1389,8 @@ fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepN if dep_node.is_none() { // Try to find it among the new nodes for shard in data.current.new_node_to_index.lock_shards() { + // This is OK, as there can be at most one `dep_node` with the given `dep_node_index` + #[allow(rustc::potential_query_instability)] if let Some((node, _)) = shard.iter().find(|(_, index)| **index == dep_node_index) { dep_node = Some(*node); break; diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs index a4fb0a5b07220..57e5ea9906f27 100644 --- a/compiler/rustc_query_system/src/dep_graph/serialized.rs +++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs @@ -651,6 +651,8 @@ impl<D: Deps> GraphEncoder<D> { let mut status = self.status.lock(); let status = status.as_mut().unwrap(); if let Some(record_stats) = &status.stats { + // `stats` is sorted below so we can allow this lint here. 
+ #[allow(rustc::potential_query_instability)] let mut stats: Vec<_> = record_stats.values().collect(); stats.sort_by_key(|s| -(s.node_counter as i64)); diff --git a/compiler/rustc_query_system/src/lib.rs b/compiler/rustc_query_system/src/lib.rs index ba7a631fb5419..15a4d520da0c3 100644 --- a/compiler/rustc_query_system/src/lib.rs +++ b/compiler/rustc_query_system/src/lib.rs @@ -1,5 +1,5 @@ // tidy-alphabetical-start -#![allow(rustc::potential_query_instability, internal_features)] +#![allow(internal_features)] #![feature(assert_matches)] #![feature(core_intrinsics)] #![feature(hash_raw_entry)] diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs index a4ced3d2c2452..9d69f6a86320d 100644 --- a/compiler/rustc_query_system/src/query/caches.rs +++ b/compiler/rustc_query_system/src/query/caches.rs @@ -1,7 +1,7 @@ use std::fmt::Debug; use std::hash::Hash; -use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::fx::{FxIndexMap, IndexRawEntryApiV1}; use rustc_data_structures::sharded::{self, Sharded}; use rustc_data_structures::sync::{Lock, OnceLock}; use rustc_hir::def_id::LOCAL_CRATE; @@ -23,7 +23,7 @@ pub trait QueryCache: Sized { } pub struct DefaultCache<K, V> { - cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>, + cache: Sharded<FxIndexMap<K, (V, DepNodeIndex)>>, } impl<K, V> Default for DefaultCache<K, V> { @@ -44,7 +44,7 @@ where fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> { let key_hash = sharded::make_hash(key); let lock = self.cache.lock_shard_by_hash(key_hash); - let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key); + let result = lock.raw_entry_v1().from_key_hashed_nocheck(key_hash, key); if let Some((_, value)) = result { Some(*value) } else { None } } diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs index ca3efc11201e1..1dacc374557ca 100644 --- a/compiler/rustc_query_system/src/query/job.rs +++ b/compiler/rustc_query_system/src/query/job.rs @@ -2,7 +2,7 @@ use std::hash::Hash; use 
std::io::Write; use std::num::NonZero; -use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::fx::FxIndexMap; use rustc_errors::{Diag, DiagCtxtHandle}; use rustc_hir::def::DefKind; use rustc_session::Session; @@ -30,7 +30,7 @@ pub struct QueryInfo { pub query: QueryStackFrame, } -pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>; +pub type QueryMap = FxIndexMap<QueryJobId, QueryJobInfo>; /// A value uniquely identifying an active query job. #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs index 17486be04dcde..3f86b153ee1e5 100644 --- a/compiler/rustc_query_system/src/query/plumbing.rs +++ b/compiler/rustc_query_system/src/query/plumbing.rs @@ -3,13 +3,12 @@ //! manage the caches, and so forth. use std::cell::Cell; -use std::collections::hash_map::Entry; use std::fmt::Debug; use std::hash::Hash; use std::mem; use rustc_data_structures::fingerprint::Fingerprint; -use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::fx::{FxIndexMap, IndexEntry}; use rustc_data_structures::sharded::Sharded; use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_data_structures::sync::Lock; @@ -33,7 +32,7 @@ use crate::query::{ }; pub struct QueryState<K> { - active: Sharded<FxHashMap<K, QueryResult>>, + active: Sharded<FxIndexMap<K, QueryResult>>, } /// Indicates the state of a query for a given key in a query map. @@ -187,7 +186,7 @@ where // since unwinding also wants to look at this map, this can also prevent a double // panic. 
let mut lock = state.active.lock_shard_by_value(&key); - lock.remove(&key) + lock.shift_remove(&key) }; val.unwrap().expect_job() }; @@ -207,7 +206,7 @@ where let state = self.state; let job = { let mut shard = state.active.lock_shard_by_value(&self.key); - let job = shard.remove(&self.key).unwrap().expect_job(); + let job = shard.shift_remove(&self.key).unwrap().expect_job(); shard.insert(self.key, QueryResult::Poisoned); job @@ -344,7 +343,7 @@ where let current_job_id = qcx.current_query_job(); match state_lock.entry(key) { - Entry::Vacant(entry) => { + IndexEntry::Vacant(entry) => { // Nothing has computed or is computing the query, so we start a new job and insert it in the // state map. let id = qcx.next_job_id(); @@ -356,7 +355,7 @@ where execute_job::<_, _, INCR>(query, qcx, state, key, id, dep_node) } - Entry::Occupied(mut entry) => { + IndexEntry::Occupied(mut entry) => { match entry.get_mut() { QueryResult::Started(job) => { #[cfg(parallel_compiler)]