rustc_query_system: explicitly register reused dep nodes #80177

Merged (2 commits) on Dec 22, 2020

6 changes: 3 additions & 3 deletions compiler/rustc_middle/src/dep_graph/mod.rs
@@ -5,7 +5,7 @@ use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
 use rustc_errors::Diagnostic;
-use rustc_hir::def_id::{DefPathHash, LocalDefId};
+use rustc_hir::def_id::LocalDefId;
 
 mod dep_node;
 
@@ -91,9 +91,9 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
     type DepKind = DepKind;
     type StableHashingContext = StableHashingContext<'tcx>;
 
-    fn register_reused_dep_path_hash(&self, hash: DefPathHash) {
+    fn register_reused_dep_node(&self, dep_node: &DepNode) {
         if let Some(cache) = self.queries.on_disk_cache.as_ref() {
-            cache.register_reused_dep_path_hash(*self, hash)
+            cache.register_reused_dep_node(*self, dep_node)
         }
     }
 
38 changes: 28 additions & 10 deletions compiler/rustc_middle/src/ty/query/on_disk_cache.rs
@@ -1,4 +1,4 @@
-use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
+use crate::dep_graph::{DepNode, DepNodeIndex, SerializedDepNodeIndex};
 use crate::mir::interpret::{AllocDecodingSession, AllocDecodingState};
 use crate::mir::{self, interpret};
 use crate::ty::codec::{OpaqueEncoder, RefDecodable, TyDecoder, TyEncoder};
@@ -264,6 +264,13 @@ impl<'sess> OnDiskCache<'sess> {
             (file_to_file_index, file_index_to_stable_id)
         };
 
+        // Register any dep nodes that we reused from the previous session,
+        // but didn't `DepNode::construct` in this session. This ensures
+        // that their `DefPathHash` to `RawDefId` mappings are registered
+        // in `latest_foreign_def_path_hashes` if necessary, since that
+        // normally happens in `DepNode::construct`.
+        tcx.dep_graph.register_reused_dep_nodes(tcx);
+
         // Load everything into memory so we can write it out to the on-disk
         // cache. The vast majority of cacheable query results should already
         // be in memory, so this should be a cheap operation.
@@ -467,22 +474,33 @@ impl<'sess> OnDiskCache<'sess> {
             .insert(hash, RawDefId { krate: def_id.krate.as_u32(), index: def_id.index.as_u32() });
     }
 
-    /// If the given `hash` still exists in the current compilation,
-    /// calls `store_foreign_def_id` with its current `DefId`.
+    /// If the given `dep_node`'s hash still exists in the current compilation,
+    /// and its current `DefId` is foreign, calls `store_foreign_def_id` with it.
     ///
     /// Normally, `store_foreign_def_id_hash` can be called directly by
    /// the dependency graph when we construct a `DepNode`. However,
     /// when we re-use a deserialized `DepNode` from the previous compilation
     /// session, we only have the `DefPathHash` available. This method is used
     /// to ensure that any `DepNode` that we re-use has a `DefPathHash` -> `RawDefId`
     /// mapping written out for use in the next compilation session.
-    pub fn register_reused_dep_path_hash(&self, tcx: TyCtxt<'tcx>, hash: DefPathHash) {
-        // We can't simply copy the `RawDefId` from `foreign_def_path_hashes` to
-        // `latest_foreign_def_path_hashes`, since the `RawDefId` might have
-        // changed in the current compilation session (e.g. we've added/removed crates,
-        // or added/removed definitions before/after the target definition).
-        if let Some(def_id) = self.def_path_hash_to_def_id(tcx, hash) {
-            self.store_foreign_def_id_hash(def_id, hash);
+    pub fn register_reused_dep_node(&self, tcx: TyCtxt<'tcx>, dep_node: &DepNode) {
+        // For reused dep nodes, we only need to store the mapping if the node
+        // is one whose query key we can reconstruct from the hash. We use the
+        // mapping to aid that reconstruction in the next session. While we also
+        // use it to decode `DefId`s we encoded in the cache as `DefPathHash`es,
+        // they're already registered during `DefId` encoding.
+        if dep_node.kind.can_reconstruct_query_key() {
+            let hash = DefPathHash(dep_node.hash.into());
+
+            // We can't simply copy the `RawDefId` from `foreign_def_path_hashes` to
+            // `latest_foreign_def_path_hashes`, since the `RawDefId` might have
+            // changed in the current compilation session (e.g. we've added/removed crates,
+            // or added/removed definitions before/after the target definition).
+            if let Some(def_id) = self.def_path_hash_to_def_id(tcx, hash) {
+                if !def_id.is_local() {
+                    self.store_foreign_def_id_hash(def_id, hash);
+                }
+            }
         }
     }
 
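This hunk is the core of the change: a `DepNode` reused from the previous session never goes through `DepNode::construct`, so its `DefPathHash` -> `RawDefId` mapping has to be re-recorded when the cache is serialized. Below is a minimal, self-contained sketch of that logic; the types and names (`CacheModel`, `register_reused`, the `resolve` callback) are illustrative stand-ins, not rustc APIs.

use std::collections::HashMap;

// Stable across compilation sessions.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct DefPathHash(u64);

// NOT stable across sessions: crate numbering and def indices can shift.
#[derive(Clone, Copy, PartialEq, Debug)]
struct RawDefId { krate: u32, index: u32 }

struct CacheModel {
    // Mappings that will be serialized for the next session.
    latest_foreign_def_path_hashes: HashMap<DefPathHash, RawDefId>,
}

impl CacheModel {
    // Stand-in for `register_reused_dep_node`: `resolve` plays the role of
    // `def_path_hash_to_def_id` and reports whether the `DefId` is local.
    fn register_reused(
        &mut self,
        hash: DefPathHash,
        can_reconstruct_query_key: bool,
        resolve: impl Fn(DefPathHash) -> Option<(RawDefId, bool)>,
    ) {
        // Only nodes whose query key is the hash itself need the mapping.
        if !can_reconstruct_query_key {
            return;
        }
        // Re-resolve instead of copying the old `RawDefId`: it may have
        // changed in this session.
        if let Some((raw, is_local)) = resolve(hash) {
            if !is_local {
                self.latest_foreign_def_path_hashes.insert(hash, raw);
            }
        }
    }
}

fn main() {
    let mut cache = CacheModel { latest_foreign_def_path_hashes: HashMap::new() };
    let hash = DefPathHash(0xABCD);
    // Pretend the foreign definition moved to a new index this session.
    cache.register_reused(hash, true, |_| Some((RawDefId { krate: 5, index: 7 }, false)));
    assert_eq!(cache.latest_foreign_def_path_hashes[&hash], RawDefId { krate: 5, index: 7 });
}

The point the sketch preserves is that the stale `RawDefId` from the previous session is never copied forward; only a hash that still resolves, and to a foreign definition, produces an entry.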
5 changes: 2 additions & 3 deletions compiler/rustc_query_system/src/dep_graph/dep_node.rs
@@ -60,9 +60,8 @@ pub struct DepNode<K> {
 // * When `DepNode::construct` is called, `arg.to_fingerprint()`
 //   is responsible for calling `OnDiskCache::store_foreign_def_id_hash`
 //   if needed
-// * When a `DepNode` is loaded from the `PreviousDepGraph`,
-//   then `PreviousDepGraph::index_to_node` is responsible for calling
-//   `tcx.register_reused_dep_path_hash`
+// * When we serialize the on-disk cache, `OnDiskCache::serialize` is
+//   responsible for calling `DepGraph::register_reused_dep_nodes`.
 //
 // FIXME: Enforce this by preventing manual construction of `DepNode`
 // (e.g. add a `_priv: ()` field)
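The FIXME above asks for manual construction of `DepNode` to be made impossible, so that the registration invariants listed in the comment can't be bypassed. A minimal sketch of the `_priv: ()` pattern it suggests, with toy types rather than rustc's generic `DepNode<K>`:

mod dep_graph {
    pub struct DepNode {
        pub kind: u64,
        pub hash: u64,
        // A private field makes struct literals outside this module illegal,
        // so `construct` becomes the only way to obtain a `DepNode`.
        _priv: (),
    }

    impl DepNode {
        // Registration side effects can be centralized here.
        pub fn construct(kind: u64, hash: u64) -> DepNode {
            DepNode { kind, hash, _priv: () }
        }
    }
}

fn main() {
    let node = dep_graph::DepNode::construct(1, 42);
    assert_eq!((node.kind, node.hash), (1, 42));
    // let bad = dep_graph::DepNode { kind: 1, hash: 42, _priv: () };
    // ^ error[E0451]: field `_priv` of struct `DepNode` is private
}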
24 changes: 19 additions & 5 deletions compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -554,7 +554,7 @@ impl<K: DepKind> DepGraph<K> {
         // We never try to mark eval_always nodes as green
         debug_assert!(!dep_node.kind.is_eval_always());
 
-        data.previous.debug_assert_eq(prev_dep_node_index, *dep_node);
+        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
 
         let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
 
@@ -572,7 +572,7 @@
                         "try_mark_previous_green({:?}) --- found dependency {:?} to \
                          be immediately green",
                         dep_node,
-                        data.previous.debug_dep_node(dep_dep_node_index),
+                        data.previous.index_to_node(dep_dep_node_index)
                     );
                     current_deps.push(node_index);
                 }
@@ -585,12 +585,12 @@
                         "try_mark_previous_green({:?}) - END - dependency {:?} was \
                          immediately red",
                         dep_node,
-                        data.previous.debug_dep_node(dep_dep_node_index)
+                        data.previous.index_to_node(dep_dep_node_index)
                     );
                     return None;
                 }
                 None => {
-                    let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index, tcx);
+                    let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);
 
                     // We don't know the state of this dependency. If it isn't
                     // an eval_always node, let's try to mark it green recursively.
@@ -801,7 +801,7 @@
         for prev_index in data.colors.values.indices() {
             match data.colors.get(prev_index) {
                 Some(DepNodeColor::Green(_)) => {
-                    let dep_node = data.previous.index_to_node(prev_index, tcx);
+                    let dep_node = data.previous.index_to_node(prev_index);
                     tcx.try_load_from_on_disk_cache(&dep_node);
                 }
                 None | Some(DepNodeColor::Red) => {
@@ -813,6 +813,20 @@
             }
         }
     }
 
+    // Register reused dep nodes (i.e. nodes we've marked red or green) with the context.
+    pub fn register_reused_dep_nodes<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
+        let data = self.data.as_ref().unwrap();
+        for prev_index in data.colors.values.indices() {
+            match data.colors.get(prev_index) {
+                Some(DepNodeColor::Red) | Some(DepNodeColor::Green(_)) => {
+                    let dep_node = data.previous.index_to_node(prev_index);
+                    tcx.register_reused_dep_node(&dep_node);
+                }
+                None => {}
+            }
+        }
+    }
+
     fn next_virtual_depnode_index(&self) -> DepNodeIndex {
         let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
         DepNodeIndex::from_u32(index)
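The new `register_reused_dep_nodes` walks the color map: any previous-session node that got a color this session (red or green) was reused, while an uncolored node was never touched. A simplified, runnable model of that walk, with string names and a plain slice standing in for rustc's `DepNode` and `DepNodeColorMap`:

#[derive(Clone, Copy)]
enum DepNodeColor {
    Red,
    Green, // rustc's `Green` also carries the node's index in the new graph
}

// Stand-in for `DepGraph::register_reused_dep_nodes`.
fn register_reused_dep_nodes(
    colors: &[Option<DepNodeColor>],         // indexed by previous-session node index
    prev_nodes: &[&'static str],             // stand-in for `PreviousDepGraph::index_to_node`
    register: &mut impl FnMut(&'static str), // stand-in for `tcx.register_reused_dep_node`
) {
    for (prev_index, color) in colors.iter().enumerate() {
        match color {
            Some(DepNodeColor::Red) | Some(DepNodeColor::Green) => {
                register(prev_nodes[prev_index]);
            }
            // Never marked this session, so not reused.
            None => {}
        }
    }
}

fn main() {
    let colors = [Some(DepNodeColor::Green), None, Some(DepNodeColor::Red)];
    let nodes = ["type_of(a)", "mir_built(b)", "optimized_mir(c)"];
    let mut reused = Vec::new();
    register_reused_dep_nodes(&colors, &nodes, &mut |n| reused.push(n));
    assert_eq!(reused, ["type_of(a)", "optimized_mir(c)"]);
}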
3 changes: 1 addition & 2 deletions compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -15,7 +15,6 @@ use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
 use rustc_errors::Diagnostic;
-use rustc_span::def_id::DefPathHash;
 
 use std::fmt;
 use std::hash::Hash;
@@ -33,7 +32,7 @@ pub trait DepContext: Copy {
     /// Try to force a dep node to execute and see if it's green.
     fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
 
-    fn register_reused_dep_path_hash(&self, hash: DefPathHash);
+    fn register_reused_dep_node(&self, dep_node: &DepNode<Self::DepKind>);
 
     /// Return whether the current session is tainted by errors.
     fn has_errors_or_delayed_span_bugs(&self) -> bool;
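This trait hook is what keeps `rustc_query_system` from depending on `rustc_middle`: the query system calls through the abstract `DepContext`, and the `TyCtxt` impl (first file in this diff) forwards to the on-disk cache when one exists. A toy sketch of that shape, with stand-in types in place of `TyCtxt` and `OnDiskCache`:

struct DepNode; // stand-in for `DepNode<K>`

trait DepContext: Copy {
    fn register_reused_dep_node(&self, dep_node: &DepNode);
}

struct Cache;

impl Cache {
    fn register_reused_dep_node(&self, _dep_node: &DepNode) {
        // Here rustc records the `DefPathHash` -> `RawDefId` mapping.
    }
}

#[derive(Clone, Copy)]
struct Ctxt<'a> {
    on_disk_cache: Option<&'a Cache>,
}

impl DepContext for Ctxt<'_> {
    fn register_reused_dep_node(&self, dep_node: &DepNode) {
        // No cache (e.g. incremental compilation is off): nothing to do.
        if let Some(cache) = self.on_disk_cache {
            cache.register_reused_dep_node(dep_node);
        }
    }
}

fn main() {
    let cache = Cache;
    let ctxt = Ctxt { on_disk_cache: Some(&cache) };
    ctxt.register_reused_dep_node(&DepNode);
}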
41 changes: 1 addition & 40 deletions compiler/rustc_query_system/src/dep_graph/prev.rs
@@ -1,9 +1,7 @@
 use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
 use super::{DepKind, DepNode};
-use crate::dep_graph::DepContext;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
-use rustc_span::def_id::DefPathHash;
 
 #[derive(Debug, Encodable, Decodable)]
 pub struct PreviousDepGraph<K: DepKind> {
@@ -33,44 +31,7 @@ impl<K: DepKind> PreviousDepGraph<K> {
     }
 
     #[inline]
-    pub fn index_to_node<CTX: DepContext<DepKind = K>>(
-        &self,
-        dep_node_index: SerializedDepNodeIndex,
-        tcx: CTX,
-    ) -> DepNode<K> {
-        let dep_node = self.data.nodes[dep_node_index];
-        // We have just loaded a deserialized `DepNode` from the previous
-        // compilation session into the current one. If this was a foreign `DefId`,
-        // then we stored additional information in the incr comp cache when we
-        // initially created its fingerprint (see `DepNodeParams::to_fingerprint`).
-        // We won't be calling `to_fingerprint` again for this `DepNode` (we no longer
-        // have the original value), so we need to copy over this additional information
-        // from the old incremental cache into the new cache that we serialize
-        // at the end of this compilation session.
-        if dep_node.kind.can_reconstruct_query_key() {
-            tcx.register_reused_dep_path_hash(DefPathHash(dep_node.hash.into()));
-        }
-        dep_node
-    }
-
-    /// When debug assertions are enabled, asserts that the dep node at `dep_node_index` is equal to `dep_node`.
-    /// This method should be preferred over manually calling `index_to_node`.
-    /// Calls to `index_to_node` may affect global state, so gating a call
-    /// to `index_to_node` on debug assertions could cause behavior changes when debug assertions
-    /// are enabled.
-    #[inline]
-    pub fn debug_assert_eq(&self, dep_node_index: SerializedDepNodeIndex, dep_node: DepNode<K>) {
-        debug_assert_eq!(self.data.nodes[dep_node_index], dep_node);
-    }
-
-    /// Obtains a debug-printable version of the `DepNode`.
-    /// See `debug_assert_eq` for why this should be preferred over manually
-    /// calling `index_to_node`.
-    pub fn debug_dep_node(&self, dep_node_index: SerializedDepNodeIndex) -> impl std::fmt::Debug {
-        // We're returning the `DepNode` without calling `register_reused_dep_path_hash`,
-        // but the `impl Debug` return type means that it can only be used for debug
-        // printing. So, there's no risk of calls trying to create new dep nodes that
-        // have this node as a dependency.
+    pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode<K> {
         self.data.nodes[dep_node_index]
     }
 
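With registration moved to serialization time, `index_to_node` is once again a pure slice lookup, which is why the `debug_assert_eq`/`debug_dep_node` wrappers could be deleted: wrapping a pure call in `debug_assert_eq!` can no longer make debug and release builds behave differently. A small illustration with toy types:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Node(u32);

struct PrevGraph {
    nodes: Vec<Node>,
}

impl PrevGraph {
    // Pure lookup with no registration side effects, so it is safe to call
    // inside `debug_assert_eq!`: compiling the assertion away in release
    // builds changes nothing observable.
    fn index_to_node(&self, index: usize) -> Node {
        self.nodes[index]
    }
}

fn main() {
    let prev = PrevGraph { nodes: vec![Node(0), Node(1)] };
    let expected = Node(1);
    // With the old side-effecting `index_to_node`, a line like this would
    // have registered a `DefPathHash` in debug builds only.
    debug_assert_eq!(prev.index_to_node(1), expected);
    println!("{:?}", prev.index_to_node(1));
}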