diff --git a/src/librustc_data_structures/obligation_forest/mod.rs b/src/librustc_data_structures/obligation_forest/mod.rs
index 974d9dcfae408..ff098b48a0b6d 100644
--- a/src/librustc_data_structures/obligation_forest/mod.rs
+++ b/src/librustc_data_structures/obligation_forest/mod.rs
@@ -74,11 +74,12 @@ use crate::fx::{FxHashMap, FxHashSet};
-use std::cell::{Cell, RefCell};
+use std::cell::Cell;
 use std::collections::hash_map::Entry;
 use std::fmt::Debug;
 use std::hash;
 use std::marker::PhantomData;
+use std::mem;
 
 mod graphviz;
@@ -127,26 +128,27 @@ struct ObligationTreeId(usize);
 type ObligationTreeIdGenerator =
     ::std::iter::Map<::std::ops::RangeFrom<usize>, fn(usize) -> ObligationTreeId>;
 
+/// `usize` indices are used here and throughout this module, rather than
+/// `rustc_index::newtype_index!` indices, because this code is hot enough
+/// that the `u32`-to-`usize` conversions that would be required are
+/// significant, and space considerations are not important.
+type NodeIndex = usize;
+
 pub struct ObligationForest<O: ForestObligation> {
     /// The list of obligations. In between calls to `process_obligations`,
     /// this list only contains nodes in the `Pending` or `Waiting` state.
-    ///
-    /// `usize` indices are used here and throughout this module, rather than
-    /// `rustc_index::newtype_index!` indices, because this code is hot enough
-    /// that the `u32`-to-`usize` conversions that would be required are
-    /// significant, and space considerations are not important.
     nodes: Vec<Node<O>>,
 
-    /// A cache of predicates that have been successfully completed.
-    done_cache: FxHashSet<O::Predicate>,
+    /// Nodes must be processed in the order they were added; this list records that order.
+    process_order: Vec<NodeIndex>,
+
+    /// Nodes that have been removed and are ready to be reused.
+    dead_nodes: Vec<NodeIndex>,
 
     /// A cache of the nodes in `nodes`, indexed by predicate. Unfortunately,
     /// its contents are not guaranteed to match those of `nodes`. See the
     /// comments in `process_obligation` for details.
-    active_cache: FxHashMap<O::Predicate, usize>,
-
-    /// A vector reused in compress(), to avoid allocating new vectors.
-    node_rewrites: RefCell<Vec<usize>>,
+    active_cache: FxHashMap<O::Predicate, Option<NodeIndex>>,
 
     obligation_tree_id_generator: ObligationTreeIdGenerator,
 
@@ -161,18 +163,20 @@ pub struct ObligationForest<O: ForestObligation> {
 }
 
 #[derive(Debug)]
-struct Node<O> {
+struct Node<O: ForestObligation> {
     obligation: O,
     state: Cell<NodeState>,
 
+    alternative_predicates: Vec<O::Predicate>,
+
     /// Obligations that depend on this obligation for their completion. They
     /// must all be in a non-pending state.
-    dependents: Vec<usize>,
+    dependents: Vec<NodeIndex>,
 
     /// If true, dependents[0] points to a "parent" node, which requires
     /// special treatment upon error but is otherwise treated the same.
     /// (It would be more idiomatic to store the parent node in a separate
-    /// `Option<usize>` field, but that slows down the common case of
+    /// `Option<NodeIndex>` field, but that slows down the common case of
     /// iterating over the parent and other descendants together.)
     has_parent: bool,
 
@@ -180,16 +184,41 @@ struct Node<O> {
     obligation_tree_id: ObligationTreeId,
 }
 
-impl<O> Node<O> {
-    fn new(parent: Option<usize>, obligation: O, obligation_tree_id: ObligationTreeId) -> Node<O> {
+impl<O> Node<O>
+where
+    O: ForestObligation,
+{
+    fn new(
+        parent: Option<NodeIndex>,
+        obligation: O,
+        obligation_tree_id: ObligationTreeId,
+    ) -> Node<O> {
         Node {
             obligation,
             state: Cell::new(NodeState::Pending),
+            alternative_predicates: vec![],
            dependents: if let Some(parent_index) = parent { vec![parent_index] } else { vec![] },
             has_parent: parent.is_some(),
             obligation_tree_id,
         }
     }
+
+    fn init(
+        &mut self,
+        parent: Option<NodeIndex>,
+        obligation: O,
+        obligation_tree_id: ObligationTreeId,
+    ) {
+        self.obligation = obligation;
+        self.state.set(NodeState::Pending);
+        self.alternative_predicates.clear();
+        self.dependents.clear();
+        if let Some(parent_index) = parent {
+            self.dependents.push(parent_index);
+        }
+        self.has_parent = parent.is_some();
+        self.obligation_tree_id = obligation_tree_id;
+    }
 }
 
 /// The state of one node in some tree within the forest. This represents the
@@ -283,9 +312,9 @@ impl<O: ForestObligation> ObligationForest<O> {
     pub fn new() -> ObligationForest<O> {
         ObligationForest {
             nodes: vec![],
-            done_cache: Default::default(),
+            process_order: vec![],
+            dead_nodes: vec![],
             active_cache: Default::default(),
-            node_rewrites: RefCell::new(vec![]),
             obligation_tree_id_generator: (0..).map(ObligationTreeId),
             error_cache: Default::default(),
         }
@@ -304,14 +333,18 @@ impl<O: ForestObligation> ObligationForest<O> {
     }
 
     // Returns Err(()) if we already know this obligation failed.
-    fn register_obligation_at(&mut self, obligation: O, parent: Option<usize>) -> Result<(), ()> {
-        if self.done_cache.contains(obligation.as_predicate()) {
-            return Ok(());
-        }
-
+    fn register_obligation_at(
+        &mut self,
+        obligation: O,
+        parent: Option<NodeIndex>,
+    ) -> Result<(), ()> {
         match self.active_cache.entry(obligation.as_predicate().clone()) {
             Entry::Occupied(o) => {
-                let node = &mut self.nodes[*o.get()];
+                let index = match o.get() {
+                    Some(index) => *index,
+                    None => return Ok(()), // Done!
+                };
+                let node = &mut self.nodes[index];
                 if let Some(parent_index) = parent {
                     // If the node is already in `active_cache`, it has already
                     // had its chance to be marked with a parent. So if it's
@@ -339,9 +372,17 @@ impl<O: ForestObligation> ObligationForest<O> {
                 if already_failed {
                     Err(())
                 } else {
-                    let new_index = self.nodes.len();
-                    v.insert(new_index);
-                    self.nodes.push(Node::new(parent, obligation, obligation_tree_id));
+                    let new_index = if let Some(new_index) = self.dead_nodes.pop() {
+                        let node = &mut self.nodes[new_index];
+                        node.init(parent, obligation, obligation_tree_id);
+                        new_index
+                    } else {
+                        let new_index = self.nodes.len();
+                        self.nodes.push(Node::new(parent, obligation, obligation_tree_id));
+                        new_index
+                    };
+                    self.process_order.push(new_index);
+                    v.insert(Some(new_index));
                     Ok(())
                 }
             }
@@ -351,11 +392,15 @@ impl<O: ForestObligation> ObligationForest<O> {
     /// Converts all remaining obligations to the given error.
     pub fn to_errors<E: Clone>(&mut self, error: E) -> Vec<Error<O, E>> {
         let errors = self
-            .nodes
+            .process_order
             .iter()
-            .enumerate()
-            .filter(|(_index, node)| node.state.get() == NodeState::Pending)
-            .map(|(index, _node)| Error { error: error.clone(), backtrace: self.error_at(index) })
+            .filter_map(|&index| {
+                if self.nodes[index].state.get() == NodeState::Pending {
+                    Some(Error { error: error.clone(), backtrace: self.error_at(index) })
+                } else {
+                    None
+                }
+            })
             .collect();
 
         let successful_obligations = self.compress(DoCompleted::Yes);
@@ -368,14 +413,15 @@ impl<O: ForestObligation> ObligationForest<O> {
     where
         F: Fn(&O) -> P,
     {
-        self.nodes
+        self.process_order
             .iter()
+            .map(|&index| &self.nodes[index])
             .filter(|node| node.state.get() == NodeState::Pending)
             .map(|node| f(&node.obligation))
             .collect()
     }
 
-    fn insert_into_error_cache(&mut self, index: usize) {
+    fn insert_into_error_cache(&mut self, index: NodeIndex) {
         let node = &self.nodes[index];
         self.error_cache
             .entry(node.obligation_tree_id)
@@ -403,24 +449,31 @@ impl<O: ForestObligation> ObligationForest<O> {
         //
         // We can't use an iterator for the loop because `self.nodes` is
        // appended to and the borrow checker would complain. We also can't use
-        // `for index in 0..self.nodes.len() { ... }` because the range would
+        // `for i in 0..self.nodes.len() { ... }` because the range would
         // be computed with the initial length, and we would miss the appended
         // nodes. Therefore we use a `while` loop.
-        let mut index = 0;
-        while index < self.nodes.len() {
+        let mut i = 0;
+        while let Some(&index) = self.process_order.get(i) {
             let node = &mut self.nodes[index];
 
+            if node.state.get() != NodeState::Pending {
+                i += 1;
+                continue;
+            }
+
             // `processor.process_obligation` can modify the predicate within
             // `node.obligation`, and that predicate is the key used for
             // `self.active_cache`. This means that `self.active_cache` can get
             // out of sync with `nodes`. It's not very common, but it does
-            // happen, and code in `compress` has to allow for it.
-            if node.state.get() != NodeState::Pending {
-                index += 1;
-                continue;
+            // happen, so we need to update `active_cache` when it does.
+            let before = node.obligation.as_predicate().clone();
+            let result = processor.process_obligation(&mut node.obligation);
+            let after = node.obligation.as_predicate();
+            if before != *after {
+                node.alternative_predicates.push(before);
             }
 
-            match processor.process_obligation(&mut node.obligation) {
+            match result {
                 ProcessResult::Unchanged => {
                     // No change in state.
                 }
@@ -443,7 +496,7 @@ impl<O: ForestObligation> ObligationForest<O> {
                        errors.push(Error { error: err, backtrace: self.error_at(index) });
                     }
                 }
-            index += 1;
+            i += 1;
         }
 
         if stalled {
@@ -465,8 +518,8 @@ impl<O: ForestObligation> ObligationForest<O> {
 
     /// Returns a vector of obligations for `p` and all of its
     /// ancestors, putting them into the error state in the process.
-    fn error_at(&self, mut index: usize) -> Vec<O> {
-        let mut error_stack: Vec<usize> = vec![];
+    fn error_at(&self, mut index: NodeIndex) -> Vec<O> {
+        let mut error_stack: Vec<NodeIndex> = vec![];
         let mut trace = vec![];
 
         loop {
@@ -500,7 +553,8 @@ impl<O: ForestObligation> ObligationForest<O> {
     /// pending node.
     fn mark_successes(&self) {
         // Convert all `Waiting` nodes to `Success`.
-        for node in &self.nodes {
+        for &index in &self.process_order {
+            let node = &self.nodes[index];
             if node.state.get() == NodeState::Waiting {
                 node.state.set(NodeState::Success);
             }
@@ -508,7 +562,8 @@ impl<O: ForestObligation> ObligationForest<O> {
 
         // Convert `Success` nodes that depend on a pending node back to
         // `Waiting`.
-        for node in &self.nodes {
+        for &index in &self.process_order {
+            let node = &self.nodes[index];
             if node.state.get() == NodeState::Pending {
                 // This call site is hot.
                 self.inlined_mark_dependents_as_waiting(node);
@@ -546,7 +601,8 @@ impl<O: ForestObligation> ObligationForest<O> {
     {
         let mut stack = vec![];
 
-        for (index, node) in self.nodes.iter().enumerate() {
+        for &index in &self.process_order {
+            let node = &self.nodes[index];
             // For some benchmarks this state test is extremely hot. It's a win
             // to handle the no-op cases immediately to avoid the cost of the
             // function call.
@@ -558,8 +614,12 @@ impl<O: ForestObligation> ObligationForest<O> {
         debug_assert!(stack.is_empty());
     }
 
-    fn find_cycles_from_node<P>(&self, stack: &mut Vec<usize>, processor: &mut P, index: usize)
-    where
+    fn find_cycles_from_node<P>(
+        &self,
+        stack: &mut Vec<NodeIndex>,
+        processor: &mut P,
+        index: NodeIndex,
+    ) where
         P: ObligationProcessor,
     {
         let node = &self.nodes[index];
@@ -576,7 +636,7 @@ impl<O: ForestObligation> ObligationForest<O> {
             Some(rpos) => {
                 // Cycle detected.
                 processor.process_backedge(
-                    stack[rpos..].iter().map(GetObligation(&self.nodes)),
+                    stack[rpos..].iter().map(|i| &self.nodes[*i].obligation),
                     PhantomData,
                 );
             }
@@ -589,120 +649,63 @@ impl<O: ForestObligation> ObligationForest<O> {
     /// must be run beforehand to remove any cycles on `Success` nodes.
     #[inline(never)]
     fn compress(&mut self, do_completed: DoCompleted) -> Option<Vec<O>> {
-        let orig_nodes_len = self.nodes.len();
-        let mut node_rewrites: Vec<_> = self.node_rewrites.replace(vec![]);
-        debug_assert!(node_rewrites.is_empty());
-        node_rewrites.extend(0..orig_nodes_len);
-        let mut dead_nodes = 0;
         let mut removed_done_obligations: Vec<O> = vec![];
 
-        // Move removable nodes to the end, preserving the order of the
-        // remaining nodes.
-        //
-        // LOOP INVARIANT:
-        //     self.nodes[0..index - dead_nodes] are the first remaining nodes
-        //     self.nodes[index - dead_nodes..index] are all dead
-        //     self.nodes[index..] are unchanged
-        for index in 0..orig_nodes_len {
+        let is_alive = |node: &Node<O>| match node.state.get() {
+            NodeState::Pending | NodeState::Waiting | NodeState::Success => true,
+            NodeState::Done | NodeState::Error => false,
+        };
+
+        let mut process_order = mem::take(&mut self.process_order);
+        process_order.retain(|&index| {
             let node = &self.nodes[index];
+            if node.has_parent && !is_alive(&self.nodes[node.dependents[0]]) {
+                self.nodes[index].has_parent = false;
+            }
+            let node = &mut self.nodes[index];
+            let mut dependents = mem::take(&mut node.dependents);
+            dependents.retain(|&dep_index| is_alive(&self.nodes[dep_index]));
+            let node = &mut self.nodes[index];
+            node.dependents = dependents;
+
             match node.state.get() {
-                NodeState::Pending | NodeState::Waiting => {
-                    if dead_nodes > 0 {
-                        self.nodes.swap(index, index - dead_nodes);
-                        node_rewrites[index] -= dead_nodes;
-                    }
-                }
+                NodeState::Pending | NodeState::Waiting => true,
                 NodeState::Done => {
-                    // This lookup can fail because the contents of
-                    // `self.active_cache` are not guaranteed to match those of
-                    // `self.nodes`. See the comment in `process_obligation`
-                    // for more details.
-                    if let Some((predicate, _)) =
-                        self.active_cache.remove_entry(node.obligation.as_predicate())
-                    {
-                        self.done_cache.insert(predicate);
-                    } else {
-                        self.done_cache.insert(node.obligation.as_predicate().clone());
+                    // Mark as done
+                    if let Some(opt) = self.active_cache.get_mut(node.obligation.as_predicate()) {
+                        *opt = None;
                     }
+                    for alt in &node.alternative_predicates {
+                        if let Some(opt) = self.active_cache.get_mut(alt) {
+                            *opt = None
+                        }
+                    }
+
                     if do_completed == DoCompleted::Yes {
                         // Extract the success stories.
                         removed_done_obligations.push(node.obligation.clone());
                     }
-                    node_rewrites[index] = orig_nodes_len;
-                    dead_nodes += 1;
+
+                    self.dead_nodes.push(index);
+                    false
                 }
                 NodeState::Error => {
                    // We *intentionally* remove the node from the cache at this point. Otherwise
                     // tests must come up with a different type on every type error they
                     // check against.
                     self.active_cache.remove(node.obligation.as_predicate());
+                    for alt in &node.alternative_predicates {
+                        self.active_cache.remove(alt);
+                    }
                     self.insert_into_error_cache(index);
-                    node_rewrites[index] = orig_nodes_len;
-                    dead_nodes += 1;
+                    self.dead_nodes.push(index);
+                    false
                 }
                 NodeState::Success => unreachable!(),
             }
-        }
-
-        if dead_nodes > 0 {
-            // Remove the dead nodes and rewrite indices.
-            self.nodes.truncate(orig_nodes_len - dead_nodes);
-            self.apply_rewrites(&node_rewrites);
-        }
-
-        node_rewrites.truncate(0);
-        self.node_rewrites.replace(node_rewrites);
-
-        if do_completed == DoCompleted::Yes { Some(removed_done_obligations) } else { None }
-    }
-
-    fn apply_rewrites(&mut self, node_rewrites: &[usize]) {
-        let orig_nodes_len = node_rewrites.len();
-
-        for node in &mut self.nodes {
-            let mut i = 0;
-            while i < node.dependents.len() {
-                let new_index = node_rewrites[node.dependents[i]];
-                if new_index >= orig_nodes_len {
-                    node.dependents.swap_remove(i);
-                    if i == 0 && node.has_parent {
-                        // We just removed the parent.
-                        node.has_parent = false;
-                    }
-                } else {
-                    node.dependents[i] = new_index;
-                    i += 1;
-                }
-            }
-        }
-
-        // This updating of `self.active_cache` is necessary because the
-        // removal of nodes within `compress` can fail. See above.
-        self.active_cache.retain(|_predicate, index| {
-            let new_index = node_rewrites[*index];
-            if new_index >= orig_nodes_len {
-                false
-            } else {
-                *index = new_index;
-                true
-            }
         });
-    }
-}
-
-// I need a Clone closure.
-#[derive(Clone)]
-struct GetObligation<'a, O>(&'a [Node<O>]);
+        self.process_order = process_order;
 
-impl<'a, 'b, O> FnOnce<(&'b usize,)> for GetObligation<'a, O> {
-    type Output = &'a O;
-    extern "rust-call" fn call_once(self, args: (&'b usize,)) -> &'a O {
-        &self.0[*args.0].obligation
-    }
-}
-
-impl<'a, 'b, O> FnMut<(&'b usize,)> for GetObligation<'a, O> {
-    extern "rust-call" fn call_mut(&mut self, args: (&'b usize,)) -> &'a O {
-        &self.0[*args.0].obligation
+        if do_completed == DoCompleted::Yes { Some(removed_done_obligations) } else { None }
     }
 }
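The core idea in the diff is that `active_cache` now maps each predicate to an `Option<NodeIndex>`: `Some(index)` means the predicate has a live node, while `None` means it has already been completed, which is the role the removed `done_cache` used to play. In addition, slots of removed nodes are recycled through the `dead_nodes` free list rather than compacted away, with `process_order` preserving insertion order. Below is a minimal, self-contained sketch of that caching and slot-reuse scheme. The names (`SimpleForest`, `register`, `complete`) and the `String` payload are invented for illustration only; the real `ObligationForest` additionally tracks node states, parents, and `alternative_predicates`, and only recycles slots during `compress`, all of which the sketch omits.

```rust
// Illustrative sketch only; not the rustc types.
use std::collections::HashMap;

type NodeIndex = usize;

#[derive(Default)]
struct SimpleForest {
    nodes: Vec<String>,            // obligation payloads, indexed by NodeIndex
    process_order: Vec<NodeIndex>, // insertion order of live nodes
    dead_nodes: Vec<NodeIndex>,    // free list of reusable slots
    cache: HashMap<String, Option<NodeIndex>>, // Some(i) = active, None = done
}

impl SimpleForest {
    /// Registers a predicate. Returns its node index, or `None` if the
    /// predicate was already completed (so nothing needs to be done).
    fn register(&mut self, predicate: &str) -> Option<NodeIndex> {
        match self.cache.get(predicate) {
            Some(None) => return None,                 // already completed
            Some(&Some(index)) => return Some(index),  // already active
            None => {}
        }
        // Reuse a dead slot if one is available, otherwise grow `nodes`.
        let index = if let Some(index) = self.dead_nodes.pop() {
            self.nodes[index] = predicate.to_string();
            index
        } else {
            self.nodes.push(predicate.to_string());
            self.nodes.len() - 1
        };
        self.process_order.push(index);
        self.cache.insert(predicate.to_string(), Some(index));
        Some(index)
    }

    /// Marks a predicate as done: the cache entry is kept as `None` so the
    /// predicate is never re-registered, but the node slot itself is recycled.
    fn complete(&mut self, predicate: &str) {
        if let Some(entry) = self.cache.get_mut(predicate) {
            if let Some(index) = entry.take() {
                self.process_order.retain(|&i| i != index);
                self.dead_nodes.push(index);
            }
        }
    }
}

fn main() {
    let mut forest = SimpleForest::default();
    let a = forest.register("A").unwrap();
    forest.complete("A");
    assert_eq!(forest.register("A"), None); // done: not re-added
    let b = forest.register("B").unwrap();
    assert_eq!(b, a); // the slot freed by "A" was reused for "B"
    println!("reused slot {} for B", b);
}
```

Keeping completed predicates in the map as `None` preserves the old `done_cache` guarantee (a finished predicate is never re-registered) with a single lookup instead of two, while the free list keeps `nodes` from growing monotonically across rounds of processing.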