1- /// The search graph is responsible for caching and cycle detection in the trait
2- /// solver. Making sure that caching doesn't result in soundness bugs or unstable
3- /// query results is very challenging and makes this one of the most-involved
4- /// self-contained components of the compiler.
5- ///
6- /// We added fuzzing support to test its correctness. The fuzzers used to verify
7- /// the current implementation can be found in https://github.com/lcnr/search_graph_fuzz.
8- ///
9- /// This is just a quick overview of the general design, please check out the relevant
10- /// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html) for
11- /// more details. Caching is split between a global cache and the per-cycle `provisional_cache`.
12- /// The global cache has to be completely unobservable, while the per-cycle cache may impact
13- /// behavior as long as the resulting behavior is still correct.
1+ //! The search graph is responsible for caching and cycle detection in the trait
2+ //! solver. Making sure that caching doesn't result in soundness bugs or unstable
3+ //! query results is very challenging and makes this one of the most-involved
4+ //! self-contained components of the compiler.
5+ //!
6+ //! We added fuzzing support to test its correctness. The fuzzers used to verify
7+ //! the current implementation can be found in <https://github.com/lcnr/search_graph_fuzz>.
8+ //!
9+ //! This is just a quick overview of the general design, please check out the relevant
10+ //! [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html) for
11+ //! more details. Caching is split between a global cache and the per-cycle `provisional_cache`.
12+ //! The global cache has to be completely unobservable, while the per-cycle cache may impact
13+ //! behavior as long as the resulting behavior is still correct.
1414use std:: cmp:: Ordering ;
1515use std:: collections:: BTreeMap ;
1616use std:: collections:: hash_map:: Entry ;
@@ -381,18 +381,16 @@ impl PathsToNested {
381381/// The nested goals of each stack entry and the path from the
382382/// stack entry to that nested goal.
383383///
384+ /// They are used when checking whether reevaluating a global cache
385+ /// would encounter a cycle or use a provisional cache entry given the
386+ /// current search graph state. We need to disable the global cache
387+ /// in this case as it could otherwise result in behavioral differences.
388+ /// Cycles can impact behavior. The cycle ABA may have different final
389+ /// results from the cycle BAB depending on the cycle root.
390+ ///
384391/// We only start tracking nested goals once we've either encountered
385392/// overflow or a solver cycle. This is a performance optimization to
386393/// avoid tracking nested goals on the happy path.
387- ///
388- /// We use nested goals for two reasons:
389- /// - when rebasing provisional cache entries
390- /// - when checking whether we have to ignore a global cache entry as reevaluating
391- /// it would encounter a cycle or use a provisional cache entry.
392- ///
393- /// We need to disable the global cache if using it would hide a cycle, as
394- /// cycles can impact behavior. The cycle ABA may have different final
395- /// results from a the cycle BAB depending on the cycle root.
396394#[ derive_where( Debug , Default , Clone ; X : Cx ) ]
397395struct NestedGoals < X : Cx > {
398396 nested_goals : HashMap < X :: Input , PathsToNested > ,
@@ -450,6 +448,43 @@ struct ProvisionalCacheEntry<X: Cx> {
450448 result : X :: Result ,
451449}
452450
451+ /// The final result of evaluating a goal.
452+ ///
453+ /// We reset `encountered_overflow` when reevaluating a goal,
454+ /// but need to track whether we've hit the recursion limit at
455+ /// all for correctness.
456+ ///
457+ /// We've previously simply returned the final `StackEntry` but this
458+ /// made it easy to accidentally drop information from the previous
459+ /// evaluation.
460+ #[ derive_where( Debug ; X : Cx ) ]
461+ struct EvaluationResult < X : Cx > {
462+ encountered_overflow : bool ,
463+ required_depth : usize ,
464+ heads : CycleHeads ,
465+ nested_goals : NestedGoals < X > ,
466+ result : X :: Result ,
467+ }
468+
469+ impl < X : Cx > EvaluationResult < X > {
470+ fn finalize (
471+ final_entry : StackEntry < X > ,
472+ encountered_overflow : bool ,
473+ result : X :: Result ,
474+ ) -> EvaluationResult < X > {
475+ EvaluationResult {
476+ encountered_overflow,
477+ // Unlike `encountered_overflow`, we share `heads`, `required_depth`,
478+ // and `nested_goals` between evaluations.
479+ required_depth : final_entry. required_depth ,
480+ heads : final_entry. heads ,
481+ nested_goals : final_entry. nested_goals ,
482+ // We only care about the final result.
483+ result,
484+ }
485+ }
486+ }
487+
453488pub struct SearchGraph < D : Delegate < Cx = X > , X : Cx = <D as Delegate >:: Cx > {
454489 root_depth : AvailableDepth ,
455490 /// The stack of goals currently being computed.
@@ -562,7 +597,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
562597 input : X :: Input ,
563598 step_kind_from_parent : PathKind ,
564599 inspect : & mut D :: ProofTreeBuilder ,
565- mut evaluate_goal : impl FnMut ( & mut Self , & mut D :: ProofTreeBuilder ) -> X :: Result ,
600+ evaluate_goal : impl Fn ( & mut Self , X , X :: Input , & mut D :: ProofTreeBuilder ) -> X :: Result + Copy ,
566601 ) -> X :: Result {
567602 let Some ( available_depth) =
568603 AvailableDepth :: allowed_depth_for_nested :: < D > ( self . root_depth , & self . stack )
@@ -616,12 +651,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
616651 input,
617652 step_kind_from_parent,
618653 available_depth,
654+ provisional_result : None ,
619655 required_depth : 0 ,
620656 heads : Default :: default ( ) ,
621657 encountered_overflow : false ,
622658 has_been_used : None ,
623659 nested_goals : Default :: default ( ) ,
624- provisional_result : None ,
625660 } ) ;
626661
627662 // This is for global caching, so we properly track query dependencies.
@@ -630,35 +665,41 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
630665 // not tracked by the cache key and from outside of this anon task, it
631666 // must not be added to the global cache. Notably, this is the case for
632667 // trait solver cycles participants.
633- let ( ( final_entry, result) , dep_node) = cx. with_cached_task ( || {
634- self . evaluate_goal_in_task ( cx, input, inspect, & mut evaluate_goal)
635- } ) ;
668+ let ( evaluation_result, dep_node) =
669+ cx. with_cached_task ( || self . evaluate_goal_in_task ( cx, input, inspect, evaluate_goal) ) ;
636670
637671 // We've finished computing the goal and have popped it from the stack,
638672 // lazily update its parent goal.
639673 Self :: update_parent_goal (
640674 & mut self . stack ,
641- final_entry . step_kind_from_parent ,
642- final_entry . required_depth ,
643- & final_entry . heads ,
644- final_entry . encountered_overflow ,
645- UpdateParentGoalCtxt :: Ordinary ( & final_entry . nested_goals ) ,
675+ step_kind_from_parent,
676+ evaluation_result . required_depth ,
677+ & evaluation_result . heads ,
678+ evaluation_result . encountered_overflow ,
679+ UpdateParentGoalCtxt :: Ordinary ( & evaluation_result . nested_goals ) ,
646680 ) ;
681+ let result = evaluation_result. result ;
647682
648683 // We're now done with this goal. We only add the root of cycles to the global cache.
649684 // In case this goal is involved in a larger cycle add it to the provisional cache.
650- if final_entry . heads . is_empty ( ) {
685+ if evaluation_result . heads . is_empty ( ) {
651686 if let Some ( ( _scope, expected) ) = validate_cache {
652687 // Do not try to move a goal into the cache again if we're testing
653688 // the global cache.
654- assert_eq ! ( result, expected, "input={input:?}" ) ;
689+ assert_eq ! ( evaluation_result . result, expected, "input={input:?}" ) ;
655690 } else if D :: inspect_is_noop ( inspect) {
656- self . insert_global_cache ( cx, final_entry , result , dep_node)
691+ self . insert_global_cache ( cx, input , evaluation_result , dep_node)
657692 }
658693 } else if D :: ENABLE_PROVISIONAL_CACHE {
659694 debug_assert ! ( validate_cache. is_none( ) , "unexpected non-root: {input:?}" ) ;
660695 let entry = self . provisional_cache . entry ( input) . or_default ( ) ;
661- let StackEntry { heads, encountered_overflow, .. } = final_entry;
696+ let EvaluationResult {
697+ encountered_overflow,
698+ required_depth : _,
699+ heads,
700+ nested_goals : _,
701+ result,
702+ } = evaluation_result;
662703 let path_from_head = Self :: cycle_path_kind (
663704 & self . stack ,
664705 step_kind_from_parent,
@@ -1023,19 +1064,25 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
10231064 cx : X ,
10241065 input : X :: Input ,
10251066 inspect : & mut D :: ProofTreeBuilder ,
1026- mut evaluate_goal : impl FnMut ( & mut Self , & mut D :: ProofTreeBuilder ) -> X :: Result ,
1027- ) -> ( StackEntry < X > , X :: Result ) {
1067+ evaluate_goal : impl Fn ( & mut Self , X , X :: Input , & mut D :: ProofTreeBuilder ) -> X :: Result + Copy ,
1068+ ) -> EvaluationResult < X > {
1069+ // We reset `encountered_overflow` each time we rerun this goal
1070+ // but need to make sure we correctly propagate it to the global
1071+ // cache even if only some of the evaluations actually reach the
1072+ // recursion limit.
1073+ let mut encountered_overflow = false ;
10281074 let mut i = 0 ;
10291075 loop {
1030- let result = evaluate_goal ( self , inspect) ;
1076+ let result = evaluate_goal ( self , cx , input , inspect) ;
10311077 let stack_entry = self . stack . pop ( ) ;
1078+ encountered_overflow |= stack_entry. encountered_overflow ;
10321079 debug_assert_eq ! ( stack_entry. input, input) ;
10331080
10341081 // If the current goal is not the root of a cycle, we are done.
10351082 //
10361083 // There are no provisional cache entries which depend on this goal.
10371084 let Some ( usage_kind) = stack_entry. has_been_used else {
1038- return ( stack_entry, result) ;
1085+ return EvaluationResult :: finalize ( stack_entry, encountered_overflow , result) ;
10391086 } ;
10401087
10411088 // If it is a cycle head, we have to keep trying to prove it until
@@ -1051,7 +1098,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
10511098 // final result is equal to the initial response for that case.
10521099 if self . reached_fixpoint ( cx, & stack_entry, usage_kind, result) {
10531100 self . rebase_provisional_cache_entries ( & stack_entry, |_, result| result) ;
1054- return ( stack_entry, result) ;
1101+ return EvaluationResult :: finalize ( stack_entry, encountered_overflow , result) ;
10551102 }
10561103
10571104 // If computing this goal results in ambiguity with no constraints,
@@ -1070,7 +1117,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
10701117 self . rebase_provisional_cache_entries ( & stack_entry, |input, _| {
10711118 D :: propagate_ambiguity ( cx, input, result)
10721119 } ) ;
1073- return ( stack_entry, result) ;
1120+ return EvaluationResult :: finalize ( stack_entry, encountered_overflow , result) ;
10741121 } ;
10751122
10761123 // If we've reached the fixpoint step limit, we bail with overflow and taint all
@@ -1082,7 +1129,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
10821129 self . rebase_provisional_cache_entries ( & stack_entry, |input, _| {
10831130 D :: on_fixpoint_overflow ( cx, input)
10841131 } ) ;
1085- return ( stack_entry, result) ;
1132+ return EvaluationResult :: finalize ( stack_entry, encountered_overflow , result) ;
10861133 }
10871134
10881135 // Clear all provisional cache entries which depend on a previous provisional
@@ -1091,9 +1138,22 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
10911138
10921139 debug ! ( ?result, "fixpoint changed provisional results" ) ;
10931140 self . stack . push ( StackEntry {
1094- has_been_used : None ,
1141+ input,
1142+ step_kind_from_parent : stack_entry. step_kind_from_parent ,
1143+ available_depth : stack_entry. available_depth ,
10951144 provisional_result : Some ( result) ,
1096- ..stack_entry
1145+ // We can keep these goals from previous iterations as they are only
1146+ // ever read after finalizing this evaluation.
1147+ required_depth : stack_entry. required_depth ,
1148+ heads : stack_entry. heads ,
1149+ nested_goals : stack_entry. nested_goals ,
1150+ // We reset these two fields when rerunning this goal. We could
1151+ // keep `encountered_overflow` as it's only used as a performance
1152+ // optimization. However, given that the proof tree will likely look
1153+ // similar to the previous iterations when reevaluating, it's better
1154+ // for caching if the reevaluation also starts out with `false`.
1155+ encountered_overflow : false ,
1156+ has_been_used : None ,
10971157 } ) ;
10981158 }
10991159 }
@@ -1109,21 +1169,11 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
11091169 fn insert_global_cache (
11101170 & mut self ,
11111171 cx : X ,
1112- final_entry : StackEntry < X > ,
1113- result : X :: Result ,
1172+ input : X :: Input ,
1173+ evaluation_result : EvaluationResult < X > ,
11141174 dep_node : X :: DepNodeIndex ,
11151175 ) {
1116- debug ! ( ?final_entry, ?result, "insert global cache" ) ;
1117- cx. with_global_cache ( |cache| {
1118- cache. insert (
1119- cx,
1120- final_entry. input ,
1121- result,
1122- dep_node,
1123- final_entry. required_depth ,
1124- final_entry. encountered_overflow ,
1125- final_entry. nested_goals ,
1126- )
1127- } )
1176+ debug ! ( ?evaluation_result, "insert global cache" ) ;
1177+ cx. with_global_cache ( |cache| cache. insert ( cx, input, evaluation_result, dep_node) )
11281178 }
11291179}
0 commit comments