Diffstat (limited to 'compiler/rustc_trait_selection/src/solve/search_graph/mod.rs')
-rw-r--r--  compiler/rustc_trait_selection/src/solve/search_graph/mod.rs | 133
1 file changed, 101 insertions(+), 32 deletions(-)
diff --git a/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs b/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs
index 0030e9aa3..c7eb8de65 100644
--- a/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs
+++ b/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs
@@ -3,11 +3,12 @@ mod overflow;
use self::cache::ProvisionalEntry;
use super::{CanonicalGoal, Certainty, MaybeCause, QueryResult};
+pub(super) use crate::solve::search_graph::overflow::OverflowHandler;
use cache::ProvisionalCache;
use overflow::OverflowData;
use rustc_index::vec::IndexVec;
use rustc_middle::ty::TyCtxt;
-use std::collections::hash_map::Entry;
+use std::{collections::hash_map::Entry, mem};
rustc_index::newtype_index! {
pub struct StackDepth {}
@@ -42,10 +43,29 @@ impl<'tcx> SearchGraph<'tcx> {
&& !self.overflow_data.did_overflow()
}
+ /// Whether we're currently in a cycle. This should only be used
+ /// for debug assertions.
+ pub(super) fn in_cycle(&self) -> bool {
+ if let Some(stack_depth) = self.stack.last() {
+ // Either the current goal on the stack is the root of a cycle...
+ if self.stack[stack_depth].has_been_used {
+ return true;
+ }
+
+ // ...or it depends on a goal with a lower depth.
+ let current_goal = self.stack[stack_depth].goal;
+ let entry_index = self.provisional_cache.lookup_table[&current_goal];
+ self.provisional_cache.entries[entry_index].depth != stack_depth
+ } else {
+ false
+ }
+ }
+
/// Tries putting the new goal on the stack, returning an error if it is already cached.
///
/// This correctly updates the provisional cache if there is a cycle.
- pub(super) fn try_push_stack(
+ #[instrument(level = "debug", skip(self, tcx), ret)]
+ fn try_push_stack(
&mut self,
tcx: TyCtxt<'tcx>,
goal: CanonicalGoal<'tcx>,
@@ -79,8 +99,10 @@ impl<'tcx> SearchGraph<'tcx> {
Entry::Occupied(entry_index) => {
let entry_index = *entry_index.get();
- cache.add_dependency_of_leaf_on(entry_index);
let stack_depth = cache.depth(entry_index);
+ debug!("encountered cycle with depth {stack_depth:?}");
+
+ cache.add_dependency_of_leaf_on(entry_index);
self.stack[stack_depth].has_been_used = true;
// NOTE: The goals on the stack aren't the only goals involved in this cycle.
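This hunk is where a cycle is detected: when the goal being pushed already has a provisional cache entry, it is not evaluated again; instead the stack entry it depends on is flagged via `has_been_used`. As a rough, self-contained illustration of that mechanism (the types below are hypothetical simplifications, not rustc's actual `SearchGraph`):

    use std::collections::HashMap;

    type Goal = &'static str;

    struct StackElem {
        goal: Goal,
        // Set once a nested goal depends on this entry, i.e. this
        // goal is the root of a cycle and may need reevaluation.
        has_been_used: bool,
    }

    struct SearchGraph {
        stack: Vec<StackElem>,
        // Maps each in-progress goal to its depth on the stack.
        lookup_table: HashMap<Goal, usize>,
    }

    impl SearchGraph {
        // Push `goal`, or detect a cycle if it is already in progress.
        fn try_push(&mut self, goal: Goal) -> Result<(), usize> {
            if let Some(&depth) = self.lookup_table.get(goal) {
                // `goal` is already on the stack: flag the cycle root
                // so the solver knows to iterate it to a fixpoint.
                self.stack[depth].has_been_used = true;
                return Err(depth);
            }
            self.lookup_table.insert(goal, self.stack.len());
            self.stack.push(StackElem { goal, has_been_used: false });
            Ok(())
        }
    }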
@@ -117,25 +139,29 @@ impl<'tcx> SearchGraph<'tcx> {
/// updated the provisional cache and we have to recompute the current goal.
///
/// FIXME: Refer to the rustc-dev-guide entry once it exists.
- pub(super) fn try_finalize_goal(
+ #[instrument(level = "debug", skip(self, tcx, actual_goal), ret)]
+ fn try_finalize_goal(
&mut self,
tcx: TyCtxt<'tcx>,
actual_goal: CanonicalGoal<'tcx>,
response: QueryResult<'tcx>,
) -> bool {
- let StackElem { goal, has_been_used } = self.stack.pop().unwrap();
+ let stack_elem = self.stack.pop().unwrap();
+ let StackElem { goal, has_been_used } = stack_elem;
assert_eq!(goal, actual_goal);
let cache = &mut self.provisional_cache;
let provisional_entry_index = *cache.lookup_table.get(&goal).unwrap();
let provisional_entry = &mut cache.entries[provisional_entry_index];
- let depth = provisional_entry.depth;
+ // We eagerly update the response in the cache here. If we have to reevaluate
+ // this goal we use the new response when hitting a cycle, and we definitely
+ // want to access the final response whenever we look at the cache.
+ let prev_response = mem::replace(&mut provisional_entry.response, response);
+
// Was the current goal the root of a cycle, and was the provisional response
// different from the final one?
- if has_been_used && provisional_entry.response != response {
- // If so, update the provisional response for this goal...
- provisional_entry.response = response;
- // ...remove all entries whose result depends on this goal
+ if has_been_used && prev_response != response {
+ // If so, remove all entries whose result depends on this goal
// from the provisional cache...
//
// That's not completely correct, as a nested goal can also
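The `mem::replace` above is the core of the new cycle handling: the cached response is updated eagerly, and if the final response of a cycle root differs from its provisional one, the goal is pushed back onto the stack and recomputed. In the abstract this is a fixpoint iteration, roughly of the following shape (a sketch only; `evaluate` stands in for rerunning the solver on the goal):

    use std::mem;

    // Abstract shape of the reevaluation loop driven by
    // `try_finalize_goal`: rerun the goal while the response it
    // produces still differs from the provisional one in the cache.
    fn fixpoint<R: PartialEq>(
        mut provisional: R,
        mut evaluate: impl FnMut(&R) -> R,
    ) -> R {
        loop {
            let response = evaluate(&provisional);
            // Eagerly replace the cached response, keeping the old
            // one around to check whether we reached a fixpoint.
            let prev = mem::replace(&mut provisional, response);
            if prev == provisional {
                return provisional;
            }
            // The response changed: anything computed from the old
            // provisional response is stale, so we go around again.
        }
    }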
@@ -150,29 +176,72 @@ impl<'tcx> SearchGraph<'tcx> {
self.stack.push(StackElem { goal, has_been_used: false });
false
} else {
- // If not, we're done with this goal.
- //
- // Check whether this goal doesn't depend on a goal deeper on the stack
- // and if so, move it and all nested goals to the global cache.
- //
- // Note that if any nested goal were to depend on something deeper on the stack,
- // this would have also updated the depth of the current goal.
- if depth == self.stack.next_index() {
- for (i, entry) in cache.entries.drain_enumerated(provisional_entry_index.index()..)
- {
- let actual_index = cache.lookup_table.remove(&entry.goal);
- debug_assert_eq!(Some(i), actual_index);
- debug_assert!(entry.depth == depth);
- cache::try_move_finished_goal_to_global_cache(
- tcx,
- &mut self.overflow_data,
- &self.stack,
- entry.goal,
- entry.response,
- );
- }
- }
+ self.try_move_finished_goal_to_global_cache(tcx, stack_elem);
true
}
}
+
+ fn try_move_finished_goal_to_global_cache(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ stack_elem: StackElem<'tcx>,
+ ) {
+ let StackElem { goal, .. } = stack_elem;
+ let cache = &mut self.provisional_cache;
+ let provisional_entry_index = *cache.lookup_table.get(&goal).unwrap();
+ let provisional_entry = &mut cache.entries[provisional_entry_index];
+ let depth = provisional_entry.depth;
+
+ // We're done with this goal.
+ //
+ // Check whether this goal doesn't depend on a goal deeper on the stack
+ // and if so, move it and all nested goals to the global cache.
+ //
+ // Note that if any nested goal were to depend on something deeper on the stack,
+ // this would have also updated the depth of the current goal.
+ if depth == self.stack.next_index() {
+ for (i, entry) in cache.entries.drain_enumerated(provisional_entry_index.index()..) {
+ let actual_index = cache.lookup_table.remove(&entry.goal);
+ debug_assert_eq!(Some(i), actual_index);
+ debug_assert!(entry.depth == depth);
+ cache::try_move_finished_goal_to_global_cache(
+ tcx,
+ &mut self.overflow_data,
+ &self.stack,
+ entry.goal,
+ entry.response,
+ );
+ }
+ }
+ }
+
+ pub(super) fn with_new_goal(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ canonical_goal: CanonicalGoal<'tcx>,
+ mut loop_body: impl FnMut(&mut Self) -> QueryResult<'tcx>,
+ ) -> QueryResult<'tcx> {
+ match self.try_push_stack(tcx, canonical_goal) {
+ Ok(()) => {}
+ // Our goal is already on the stack, so return eagerly.
+ Err(response) => return response,
+ }
+
+ self.repeat_while_none(
+ |this| {
+ let result = this.deal_with_overflow(tcx, canonical_goal);
+ let stack_elem = this.stack.pop().unwrap();
+ this.try_move_finished_goal_to_global_cache(tcx, stack_elem);
+ result
+ },
+ |this| {
+ let result = loop_body(this);
+ if this.try_finalize_goal(tcx, canonical_goal, result) {
+ Some(result)
+ } else {
+ None
+ }
+ },
+ )
+ }
}
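`with_new_goal` bundles `try_push_stack`, the overflow machinery, and `try_finalize_goal` into the single entry point callers use; `repeat_while_none` comes from the `overflow` module and is not part of this diff. Its rough shape is something like the following free-function sketch, with a fixed iteration limit assumed here in place of the solver's actual overflow budget:

    // Simplified sketch of the `repeat_while_none` combinator used by
    // `with_new_goal` above. The real version lives in `overflow.rs`
    // and derives its limit from the remaining recursion budget; the
    // fixed `limit` parameter is an assumption for illustration.
    fn repeat_while_none<T, R>(
        this: &mut T,
        limit: usize,
        on_overflow: impl FnOnce(&mut T) -> R,
        mut loop_body: impl FnMut(&mut T) -> Option<R>,
    ) -> R {
        for _ in 0..limit {
            // `loop_body` returns `Some` once `try_finalize_goal`
            // succeeds, i.e. once the provisional response is stable.
            if let Some(result) = loop_body(this) {
                return result;
            }
        }
        // Out of budget before reaching a fixpoint: report overflow
        // and clean up the stack, as the first closure passed to
        // `repeat_while_none` above does.
        on_overflow(this)
    }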