Diffstat (limited to 'compiler/rustc_trait_selection/src/solve/search_graph')
-rw-r--r--  compiler/rustc_trait_selection/src/solve/search_graph/cache.rs     123
-rw-r--r--  compiler/rustc_trait_selection/src/solve/search_graph/mod.rs       178
-rw-r--r--  compiler/rustc_trait_selection/src/solve/search_graph/overflow.rs   84
3 files changed, 385 insertions, 0 deletions
diff --git a/compiler/rustc_trait_selection/src/solve/search_graph/cache.rs b/compiler/rustc_trait_selection/src/solve/search_graph/cache.rs
new file mode 100644
index 000000000..730a8e612
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/solve/search_graph/cache.rs
@@ -0,0 +1,123 @@
+//! This module handles both the global cache, which stores "finished" goals,
+//! and the provisional cache, which contains partially computed goals.
+//!
+//! The provisional cache is necessary when dealing with coinductive cycles.
+//!
+//! For more information about the provisional cache and coinduction in general,
+//! check out the relevant section of the rustc-dev-guide.
+//!
+//! FIXME(@lcnr): Write that section, feel free to ping me if you need help here
+//! before then or if I still haven't done that before January 2023.
+use super::overflow::OverflowData;
+use super::StackDepth;
+use crate::solve::{CanonicalGoal, QueryResult};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::TyCtxt;
+
+rustc_index::newtype_index! {
+ pub struct EntryIndex {}
+}
+
+#[derive(Debug, Clone)]
+pub(super) struct ProvisionalEntry<'tcx> {
+ // In case we have a coinductive cycle, this is the
+ // currently least restrictive result of this goal.
+ pub(super) response: QueryResult<'tcx>,
+ // In case of a cycle, the position of the deepest stack entry involved
+ // in that cycle. This is monotonically decreasing in the stack as all
+ // elements between the current stack element and the deepest stack entry
+ // involved also have to be involved in that cycle.
+ //
+ // We can only move entries to the global cache once we're completely done
+ // with the cycle. If this entry has not been involved in a cycle,
+ // this is just its own depth.
+ pub(super) depth: StackDepth,
+
+ // The goal for this entry. Should always be equal to the corresponding goal
+ // in the lookup table.
+ pub(super) goal: CanonicalGoal<'tcx>,
+}
+
+pub(super) struct ProvisionalCache<'tcx> {
+ pub(super) entries: IndexVec<EntryIndex, ProvisionalEntry<'tcx>>,
+ // FIXME: This is only used to quickly check whether a given goal
+ // is in the cache. We should experiment with using something like
+ // `SsoHashSet` here because in most cases there are only a few entries.
+ pub(super) lookup_table: FxHashMap<CanonicalGoal<'tcx>, EntryIndex>,
+}
+
+impl<'tcx> ProvisionalCache<'tcx> {
+ pub(super) fn empty() -> ProvisionalCache<'tcx> {
+ ProvisionalCache { entries: Default::default(), lookup_table: Default::default() }
+ }
+
+ pub(super) fn is_empty(&self) -> bool {
+ self.entries.is_empty() && self.lookup_table.is_empty()
+ }
+
+ /// Adds a dependency from the current leaf to `target` in the cache
+ /// to prevent us from moving any goals which depend on the current leaf
+ /// to the global cache while we're still computing `target`.
+ ///
+ /// It's important to note that `target` may already be part of a different cycle.
+ /// In this case we have to ensure that we also depend on all other goals
+ /// in the existing cycle in addition to the potentially direct cycle with `target`.
+ pub(super) fn add_dependency_of_leaf_on(&mut self, target: EntryIndex) {
+ let depth = self.entries[target].depth;
+ for provisional_entry in &mut self.entries.raw[target.index()..] {
+ // The depth of `target` is the position of the deepest goal in the stack
+ // on which `target` depends. That goal is the `root` of this cycle.
+ //
+ // Any entry which was added after `target` is either on the stack itself,
+ // in which case its depth is definitely at least as high as the depth of
+ // `root`, or it is not on the stack, in which case it has to depend on a goal
+ // between `root` and `leaf`. If it were to depend on a goal deeper in the
+ // stack than `root`, then `root` would also depend on that goal, at which
+ // point `root` wouldn't be the root anymore.
+ debug_assert!(provisional_entry.depth >= depth);
+ provisional_entry.depth = depth;
+ }
+
+ // We only update entries which were added after `target` as no other
+ // entry should have a higher depth.
+ //
+ // Any entry which previously had a higher depth than target has to
+ // be between `target` and `root`. Because of this we would have updated
+ // its depth when calling `add_dependency_of_leaf_on(root)` for `target`.
+ if cfg!(debug_assertions) {
+ assert!(self.entries.iter().all(|e| e.depth <= depth));
+ }
+ }
+
+ pub(super) fn depth(&self, entry_index: EntryIndex) -> StackDepth {
+ self.entries[entry_index].depth
+ }
+
+ pub(super) fn provisional_result(&self, entry_index: EntryIndex) -> QueryResult<'tcx> {
+ self.entries[entry_index].response.clone()
+ }
+}
+
+pub(super) fn try_move_finished_goal_to_global_cache<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ overflow_data: &mut OverflowData,
+ stack: &IndexVec<super::StackDepth, super::StackElem<'tcx>>,
+ goal: CanonicalGoal<'tcx>,
+ response: QueryResult<'tcx>,
+) {
+ // We move goals to the global cache if we either did not hit an overflow or if it's
+ // the root goal as that will now always hit the same overflow limit.
+ //
+ // NOTE: We cannot move any non-root goals to the global cache, even if their final result
+ // isn't impacted by the overflow, as such a goal still has unstable query dependencies
+ // because it wasn't evaluated to its full depth.
+ //
+ // FIXME(@lcnr): We could still cache subtrees which are not impacted by overflow though.
+ // Tracking that info correctly isn't trivial, so I haven't implemented it for now.
+ let should_cache_globally = !overflow_data.did_overflow() || stack.is_empty();
+ if should_cache_globally {
+ // FIXME: move the provisional entry to the global cache.
+ let _ = (tcx, goal, response);
+ }
+}
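
The depth rule in `add_dependency_of_leaf_on` above is the heart of the provisional cache: once the current leaf depends on `target`, every entry added after `target` belongs to the same cycle and inherits the depth of that cycle's root. The following is a minimal, self-contained sketch of that rule using toy types (a hypothetical `ToyEntry` with plain `usize` depths) rather than rustc's `IndexVec`/`StackDepth`:

#[derive(Debug)]
struct ToyEntry {
    name: &'static str,
    // Index of the deepest stack entry this goal depends on.
    depth: usize,
}

fn add_dependency_of_leaf_on(entries: &mut [ToyEntry], target: usize) {
    let depth = entries[target].depth;
    for entry in &mut entries[target..] {
        // Entries added after `target` can never depend on anything deeper
        // than the root of this cycle, so lowering their depth is sound.
        debug_assert!(entry.depth >= depth);
        entry.depth = depth;
    }
}

fn main() {
    // Stack: A (depth 0) -> B (depth 1) -> C (depth 2); C now cycles back to A.
    let mut entries = vec![
        ToyEntry { name: "A", depth: 0 },
        ToyEntry { name: "B", depth: 1 },
        ToyEntry { name: "C", depth: 2 },
    ];
    add_dependency_of_leaf_on(&mut entries, 0);
    // All entries now share the root's depth 0, so none of them may be moved
    // to the global cache before the whole cycle is finished.
    for entry in &entries {
        println!("{} -> depth {}", entry.name, entry.depth);
    }
}

Lowering the depths like this is what keeps every member of the cycle out of the global cache until the root itself no longer depends on anything deeper.
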
diff --git a/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs b/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs
new file mode 100644
index 000000000..0030e9aa3
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs
@@ -0,0 +1,178 @@
+mod cache;
+mod overflow;
+
+use self::cache::ProvisionalEntry;
+use super::{CanonicalGoal, Certainty, MaybeCause, QueryResult};
+use cache::ProvisionalCache;
+use overflow::OverflowData;
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::TyCtxt;
+use std::collections::hash_map::Entry;
+
+rustc_index::newtype_index! {
+ pub struct StackDepth {}
+}
+
+struct StackElem<'tcx> {
+ goal: CanonicalGoal<'tcx>,
+ has_been_used: bool,
+}
+
+pub(super) struct SearchGraph<'tcx> {
+ /// The stack of goals currently being computed.
+ ///
+ /// An element is *deeper* in the stack if its index is *lower*.
+ stack: IndexVec<StackDepth, StackElem<'tcx>>,
+ overflow_data: OverflowData,
+ provisional_cache: ProvisionalCache<'tcx>,
+}
+
+impl<'tcx> SearchGraph<'tcx> {
+ pub(super) fn new(tcx: TyCtxt<'tcx>) -> SearchGraph<'tcx> {
+ Self {
+ stack: Default::default(),
+ overflow_data: OverflowData::new(tcx),
+ provisional_cache: ProvisionalCache::empty(),
+ }
+ }
+
+ pub(super) fn is_empty(&self) -> bool {
+ self.stack.is_empty()
+ && self.provisional_cache.is_empty()
+ && !self.overflow_data.did_overflow()
+ }
+
+ /// Tries putting the new goal on the stack, returning an error if it is already cached.
+ ///
+ /// This correctly updates the provisional cache if there is a cycle.
+ pub(super) fn try_push_stack(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ goal: CanonicalGoal<'tcx>,
+ ) -> Result<(), QueryResult<'tcx>> {
+ // FIXME: start by checking the global cache
+
+ // Look at the provisional cache to check for cycles.
+ let cache = &mut self.provisional_cache;
+ match cache.lookup_table.entry(goal) {
+ // No entry, simply push this goal on the stack after dealing with overflow.
+ Entry::Vacant(v) => {
+ if self.overflow_data.has_overflow(self.stack.len()) {
+ return Err(self.deal_with_overflow(tcx, goal));
+ }
+
+ let depth = self.stack.push(StackElem { goal, has_been_used: false });
+ let response = super::response_no_constraints(tcx, goal, Certainty::Yes);
+ let entry_index = cache.entries.push(ProvisionalEntry { response, depth, goal });
+ v.insert(entry_index);
+ Ok(())
+ }
+ // We have a nested goal which relies on a goal `root` deeper in the stack.
+ //
+ // We first store that we may have to rerun `evaluate_goal` for `root` in case the
+ // provisional response is not equal to the final response. We also update the depth
+ // of all goals which recursively depend on our current goal to depend on `root`
+ // instead.
+ //
+ // Finally we can return either the provisional response for that goal if we have a
+ // coinductive cycle or an ambiguous result if the cycle is inductive.
+ Entry::Occupied(entry_index) => {
+ let entry_index = *entry_index.get();
+
+ cache.add_dependency_of_leaf_on(entry_index);
+ let stack_depth = cache.depth(entry_index);
+
+ self.stack[stack_depth].has_been_used = true;
+ // NOTE: The goals on the stack aren't the only goals involved in this cycle.
+ // We can also depend on goals which aren't part of the stack but coinductively
+ // depend on the stack themselves. We already checked the goals between these
+ // goals and their root back when those goals were still on the stack. This
+ // means that as long as each goal in a cycle is checked for coinductivity by
+ // itself, simply checking the stack here is enough.
+ if self.stack.raw[stack_depth.index()..]
+ .iter()
+ .all(|g| g.goal.value.predicate.is_coinductive(tcx))
+ {
+ Err(cache.provisional_result(entry_index))
+ } else {
+ Err(super::response_no_constraints(
+ tcx,
+ goal,
+ Certainty::Maybe(MaybeCause::Overflow),
+ ))
+ }
+ }
+ }
+ }
+
+ /// We cannot simply store the result of [super::EvalCtxt::compute_goal] as we have to deal with
+ /// coinductive cycles.
+ ///
+ /// When we encounter a coinductive cycle, we have to prove the final result of that cycle
+ /// while we are still computing that result. Because of this we continuously recompute the
+ /// cycle until the result of the previous iteration is equal to the final result, at which
+ /// point we are done.
+ ///
+ /// This function returns `true` if we were able to finalize the goal and `false` if it has
+ /// updated the provisional cache and we have to recompute the current goal.
+ ///
+ /// FIXME: Refer to the rustc-dev-guide entry once it exists.
+ pub(super) fn try_finalize_goal(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ actual_goal: CanonicalGoal<'tcx>,
+ response: QueryResult<'tcx>,
+ ) -> bool {
+ let StackElem { goal, has_been_used } = self.stack.pop().unwrap();
+ assert_eq!(goal, actual_goal);
+
+ let cache = &mut self.provisional_cache;
+ let provisional_entry_index = *cache.lookup_table.get(&goal).unwrap();
+ let provisional_entry = &mut cache.entries[provisional_entry_index];
+ let depth = provisional_entry.depth;
+ // Was the current goal the root of a cycle and was the provisional response
+ // different from the final one?
+ if has_been_used && provisional_entry.response != response {
+ // If so, update the provisional response for this goal...
+ provisional_entry.response = response;
+ // ...remove all entries whose result depends on this goal
+ // from the provisional cache...
+ //
+ // That's not completely correct, as a nested goal can also
+ // depend on a goal which is deeper in the stack without
+ // actually depending on the current goal. This should be fairly
+ // rare and is hopefully not relevant for performance.
+ #[allow(rustc::potential_query_instability)]
+ cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index);
+ cache.entries.truncate(provisional_entry_index.index() + 1);
+
+ // ...and finally push our goal back on the stack and reevaluate it.
+ self.stack.push(StackElem { goal, has_been_used: false });
+ false
+ } else {
+ // If not, we're done with this goal.
+ //
+ // Check whether this goal doesn't depend on a goal deeper in the stack
+ // and if so, move it and all nested goals to the global cache.
+ //
+ // Note that if any nested goal were to depend on something deeper on the stack,
+ // this would have also updated the depth of the current goal.
+ if depth == self.stack.next_index() {
+ for (i, entry) in cache.entries.drain_enumerated(provisional_entry_index.index()..)
+ {
+ let actual_index = cache.lookup_table.remove(&entry.goal);
+ debug_assert_eq!(Some(i), actual_index);
+ debug_assert!(entry.depth == depth);
+ cache::try_move_finished_goal_to_global_cache(
+ tcx,
+ &mut self.overflow_data,
+ &self.stack,
+ entry.goal,
+ entry.response,
+ );
+ }
+ }
+ true
+ }
+ }
+}
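
Taken together, `try_push_stack` and `try_finalize_goal` implement a fixpoint loop: the root of a coinductive cycle starts out with the least restrictive response (`Certainty::Yes` with no constraints) and is reevaluated until the provisional result of the previous iteration equals the newly computed one. Below is a self-contained sketch of that loop with a toy result type and hypothetical names, not the solver's actual API:

#[derive(Clone, Copy, PartialEq, Debug)]
enum ToyResult {
    Yes,
    Maybe,
}

// Reevaluate a cycle until the provisional result stops changing, which is
// the role `try_finalize_goal` plays for the root of a coinductive cycle.
fn fixpoint(
    initial: ToyResult,
    mut compute_with: impl FnMut(ToyResult) -> ToyResult,
) -> ToyResult {
    let mut provisional = initial;
    loop {
        // Recompute the cycle, feeding the current provisional result to the
        // cyclic dependency (the value returned by `provisional_result`).
        let response = compute_with(provisional);
        if response == provisional {
            // `try_finalize_goal` would return `true` here: we are done.
            return response;
        }
        // Otherwise the provisional entry is updated and the goal is pushed
        // back onto the stack to be reevaluated.
        provisional = response;
    }
}

fn main() {
    // Start optimistically with `Yes` (the least restrictive response); the
    // toy computation degrades it to `Maybe`, which is then stable.
    let result = fixpoint(ToyResult::Yes, |prev| match prev {
        ToyResult::Yes => ToyResult::Maybe,
        other => other,
    });
    assert_eq!(result, ToyResult::Maybe);
    println!("final result: {result:?}");
}

In the real solver the caller of the search graph plays the role of `compute_with`, and `try_finalize_goal` returning `false` corresponds to the `provisional = response` branch.
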
diff --git a/compiler/rustc_trait_selection/src/solve/search_graph/overflow.rs b/compiler/rustc_trait_selection/src/solve/search_graph/overflow.rs
new file mode 100644
index 000000000..1dd3894c9
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/solve/search_graph/overflow.rs
@@ -0,0 +1,84 @@
+use rustc_infer::infer::canonical::Canonical;
+use rustc_infer::traits::query::NoSolution;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::Limit;
+
+use super::SearchGraph;
+use crate::solve::{response_no_constraints, Certainty, EvalCtxt, MaybeCause, QueryResult};
+
+/// When detecting a solver overflow, we return ambiguity. Overflow can be
+/// *hidden* by either a fatal error in an **AND** or a trivial success in an **OR**.
+///
+/// This is an issue in case of exponential blowup, e.g. if each goal on the stack
+/// has multiple nested (overflowing) candidates. To deal with this, we reduce the limit
+/// used by the solver when hitting the default limit for the first time.
+///
+/// FIXME: Get tests where always using the `default_limit` results in a hang and refer
+/// to them here. We can also improve the overflow strategy if necessary.
+pub(super) struct OverflowData {
+ default_limit: Limit,
+ current_limit: Limit,
+ /// When proving an **AND** we have to repeatedly iterate over the yet unproven goals.
+ ///
+ /// Because of this, each iteration also counts towards the overflow depth in
+ /// addition to the stack depth.
+ additional_depth: usize,
+}
+
+impl OverflowData {
+ pub(super) fn new(tcx: TyCtxt<'_>) -> OverflowData {
+ let default_limit = tcx.recursion_limit();
+ OverflowData { default_limit, current_limit: default_limit, additional_depth: 0 }
+ }
+
+ #[inline]
+ pub(super) fn did_overflow(&self) -> bool {
+ self.default_limit.0 != self.current_limit.0
+ }
+
+ #[inline]
+ pub(super) fn has_overflow(&self, depth: usize) -> bool {
+ !self.current_limit.value_within_limit(depth + self.additional_depth)
+ }
+
+ /// Updates the current limit when hitting overflow.
+ fn deal_with_overflow(&mut self) {
+ // When first hitting overflow we reduce the overflow limit
+ // for all future goals to prevent hangs if there's an exponential
+ // blowup.
+ self.current_limit.0 = self.default_limit.0 / 8;
+ }
+}
+
+impl<'tcx> SearchGraph<'tcx> {
+ pub fn deal_with_overflow(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ goal: Canonical<'tcx, impl Sized>,
+ ) -> QueryResult<'tcx> {
+ self.overflow_data.deal_with_overflow();
+ response_no_constraints(tcx, goal, Certainty::Maybe(MaybeCause::Overflow))
+ }
+}
+
+impl<'tcx> EvalCtxt<'_, 'tcx> {
+ /// A `while`-loop which runs `loop_body` until it returns a result, treating each iteration as additional overflow depth.
+ pub fn repeat_while_none(
+ &mut self,
+ mut loop_body: impl FnMut(&mut Self) -> Option<Result<Certainty, NoSolution>>,
+ ) -> Result<Certainty, NoSolution> {
+ let start_depth = self.search_graph.overflow_data.additional_depth;
+ let depth = self.search_graph.stack.len();
+ while !self.search_graph.overflow_data.has_overflow(depth) {
+ if let Some(result) = loop_body(self) {
+ self.search_graph.overflow_data.additional_depth = start_depth;
+ return result;
+ }
+
+ self.search_graph.overflow_data.additional_depth += 1;
+ }
+ self.search_graph.overflow_data.additional_depth = start_depth;
+ self.search_graph.overflow_data.deal_with_overflow();
+ Ok(Certainty::Maybe(MaybeCause::Overflow))
+ }
+}
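
To make the numbers concrete, here is a self-contained sketch of the overflow bookkeeping with a toy struct and plain `usize` limits instead of `Limit`: the first overflow shrinks the limit to an eighth of the recursion limit for all later goals, and iterations of `repeat_while_none` count towards the depth on top of the stack depth.

struct ToyOverflowData {
    default_limit: usize,
    current_limit: usize,
    additional_depth: usize,
}

impl ToyOverflowData {
    fn new(recursion_limit: usize) -> Self {
        Self {
            default_limit: recursion_limit,
            current_limit: recursion_limit,
            additional_depth: 0,
        }
    }

    fn did_overflow(&self) -> bool {
        self.default_limit != self.current_limit
    }

    fn has_overflow(&self, stack_depth: usize) -> bool {
        // Mirrors `!value_within_limit(depth + additional_depth)`.
        stack_depth + self.additional_depth > self.current_limit
    }

    fn deal_with_overflow(&mut self) {
        // First overflow: reduce the limit for all future goals.
        self.current_limit = self.default_limit / 8;
    }
}

fn main() {
    let mut data = ToyOverflowData::new(128);
    assert!(!data.has_overflow(100));

    // Hitting the limit once lowers it to 128 / 8 = 16 for everything else.
    data.deal_with_overflow();
    assert!(data.did_overflow());
    assert!(data.has_overflow(100));

    // Iterations of `repeat_while_none` also count towards overflow.
    data.additional_depth = 10;
    assert!(data.has_overflow(7)); // 7 + 10 > 16
}
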