From 698f8c2f01ea549d77d7dc3338a12e04c11057b9 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Wed, 17 Apr 2024 14:02:58 +0200 Subject: Adding upstream version 1.64.0+dfsg1. Signed-off-by: Daniel Baumann --- .../src/traits/select/candidate_assembly.rs | 1009 ++++++++ .../src/traits/select/confirmation.rs | 1266 +++++++++ .../rustc_trait_selection/src/traits/select/mod.rs | 2698 ++++++++++++++++++++ 3 files changed, 4973 insertions(+) create mode 100644 compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs create mode 100644 compiler/rustc_trait_selection/src/traits/select/confirmation.rs create mode 100644 compiler/rustc_trait_selection/src/traits/select/mod.rs (limited to 'compiler/rustc_trait_selection/src/traits/select') diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs new file mode 100644 index 000000000..a60ce0f34 --- /dev/null +++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs @@ -0,0 +1,1009 @@ +//! Candidate assembly. +//! +//! The selection process begins by examining all in-scope impls, +//! caller obligations, and so forth and assembling a list of +//! candidates. See the [rustc dev guide] for more details. +//! +//! [rustc dev guide]:https://rustc-dev-guide.rust-lang.org/traits/resolution.html#candidate-assembly +use hir::LangItem; +use rustc_hir as hir; +use rustc_hir::def_id::DefId; +use rustc_infer::traits::TraitEngine; +use rustc_infer::traits::{Obligation, SelectionError, TraitObligation}; +use rustc_lint_defs::builtin::DEREF_INTO_DYN_SUPERTRAIT; +use rustc_middle::ty::print::with_no_trimmed_paths; +use rustc_middle::ty::{self, ToPredicate, Ty, TypeVisitable}; +use rustc_target::spec::abi::Abi; + +use crate::traits; +use crate::traits::coherence::Conflict; +use crate::traits::query::evaluate_obligation::InferCtxtExt; +use crate::traits::{util, SelectionResult}; +use crate::traits::{Ambiguous, ErrorReporting, Overflow, Unimplemented}; + +use super::BuiltinImplConditions; +use super::IntercrateAmbiguityCause; +use super::OverflowError; +use super::SelectionCandidate::{self, *}; +use super::{EvaluatedCandidate, SelectionCandidateSet, SelectionContext, TraitObligationStack}; + +impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { + #[instrument(level = "debug", skip(self))] + pub(super) fn candidate_from_obligation<'o>( + &mut self, + stack: &TraitObligationStack<'o, 'tcx>, + ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { + // Watch out for overflow. This intentionally bypasses (and does + // not update) the cache. + self.check_recursion_limit(&stack.obligation, &stack.obligation)?; + + // Check the cache. Note that we freshen the trait-ref + // separately rather than using `stack.fresh_trait_ref` -- + // this is because we want the unbound variables to be + // replaced with fresh types starting from index 0. + let cache_fresh_trait_pred = self.infcx.freshen(stack.obligation.predicate); + debug!(?cache_fresh_trait_pred); + debug_assert!(!stack.obligation.predicate.has_escaping_bound_vars()); + + if let Some(c) = + self.check_candidate_cache(stack.obligation.param_env, cache_fresh_trait_pred) + { + debug!(candidate = ?c, "CACHE HIT"); + return c; + } + + // If no match, compute result and insert into cache. + // + // FIXME(nikomatsakis) -- this cache is not taking into + // account cycles that may have occurred in forming the + // candidate. 
I don't know of any specific problems that + // result but it seems awfully suspicious. + let (candidate, dep_node) = + self.in_task(|this| this.candidate_from_obligation_no_cache(stack)); + + debug!(?candidate, "CACHE MISS"); + self.insert_candidate_cache( + stack.obligation.param_env, + cache_fresh_trait_pred, + dep_node, + candidate.clone(), + ); + candidate + } + + fn candidate_from_obligation_no_cache<'o>( + &mut self, + stack: &TraitObligationStack<'o, 'tcx>, + ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { + if let Some(conflict) = self.is_knowable(stack) { + debug!("coherence stage: not knowable"); + if self.intercrate_ambiguity_causes.is_some() { + debug!("evaluate_stack: intercrate_ambiguity_causes is some"); + // Heuristics: show the diagnostics when there are no candidates in crate. + if let Ok(candidate_set) = self.assemble_candidates(stack) { + let mut no_candidates_apply = true; + + for c in candidate_set.vec.iter() { + if self.evaluate_candidate(stack, &c)?.may_apply() { + no_candidates_apply = false; + break; + } + } + + if !candidate_set.ambiguous && no_candidates_apply { + let trait_ref = stack.obligation.predicate.skip_binder().trait_ref; + let self_ty = trait_ref.self_ty(); + let (trait_desc, self_desc) = with_no_trimmed_paths!({ + let trait_desc = trait_ref.print_only_trait_path().to_string(); + let self_desc = if self_ty.has_concrete_skeleton() { + Some(self_ty.to_string()) + } else { + None + }; + (trait_desc, self_desc) + }); + let cause = if let Conflict::Upstream = conflict { + IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc } + } else { + IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc } + }; + debug!(?cause, "evaluate_stack: pushing cause"); + self.intercrate_ambiguity_causes.as_mut().unwrap().insert(cause); + } + } + } + return Ok(None); + } + + let candidate_set = self.assemble_candidates(stack)?; + + if candidate_set.ambiguous { + debug!("candidate set contains ambig"); + return Ok(None); + } + + let candidates = candidate_set.vec; + + debug!(?stack, ?candidates, "assembled {} candidates", candidates.len()); + + // At this point, we know that each of the entries in the + // candidate set is *individually* applicable. Now we have to + // figure out if they contain mutual incompatibilities. This + // frequently arises if we have an unconstrained input type -- + // for example, we are looking for `$0: Eq` where `$0` is some + // unconstrained type variable. In that case, we'll get a + // candidate which assumes $0 == int, one that assumes `$0 == + // usize`, etc. This spells an ambiguity. + + let mut candidates = self.filter_impls(candidates, stack.obligation); + + // If there is more than one candidate, first winnow them down + // by considering extra conditions (nested obligations and so + // forth). We don't winnow if there is exactly one + // candidate. This is a relatively minor distinction but it + // can lead to better inference and error-reporting. An + // example would be if there was an impl: + // + // impl Vec { fn push_clone(...) { ... } } + // + // and we were to see some code `foo.push_clone()` where `boo` + // is a `Vec` and `Bar` does not implement `Clone`. If + // we were to winnow, we'd wind up with zero candidates. + // Instead, we select the right impl now but report "`Bar` does + // not implement `Clone`". 
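+        // For illustration (hypothetical user code, not part of this file), the
+        // `$0: Eq`-style ambiguity described above shows up in code such as
+        //
+        //     let mut v = Vec::new(); // element type is an unconstrained `$0`
+        //     v.sort();               // needs `$0: Ord`; i32, String, ... all apply
+        //
+        // so no single candidate can win. By contrast, when exactly one candidate
+        // applies we return it below without winnowing its nested obligations.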
+ if candidates.len() == 1 { + return self.filter_reservation_impls(candidates.pop().unwrap(), stack.obligation); + } + + // Winnow, but record the exact outcome of evaluation, which + // is needed for specialization. Propagate overflow if it occurs. + let mut candidates = candidates + .into_iter() + .map(|c| match self.evaluate_candidate(stack, &c) { + Ok(eval) if eval.may_apply() => { + Ok(Some(EvaluatedCandidate { candidate: c, evaluation: eval })) + } + Ok(_) => Ok(None), + Err(OverflowError::Canonical) => Err(Overflow(OverflowError::Canonical)), + Err(OverflowError::ErrorReporting) => Err(ErrorReporting), + Err(OverflowError::Error(e)) => Err(Overflow(OverflowError::Error(e))), + }) + .flat_map(Result::transpose) + .collect::, _>>()?; + + debug!(?stack, ?candidates, "winnowed to {} candidates", candidates.len()); + + let needs_infer = stack.obligation.predicate.has_infer_types_or_consts(); + + // If there are STILL multiple candidates, we can further + // reduce the list by dropping duplicates -- including + // resolving specializations. + if candidates.len() > 1 { + let mut i = 0; + while i < candidates.len() { + let is_dup = (0..candidates.len()).filter(|&j| i != j).any(|j| { + self.candidate_should_be_dropped_in_favor_of( + &candidates[i], + &candidates[j], + needs_infer, + ) + }); + if is_dup { + debug!(candidate = ?candidates[i], "Dropping candidate #{}/{}", i, candidates.len()); + candidates.swap_remove(i); + } else { + debug!(candidate = ?candidates[i], "Retaining candidate #{}/{}", i, candidates.len()); + i += 1; + + // If there are *STILL* multiple candidates, give up + // and report ambiguity. + if i > 1 { + debug!("multiple matches, ambig"); + return Err(Ambiguous( + candidates + .into_iter() + .filter_map(|c| match c.candidate { + SelectionCandidate::ImplCandidate(def_id) => Some(def_id), + _ => None, + }) + .collect(), + )); + } + } + } + } + + // If there are *NO* candidates, then there are no impls -- + // that we know of, anyway. Note that in the case where there + // are unbound type variables within the obligation, it might + // be the case that you could still satisfy the obligation + // from another crate by instantiating the type variables with + // a type from another crate that does have an impl. This case + // is checked for in `evaluate_stack` (and hence users + // who might care about this case, like coherence, should use + // that function). + if candidates.is_empty() { + // If there's an error type, 'downgrade' our result from + // `Err(Unimplemented)` to `Ok(None)`. This helps us avoid + // emitting additional spurious errors, since we're guaranteed + // to have emitted at least one. + if stack.obligation.predicate.references_error() { + debug!(?stack.obligation.predicate, "found error type in predicate, treating as ambiguous"); + return Ok(None); + } + return Err(Unimplemented); + } + + // Just one candidate left. + self.filter_reservation_impls(candidates.pop().unwrap().candidate, stack.obligation) + } + + #[instrument(skip(self, stack), level = "debug")] + pub(super) fn assemble_candidates<'o>( + &mut self, + stack: &TraitObligationStack<'o, 'tcx>, + ) -> Result, SelectionError<'tcx>> { + let TraitObligationStack { obligation, .. 
} = *stack; + let obligation = &Obligation { + param_env: obligation.param_env, + cause: obligation.cause.clone(), + recursion_depth: obligation.recursion_depth, + predicate: self.infcx().resolve_vars_if_possible(obligation.predicate), + }; + + if obligation.predicate.skip_binder().self_ty().is_ty_var() { + debug!(ty = ?obligation.predicate.skip_binder().self_ty(), "ambiguous inference var or opaque type"); + // Self is a type variable (e.g., `_: AsRef`). + // + // This is somewhat problematic, as the current scheme can't really + // handle it turning to be a projection. This does end up as truly + // ambiguous in most cases anyway. + // + // Take the fast path out - this also improves + // performance by preventing assemble_candidates_from_impls from + // matching every impl for this trait. + return Ok(SelectionCandidateSet { vec: vec![], ambiguous: true }); + } + + let mut candidates = SelectionCandidateSet { vec: Vec::new(), ambiguous: false }; + + // The only way to prove a NotImplemented(T: Foo) predicate is via a negative impl. + // There are no compiler built-in rules for this. + if obligation.polarity() == ty::ImplPolarity::Negative { + self.assemble_candidates_for_trait_alias(obligation, &mut candidates); + self.assemble_candidates_from_impls(obligation, &mut candidates); + } else { + self.assemble_candidates_for_trait_alias(obligation, &mut candidates); + + // Other bounds. Consider both in-scope bounds from fn decl + // and applicable impls. There is a certain set of precedence rules here. + let def_id = obligation.predicate.def_id(); + let lang_items = self.tcx().lang_items(); + + if lang_items.copy_trait() == Some(def_id) { + debug!(obligation_self_ty = ?obligation.predicate.skip_binder().self_ty()); + + // User-defined copy impls are permitted, but only for + // structs and enums. + self.assemble_candidates_from_impls(obligation, &mut candidates); + + // For other types, we'll use the builtin rules. + let copy_conditions = self.copy_clone_conditions(obligation); + self.assemble_builtin_bound_candidates(copy_conditions, &mut candidates); + } else if lang_items.discriminant_kind_trait() == Some(def_id) { + // `DiscriminantKind` is automatically implemented for every type. + candidates.vec.push(DiscriminantKindCandidate); + } else if lang_items.pointee_trait() == Some(def_id) { + // `Pointee` is automatically implemented for every type. + candidates.vec.push(PointeeCandidate); + } else if lang_items.sized_trait() == Some(def_id) { + // Sized is never implementable by end-users, it is + // always automatically computed. + let sized_conditions = self.sized_conditions(obligation); + self.assemble_builtin_bound_candidates(sized_conditions, &mut candidates); + } else if lang_items.unsize_trait() == Some(def_id) { + self.assemble_candidates_for_unsizing(obligation, &mut candidates); + } else if lang_items.destruct_trait() == Some(def_id) { + self.assemble_const_destruct_candidates(obligation, &mut candidates); + } else if lang_items.transmute_trait() == Some(def_id) { + // User-defined transmutability impls are permitted. + self.assemble_candidates_from_impls(obligation, &mut candidates); + self.assemble_candidates_for_transmutability(obligation, &mut candidates); + } else { + if lang_items.clone_trait() == Some(def_id) { + // Same builtin conditions as `Copy`, i.e., every type which has builtin support + // for `Copy` also has builtin support for `Clone`, and tuples/arrays of `Clone` + // types have builtin support for `Clone`. 
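+                    // For illustration (hypothetical types, not from this file): per the
+                    // comment above, `(u8, String)` is handled by these builtin
+                    // conditions (a tuple of `Clone` types), while a user struct such as
+                    //
+                    //     struct Wrapper(String);
+                    //
+                    // only gets `Clone` through a written or derived impl found by
+                    // `assemble_candidates_from_impls` below.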
+ let clone_conditions = self.copy_clone_conditions(obligation); + self.assemble_builtin_bound_candidates(clone_conditions, &mut candidates); + } + + self.assemble_generator_candidates(obligation, &mut candidates); + self.assemble_closure_candidates(obligation, &mut candidates); + self.assemble_fn_pointer_candidates(obligation, &mut candidates); + self.assemble_candidates_from_impls(obligation, &mut candidates); + self.assemble_candidates_from_object_ty(obligation, &mut candidates); + } + + self.assemble_candidates_from_projected_tys(obligation, &mut candidates); + self.assemble_candidates_from_caller_bounds(stack, &mut candidates)?; + // Auto implementations have lower priority, so we only + // consider triggering a default if there is no other impl that can apply. + if candidates.vec.is_empty() { + self.assemble_candidates_from_auto_impls(obligation, &mut candidates); + } + } + debug!("candidate list size: {}", candidates.vec.len()); + Ok(candidates) + } + + #[tracing::instrument(level = "debug", skip(self, candidates))] + fn assemble_candidates_from_projected_tys( + &mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) { + // Before we go into the whole placeholder thing, just + // quickly check if the self-type is a projection at all. + match obligation.predicate.skip_binder().trait_ref.self_ty().kind() { + ty::Projection(_) | ty::Opaque(..) => {} + ty::Infer(ty::TyVar(_)) => { + span_bug!( + obligation.cause.span, + "Self=_ should have been handled by assemble_candidates" + ); + } + _ => return, + } + + let result = self + .infcx + .probe(|_| self.match_projection_obligation_against_definition_bounds(obligation)); + + candidates.vec.extend(result.into_iter().map(ProjectionCandidate)); + } + + /// Given an obligation like ``, searches the obligations that the caller + /// supplied to find out whether it is listed among them. + /// + /// Never affects the inference environment. + #[tracing::instrument(level = "debug", skip(self, stack, candidates))] + fn assemble_candidates_from_caller_bounds<'o>( + &mut self, + stack: &TraitObligationStack<'o, 'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) -> Result<(), SelectionError<'tcx>> { + debug!(?stack.obligation); + + let all_bounds = stack + .obligation + .param_env + .caller_bounds() + .iter() + .filter_map(|o| o.to_opt_poly_trait_pred()); + + // Micro-optimization: filter out predicates relating to different traits. + let matching_bounds = + all_bounds.filter(|p| p.def_id() == stack.obligation.predicate.def_id()); + + // Keep only those bounds which may apply, and propagate overflow if it occurs. + for bound in matching_bounds { + // FIXME(oli-obk): it is suspicious that we are dropping the constness and + // polarity here. + let wc = self.where_clause_may_apply(stack, bound.map_bound(|t| t.trait_ref))?; + if wc.may_apply() { + candidates.vec.push(ParamCandidate(bound)); + } + } + + Ok(()) + } + + fn assemble_generator_candidates( + &mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) { + if self.tcx().lang_items().gen_trait() != Some(obligation.predicate.def_id()) { + return; + } + + // Okay to skip binder because the substs on generator types never + // touch bound regions, they just capture the in-scope + // type/region parameters. + let self_ty = obligation.self_ty().skip_binder(); + match self_ty.kind() { + ty::Generator(..) 
=> { + debug!(?self_ty, ?obligation, "assemble_generator_candidates",); + + candidates.vec.push(GeneratorCandidate); + } + ty::Infer(ty::TyVar(_)) => { + debug!("assemble_generator_candidates: ambiguous self-type"); + candidates.ambiguous = true; + } + _ => {} + } + } + + /// Checks for the artificial impl that the compiler will create for an obligation like `X : + /// FnMut<..>` where `X` is a closure type. + /// + /// Note: the type parameters on a closure candidate are modeled as *output* type + /// parameters and hence do not affect whether this trait is a match or not. They will be + /// unified during the confirmation step. + fn assemble_closure_candidates( + &mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) { + let Some(kind) = self.tcx().fn_trait_kind_from_lang_item(obligation.predicate.def_id()) else { + return; + }; + + // Okay to skip binder because the substs on closure types never + // touch bound regions, they just capture the in-scope + // type/region parameters + match *obligation.self_ty().skip_binder().kind() { + ty::Closure(_, closure_substs) => { + debug!(?kind, ?obligation, "assemble_unboxed_candidates"); + match self.infcx.closure_kind(closure_substs) { + Some(closure_kind) => { + debug!(?closure_kind, "assemble_unboxed_candidates"); + if closure_kind.extends(kind) { + candidates.vec.push(ClosureCandidate); + } + } + None => { + debug!("assemble_unboxed_candidates: closure_kind not yet known"); + candidates.vec.push(ClosureCandidate); + } + } + } + ty::Infer(ty::TyVar(_)) => { + debug!("assemble_unboxed_closure_candidates: ambiguous self-type"); + candidates.ambiguous = true; + } + _ => {} + } + } + + /// Implements one of the `Fn()` family for a fn pointer. + fn assemble_fn_pointer_candidates( + &mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) { + // We provide impl of all fn traits for fn pointers. + if self.tcx().fn_trait_kind_from_lang_item(obligation.predicate.def_id()).is_none() { + return; + } + + // Okay to skip binder because what we are inspecting doesn't involve bound regions. + let self_ty = obligation.self_ty().skip_binder(); + match *self_ty.kind() { + ty::Infer(ty::TyVar(_)) => { + debug!("assemble_fn_pointer_candidates: ambiguous self-type"); + candidates.ambiguous = true; // Could wind up being a fn() type. + } + // Provide an impl, but only for suitable `fn` pointers. + ty::FnPtr(_) => { + if let ty::FnSig { + unsafety: hir::Unsafety::Normal, + abi: Abi::Rust, + c_variadic: false, + .. + } = self_ty.fn_sig(self.tcx()).skip_binder() + { + candidates.vec.push(FnPointerCandidate { is_const: false }); + } + } + // Provide an impl for suitable functions, rejecting `#[target_feature]` functions (RFC 2396). + ty::FnDef(def_id, _) => { + if let ty::FnSig { + unsafety: hir::Unsafety::Normal, + abi: Abi::Rust, + c_variadic: false, + .. + } = self_ty.fn_sig(self.tcx()).skip_binder() + { + if self.tcx().codegen_fn_attrs(def_id).target_features.is_empty() { + candidates + .vec + .push(FnPointerCandidate { is_const: self.tcx().is_const_fn(def_id) }); + } + } + } + _ => {} + } + } + + /// Searches for impls that might apply to `obligation`. + fn assemble_candidates_from_impls( + &mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) { + debug!(?obligation, "assemble_candidates_from_impls"); + + // Essentially any user-written impl will match with an error type, + // so creating `ImplCandidates` isn't useful. 
However, we might + // end up finding a candidate elsewhere (e.g. a `BuiltinCandidate` for `Sized) + // This helps us avoid overflow: see issue #72839 + // Since compilation is already guaranteed to fail, this is just + // to try to show the 'nicest' possible errors to the user. + // We don't check for errors in the `ParamEnv` - in practice, + // it seems to cause us to be overly aggressive in deciding + // to give up searching for candidates, leading to spurious errors. + if obligation.predicate.references_error() { + return; + } + + self.tcx().for_each_relevant_impl( + obligation.predicate.def_id(), + obligation.predicate.skip_binder().trait_ref.self_ty(), + |impl_def_id| { + // Before we create the substitutions and everything, first + // consider a "quick reject". This avoids creating more types + // and so forth that we need to. + let impl_trait_ref = self.tcx().bound_impl_trait_ref(impl_def_id).unwrap(); + if self.fast_reject_trait_refs(obligation, &impl_trait_ref.0) { + return; + } + + self.infcx.probe(|_| { + if let Ok(_substs) = self.match_impl(impl_def_id, impl_trait_ref, obligation) { + candidates.vec.push(ImplCandidate(impl_def_id)); + } + }); + }, + ); + } + + fn assemble_candidates_from_auto_impls( + &mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) { + // Okay to skip binder here because the tests we do below do not involve bound regions. + let self_ty = obligation.self_ty().skip_binder(); + debug!(?self_ty, "assemble_candidates_from_auto_impls"); + + let def_id = obligation.predicate.def_id(); + + if self.tcx().trait_is_auto(def_id) { + match self_ty.kind() { + ty::Dynamic(..) => { + // For object types, we don't know what the closed + // over types are. This means we conservatively + // say nothing; a candidate may be added by + // `assemble_candidates_from_object_ty`. + } + ty::Foreign(..) => { + // Since the contents of foreign types is unknown, + // we don't add any `..` impl. Default traits could + // still be provided by a manual implementation for + // this trait and type. + } + ty::Param(..) | ty::Projection(..) => { + // In these cases, we don't know what the actual + // type is. Therefore, we cannot break it down + // into its constituent types. So we don't + // consider the `..` impl but instead just add no + // candidates: this means that typeck will only + // succeed if there is another reason to believe + // that this obligation holds. That could be a + // where-clause or, in the case of an object type, + // it could be that the object type lists the + // trait (e.g., `Foo+Send : Send`). See + // `ui/typeck/typeck-default-trait-impl-send-param.rs` + // for an example of a test case that exercises + // this path. + } + ty::Infer(ty::TyVar(_)) => { + // The auto impl might apply; we don't know. + candidates.ambiguous = true; + } + ty::Generator(_, _, movability) + if self.tcx().lang_items().unpin_trait() == Some(def_id) => + { + match movability { + hir::Movability::Static => { + // Immovable generators are never `Unpin`, so + // suppress the normal auto-impl candidate for it. + } + hir::Movability::Movable => { + // Movable generators are always `Unpin`, so add an + // unconditional builtin candidate. + candidates.vec.push(BuiltinCandidate { has_nested: false }); + } + } + } + + _ => candidates.vec.push(AutoImplCandidate(def_id)), + } + } + } + + /// Searches for impls that might apply to `obligation`. 
+ fn assemble_candidates_from_object_ty( + &mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) { + debug!( + self_ty = ?obligation.self_ty().skip_binder(), + "assemble_candidates_from_object_ty", + ); + + self.infcx.probe(|_snapshot| { + // The code below doesn't care about regions, and the + // self-ty here doesn't escape this probe, so just erase + // any LBR. + let self_ty = self.tcx().erase_late_bound_regions(obligation.self_ty()); + let poly_trait_ref = match self_ty.kind() { + ty::Dynamic(ref data, ..) => { + if data.auto_traits().any(|did| did == obligation.predicate.def_id()) { + debug!( + "assemble_candidates_from_object_ty: matched builtin bound, \ + pushing candidate" + ); + candidates.vec.push(BuiltinObjectCandidate); + return; + } + + if let Some(principal) = data.principal() { + if !self.infcx.tcx.features().object_safe_for_dispatch { + principal.with_self_ty(self.tcx(), self_ty) + } else if self.tcx().is_object_safe(principal.def_id()) { + principal.with_self_ty(self.tcx(), self_ty) + } else { + return; + } + } else { + // Only auto trait bounds exist. + return; + } + } + ty::Infer(ty::TyVar(_)) => { + debug!("assemble_candidates_from_object_ty: ambiguous"); + candidates.ambiguous = true; // could wind up being an object type + return; + } + _ => return, + }; + + debug!(?poly_trait_ref, "assemble_candidates_from_object_ty"); + + let poly_trait_predicate = self.infcx().resolve_vars_if_possible(obligation.predicate); + let placeholder_trait_predicate = + self.infcx().replace_bound_vars_with_placeholders(poly_trait_predicate); + + // Count only those upcast versions that match the trait-ref + // we are looking for. Specifically, do not only check for the + // correct trait, but also the correct type parameters. + // For example, we may be trying to upcast `Foo` to `Bar`, + // but `Foo` is declared as `trait Foo: Bar`. + let candidate_supertraits = util::supertraits(self.tcx(), poly_trait_ref) + .enumerate() + .filter(|&(_, upcast_trait_ref)| { + self.infcx.probe(|_| { + self.match_normalize_trait_ref( + obligation, + upcast_trait_ref, + placeholder_trait_predicate.trait_ref, + ) + .is_ok() + }) + }) + .map(|(idx, _)| ObjectCandidate(idx)); + + candidates.vec.extend(candidate_supertraits); + }) + } + + /// Temporary migration for #89190 + fn need_migrate_deref_output_trait_object( + &mut self, + ty: Ty<'tcx>, + cause: &traits::ObligationCause<'tcx>, + param_env: ty::ParamEnv<'tcx>, + ) -> Option<(Ty<'tcx>, DefId)> { + let tcx = self.tcx(); + if tcx.features().trait_upcasting { + return None; + } + + // + let trait_ref = ty::TraitRef { + def_id: tcx.lang_items().deref_trait()?, + substs: tcx.mk_substs_trait(ty, &[]), + }; + + let obligation = traits::Obligation::new( + cause.clone(), + param_env, + ty::Binder::dummy(trait_ref).without_const().to_predicate(tcx), + ); + if !self.infcx.predicate_may_hold(&obligation) { + return None; + } + + let mut fulfillcx = traits::FulfillmentContext::new_in_snapshot(); + let normalized_ty = fulfillcx.normalize_projection_type( + &self.infcx, + param_env, + ty::ProjectionTy { + item_def_id: tcx.lang_items().deref_target()?, + substs: trait_ref.substs, + }, + cause.clone(), + ); + + let ty::Dynamic(data, ..) = normalized_ty.kind() else { + return None; + }; + + let def_id = data.principal_def_id()?; + + return Some((normalized_ty, def_id)); + } + + /// Searches for unsizing that might apply to `obligation`. 
+ fn assemble_candidates_for_unsizing( + &mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) { + // We currently never consider higher-ranked obligations e.g. + // `for<'a> &'a T: Unsize` to be implemented. This is not + // because they are a priori invalid, and we could potentially add support + // for them later, it's just that there isn't really a strong need for it. + // A `T: Unsize` obligation is always used as part of a `T: CoerceUnsize` + // impl, and those are generally applied to concrete types. + // + // That said, one might try to write a fn with a where clause like + // for<'a> Foo<'a, T>: Unsize> + // where the `'a` is kind of orthogonal to the relevant part of the `Unsize`. + // Still, you'd be more likely to write that where clause as + // T: Trait + // so it seems ok if we (conservatively) fail to accept that `Unsize` + // obligation above. Should be possible to extend this in the future. + let Some(source) = obligation.self_ty().no_bound_vars() else { + // Don't add any candidates if there are bound regions. + return; + }; + let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1); + + debug!(?source, ?target, "assemble_candidates_for_unsizing"); + + match (source.kind(), target.kind()) { + // Trait+Kx+'a -> Trait+Ky+'b (upcasts). + (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => { + // Upcast coercions permit several things: + // + // 1. Dropping auto traits, e.g., `Foo + Send` to `Foo` + // 2. Tightening the region bound, e.g., `Foo + 'a` to `Foo + 'b` if `'a: 'b` + // 3. Tightening trait to its super traits, eg. `Foo` to `Bar` if `Foo: Bar` + // + // Note that neither of the first two of these changes requires any + // change at runtime. The third needs to change pointer metadata at runtime. + // + // We always perform upcasting coercions when we can because of reason + // #2 (region bounds). + let auto_traits_compatible = data_b + .auto_traits() + // All of a's auto traits need to be in b's auto traits. + .all(|b| data_a.auto_traits().any(|a| a == b)); + if auto_traits_compatible { + let principal_def_id_a = data_a.principal_def_id(); + let principal_def_id_b = data_b.principal_def_id(); + if principal_def_id_a == principal_def_id_b { + // no cyclic + candidates.vec.push(BuiltinUnsizeCandidate); + } else if principal_def_id_a.is_some() && principal_def_id_b.is_some() { + // not casual unsizing, now check whether this is trait upcasting coercion. 
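+                        // For illustration (hypothetical traits, not from this file):
+                        //
+                        //     trait Bar {}
+                        //     trait Foo: Bar {}
+                        //
+                        // coercing `&dyn Foo` to `&dyn Bar` reaches this branch: the two
+                        // principals differ, so we search `Foo`'s supertraits for `Bar`
+                        // and push a `TraitUpcastingUnsizeCandidate` for each match.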
+ let principal_a = data_a.principal().unwrap(); + let target_trait_did = principal_def_id_b.unwrap(); + let source_trait_ref = principal_a.with_self_ty(self.tcx(), source); + if let Some((deref_output_ty, deref_output_trait_did)) = self + .need_migrate_deref_output_trait_object( + source, + &obligation.cause, + obligation.param_env, + ) + { + if deref_output_trait_did == target_trait_did { + self.tcx().struct_span_lint_hir( + DEREF_INTO_DYN_SUPERTRAIT, + obligation.cause.body_id, + obligation.cause.span, + |lint| { + lint.build(&format!( + "`{}` implements `Deref` with supertrait `{}` as output", + source, + deref_output_ty + )).emit(); + }, + ); + return; + } + } + + for (idx, upcast_trait_ref) in + util::supertraits(self.tcx(), source_trait_ref).enumerate() + { + if upcast_trait_ref.def_id() == target_trait_did { + candidates.vec.push(TraitUpcastingUnsizeCandidate(idx)); + } + } + } + } + } + + // `T` -> `Trait` + (_, &ty::Dynamic(..)) => { + candidates.vec.push(BuiltinUnsizeCandidate); + } + + // Ambiguous handling is below `T` -> `Trait`, because inference + // variables can still implement `Unsize` and nested + // obligations will have the final say (likely deferred). + (&ty::Infer(ty::TyVar(_)), _) | (_, &ty::Infer(ty::TyVar(_))) => { + debug!("assemble_candidates_for_unsizing: ambiguous"); + candidates.ambiguous = true; + } + + // `[T; n]` -> `[T]` + (&ty::Array(..), &ty::Slice(_)) => { + candidates.vec.push(BuiltinUnsizeCandidate); + } + + // `Struct` -> `Struct` + (&ty::Adt(def_id_a, _), &ty::Adt(def_id_b, _)) if def_id_a.is_struct() => { + if def_id_a == def_id_b { + candidates.vec.push(BuiltinUnsizeCandidate); + } + } + + // `(.., T)` -> `(.., U)` + (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => { + if tys_a.len() == tys_b.len() { + candidates.vec.push(BuiltinUnsizeCandidate); + } + } + + _ => {} + }; + } + + #[tracing::instrument(level = "debug", skip(self, obligation, candidates))] + fn assemble_candidates_for_transmutability( + &mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) { + if obligation.has_param_types_or_consts() { + return; + } + + if obligation.has_infer_types_or_consts() { + candidates.ambiguous = true; + return; + } + + candidates.vec.push(TransmutabilityCandidate); + } + + #[tracing::instrument(level = "debug", skip(self, obligation, candidates))] + fn assemble_candidates_for_trait_alias( + &mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) { + // Okay to skip binder here because the tests we do below do not involve bound regions. + let self_ty = obligation.self_ty().skip_binder(); + debug!(?self_ty); + + let def_id = obligation.predicate.def_id(); + + if self.tcx().is_trait_alias(def_id) { + candidates.vec.push(TraitAliasCandidate(def_id)); + } + } + + /// Assembles the trait which are built-in to the language itself: + /// `Copy`, `Clone` and `Sized`. 
+ #[tracing::instrument(level = "debug", skip(self, candidates))] + fn assemble_builtin_bound_candidates( + &mut self, + conditions: BuiltinImplConditions<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) { + match conditions { + BuiltinImplConditions::Where(nested) => { + candidates + .vec + .push(BuiltinCandidate { has_nested: !nested.skip_binder().is_empty() }); + } + BuiltinImplConditions::None => {} + BuiltinImplConditions::Ambiguous => { + candidates.ambiguous = true; + } + } + } + + fn assemble_const_destruct_candidates( + &mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) { + // If the predicate is `~const Destruct` in a non-const environment, we don't actually need + // to check anything. We'll short-circuit checking any obligations in confirmation, too. + if !obligation.is_const() { + candidates.vec.push(ConstDestructCandidate(None)); + return; + } + + let self_ty = self.infcx().shallow_resolve(obligation.self_ty()); + match self_ty.skip_binder().kind() { + ty::Opaque(..) + | ty::Dynamic(..) + | ty::Error(_) + | ty::Bound(..) + | ty::Param(_) + | ty::Placeholder(_) + | ty::Projection(_) => { + // We don't know if these are `~const Destruct`, at least + // not structurally... so don't push a candidate. + } + + ty::Bool + | ty::Char + | ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Infer(ty::IntVar(_)) + | ty::Infer(ty::FloatVar(_)) + | ty::Str + | ty::RawPtr(_) + | ty::Ref(..) + | ty::FnDef(..) + | ty::FnPtr(_) + | ty::Never + | ty::Foreign(_) + | ty::Array(..) + | ty::Slice(_) + | ty::Closure(..) + | ty::Generator(..) + | ty::Tuple(_) + | ty::GeneratorWitness(_) => { + // These are built-in, and cannot have a custom `impl const Destruct`. + candidates.vec.push(ConstDestructCandidate(None)); + } + + ty::Adt(..) => { + // Find a custom `impl Drop` impl, if it exists + let relevant_impl = self.tcx().find_map_relevant_impl( + self.tcx().require_lang_item(LangItem::Drop, None), + obligation.predicate.skip_binder().trait_ref.self_ty(), + Some, + ); + + if let Some(impl_def_id) = relevant_impl { + // Check that `impl Drop` is actually const, if there is a custom impl + if self.tcx().constness(impl_def_id) == hir::Constness::Const { + candidates.vec.push(ConstDestructCandidate(Some(impl_def_id))); + } + } else { + // Otherwise check the ADT like a built-in type (structurally) + candidates.vec.push(ConstDestructCandidate(None)); + } + } + + ty::Infer(_) => { + candidates.ambiguous = true; + } + } + } +} diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs new file mode 100644 index 000000000..2a1099fc8 --- /dev/null +++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs @@ -0,0 +1,1266 @@ +//! Confirmation. +//! +//! Confirmation unifies the output type parameters of the trait +//! with the values found in the obligation, possibly yielding a +//! type error. See the [rustc dev guide] for more details. +//! +//! [rustc dev guide]: +//! 
https://rustc-dev-guide.rust-lang.org/traits/resolution.html#confirmation +use rustc_data_structures::stack::ensure_sufficient_stack; +use rustc_hir::lang_items::LangItem; +use rustc_index::bit_set::GrowableBitSet; +use rustc_infer::infer::InferOk; +use rustc_infer::infer::LateBoundRegionConversionTime::HigherRankedType; +use rustc_middle::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef}; +use rustc_middle::ty::{self, GenericParamDefKind, Ty, TyCtxt}; +use rustc_middle::ty::{ToPolyTraitRef, ToPredicate}; +use rustc_span::def_id::DefId; + +use crate::traits::project::{normalize_with_depth, normalize_with_depth_to}; +use crate::traits::util::{self, closure_trait_ref_and_return_type, predicate_for_trait_def}; +use crate::traits::{ + BuiltinDerivedObligation, ImplDerivedObligation, ImplDerivedObligationCause, ImplSource, + ImplSourceAutoImplData, ImplSourceBuiltinData, ImplSourceClosureData, + ImplSourceConstDestructData, ImplSourceDiscriminantKindData, ImplSourceFnPointerData, + ImplSourceGeneratorData, ImplSourceObjectData, ImplSourcePointeeData, ImplSourceTraitAliasData, + ImplSourceTraitUpcastingData, ImplSourceUserDefinedData, Normalized, ObjectCastObligation, + Obligation, ObligationCause, OutputTypeParameterMismatch, PredicateObligation, Selection, + SelectionError, TraitNotObjectSafe, TraitObligation, Unimplemented, VtblSegment, +}; + +use super::BuiltinImplConditions; +use super::SelectionCandidate::{self, *}; +use super::SelectionContext; + +use std::iter; +use std::ops::ControlFlow; + +impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { + #[instrument(level = "debug", skip(self))] + pub(super) fn confirm_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + candidate: SelectionCandidate<'tcx>, + ) -> Result, SelectionError<'tcx>> { + let mut impl_src = match candidate { + BuiltinCandidate { has_nested } => { + let data = self.confirm_builtin_candidate(obligation, has_nested); + ImplSource::Builtin(data) + } + + TransmutabilityCandidate => { + let data = self.confirm_transmutability_candidate(obligation)?; + ImplSource::Builtin(data) + } + + ParamCandidate(param) => { + let obligations = + self.confirm_param_candidate(obligation, param.map_bound(|t| t.trait_ref)); + ImplSource::Param(obligations, param.skip_binder().constness) + } + + ImplCandidate(impl_def_id) => { + ImplSource::UserDefined(self.confirm_impl_candidate(obligation, impl_def_id)) + } + + AutoImplCandidate(trait_def_id) => { + let data = self.confirm_auto_impl_candidate(obligation, trait_def_id); + ImplSource::AutoImpl(data) + } + + ProjectionCandidate(idx) => { + let obligations = self.confirm_projection_candidate(obligation, idx)?; + // FIXME(jschievink): constness + ImplSource::Param(obligations, ty::BoundConstness::NotConst) + } + + ObjectCandidate(idx) => { + let data = self.confirm_object_candidate(obligation, idx)?; + ImplSource::Object(data) + } + + ClosureCandidate => { + let vtable_closure = self.confirm_closure_candidate(obligation)?; + ImplSource::Closure(vtable_closure) + } + + GeneratorCandidate => { + let vtable_generator = self.confirm_generator_candidate(obligation)?; + ImplSource::Generator(vtable_generator) + } + + FnPointerCandidate { .. 
} => { + let data = self.confirm_fn_pointer_candidate(obligation)?; + ImplSource::FnPointer(data) + } + + DiscriminantKindCandidate => { + ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData) + } + + PointeeCandidate => ImplSource::Pointee(ImplSourcePointeeData), + + TraitAliasCandidate(alias_def_id) => { + let data = self.confirm_trait_alias_candidate(obligation, alias_def_id); + ImplSource::TraitAlias(data) + } + + BuiltinObjectCandidate => { + // This indicates something like `Trait + Send: Send`. In this case, we know that + // this holds because that's what the object type is telling us, and there's really + // no additional obligations to prove and no types in particular to unify, etc. + ImplSource::Param(Vec::new(), ty::BoundConstness::NotConst) + } + + BuiltinUnsizeCandidate => { + let data = self.confirm_builtin_unsize_candidate(obligation)?; + ImplSource::Builtin(data) + } + + TraitUpcastingUnsizeCandidate(idx) => { + let data = self.confirm_trait_upcasting_unsize_candidate(obligation, idx)?; + ImplSource::TraitUpcasting(data) + } + + ConstDestructCandidate(def_id) => { + let data = self.confirm_const_destruct_candidate(obligation, def_id)?; + ImplSource::ConstDestruct(data) + } + }; + + if !obligation.predicate.is_const_if_const() { + // normalize nested predicates according to parent predicate's constness. + impl_src = impl_src.map(|mut o| { + o.predicate = o.predicate.without_const(self.tcx()); + o + }); + } + + Ok(impl_src) + } + + fn confirm_projection_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + idx: usize, + ) -> Result>, SelectionError<'tcx>> { + let tcx = self.tcx(); + + let trait_predicate = self.infcx.shallow_resolve(obligation.predicate); + let placeholder_trait_predicate = + self.infcx().replace_bound_vars_with_placeholders(trait_predicate).trait_ref; + let placeholder_self_ty = placeholder_trait_predicate.self_ty(); + let placeholder_trait_predicate = ty::Binder::dummy(placeholder_trait_predicate); + let (def_id, substs) = match *placeholder_self_ty.kind() { + ty::Projection(proj) => (proj.item_def_id, proj.substs), + ty::Opaque(def_id, substs) => (def_id, substs), + _ => bug!("projection candidate for unexpected type: {:?}", placeholder_self_ty), + }; + + let candidate_predicate = + tcx.bound_item_bounds(def_id).map_bound(|i| i[idx]).subst(tcx, substs); + let candidate = candidate_predicate + .to_opt_poly_trait_pred() + .expect("projection candidate is not a trait predicate") + .map_bound(|t| t.trait_ref); + let mut obligations = Vec::new(); + let candidate = normalize_with_depth_to( + self, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth + 1, + candidate, + &mut obligations, + ); + + obligations.extend(self.infcx.commit_if_ok(|_| { + self.infcx + .at(&obligation.cause, obligation.param_env) + .sup(placeholder_trait_predicate, candidate) + .map(|InferOk { obligations, .. }| obligations) + .map_err(|_| Unimplemented) + })?); + + if let ty::Projection(..) 
= placeholder_self_ty.kind() { + let predicates = tcx.predicates_of(def_id).instantiate_own(tcx, substs).predicates; + debug!(?predicates, "projection predicates"); + for predicate in predicates { + let normalized = normalize_with_depth_to( + self, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth + 1, + predicate, + &mut obligations, + ); + obligations.push(Obligation::with_depth( + obligation.cause.clone(), + obligation.recursion_depth + 1, + obligation.param_env, + normalized, + )); + } + } + + Ok(obligations) + } + + fn confirm_param_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + param: ty::PolyTraitRef<'tcx>, + ) -> Vec> { + debug!(?obligation, ?param, "confirm_param_candidate"); + + // During evaluation, we already checked that this + // where-clause trait-ref could be unified with the obligation + // trait-ref. Repeat that unification now without any + // transactional boundary; it should not fail. + match self.match_where_clause_trait_ref(obligation, param) { + Ok(obligations) => obligations, + Err(()) => { + bug!( + "Where clause `{:?}` was applicable to `{:?}` but now is not", + param, + obligation + ); + } + } + } + + fn confirm_builtin_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + has_nested: bool, + ) -> ImplSourceBuiltinData> { + debug!(?obligation, ?has_nested, "confirm_builtin_candidate"); + + let lang_items = self.tcx().lang_items(); + let obligations = if has_nested { + let trait_def = obligation.predicate.def_id(); + let conditions = if Some(trait_def) == lang_items.sized_trait() { + self.sized_conditions(obligation) + } else if Some(trait_def) == lang_items.copy_trait() { + self.copy_clone_conditions(obligation) + } else if Some(trait_def) == lang_items.clone_trait() { + self.copy_clone_conditions(obligation) + } else { + bug!("unexpected builtin trait {:?}", trait_def) + }; + let BuiltinImplConditions::Where(nested) = conditions else { + bug!("obligation {:?} had matched a builtin impl but now doesn't", obligation); + }; + + let cause = obligation.derived_cause(BuiltinDerivedObligation); + ensure_sufficient_stack(|| { + self.collect_predicates_for_types( + obligation.param_env, + cause, + obligation.recursion_depth + 1, + trait_def, + nested, + ) + }) + } else { + vec![] + }; + + debug!(?obligations); + + ImplSourceBuiltinData { nested: obligations } + } + + fn confirm_transmutability_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + ) -> Result>, SelectionError<'tcx>> { + debug!(?obligation, "confirm_transmutability_candidate"); + + let predicate = obligation.predicate; + + let type_at = |i| predicate.map_bound(|p| p.trait_ref.substs.type_at(i)); + let bool_at = |i| { + predicate + .skip_binder() + .trait_ref + .substs + .const_at(i) + .try_eval_bool(self.tcx(), obligation.param_env) + .unwrap_or(true) + }; + + let src_and_dst = predicate.map_bound(|p| rustc_transmute::Types { + src: p.trait_ref.substs.type_at(1), + dst: p.trait_ref.substs.type_at(0), + }); + + let scope = type_at(2).skip_binder(); + + let assume = rustc_transmute::Assume { + alignment: bool_at(3), + lifetimes: bool_at(4), + validity: bool_at(5), + visibility: bool_at(6), + }; + + let cause = obligation.cause.clone(); + + let mut transmute_env = rustc_transmute::TransmuteTypeEnv::new(self.infcx); + + let maybe_transmutable = transmute_env.is_transmutable(cause, src_and_dst, scope, assume); + + use rustc_transmute::Answer; + + match maybe_transmutable { + Answer::Yes => Ok(ImplSourceBuiltinData { nested: vec![] }), + _ => 
Err(Unimplemented), + } + } + + /// This handles the case where an `auto trait Foo` impl is being used. + /// The idea is that the impl applies to `X : Foo` if the following conditions are met: + /// + /// 1. For each constituent type `Y` in `X`, `Y : Foo` holds + /// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds. + fn confirm_auto_impl_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + trait_def_id: DefId, + ) -> ImplSourceAutoImplData> { + debug!(?obligation, ?trait_def_id, "confirm_auto_impl_candidate"); + + let self_ty = self.infcx.shallow_resolve(obligation.predicate.self_ty()); + let types = self.constituent_types_for_ty(self_ty); + self.vtable_auto_impl(obligation, trait_def_id, types) + } + + /// See `confirm_auto_impl_candidate`. + fn vtable_auto_impl( + &mut self, + obligation: &TraitObligation<'tcx>, + trait_def_id: DefId, + nested: ty::Binder<'tcx, Vec>>, + ) -> ImplSourceAutoImplData> { + debug!(?nested, "vtable_auto_impl"); + ensure_sufficient_stack(|| { + let cause = obligation.derived_cause(BuiltinDerivedObligation); + + let poly_trait_ref = obligation.predicate.to_poly_trait_ref(); + let trait_ref = self.infcx.replace_bound_vars_with_placeholders(poly_trait_ref); + let trait_obligations: Vec> = self.impl_or_trait_obligations( + &cause, + obligation.recursion_depth + 1, + obligation.param_env, + trait_def_id, + &trait_ref.substs, + obligation.predicate, + ); + + let mut obligations = self.collect_predicates_for_types( + obligation.param_env, + cause, + obligation.recursion_depth + 1, + trait_def_id, + nested, + ); + + // Adds the predicates from the trait. Note that this contains a `Self: Trait` + // predicate as usual. It won't have any effect since auto traits are coinductive. + obligations.extend(trait_obligations); + + debug!(?obligations, "vtable_auto_impl"); + + ImplSourceAutoImplData { trait_def_id, nested: obligations } + }) + } + + fn confirm_impl_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + impl_def_id: DefId, + ) -> ImplSourceUserDefinedData<'tcx, PredicateObligation<'tcx>> { + debug!(?obligation, ?impl_def_id, "confirm_impl_candidate"); + + // First, create the substitutions by matching the impl again, + // this time not in a probe. + let substs = self.rematch_impl(impl_def_id, obligation); + debug!(?substs, "impl substs"); + ensure_sufficient_stack(|| { + self.vtable_impl( + impl_def_id, + substs, + &obligation.cause, + obligation.recursion_depth + 1, + obligation.param_env, + obligation.predicate, + ) + }) + } + + fn vtable_impl( + &mut self, + impl_def_id: DefId, + substs: Normalized<'tcx, SubstsRef<'tcx>>, + cause: &ObligationCause<'tcx>, + recursion_depth: usize, + param_env: ty::ParamEnv<'tcx>, + parent_trait_pred: ty::Binder<'tcx, ty::TraitPredicate<'tcx>>, + ) -> ImplSourceUserDefinedData<'tcx, PredicateObligation<'tcx>> { + debug!(?impl_def_id, ?substs, ?recursion_depth, "vtable_impl"); + + let mut impl_obligations = self.impl_or_trait_obligations( + cause, + recursion_depth, + param_env, + impl_def_id, + &substs.value, + parent_trait_pred, + ); + + debug!(?impl_obligations, "vtable_impl"); + + // Because of RFC447, the impl-trait-ref and obligations + // are sufficient to determine the impl substs, without + // relying on projections in the impl-trait-ref. 
+ // + // e.g., `impl> Foo<::T> for V` + impl_obligations.extend(substs.obligations); + + ImplSourceUserDefinedData { impl_def_id, substs: substs.value, nested: impl_obligations } + } + + fn confirm_object_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + index: usize, + ) -> Result>, SelectionError<'tcx>> { + let tcx = self.tcx(); + debug!(?obligation, ?index, "confirm_object_candidate"); + + let trait_predicate = self.infcx.replace_bound_vars_with_placeholders(obligation.predicate); + let self_ty = self.infcx.shallow_resolve(trait_predicate.self_ty()); + let obligation_trait_ref = ty::Binder::dummy(trait_predicate.trait_ref); + let ty::Dynamic(data, ..) = *self_ty.kind() else { + span_bug!(obligation.cause.span, "object candidate with non-object"); + }; + + let object_trait_ref = data.principal().unwrap_or_else(|| { + span_bug!(obligation.cause.span, "object candidate with no principal") + }); + let object_trait_ref = self.infcx.replace_bound_vars_with_fresh_vars( + obligation.cause.span, + HigherRankedType, + object_trait_ref, + ); + let object_trait_ref = object_trait_ref.with_self_ty(self.tcx(), self_ty); + + let mut nested = vec![]; + + let mut supertraits = util::supertraits(tcx, ty::Binder::dummy(object_trait_ref)); + let unnormalized_upcast_trait_ref = + supertraits.nth(index).expect("supertraits iterator no longer has as many elements"); + + let upcast_trait_ref = normalize_with_depth_to( + self, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth + 1, + unnormalized_upcast_trait_ref, + &mut nested, + ); + + nested.extend(self.infcx.commit_if_ok(|_| { + self.infcx + .at(&obligation.cause, obligation.param_env) + .sup(obligation_trait_ref, upcast_trait_ref) + .map(|InferOk { obligations, .. }| obligations) + .map_err(|_| Unimplemented) + })?); + + // Check supertraits hold. This is so that their associated type bounds + // will be checked in the code below. + for super_trait in tcx + .super_predicates_of(trait_predicate.def_id()) + .instantiate(tcx, trait_predicate.trait_ref.substs) + .predicates + .into_iter() + { + let normalized_super_trait = normalize_with_depth_to( + self, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth + 1, + super_trait, + &mut nested, + ); + nested.push(Obligation::new( + obligation.cause.clone(), + obligation.param_env, + normalized_super_trait, + )); + } + + let assoc_types: Vec<_> = tcx + .associated_items(trait_predicate.def_id()) + .in_definition_order() + .filter_map( + |item| if item.kind == ty::AssocKind::Type { Some(item.def_id) } else { None }, + ) + .collect(); + + for assoc_type in assoc_types { + let defs: &ty::Generics = tcx.generics_of(assoc_type); + + if !defs.params.is_empty() && !tcx.features().generic_associated_types_extended { + tcx.sess.delay_span_bug( + obligation.cause.span, + "GATs in trait object shouldn't have been considered", + ); + return Err(SelectionError::Unimplemented); + } + + // This maybe belongs in wf, but that can't (doesn't) handle + // higher-ranked things. + // Prevent, e.g., `dyn Iterator`. 
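+            // For illustration (hypothetical trait, not from this file): when confirming
+            // `dyn Iterator<Item = u32>: Iterator`, the item bounds of `Iterator::Item`
+            // (e.g. the implicit `Sized` bound) are substituted below with
+            // `Self = dyn Iterator<Item = u32>` and pushed as nested obligations, so an
+            // object type cannot sidestep the bounds its associated types must satisfy.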
+ for bound in self.tcx().bound_item_bounds(assoc_type).transpose_iter() { + let subst_bound = + if defs.count() == 0 { + bound.subst(tcx, trait_predicate.trait_ref.substs) + } else { + let mut substs = smallvec::SmallVec::with_capacity(defs.count()); + substs.extend(trait_predicate.trait_ref.substs.iter()); + let mut bound_vars: smallvec::SmallVec<[ty::BoundVariableKind; 8]> = + smallvec::SmallVec::with_capacity( + bound.0.kind().bound_vars().len() + defs.count(), + ); + bound_vars.extend(bound.0.kind().bound_vars().into_iter()); + InternalSubsts::fill_single(&mut substs, defs, &mut |param, _| match param + .kind + { + GenericParamDefKind::Type { .. } => { + let kind = ty::BoundTyKind::Param(param.name); + let bound_var = ty::BoundVariableKind::Ty(kind); + bound_vars.push(bound_var); + tcx.mk_ty(ty::Bound( + ty::INNERMOST, + ty::BoundTy { + var: ty::BoundVar::from_usize(bound_vars.len() - 1), + kind, + }, + )) + .into() + } + GenericParamDefKind::Lifetime => { + let kind = ty::BoundRegionKind::BrNamed(param.def_id, param.name); + let bound_var = ty::BoundVariableKind::Region(kind); + bound_vars.push(bound_var); + tcx.mk_region(ty::ReLateBound( + ty::INNERMOST, + ty::BoundRegion { + var: ty::BoundVar::from_usize(bound_vars.len() - 1), + kind, + }, + )) + .into() + } + GenericParamDefKind::Const { .. } => { + let bound_var = ty::BoundVariableKind::Const; + bound_vars.push(bound_var); + tcx.mk_const(ty::ConstS { + ty: tcx.type_of(param.def_id), + kind: ty::ConstKind::Bound( + ty::INNERMOST, + ty::BoundVar::from_usize(bound_vars.len() - 1), + ), + }) + .into() + } + }); + let bound_vars = tcx.mk_bound_variable_kinds(bound_vars.into_iter()); + let assoc_ty_substs = tcx.intern_substs(&substs); + + let bound_vars = tcx.mk_bound_variable_kinds(bound_vars.into_iter()); + let bound = + bound.map_bound(|b| b.kind().skip_binder()).subst(tcx, assoc_ty_substs); + tcx.mk_predicate(ty::Binder::bind_with_vars(bound, bound_vars)) + }; + let normalized_bound = normalize_with_depth_to( + self, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth + 1, + subst_bound, + &mut nested, + ); + nested.push(Obligation::new( + obligation.cause.clone(), + obligation.param_env, + normalized_bound, + )); + } + } + + debug!(?nested, "object nested obligations"); + + let vtable_base = super::super::vtable_trait_first_method_offset( + tcx, + (unnormalized_upcast_trait_ref, ty::Binder::dummy(object_trait_ref)), + ); + + Ok(ImplSourceObjectData { upcast_trait_ref, vtable_base, nested }) + } + + fn confirm_fn_pointer_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + ) -> Result>, SelectionError<'tcx>> + { + debug!(?obligation, "confirm_fn_pointer_candidate"); + + // Okay to skip binder; it is reintroduced below. 
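+        // For illustration (hypothetical signature, not from this file): matching
+        // `fn(u32, bool) -> String` against an `Fn`-family obligation, the call below
+        // produces the trait ref with tupled arguments, `Fn<(u32, bool)>`
+        // (`TupleArgumentsFlag::Yes`); the `String` return type is handled separately
+        // as the `Output` associated type.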
+ let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder()); + let sig = self_ty.fn_sig(self.tcx()); + let trait_ref = closure_trait_ref_and_return_type( + self.tcx(), + obligation.predicate.def_id(), + self_ty, + sig, + util::TupleArgumentsFlag::Yes, + ) + .map_bound(|(trait_ref, _)| trait_ref); + + let nested = self.confirm_poly_trait_refs(obligation, trait_ref)?; + Ok(ImplSourceFnPointerData { fn_ty: self_ty, nested }) + } + + fn confirm_trait_alias_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + alias_def_id: DefId, + ) -> ImplSourceTraitAliasData<'tcx, PredicateObligation<'tcx>> { + debug!(?obligation, ?alias_def_id, "confirm_trait_alias_candidate"); + + let predicate = self.infcx().replace_bound_vars_with_placeholders(obligation.predicate); + let trait_ref = predicate.trait_ref; + let trait_def_id = trait_ref.def_id; + let substs = trait_ref.substs; + + let trait_obligations = self.impl_or_trait_obligations( + &obligation.cause, + obligation.recursion_depth, + obligation.param_env, + trait_def_id, + &substs, + obligation.predicate, + ); + + debug!(?trait_def_id, ?trait_obligations, "trait alias obligations"); + + ImplSourceTraitAliasData { alias_def_id, substs, nested: trait_obligations } + } + + fn confirm_generator_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + ) -> Result>, SelectionError<'tcx>> + { + // Okay to skip binder because the substs on generator types never + // touch bound regions, they just capture the in-scope + // type/region parameters. + let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder()); + let ty::Generator(generator_def_id, substs, _) = *self_ty.kind() else { + bug!("closure candidate for non-closure {:?}", obligation); + }; + + debug!(?obligation, ?generator_def_id, ?substs, "confirm_generator_candidate"); + + let trait_ref = self.generator_trait_ref_unnormalized(obligation, substs); + + let nested = self.confirm_poly_trait_refs(obligation, trait_ref)?; + debug!(?trait_ref, ?nested, "generator candidate obligations"); + + Ok(ImplSourceGeneratorData { generator_def_id, substs, nested }) + } + + #[instrument(skip(self), level = "debug")] + fn confirm_closure_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + ) -> Result>, SelectionError<'tcx>> { + let kind = self + .tcx() + .fn_trait_kind_from_lang_item(obligation.predicate.def_id()) + .unwrap_or_else(|| bug!("closure candidate for non-fn trait {:?}", obligation)); + + // Okay to skip binder because the substs on closure types never + // touch bound regions, they just capture the in-scope + // type/region parameters. 
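+        // For illustration (hypothetical closure, not from this file): for
+        //
+        //     let c = |x: i32| x + 1;
+        //
+        // `self_ty` is the unique closure type and its substs record the signature,
+        // so `closure_trait_ref_unnormalized` below yields a trait ref like
+        // `C: Fn<(i32,)>` for whichever `Fn*` trait the obligation names.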
+ let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder()); + let ty::Closure(closure_def_id, substs) = *self_ty.kind() else { + bug!("closure candidate for non-closure {:?}", obligation); + }; + + let trait_ref = self.closure_trait_ref_unnormalized(obligation, substs); + let mut nested = self.confirm_poly_trait_refs(obligation, trait_ref)?; + + debug!(?closure_def_id, ?trait_ref, ?nested, "confirm closure candidate obligations"); + + // FIXME: Chalk + + if !self.tcx().sess.opts.unstable_opts.chalk { + nested.push(Obligation::new( + obligation.cause.clone(), + obligation.param_env, + ty::Binder::dummy(ty::PredicateKind::ClosureKind(closure_def_id, substs, kind)) + .to_predicate(self.tcx()), + )); + } + + Ok(ImplSourceClosureData { closure_def_id, substs, nested }) + } + + /// In the case of closure types and fn pointers, + /// we currently treat the input type parameters on the trait as + /// outputs. This means that when we have a match we have only + /// considered the self type, so we have to go back and make sure + /// to relate the argument types too. This is kind of wrong, but + /// since we control the full set of impls, also not that wrong, + /// and it DOES yield better error messages (since we don't report + /// errors as if there is no applicable impl, but rather report + /// errors are about mismatched argument types. + /// + /// Here is an example. Imagine we have a closure expression + /// and we desugared it so that the type of the expression is + /// `Closure`, and `Closure` expects `i32` as argument. Then it + /// is "as if" the compiler generated this impl: + /// ```ignore (illustrative) + /// impl Fn(i32) for Closure { ... } + /// ``` + /// Now imagine our obligation is `Closure: Fn(usize)`. So far + /// we have matched the self type `Closure`. At this point we'll + /// compare the `i32` to `usize` and generate an error. + /// + /// Note that this checking occurs *after* the impl has selected, + /// because these output type parameters should not affect the + /// selection of the impl. Therefore, if there is a mismatch, we + /// report an error to the user. + #[instrument(skip(self), level = "trace")] + fn confirm_poly_trait_refs( + &mut self, + obligation: &TraitObligation<'tcx>, + expected_trait_ref: ty::PolyTraitRef<'tcx>, + ) -> Result>, SelectionError<'tcx>> { + let obligation_trait_ref = obligation.predicate.to_poly_trait_ref(); + // Normalize the obligation and expected trait refs together, because why not + let Normalized { obligations: nested, value: (obligation_trait_ref, expected_trait_ref) } = + ensure_sufficient_stack(|| { + normalize_with_depth( + self, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth + 1, + (obligation_trait_ref, expected_trait_ref), + ) + }); + + self.infcx + .at(&obligation.cause, obligation.param_env) + .sup(obligation_trait_ref, expected_trait_ref) + .map(|InferOk { mut obligations, .. }| { + obligations.extend(nested); + obligations + }) + .map_err(|e| OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e)) + } + + fn confirm_trait_upcasting_unsize_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + idx: usize, + ) -> Result>, SelectionError<'tcx>> + { + let tcx = self.tcx(); + + // `assemble_candidates_for_unsizing` should ensure there are no late-bound + // regions here. See the comment there for more details. 
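+        // Illustrative sketch (assumed example, not from the original sources): the
+        // coercion confirmed here has the shape
+        //
+        //     trait Super {}
+        //     trait Sub: Super {}
+        //     fn upcast(x: &dyn Sub) -> &dyn Super { x }
+        //
+        // (at this point behind the unstable `trait_upcasting` feature), where
+        // `idx` selects which supertrait of the source principal we upcast to and
+        // the source vtable must provide a slot usable for the target trait.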
+ let source = self.infcx.shallow_resolve(obligation.self_ty().no_bound_vars().unwrap()); + let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1); + let target = self.infcx.shallow_resolve(target); + + debug!(?source, ?target, "confirm_trait_upcasting_unsize_candidate"); + + let mut nested = vec![]; + let source_trait_ref; + let upcast_trait_ref; + match (source.kind(), target.kind()) { + // TraitA+Kx+'a -> TraitB+Ky+'b (trait upcasting coercion). + (&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => { + // See `assemble_candidates_for_unsizing` for more info. + // We already checked the compatibility of auto traits within `assemble_candidates_for_unsizing`. + let principal_a = data_a.principal().unwrap(); + source_trait_ref = principal_a.with_self_ty(tcx, source); + upcast_trait_ref = util::supertraits(tcx, source_trait_ref).nth(idx).unwrap(); + assert_eq!(data_b.principal_def_id(), Some(upcast_trait_ref.def_id())); + let existential_predicate = upcast_trait_ref.map_bound(|trait_ref| { + ty::ExistentialPredicate::Trait(ty::ExistentialTraitRef::erase_self_ty( + tcx, trait_ref, + )) + }); + let iter = Some(existential_predicate) + .into_iter() + .chain( + data_a + .projection_bounds() + .map(|b| b.map_bound(ty::ExistentialPredicate::Projection)), + ) + .chain( + data_b + .auto_traits() + .map(ty::ExistentialPredicate::AutoTrait) + .map(ty::Binder::dummy), + ); + let existential_predicates = tcx.mk_poly_existential_predicates(iter); + let source_trait = tcx.mk_dynamic(existential_predicates, r_b); + + // Require that the traits involved in this upcast are **equal**; + // only the **lifetime bound** is changed. + let InferOk { obligations, .. } = self + .infcx + .at(&obligation.cause, obligation.param_env) + .sup(target, source_trait) + .map_err(|_| Unimplemented)?; + nested.extend(obligations); + + // Register one obligation for 'a: 'b. + let cause = ObligationCause::new( + obligation.cause.span, + obligation.cause.body_id, + ObjectCastObligation(source, target), + ); + let outlives = ty::OutlivesPredicate(r_a, r_b); + nested.push(Obligation::with_depth( + cause, + obligation.recursion_depth + 1, + obligation.param_env, + obligation.predicate.rebind(outlives).to_predicate(tcx), + )); + } + _ => bug!(), + }; + + let vtable_segment_callback = { + let mut vptr_offset = 0; + move |segment| { + match segment { + VtblSegment::MetadataDSA => { + vptr_offset += TyCtxt::COMMON_VTABLE_ENTRIES.len(); + } + VtblSegment::TraitOwnEntries { trait_ref, emit_vptr } => { + vptr_offset += util::count_own_vtable_entries(tcx, trait_ref); + if trait_ref == upcast_trait_ref { + if emit_vptr { + return ControlFlow::Break(Some(vptr_offset)); + } else { + return ControlFlow::Break(None); + } + } + + if emit_vptr { + vptr_offset += 1; + } + } + } + ControlFlow::Continue(()) + } + }; + + let vtable_vptr_slot = + super::super::prepare_vtable_segments(tcx, source_trait_ref, vtable_segment_callback) + .unwrap(); + + Ok(ImplSourceTraitUpcastingData { upcast_trait_ref, vtable_vptr_slot, nested }) + } + + fn confirm_builtin_unsize_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + ) -> Result>, SelectionError<'tcx>> { + let tcx = self.tcx(); + + // `assemble_candidates_for_unsizing` should ensure there are no late-bound + // regions here. See the comment there for more details. 
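+        // Illustrative sketch (assumed examples, not from the original sources);
+        // the arms below cover coercions such as
+        //
+        //     let a: &[u8] = &[1u8, 2, 3];          // `[u8; 3]` -> `[u8]`
+        //     let b: &dyn std::fmt::Debug = &0u32;  // `u32` -> `dyn Debug`
+        //
+        // as well as `dyn Trait + Send + 'a` -> `dyn Trait + 'b` (dropping auto
+        // traits, shrinking the region) and struct/tuple tail unsizing, e.g. a
+        // hypothetical `MyStruct<[u8; 4]>` to `MyStruct<[u8]>` behind a pointer.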
+ let source = self.infcx.shallow_resolve(obligation.self_ty().no_bound_vars().unwrap()); + let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1); + let target = self.infcx.shallow_resolve(target); + + debug!(?source, ?target, "confirm_builtin_unsize_candidate"); + + let mut nested = vec![]; + match (source.kind(), target.kind()) { + // Trait+Kx+'a -> Trait+Ky+'b (auto traits and lifetime subtyping). + (&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => { + // See `assemble_candidates_for_unsizing` for more info. + // We already checked the compatibility of auto traits within `assemble_candidates_for_unsizing`. + let iter = data_a + .principal() + .map(|b| b.map_bound(ty::ExistentialPredicate::Trait)) + .into_iter() + .chain( + data_a + .projection_bounds() + .map(|b| b.map_bound(ty::ExistentialPredicate::Projection)), + ) + .chain( + data_b + .auto_traits() + .map(ty::ExistentialPredicate::AutoTrait) + .map(ty::Binder::dummy), + ); + let existential_predicates = tcx.mk_poly_existential_predicates(iter); + let source_trait = tcx.mk_dynamic(existential_predicates, r_b); + + // Require that the traits involved in this upcast are **equal**; + // only the **lifetime bound** is changed. + let InferOk { obligations, .. } = self + .infcx + .at(&obligation.cause, obligation.param_env) + .sup(target, source_trait) + .map_err(|_| Unimplemented)?; + nested.extend(obligations); + + // Register one obligation for 'a: 'b. + let cause = ObligationCause::new( + obligation.cause.span, + obligation.cause.body_id, + ObjectCastObligation(source, target), + ); + let outlives = ty::OutlivesPredicate(r_a, r_b); + nested.push(Obligation::with_depth( + cause, + obligation.recursion_depth + 1, + obligation.param_env, + obligation.predicate.rebind(outlives).to_predicate(tcx), + )); + } + + // `T` -> `Trait` + (_, &ty::Dynamic(ref data, r)) => { + let mut object_dids = data.auto_traits().chain(data.principal_def_id()); + if let Some(did) = object_dids.find(|did| !tcx.is_object_safe(*did)) { + return Err(TraitNotObjectSafe(did)); + } + + let cause = ObligationCause::new( + obligation.cause.span, + obligation.cause.body_id, + ObjectCastObligation(source, target), + ); + + let predicate_to_obligation = |predicate| { + Obligation::with_depth( + cause.clone(), + obligation.recursion_depth + 1, + obligation.param_env, + predicate, + ) + }; + + // Create obligations: + // - Casting `T` to `Trait` + // - For all the various builtin bounds attached to the object cast. (In other + // words, if the object type is `Foo + Send`, this would create an obligation for + // the `Send` check.) + // - Projection predicates + nested.extend( + data.iter().map(|predicate| { + predicate_to_obligation(predicate.with_self_ty(tcx, source)) + }), + ); + + // We can only make objects from sized types. + let tr = ty::Binder::dummy(ty::TraitRef::new( + tcx.require_lang_item(LangItem::Sized, None), + tcx.mk_substs_trait(source, &[]), + )); + nested.push(predicate_to_obligation(tr.without_const().to_predicate(tcx))); + + // If the type is `Foo + 'a`, ensure that the type + // being cast to `Foo + 'a` outlives `'a`: + let outlives = ty::OutlivesPredicate(source, r); + nested.push(predicate_to_obligation(ty::Binder::dummy(outlives).to_predicate(tcx))); + } + + // `[T; n]` -> `[T]` + (&ty::Array(a, _), &ty::Slice(b)) => { + let InferOk { obligations, .. 
} = self + .infcx + .at(&obligation.cause, obligation.param_env) + .eq(b, a) + .map_err(|_| Unimplemented)?; + nested.extend(obligations); + } + + // `Struct` -> `Struct` + (&ty::Adt(def, substs_a), &ty::Adt(_, substs_b)) => { + let maybe_unsizing_param_idx = |arg: GenericArg<'tcx>| match arg.unpack() { + GenericArgKind::Type(ty) => match ty.kind() { + ty::Param(p) => Some(p.index), + _ => None, + }, + + // Lifetimes aren't allowed to change during unsizing. + GenericArgKind::Lifetime(_) => None, + + GenericArgKind::Const(ct) => match ct.kind() { + ty::ConstKind::Param(p) => Some(p.index), + _ => None, + }, + }; + + // FIXME(eddyb) cache this (including computing `unsizing_params`) + // by putting it in a query; it would only need the `DefId` as it + // looks at declared field types, not anything substituted. + + // The last field of the structure has to exist and contain type/const parameters. + let (tail_field, prefix_fields) = + def.non_enum_variant().fields.split_last().ok_or(Unimplemented)?; + let tail_field_ty = tcx.bound_type_of(tail_field.did); + + let mut unsizing_params = GrowableBitSet::new_empty(); + for arg in tail_field_ty.0.walk() { + if let Some(i) = maybe_unsizing_param_idx(arg) { + unsizing_params.insert(i); + } + } + + // Ensure none of the other fields mention the parameters used + // in unsizing. + for field in prefix_fields { + for arg in tcx.type_of(field.did).walk() { + if let Some(i) = maybe_unsizing_param_idx(arg) { + unsizing_params.remove(i); + } + } + } + + if unsizing_params.is_empty() { + return Err(Unimplemented); + } + + // Extract `TailField` and `TailField` from `Struct` and `Struct`. + let source_tail = tail_field_ty.subst(tcx, substs_a); + let target_tail = tail_field_ty.subst(tcx, substs_b); + + // Check that the source struct with the target's + // unsizing parameters is equal to the target. + let substs = tcx.mk_substs(substs_a.iter().enumerate().map(|(i, k)| { + if unsizing_params.contains(i as u32) { substs_b[i] } else { k } + })); + let new_struct = tcx.mk_adt(def, substs); + let InferOk { obligations, .. } = self + .infcx + .at(&obligation.cause, obligation.param_env) + .eq(target, new_struct) + .map_err(|_| Unimplemented)?; + nested.extend(obligations); + + // Construct the nested `TailField: Unsize>` predicate. + nested.push(predicate_for_trait_def( + tcx, + obligation.param_env, + obligation.cause.clone(), + obligation.predicate.def_id(), + obligation.recursion_depth + 1, + source_tail, + &[target_tail.into()], + )); + } + + // `(.., T)` -> `(.., U)` + (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => { + assert_eq!(tys_a.len(), tys_b.len()); + + // The last field of the tuple has to exist. + let (&a_last, a_mid) = tys_a.split_last().ok_or(Unimplemented)?; + let &b_last = tys_b.last().unwrap(); + + // Check that the source tuple with the target's + // last element is equal to the target. + let new_tuple = tcx.mk_tup(a_mid.iter().copied().chain(iter::once(b_last))); + let InferOk { obligations, .. } = self + .infcx + .at(&obligation.cause, obligation.param_env) + .eq(target, new_tuple) + .map_err(|_| Unimplemented)?; + nested.extend(obligations); + + // Construct the nested `T: Unsize` predicate. 
+ nested.push(ensure_sufficient_stack(|| { + predicate_for_trait_def( + tcx, + obligation.param_env, + obligation.cause.clone(), + obligation.predicate.def_id(), + obligation.recursion_depth + 1, + a_last, + &[b_last.into()], + ) + })); + } + + _ => bug!(), + }; + + Ok(ImplSourceBuiltinData { nested }) + } + + fn confirm_const_destruct_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + impl_def_id: Option, + ) -> Result>, SelectionError<'tcx>> { + // `~const Destruct` in a non-const environment is always trivially true, since our type is `Drop` + if !obligation.is_const() { + return Ok(ImplSourceConstDestructData { nested: vec![] }); + } + + let drop_trait = self.tcx().require_lang_item(LangItem::Drop, None); + + let tcx = self.tcx(); + let self_ty = self.infcx.shallow_resolve(obligation.self_ty()); + + let mut nested = vec![]; + let cause = obligation.derived_cause(BuiltinDerivedObligation); + + // If we have a custom `impl const Drop`, then + // first check it like a regular impl candidate. + // This is copied from confirm_impl_candidate but remaps the predicate to `~const Drop` beforehand. + if let Some(impl_def_id) = impl_def_id { + let mut new_obligation = obligation.clone(); + new_obligation.predicate = new_obligation.predicate.map_bound(|mut trait_pred| { + trait_pred.trait_ref.def_id = drop_trait; + trait_pred + }); + let substs = self.rematch_impl(impl_def_id, &new_obligation); + debug!(?substs, "impl substs"); + + let cause = obligation.derived_cause(|derived| { + ImplDerivedObligation(Box::new(ImplDerivedObligationCause { + derived, + impl_def_id, + span: obligation.cause.span, + })) + }); + let obligations = ensure_sufficient_stack(|| { + self.vtable_impl( + impl_def_id, + substs, + &cause, + new_obligation.recursion_depth + 1, + new_obligation.param_env, + obligation.predicate, + ) + }); + nested.extend(obligations.nested); + } + + // We want to confirm the ADT's fields if we have an ADT + let mut stack = match *self_ty.skip_binder().kind() { + ty::Adt(def, substs) => def.all_fields().map(|f| f.ty(tcx, substs)).collect(), + _ => vec![self_ty.skip_binder()], + }; + + while let Some(nested_ty) = stack.pop() { + match *nested_ty.kind() { + // We know these types are trivially drop + ty::Bool + | ty::Char + | ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Infer(ty::IntVar(_)) + | ty::Infer(ty::FloatVar(_)) + | ty::Str + | ty::RawPtr(_) + | ty::Ref(..) + | ty::FnDef(..) + | ty::FnPtr(_) + | ty::Never + | ty::Foreign(_) => {} + + // These types are built-in, so we can fast-track by registering + // nested predicates for their constituent type(s) + ty::Array(ty, _) | ty::Slice(ty) => { + stack.push(ty); + } + ty::Tuple(tys) => { + stack.extend(tys.iter()); + } + ty::Closure(_, substs) => { + stack.push(substs.as_closure().tupled_upvars_ty()); + } + ty::Generator(_, substs, _) => { + let generator = substs.as_generator(); + stack.extend([generator.tupled_upvars_ty(), generator.witness()]); + } + ty::GeneratorWitness(tys) => { + stack.extend(tcx.erase_late_bound_regions(tys).to_vec()); + } + + // If we have a projection type, make sure to normalize it so we replace it + // with a fresh infer variable + ty::Projection(..) 
=> { + let predicate = normalize_with_depth_to( + self, + obligation.param_env, + cause.clone(), + obligation.recursion_depth + 1, + self_ty + .rebind(ty::TraitPredicate { + trait_ref: ty::TraitRef { + def_id: self.tcx().require_lang_item(LangItem::Destruct, None), + substs: self.tcx().mk_substs_trait(nested_ty, &[]), + }, + constness: ty::BoundConstness::ConstIfConst, + polarity: ty::ImplPolarity::Positive, + }) + .to_predicate(tcx), + &mut nested, + ); + + nested.push(Obligation::with_depth( + cause.clone(), + obligation.recursion_depth + 1, + obligation.param_env, + predicate, + )); + } + + // If we have any other type (e.g. an ADT), just register a nested obligation + // since it's either not `const Drop` (and we raise an error during selection), + // or it's an ADT (and we need to check for a custom impl during selection) + _ => { + let predicate = self_ty + .rebind(ty::TraitPredicate { + trait_ref: ty::TraitRef { + def_id: self.tcx().require_lang_item(LangItem::Destruct, None), + substs: self.tcx().mk_substs_trait(nested_ty, &[]), + }, + constness: ty::BoundConstness::ConstIfConst, + polarity: ty::ImplPolarity::Positive, + }) + .to_predicate(tcx); + + nested.push(Obligation::with_depth( + cause.clone(), + obligation.recursion_depth + 1, + obligation.param_env, + predicate, + )); + } + } + } + + Ok(ImplSourceConstDestructData { nested }) + } +} diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs new file mode 100644 index 000000000..c01ac1979 --- /dev/null +++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs @@ -0,0 +1,2698 @@ +//! Candidate selection. See the [rustc dev guide] for more information on how this works. +//! +//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#selection + +use self::EvaluationResult::*; +use self::SelectionCandidate::*; + +use super::coherence::{self, Conflict}; +use super::const_evaluatable; +use super::project; +use super::project::normalize_with_depth_to; +use super::project::ProjectionTyObligation; +use super::util; +use super::util::{closure_trait_ref_and_return_type, predicate_for_trait_def}; +use super::wf; +use super::{ + ErrorReporting, ImplDerivedObligation, ImplDerivedObligationCause, Normalized, Obligation, + ObligationCause, ObligationCauseCode, Overflow, PredicateObligation, Selection, SelectionError, + SelectionResult, TraitObligation, TraitQueryMode, +}; + +use crate::infer::{InferCtxt, InferOk, TypeFreshener}; +use crate::traits::error_reporting::InferCtxtExt; +use crate::traits::project::ProjectAndUnifyResult; +use crate::traits::project::ProjectionCacheKeyExt; +use crate::traits::ProjectionCacheKey; +use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet}; +use rustc_data_structures::stack::ensure_sufficient_stack; +use rustc_errors::{Diagnostic, ErrorGuaranteed}; +use rustc_hir as hir; +use rustc_hir::def_id::DefId; +use rustc_infer::infer::LateBoundRegionConversionTime; +use rustc_middle::dep_graph::{DepKind, DepNodeIndex}; +use rustc_middle::mir::interpret::ErrorHandled; +use rustc_middle::ty::abstract_const::NotConstEvaluatable; +use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams}; +use rustc_middle::ty::fold::BottomUpFolder; +use rustc_middle::ty::print::with_no_trimmed_paths; +use rustc_middle::ty::relate::TypeRelation; +use rustc_middle::ty::subst::{Subst, SubstsRef}; +use rustc_middle::ty::{self, EarlyBinder, PolyProjectionPredicate, ToPolyTraitRef, ToPredicate}; +use rustc_middle::ty::{Ty, 
TyCtxt, TypeFoldable, TypeVisitable}; +use rustc_span::symbol::sym; + +use std::cell::{Cell, RefCell}; +use std::cmp; +use std::fmt::{self, Display}; +use std::iter; + +pub use rustc_middle::traits::select::*; + +mod candidate_assembly; +mod confirmation; + +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +pub enum IntercrateAmbiguityCause { + DownstreamCrate { trait_desc: String, self_desc: Option }, + UpstreamCrateUpdate { trait_desc: String, self_desc: Option }, + ReservationImpl { message: String }, +} + +impl IntercrateAmbiguityCause { + /// Emits notes when the overlap is caused by complex intercrate ambiguities. + /// See #23980 for details. + pub fn add_intercrate_ambiguity_hint(&self, err: &mut Diagnostic) { + err.note(&self.intercrate_ambiguity_hint()); + } + + pub fn intercrate_ambiguity_hint(&self) -> String { + match self { + IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc } => { + let self_desc = if let Some(ty) = self_desc { + format!(" for type `{}`", ty) + } else { + String::new() + }; + format!("downstream crates may implement trait `{}`{}", trait_desc, self_desc) + } + IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc } => { + let self_desc = if let Some(ty) = self_desc { + format!(" for type `{}`", ty) + } else { + String::new() + }; + format!( + "upstream crates may add a new impl of trait `{}`{} \ + in future versions", + trait_desc, self_desc + ) + } + IntercrateAmbiguityCause::ReservationImpl { message } => message.clone(), + } + } +} + +pub struct SelectionContext<'cx, 'tcx> { + infcx: &'cx InferCtxt<'cx, 'tcx>, + + /// Freshener used specifically for entries on the obligation + /// stack. This ensures that all entries on the stack at one time + /// will have the same set of placeholder entries, which is + /// important for checking for trait bounds that recursively + /// require themselves. + freshener: TypeFreshener<'cx, 'tcx>, + + /// During coherence we have to assume that other crates may add + /// additional impls which we currently don't know about. + /// + /// To deal with this evaluation should be conservative + /// and consider the possibility of impls from outside this crate. + /// This comes up primarily when resolving ambiguity. Imagine + /// there is some trait reference `$0: Bar` where `$0` is an + /// inference variable. If `intercrate` is true, then we can never + /// say for sure that this reference is not implemented, even if + /// there are *no impls at all for `Bar`*, because `$0` could be + /// bound to some type that in a downstream crate that implements + /// `Bar`. + /// + /// Outside of coherence we set this to false because we are only + /// interested in types that the user could actually have written. + /// In other words, we consider `$0: Bar` to be unimplemented if + /// there is no type that the user could *actually name* that + /// would satisfy it. This avoids crippling inference, basically. + intercrate: bool, + /// If `intercrate` is set, we remember predicates which were + /// considered ambiguous because of impls potentially added in other crates. + /// This is used in coherence to give improved diagnostics. + /// We don't do his until we detect a coherence error because it can + /// lead to false overflow results (#47139) and because always + /// computing it may negatively impact performance. + intercrate_ambiguity_causes: Option>, + + /// The mode that trait queries run in, which informs our error handling + /// policy. 
In essence, canonicalized queries need their errors propagated + /// rather than immediately reported because we do not have accurate spans. + query_mode: TraitQueryMode, +} + +// A stack that walks back up the stack frame. +struct TraitObligationStack<'prev, 'tcx> { + obligation: &'prev TraitObligation<'tcx>, + + /// The trait predicate from `obligation` but "freshened" with the + /// selection-context's freshener. Used to check for recursion. + fresh_trait_pred: ty::PolyTraitPredicate<'tcx>, + + /// Starts out equal to `depth` -- if, during evaluation, we + /// encounter a cycle, then we will set this flag to the minimum + /// depth of that cycle for all participants in the cycle. These + /// participants will then forego caching their results. This is + /// not the most efficient solution, but it addresses #60010. The + /// problem we are trying to prevent: + /// + /// - If you have `A: AutoTrait` requires `B: AutoTrait` and `C: NonAutoTrait` + /// - `B: AutoTrait` requires `A: AutoTrait` (coinductive cycle, ok) + /// - `C: NonAutoTrait` requires `A: AutoTrait` (non-coinductive cycle, not ok) + /// + /// you don't want to cache that `B: AutoTrait` or `A: AutoTrait` + /// is `EvaluatedToOk`; this is because they were only considered + /// ok on the premise that if `A: AutoTrait` held, but we indeed + /// encountered a problem (later on) with `A: AutoTrait. So we + /// currently set a flag on the stack node for `B: AutoTrait` (as + /// well as the second instance of `A: AutoTrait`) to suppress + /// caching. + /// + /// This is a simple, targeted fix. A more-performant fix requires + /// deeper changes, but would permit more caching: we could + /// basically defer caching until we have fully evaluated the + /// tree, and then cache the entire tree at once. In any case, the + /// performance impact here shouldn't be so horrible: every time + /// this is hit, we do cache at least one trait, so we only + /// evaluate each member of a cycle up to N times, where N is the + /// length of the cycle. This means the performance impact is + /// bounded and we shouldn't have any terrible worst-cases. + reached_depth: Cell, + + previous: TraitObligationStackList<'prev, 'tcx>, + + /// The number of parent frames plus one (thus, the topmost frame has depth 1). + depth: usize, + + /// The depth-first number of this node in the search graph -- a + /// pre-order index. Basically, a freshly incremented counter. + dfn: usize, +} + +struct SelectionCandidateSet<'tcx> { + // A list of candidates that definitely apply to the current + // obligation (meaning: types unify). + vec: Vec>, + + // If `true`, then there were candidates that might or might + // not have applied, but we couldn't tell. This occurs when some + // of the input types are type variables, in which case there are + // various "builtin" rules that might or might not trigger. + ambiguous: bool, +} + +#[derive(PartialEq, Eq, Debug, Clone)] +struct EvaluatedCandidate<'tcx> { + candidate: SelectionCandidate<'tcx>, + evaluation: EvaluationResult, +} + +/// When does the builtin impl for `T: Trait` apply? +#[derive(Debug)] +enum BuiltinImplConditions<'tcx> { + /// The impl is conditional on `T1, T2, ...: Trait`. + Where(ty::Binder<'tcx, Vec>>), + /// There is no built-in impl. There may be some other + /// candidate (a where-clause or user-defined impl). + None, + /// It is unknown whether there is an impl. 
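+    /// For example (illustrative), a builtin `Copy` candidate for a not-yet-resolved
+    /// inference variable `$0` is `Ambiguous`: whether the builtin impl applies
+    /// depends on what `$0` ends up being inferred to.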
+ Ambiguous, +} + +impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { + pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> { + SelectionContext { + infcx, + freshener: infcx.freshener_keep_static(), + intercrate: false, + intercrate_ambiguity_causes: None, + query_mode: TraitQueryMode::Standard, + } + } + + pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> { + SelectionContext { + infcx, + freshener: infcx.freshener_keep_static(), + intercrate: true, + intercrate_ambiguity_causes: None, + query_mode: TraitQueryMode::Standard, + } + } + + pub fn with_query_mode( + infcx: &'cx InferCtxt<'cx, 'tcx>, + query_mode: TraitQueryMode, + ) -> SelectionContext<'cx, 'tcx> { + debug!(?query_mode, "with_query_mode"); + SelectionContext { + infcx, + freshener: infcx.freshener_keep_static(), + intercrate: false, + intercrate_ambiguity_causes: None, + query_mode, + } + } + + /// Enables tracking of intercrate ambiguity causes. See + /// the documentation of [`Self::intercrate_ambiguity_causes`] for more. + pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) { + assert!(self.intercrate); + assert!(self.intercrate_ambiguity_causes.is_none()); + self.intercrate_ambiguity_causes = Some(FxIndexSet::default()); + debug!("selcx: enable_tracking_intercrate_ambiguity_causes"); + } + + /// Gets the intercrate ambiguity causes collected since tracking + /// was enabled and disables tracking at the same time. If + /// tracking is not enabled, just returns an empty vector. + pub fn take_intercrate_ambiguity_causes(&mut self) -> FxIndexSet { + assert!(self.intercrate); + self.intercrate_ambiguity_causes.take().unwrap_or_default() + } + + pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> { + self.infcx + } + + pub fn tcx(&self) -> TyCtxt<'tcx> { + self.infcx.tcx + } + + pub fn is_intercrate(&self) -> bool { + self.intercrate + } + + /////////////////////////////////////////////////////////////////////////// + // Selection + // + // The selection phase tries to identify *how* an obligation will + // be resolved. For example, it will identify which impl or + // parameter bound is to be used. The process can be inconclusive + // if the self type in the obligation is not fully inferred. Selection + // can result in an error in one of two ways: + // + // 1. If no applicable impl or parameter bound can be found. + // 2. If the output type parameters in the obligation do not match + // those specified by the impl/bound. For example, if the obligation + // is `Vec: Iterable`, but the impl specifies + // `impl Iterable for Vec`, than an error would result. + + /// Attempts to satisfy the obligation. If successful, this will affect the surrounding + /// type environment by performing unification. + #[instrument(level = "debug", skip(self))] + pub fn select( + &mut self, + obligation: &TraitObligation<'tcx>, + ) -> SelectionResult<'tcx, Selection<'tcx>> { + let candidate = match self.select_from_obligation(obligation) { + Err(SelectionError::Overflow(OverflowError::Canonical)) => { + // In standard mode, overflow must have been caught and reported + // earlier. 
+ assert!(self.query_mode == TraitQueryMode::Canonical); + return Err(SelectionError::Overflow(OverflowError::Canonical)); + } + Err(SelectionError::Ambiguous(_)) => { + return Ok(None); + } + Err(e) => { + return Err(e); + } + Ok(None) => { + return Ok(None); + } + Ok(Some(candidate)) => candidate, + }; + + match self.confirm_candidate(obligation, candidate) { + Err(SelectionError::Overflow(OverflowError::Canonical)) => { + assert!(self.query_mode == TraitQueryMode::Canonical); + Err(SelectionError::Overflow(OverflowError::Canonical)) + } + Err(e) => Err(e), + Ok(candidate) => { + debug!(?candidate, "confirmed"); + Ok(Some(candidate)) + } + } + } + + pub(crate) fn select_from_obligation( + &mut self, + obligation: &TraitObligation<'tcx>, + ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { + debug_assert!(!obligation.predicate.has_escaping_bound_vars()); + + let pec = &ProvisionalEvaluationCache::default(); + let stack = self.push_stack(TraitObligationStackList::empty(pec), obligation); + + self.candidate_from_obligation(&stack) + } + + /////////////////////////////////////////////////////////////////////////// + // EVALUATION + // + // Tests whether an obligation can be selected or whether an impl + // can be applied to particular types. It skips the "confirmation" + // step and hence completely ignores output type parameters. + // + // The result is "true" if the obligation *may* hold and "false" if + // we can be sure it does not. + + /// Evaluates whether the obligation `obligation` can be satisfied (by any means). + pub fn predicate_may_hold_fatal(&mut self, obligation: &PredicateObligation<'tcx>) -> bool { + debug!(?obligation, "predicate_may_hold_fatal"); + + // This fatal query is a stopgap that should only be used in standard mode, + // where we do not expect overflow to be propagated. + assert!(self.query_mode == TraitQueryMode::Standard); + + self.evaluate_root_obligation(obligation) + .expect("Overflow should be caught earlier in standard query mode") + .may_apply() + } + + /// Evaluates whether the obligation `obligation` can be satisfied + /// and returns an `EvaluationResult`. This is meant for the + /// *initial* call. + pub fn evaluate_root_obligation( + &mut self, + obligation: &PredicateObligation<'tcx>, + ) -> Result { + self.evaluation_probe(|this| { + this.evaluate_predicate_recursively( + TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()), + obligation.clone(), + ) + }) + } + + fn evaluation_probe( + &mut self, + op: impl FnOnce(&mut Self) -> Result, + ) -> Result { + self.infcx.probe(|snapshot| -> Result { + let result = op(self)?; + + match self.infcx.leak_check(true, snapshot) { + Ok(()) => {} + Err(_) => return Ok(EvaluatedToErr), + } + + if self.infcx.opaque_types_added_in_snapshot(snapshot) { + return Ok(result.max(EvaluatedToOkModuloOpaqueTypes)); + } + + match self.infcx.region_constraints_added_in_snapshot(snapshot) { + None => Ok(result), + Some(_) => Ok(result.max(EvaluatedToOkModuloRegions)), + } + }) + } + + /// Evaluates the predicates in `predicates` recursively. Note that + /// this applies projections in the predicates, and therefore + /// is run within an inference probe. 
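+    /// The combined result is the worst of the individual results; for example
+    /// (illustrative), evaluating `[T: Send, T: Sync]` where the first holds and
+    /// the second is ambiguous yields `EvaluatedToAmbig`, and evaluation
+    /// short-circuits as soon as any predicate is `EvaluatedToErr`.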
+ #[instrument(skip(self, stack), level = "debug")] + fn evaluate_predicates_recursively<'o, I>( + &mut self, + stack: TraitObligationStackList<'o, 'tcx>, + predicates: I, + ) -> Result + where + I: IntoIterator> + std::fmt::Debug, + { + let mut result = EvaluatedToOk; + for obligation in predicates { + let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?; + if let EvaluatedToErr = eval { + // fast-path - EvaluatedToErr is the top of the lattice, + // so we don't need to look on the other predicates. + return Ok(EvaluatedToErr); + } else { + result = cmp::max(result, eval); + } + } + Ok(result) + } + + #[instrument( + level = "debug", + skip(self, previous_stack), + fields(previous_stack = ?previous_stack.head()) + )] + fn evaluate_predicate_recursively<'o>( + &mut self, + previous_stack: TraitObligationStackList<'o, 'tcx>, + obligation: PredicateObligation<'tcx>, + ) -> Result { + // `previous_stack` stores a `TraitObligation`, while `obligation` is + // a `PredicateObligation`. These are distinct types, so we can't + // use any `Option` combinator method that would force them to be + // the same. + match previous_stack.head() { + Some(h) => self.check_recursion_limit(&obligation, h.obligation)?, + None => self.check_recursion_limit(&obligation, &obligation)?, + } + + let result = ensure_sufficient_stack(|| { + let bound_predicate = obligation.predicate.kind(); + match bound_predicate.skip_binder() { + ty::PredicateKind::Trait(t) => { + let t = bound_predicate.rebind(t); + debug_assert!(!t.has_escaping_bound_vars()); + let obligation = obligation.with(t); + self.evaluate_trait_predicate_recursively(previous_stack, obligation) + } + + ty::PredicateKind::Subtype(p) => { + let p = bound_predicate.rebind(p); + // Does this code ever run? + match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) { + Some(Ok(InferOk { mut obligations, .. })) => { + self.add_depth(obligations.iter_mut(), obligation.recursion_depth); + self.evaluate_predicates_recursively( + previous_stack, + obligations.into_iter(), + ) + } + Some(Err(_)) => Ok(EvaluatedToErr), + None => Ok(EvaluatedToAmbig), + } + } + + ty::PredicateKind::Coerce(p) => { + let p = bound_predicate.rebind(p); + // Does this code ever run? + match self.infcx.coerce_predicate(&obligation.cause, obligation.param_env, p) { + Some(Ok(InferOk { mut obligations, .. })) => { + self.add_depth(obligations.iter_mut(), obligation.recursion_depth); + self.evaluate_predicates_recursively( + previous_stack, + obligations.into_iter(), + ) + } + Some(Err(_)) => Ok(EvaluatedToErr), + None => Ok(EvaluatedToAmbig), + } + } + + ty::PredicateKind::WellFormed(arg) => { + // So, there is a bit going on here. First, `WellFormed` predicates + // are coinductive, like trait predicates with auto traits. + // This means that we need to detect if we have recursively + // evaluated `WellFormed(X)`. Otherwise, we would run into + // a "natural" overflow error. + // + // Now, the next question is whether we need to do anything + // special with caching. Considering the following tree: + // - `WF(Foo)` + // - `Bar: Send` + // - `WF(Foo)` + // - `Foo: Trait` + // In this case, the innermost `WF(Foo)` should return + // `EvaluatedToOk`, since it's coinductive. Then if + // `Bar: Send` is resolved to `EvaluatedToOk`, it can be + // inserted into a cache (because without thinking about `WF` + // goals, it isn't in a cycle). If `Foo: Trait` later doesn't + // hold, then `Bar: Send` shouldn't hold. 
Therefore, we + // *do* need to keep track of coinductive cycles. + + let cache = previous_stack.cache; + let dfn = cache.next_dfn(); + + for stack_arg in previous_stack.cache.wf_args.borrow().iter().rev() { + if stack_arg.0 != arg { + continue; + } + debug!("WellFormed({:?}) on stack", arg); + if let Some(stack) = previous_stack.head { + // Okay, let's imagine we have two different stacks: + // `T: NonAutoTrait -> WF(T) -> T: NonAutoTrait` + // `WF(T) -> T: NonAutoTrait -> WF(T)` + // Because of this, we need to check that all + // predicates between the WF goals are coinductive. + // Otherwise, we can say that `T: NonAutoTrait` is + // true. + // Let's imagine we have a predicate stack like + // `Foo: Bar -> WF(T) -> T: NonAutoTrait -> T: Auto + // depth ^1 ^2 ^3 + // and the current predicate is `WF(T)`. `wf_args` + // would contain `(T, 1)`. We want to check all + // trait predicates greater than `1`. The previous + // stack would be `T: Auto`. + let cycle = stack.iter().take_while(|s| s.depth > stack_arg.1); + let tcx = self.tcx(); + let cycle = + cycle.map(|stack| stack.obligation.predicate.to_predicate(tcx)); + if self.coinductive_match(cycle) { + stack.update_reached_depth(stack_arg.1); + return Ok(EvaluatedToOk); + } else { + return Ok(EvaluatedToRecur); + } + } + return Ok(EvaluatedToOk); + } + + match wf::obligations( + self.infcx, + obligation.param_env, + obligation.cause.body_id, + obligation.recursion_depth + 1, + arg, + obligation.cause.span, + ) { + Some(mut obligations) => { + self.add_depth(obligations.iter_mut(), obligation.recursion_depth); + + cache.wf_args.borrow_mut().push((arg, previous_stack.depth())); + let result = + self.evaluate_predicates_recursively(previous_stack, obligations); + cache.wf_args.borrow_mut().pop(); + + let result = result?; + + if !result.must_apply_modulo_regions() { + cache.on_failure(dfn); + } + + cache.on_completion(dfn); + + Ok(result) + } + None => Ok(EvaluatedToAmbig), + } + } + + ty::PredicateKind::TypeOutlives(pred) => { + // A global type with no late-bound regions can only + // contain the "'static" lifetime (any other lifetime + // would either be late-bound or local), so it is guaranteed + // to outlive any other lifetime + if pred.0.is_global() && !pred.0.has_late_bound_regions() { + Ok(EvaluatedToOk) + } else { + Ok(EvaluatedToOkModuloRegions) + } + } + + ty::PredicateKind::RegionOutlives(..) => { + // We do not consider region relationships when evaluating trait matches. + Ok(EvaluatedToOkModuloRegions) + } + + ty::PredicateKind::ObjectSafe(trait_def_id) => { + if self.tcx().is_object_safe(trait_def_id) { + Ok(EvaluatedToOk) + } else { + Ok(EvaluatedToErr) + } + } + + ty::PredicateKind::Projection(data) => { + let data = bound_predicate.rebind(data); + let project_obligation = obligation.with(data); + match project::poly_project_and_unify_type(self, &project_obligation) { + ProjectAndUnifyResult::Holds(mut subobligations) => { + 'compute_res: { + // If we've previously marked this projection as 'complete', then + // use the final cached result (either `EvaluatedToOk` or + // `EvaluatedToOkModuloRegions`), and skip re-evaluating the + // sub-obligations. 
+ if let Some(key) = + ProjectionCacheKey::from_poly_projection_predicate(self, data) + { + if let Some(cached_res) = self + .infcx + .inner + .borrow_mut() + .projection_cache() + .is_complete(key) + { + break 'compute_res Ok(cached_res); + } + } + + self.add_depth( + subobligations.iter_mut(), + obligation.recursion_depth, + ); + let res = self.evaluate_predicates_recursively( + previous_stack, + subobligations, + ); + if let Ok(eval_rslt) = res + && (eval_rslt == EvaluatedToOk || eval_rslt == EvaluatedToOkModuloRegions) + && let Some(key) = + ProjectionCacheKey::from_poly_projection_predicate( + self, data, + ) + { + // If the result is something that we can cache, then mark this + // entry as 'complete'. This will allow us to skip evaluating the + // subobligations at all the next time we evaluate the projection + // predicate. + self.infcx + .inner + .borrow_mut() + .projection_cache() + .complete(key, eval_rslt); + } + res + } + } + ProjectAndUnifyResult::FailedNormalization => Ok(EvaluatedToAmbig), + ProjectAndUnifyResult::Recursive => Ok(EvaluatedToRecur), + ProjectAndUnifyResult::MismatchedProjectionTypes(_) => Ok(EvaluatedToErr), + } + } + + ty::PredicateKind::ClosureKind(_, closure_substs, kind) => { + match self.infcx.closure_kind(closure_substs) { + Some(closure_kind) => { + if closure_kind.extends(kind) { + Ok(EvaluatedToOk) + } else { + Ok(EvaluatedToErr) + } + } + None => Ok(EvaluatedToAmbig), + } + } + + ty::PredicateKind::ConstEvaluatable(uv) => { + match const_evaluatable::is_const_evaluatable( + self.infcx, + uv, + obligation.param_env, + obligation.cause.span, + ) { + Ok(()) => Ok(EvaluatedToOk), + Err(NotConstEvaluatable::MentionsInfer) => Ok(EvaluatedToAmbig), + Err(NotConstEvaluatable::MentionsParam) => Ok(EvaluatedToErr), + Err(_) => Ok(EvaluatedToErr), + } + } + + ty::PredicateKind::ConstEquate(c1, c2) => { + debug!(?c1, ?c2, "evaluate_predicate_recursively: equating consts"); + + if self.tcx().features().generic_const_exprs { + // FIXME: we probably should only try to unify abstract constants + // if the constants depend on generic parameters. + // + // Let's just see where this breaks :shrug: + if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) = + (c1.kind(), c2.kind()) + { + if self.infcx.try_unify_abstract_consts( + a.shrink(), + b.shrink(), + obligation.param_env, + ) { + return Ok(EvaluatedToOk); + } + } + } + + let evaluate = |c: ty::Const<'tcx>| { + if let ty::ConstKind::Unevaluated(unevaluated) = c.kind() { + match self.infcx.try_const_eval_resolve( + obligation.param_env, + unevaluated, + c.ty(), + Some(obligation.cause.span), + ) { + Ok(val) => Ok(val), + Err(e) => Err(e), + } + } else { + Ok(c) + } + }; + + match (evaluate(c1), evaluate(c2)) { + (Ok(c1), Ok(c2)) => { + match self + .infcx() + .at(&obligation.cause, obligation.param_env) + .eq(c1, c2) + { + Ok(_) => Ok(EvaluatedToOk), + Err(_) => Ok(EvaluatedToErr), + } + } + (Err(ErrorHandled::Reported(_)), _) + | (_, Err(ErrorHandled::Reported(_))) => Ok(EvaluatedToErr), + (Err(ErrorHandled::Linted), _) | (_, Err(ErrorHandled::Linted)) => { + span_bug!( + obligation.cause.span(), + "ConstEquate: const_eval_resolve returned an unexpected error" + ) + } + (Err(ErrorHandled::TooGeneric), _) | (_, Err(ErrorHandled::TooGeneric)) => { + if c1.has_infer_types_or_consts() || c2.has_infer_types_or_consts() { + Ok(EvaluatedToAmbig) + } else { + // Two different constants using generic parameters ~> error. + Ok(EvaluatedToErr) + } + } + } + } + ty::PredicateKind::TypeWellFormedFromEnv(..) 
=> { + bug!("TypeWellFormedFromEnv is only used for chalk") + } + } + }); + + debug!("finished: {:?} from {:?}", result, obligation); + + result + } + + #[instrument(skip(self, previous_stack), level = "debug")] + fn evaluate_trait_predicate_recursively<'o>( + &mut self, + previous_stack: TraitObligationStackList<'o, 'tcx>, + mut obligation: TraitObligation<'tcx>, + ) -> Result { + if !self.intercrate + && obligation.is_global() + && obligation.param_env.caller_bounds().iter().all(|bound| bound.needs_subst()) + { + // If a param env has no global bounds, global obligations do not + // depend on its particular value in order to work, so we can clear + // out the param env and get better caching. + debug!("in global"); + obligation.param_env = obligation.param_env.without_caller_bounds(); + } + + let stack = self.push_stack(previous_stack, &obligation); + let mut fresh_trait_pred = stack.fresh_trait_pred; + let mut param_env = obligation.param_env; + + fresh_trait_pred = fresh_trait_pred.map_bound(|mut pred| { + pred.remap_constness(&mut param_env); + pred + }); + + debug!(?fresh_trait_pred); + + // If a trait predicate is in the (local or global) evaluation cache, + // then we know it holds without cycles. + if let Some(result) = self.check_evaluation_cache(param_env, fresh_trait_pred) { + debug!(?result, "CACHE HIT"); + return Ok(result); + } + + if let Some(result) = stack.cache().get_provisional(fresh_trait_pred) { + debug!(?result, "PROVISIONAL CACHE HIT"); + stack.update_reached_depth(result.reached_depth); + return Ok(result.result); + } + + // Check if this is a match for something already on the + // stack. If so, we don't want to insert the result into the + // main cache (it is cycle dependent) nor the provisional + // cache (which is meant for things that have completed but + // for a "backedge" -- this result *is* the backedge). + if let Some(cycle_result) = self.check_evaluation_cycle(&stack) { + return Ok(cycle_result); + } + + let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack)); + let result = result?; + + if !result.must_apply_modulo_regions() { + stack.cache().on_failure(stack.dfn); + } + + let reached_depth = stack.reached_depth.get(); + if reached_depth >= stack.depth { + debug!(?result, "CACHE MISS"); + self.insert_evaluation_cache(param_env, fresh_trait_pred, dep_node, result); + stack.cache().on_completion(stack.dfn); + } else { + debug!(?result, "PROVISIONAL"); + debug!( + "caching provisionally because {:?} \ + is a cycle participant (at depth {}, reached depth {})", + fresh_trait_pred, stack.depth, reached_depth, + ); + + stack.cache().insert_provisional(stack.dfn, reached_depth, fresh_trait_pred, result); + } + + Ok(result) + } + + /// If there is any previous entry on the stack that precisely + /// matches this obligation, then we can assume that the + /// obligation is satisfied for now (still all other conditions + /// must be met of course). One obvious case this comes up is + /// marker traits like `Send`. Think of a linked list: + /// + /// struct List { data: T, next: Option>> } + /// + /// `Box>` will be `Send` if `T` is `Send` and + /// `Option>>` is `Send`, and in turn + /// `Option>>` is `Send` if `Box>` is + /// `Send`. + /// + /// Note that we do this comparison using the `fresh_trait_ref` + /// fields. 
Because these have all been freshened using + /// `self.freshener`, we can be sure that (a) this will not + /// affect the inferencer state and (b) that if we see two + /// fresh regions with the same index, they refer to the same + /// unbound type variable. + fn check_evaluation_cycle( + &mut self, + stack: &TraitObligationStack<'_, 'tcx>, + ) -> Option { + if let Some(cycle_depth) = stack + .iter() + .skip(1) // Skip top-most frame. + .find(|prev| { + stack.obligation.param_env == prev.obligation.param_env + && stack.fresh_trait_pred == prev.fresh_trait_pred + }) + .map(|stack| stack.depth) + { + debug!("evaluate_stack --> recursive at depth {}", cycle_depth); + + // If we have a stack like `A B C D E A`, where the top of + // the stack is the final `A`, then this will iterate over + // `A, E, D, C, B` -- i.e., all the participants apart + // from the cycle head. We mark them as participating in a + // cycle. This suppresses caching for those nodes. See + // `in_cycle` field for more details. + stack.update_reached_depth(cycle_depth); + + // Subtle: when checking for a coinductive cycle, we do + // not compare using the "freshened trait refs" (which + // have erased regions) but rather the fully explicit + // trait refs. This is important because it's only a cycle + // if the regions match exactly. + let cycle = stack.iter().skip(1).take_while(|s| s.depth >= cycle_depth); + let tcx = self.tcx(); + let cycle = cycle.map(|stack| stack.obligation.predicate.to_predicate(tcx)); + if self.coinductive_match(cycle) { + debug!("evaluate_stack --> recursive, coinductive"); + Some(EvaluatedToOk) + } else { + debug!("evaluate_stack --> recursive, inductive"); + Some(EvaluatedToRecur) + } + } else { + None + } + } + + fn evaluate_stack<'o>( + &mut self, + stack: &TraitObligationStack<'o, 'tcx>, + ) -> Result { + // In intercrate mode, whenever any of the generics are unbound, + // there can always be an impl. Even if there are no impls in + // this crate, perhaps the type would be unified with + // something from another crate that does provide an impl. + // + // In intra mode, we must still be conservative. The reason is + // that we want to avoid cycles. Imagine an impl like: + // + // impl Eq for Vec + // + // and a trait reference like `$0 : Eq` where `$0` is an + // unbound variable. When we evaluate this trait-reference, we + // will unify `$0` with `Vec<$1>` (for some fresh variable + // `$1`), on the condition that `$1 : Eq`. We will then wind + // up with many candidates (since that are other `Eq` impls + // that apply) and try to winnow things down. This results in + // a recursive evaluation that `$1 : Eq` -- as you can + // imagine, this is just where we started. To avoid that, we + // check for unbound variables and return an ambiguous (hence possible) + // match if we've seen this trait before. + // + // This suffices to allow chains like `FnMut` implemented in + // terms of `Fn` etc, but we could probably make this more + // precise still. + let unbound_input_types = + stack.fresh_trait_pred.skip_binder().trait_ref.substs.types().any(|ty| ty.is_fresh()); + + if stack.obligation.polarity() != ty::ImplPolarity::Negative { + // This check was an imperfect workaround for a bug in the old + // intercrate mode; it should be removed when that goes away. + if unbound_input_types && self.intercrate { + debug!("evaluate_stack --> unbound argument, intercrate --> ambiguous",); + // Heuristics: show the diagnostics when there are no candidates in crate. 
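+                // Illustrative example (assumed, not from the original sources):
+                // during coherence an obligation like `$0: MyTrait` with `$0`
+                // unresolved must stay ambiguous even if this crate has no matching
+                // impl, because a downstream crate could implement `MyTrait` for its
+                // own type; the cause recorded below backs the
+                // "downstream crates may implement trait ..." diagnostic note.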
+ if self.intercrate_ambiguity_causes.is_some() { + debug!("evaluate_stack: intercrate_ambiguity_causes is some"); + if let Ok(candidate_set) = self.assemble_candidates(stack) { + if !candidate_set.ambiguous && candidate_set.vec.is_empty() { + let trait_ref = stack.obligation.predicate.skip_binder().trait_ref; + let self_ty = trait_ref.self_ty(); + let cause = with_no_trimmed_paths!({ + IntercrateAmbiguityCause::DownstreamCrate { + trait_desc: trait_ref.print_only_trait_path().to_string(), + self_desc: if self_ty.has_concrete_skeleton() { + Some(self_ty.to_string()) + } else { + None + }, + } + }); + + debug!(?cause, "evaluate_stack: pushing cause"); + self.intercrate_ambiguity_causes.as_mut().unwrap().insert(cause); + } + } + } + return Ok(EvaluatedToAmbig); + } + } + + if unbound_input_types + && stack.iter().skip(1).any(|prev| { + stack.obligation.param_env == prev.obligation.param_env + && self.match_fresh_trait_refs( + stack.fresh_trait_pred, + prev.fresh_trait_pred, + prev.obligation.param_env, + ) + }) + { + debug!("evaluate_stack --> unbound argument, recursive --> giving up",); + return Ok(EvaluatedToUnknown); + } + + match self.candidate_from_obligation(stack) { + Ok(Some(c)) => self.evaluate_candidate(stack, &c), + Err(SelectionError::Ambiguous(_)) => Ok(EvaluatedToAmbig), + Ok(None) => Ok(EvaluatedToAmbig), + Err(Overflow(OverflowError::Canonical)) => Err(OverflowError::Canonical), + Err(ErrorReporting) => Err(OverflowError::ErrorReporting), + Err(..) => Ok(EvaluatedToErr), + } + } + + /// For defaulted traits, we use a co-inductive strategy to solve, so + /// that recursion is ok. This routine returns `true` if the top of the + /// stack (`cycle[0]`): + /// + /// - is a defaulted trait, + /// - it also appears in the backtrace at some position `X`, + /// - all the predicates at positions `X..` between `X` and the top are + /// also defaulted traits. + pub(crate) fn coinductive_match(&mut self, mut cycle: I) -> bool + where + I: Iterator>, + { + cycle.all(|predicate| self.coinductive_predicate(predicate)) + } + + fn coinductive_predicate(&self, predicate: ty::Predicate<'tcx>) -> bool { + let result = match predicate.kind().skip_binder() { + ty::PredicateKind::Trait(ref data) => self.tcx().trait_is_auto(data.def_id()), + ty::PredicateKind::WellFormed(_) => true, + _ => false, + }; + debug!(?predicate, ?result, "coinductive_predicate"); + result + } + + /// Further evaluates `candidate` to decide whether all type parameters match and whether nested + /// obligations are met. Returns whether `candidate` remains viable after this further + /// scrutiny. + #[instrument( + level = "debug", + skip(self, stack), + fields(depth = stack.obligation.recursion_depth) + )] + fn evaluate_candidate<'o>( + &mut self, + stack: &TraitObligationStack<'o, 'tcx>, + candidate: &SelectionCandidate<'tcx>, + ) -> Result { + let mut result = self.evaluation_probe(|this| { + let candidate = (*candidate).clone(); + match this.confirm_candidate(stack.obligation, candidate) { + Ok(selection) => { + debug!(?selection); + this.evaluate_predicates_recursively( + stack.list(), + selection.nested_obligations().into_iter(), + ) + } + Err(..) => Ok(EvaluatedToErr), + } + })?; + + // If we erased any lifetimes, then we want to use + // `EvaluatedToOkModuloRegions` instead of `EvaluatedToOk` + // as your final result. 
The result will be cached using + // the freshened trait predicate as a key, so we need + // our result to be correct by *any* choice of original lifetimes, + // not just the lifetime choice for this particular (non-erased) + // predicate. + // See issue #80691 + if stack.fresh_trait_pred.has_erased_regions() { + result = result.max(EvaluatedToOkModuloRegions); + } + + debug!(?result); + Ok(result) + } + + fn check_evaluation_cache( + &self, + param_env: ty::ParamEnv<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, + ) -> Option { + // Neither the global nor local cache is aware of intercrate + // mode, so don't do any caching. In particular, we might + // re-use the same `InferCtxt` with both an intercrate + // and non-intercrate `SelectionContext` + if self.intercrate { + return None; + } + + let tcx = self.tcx(); + if self.can_use_global_caches(param_env) { + if let Some(res) = tcx.evaluation_cache.get(&(param_env, trait_pred), tcx) { + return Some(res); + } + } + self.infcx.evaluation_cache.get(&(param_env, trait_pred), tcx) + } + + fn insert_evaluation_cache( + &mut self, + param_env: ty::ParamEnv<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, + dep_node: DepNodeIndex, + result: EvaluationResult, + ) { + // Avoid caching results that depend on more than just the trait-ref + // - the stack can create recursion. + if result.is_stack_dependent() { + return; + } + + // Neither the global nor local cache is aware of intercrate + // mode, so don't do any caching. In particular, we might + // re-use the same `InferCtxt` with both an intercrate + // and non-intercrate `SelectionContext` + if self.intercrate { + return; + } + + if self.can_use_global_caches(param_env) { + if !trait_pred.needs_infer() { + debug!(?trait_pred, ?result, "insert_evaluation_cache global"); + // This may overwrite the cache with the same value + // FIXME: Due to #50507 this overwrites the different values + // This should be changed to use HashMapExt::insert_same + // when that is fixed + self.tcx().evaluation_cache.insert((param_env, trait_pred), dep_node, result); + return; + } + } + + debug!(?trait_pred, ?result, "insert_evaluation_cache"); + self.infcx.evaluation_cache.insert((param_env, trait_pred), dep_node, result); + } + + /// For various reasons, it's possible for a subobligation + /// to have a *lower* recursion_depth than the obligation used to create it. + /// Projection sub-obligations may be returned from the projection cache, + /// which results in obligations with an 'old' `recursion_depth`. + /// Additionally, methods like `InferCtxt.subtype_predicate` produce + /// subobligations without taking in a 'parent' depth, causing the + /// generated subobligations to have a `recursion_depth` of `0`. + /// + /// To ensure that obligation_depth never decreases, we force all subobligations + /// to have at least the depth of the original obligation. 
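+    /// For example (illustrative), a cached subobligation replayed with
+    /// `recursion_depth == 0` under a parent obligation at depth 5 is bumped to
+    /// depth 6 here.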
+ fn add_depth>>( + &self, + it: I, + min_depth: usize, + ) { + it.for_each(|o| o.recursion_depth = cmp::max(min_depth, o.recursion_depth) + 1); + } + + fn check_recursion_depth>( + &self, + depth: usize, + error_obligation: &Obligation<'tcx, T>, + ) -> Result<(), OverflowError> { + if !self.infcx.tcx.recursion_limit().value_within_limit(depth) { + match self.query_mode { + TraitQueryMode::Standard => { + if self.infcx.is_tainted_by_errors() { + return Err(OverflowError::Error( + ErrorGuaranteed::unchecked_claim_error_was_emitted(), + )); + } + self.infcx.report_overflow_error(error_obligation, true); + } + TraitQueryMode::Canonical => { + return Err(OverflowError::Canonical); + } + } + } + Ok(()) + } + + /// Checks that the recursion limit has not been exceeded. + /// + /// The weird return type of this function allows it to be used with the `try` (`?`) + /// operator within certain functions. + #[inline(always)] + fn check_recursion_limit, V: Display + TypeFoldable<'tcx>>( + &self, + obligation: &Obligation<'tcx, T>, + error_obligation: &Obligation<'tcx, V>, + ) -> Result<(), OverflowError> { + self.check_recursion_depth(obligation.recursion_depth, error_obligation) + } + + fn in_task(&mut self, op: OP) -> (R, DepNodeIndex) + where + OP: FnOnce(&mut Self) -> R, + { + let (result, dep_node) = + self.tcx().dep_graph.with_anon_task(self.tcx(), DepKind::TraitSelect, || op(self)); + self.tcx().dep_graph.read_index(dep_node); + (result, dep_node) + } + + /// filter_impls filters constant trait obligations and candidates that have a positive impl + /// for a negative goal and a negative impl for a positive goal + #[instrument(level = "debug", skip(self))] + fn filter_impls( + &mut self, + candidates: Vec>, + obligation: &TraitObligation<'tcx>, + ) -> Vec> { + let tcx = self.tcx(); + let mut result = Vec::with_capacity(candidates.len()); + + for candidate in candidates { + // Respect const trait obligations + if obligation.is_const() { + match candidate { + // const impl + ImplCandidate(def_id) if tcx.constness(def_id) == hir::Constness::Const => {} + // const param + ParamCandidate(trait_pred) if trait_pred.is_const_if_const() => {} + // auto trait impl + AutoImplCandidate(..) => {} + // generator, this will raise error in other places + // or ignore error with const_async_blocks feature + GeneratorCandidate => {} + // FnDef where the function is const + FnPointerCandidate { is_const: true } => {} + ConstDestructCandidate(_) => {} + _ => { + // reject all other types of candidates + continue; + } + } + } + + if let ImplCandidate(def_id) = candidate { + if ty::ImplPolarity::Reservation == tcx.impl_polarity(def_id) + || obligation.polarity() == tcx.impl_polarity(def_id) + { + result.push(candidate); + } + } else { + result.push(candidate); + } + } + + result + } + + /// filter_reservation_impls filter reservation impl for any goal as ambiguous + #[instrument(level = "debug", skip(self))] + fn filter_reservation_impls( + &mut self, + candidate: SelectionCandidate<'tcx>, + obligation: &TraitObligation<'tcx>, + ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { + let tcx = self.tcx(); + // Treat reservation impls as ambiguity. 
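+        // For example, the standard library's `impl<T> From<!> for T` is marked
+        // `#[rustc_reservation_impl]`: selecting it never commits to the impl; it
+        // only records the attribute's message as an ambiguity cause (when tracking
+        // is enabled) and returns `Ok(None)`.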
+ if let ImplCandidate(def_id) = candidate { + if let ty::ImplPolarity::Reservation = tcx.impl_polarity(def_id) { + if let Some(intercrate_ambiguity_clauses) = &mut self.intercrate_ambiguity_causes { + let value = tcx + .get_attr(def_id, sym::rustc_reservation_impl) + .and_then(|a| a.value_str()); + if let Some(value) = value { + debug!( + "filter_reservation_impls: \ + reservation impl ambiguity on {:?}", + def_id + ); + intercrate_ambiguity_clauses.insert( + IntercrateAmbiguityCause::ReservationImpl { + message: value.to_string(), + }, + ); + } + } + return Ok(None); + } + } + Ok(Some(candidate)) + } + + fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Option { + debug!("is_knowable(intercrate={:?})", self.intercrate); + + if !self.intercrate || stack.obligation.polarity() == ty::ImplPolarity::Negative { + return None; + } + + let obligation = &stack.obligation; + let predicate = self.infcx().resolve_vars_if_possible(obligation.predicate); + + // Okay to skip binder because of the nature of the + // trait-ref-is-knowable check, which does not care about + // bound regions. + let trait_ref = predicate.skip_binder().trait_ref; + + coherence::trait_ref_is_knowable(self.tcx(), trait_ref) + } + + /// Returns `true` if the global caches can be used. + fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool { + // If there are any inference variables in the `ParamEnv`, then we + // always use a cache local to this particular scope. Otherwise, we + // switch to a global cache. + if param_env.needs_infer() { + return false; + } + + // Avoid using the master cache during coherence and just rely + // on the local cache. This effectively disables caching + // during coherence. It is really just a simplification to + // avoid us having to fear that coherence results "pollute" + // the master cache. Since coherence executes pretty quickly, + // it's not worth going to more trouble to increase the + // hit-rate, I don't think. + if self.intercrate { + return false; + } + + // Otherwise, we can use the global cache. + true + } + + fn check_candidate_cache( + &mut self, + mut param_env: ty::ParamEnv<'tcx>, + cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>, + ) -> Option>> { + // Neither the global nor local cache is aware of intercrate + // mode, so don't do any caching. In particular, we might + // re-use the same `InferCtxt` with both an intercrate + // and non-intercrate `SelectionContext` + if self.intercrate { + return None; + } + let tcx = self.tcx(); + let mut pred = cache_fresh_trait_pred.skip_binder(); + pred.remap_constness(&mut param_env); + + if self.can_use_global_caches(param_env) { + if let Some(res) = tcx.selection_cache.get(&(param_env, pred), tcx) { + return Some(res); + } + } + self.infcx.selection_cache.get(&(param_env, pred), tcx) + } + + /// Determines whether can we safely cache the result + /// of selecting an obligation. This is almost always `true`, + /// except when dealing with certain `ParamCandidate`s. + /// + /// Ordinarily, a `ParamCandidate` will contain no inference variables, + /// since it was usually produced directly from a `DefId`. However, + /// certain cases (currently only librustdoc's blanket impl finder), + /// a `ParamEnv` may be explicitly constructed with inference types. + /// When this is the case, we do *not* want to cache the resulting selection + /// candidate. 
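The `can_use_global_caches` check above boils down to two conditions: no inference variables in the `ParamEnv`, and not being in intercrate mode. A tiny sketch of that decision, using an assumed `ParamEnvInfo` type in place of the real `ParamEnv` inspection:

```rust
struct ParamEnvInfo {
    has_inference_vars: bool,
}

fn can_use_global_caches(param_env: &ParamEnvInfo, intercrate: bool) -> bool {
    // Inference variables are meaningful only within one `InferCtxt`, so
    // results that mention them must never land in the per-crate cache.
    if param_env.has_inference_vars {
        return false;
    }
    // Coherence checking is quick and rare; keeping its results local avoids
    // having to reason about them "polluting" the shared cache.
    if intercrate {
        return false;
    }
    true
}

fn main() {
    assert!(can_use_global_caches(&ParamEnvInfo { has_inference_vars: false }, false));
    assert!(!can_use_global_caches(&ParamEnvInfo { has_inference_vars: true }, false));
    assert!(!can_use_global_caches(&ParamEnvInfo { has_inference_vars: false }, true));
}
```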
This is due to the fact that it might not always be possible + /// to equate the obligation's trait ref and the candidate's trait ref, + /// if more constraints end up getting added to an inference variable. + /// + /// Because of this, we always want to re-run the full selection + /// process for our obligation the next time we see it, since + /// we might end up picking a different `SelectionCandidate` (or none at all). + fn can_cache_candidate( + &self, + result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>, + ) -> bool { + // Neither the global nor local cache is aware of intercrate + // mode, so don't do any caching. In particular, we might + // re-use the same `InferCtxt` with both an intercrate + // and non-intercrate `SelectionContext` + if self.intercrate { + return false; + } + match result { + Ok(Some(SelectionCandidate::ParamCandidate(trait_ref))) => !trait_ref.needs_infer(), + _ => true, + } + } + + #[instrument(skip(self, param_env, cache_fresh_trait_pred, dep_node), level = "debug")] + fn insert_candidate_cache( + &mut self, + mut param_env: ty::ParamEnv<'tcx>, + cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>, + dep_node: DepNodeIndex, + candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>, + ) { + let tcx = self.tcx(); + let mut pred = cache_fresh_trait_pred.skip_binder(); + + pred.remap_constness(&mut param_env); + + if !self.can_cache_candidate(&candidate) { + debug!(?pred, ?candidate, "insert_candidate_cache - candidate is not cacheable"); + return; + } + + if self.can_use_global_caches(param_env) { + if let Err(Overflow(OverflowError::Canonical)) = candidate { + // Don't cache overflow globally; we only produce this in certain modes. + } else if !pred.needs_infer() { + if !candidate.needs_infer() { + debug!(?pred, ?candidate, "insert_candidate_cache global"); + // This may overwrite the cache with the same value. + tcx.selection_cache.insert((param_env, pred), dep_node, candidate); + return; + } + } + } + + debug!(?pred, ?candidate, "insert_candidate_cache local"); + self.infcx.selection_cache.insert((param_env, pred), dep_node, candidate); + } + + /// Matches a predicate against the bounds of its self type. + /// + /// Given an obligation like `::Bar: Baz` where the self type is + /// a projection, look at the bounds of `T::Bar`, see if we can find a + /// `Baz` bound. We return indexes into the list returned by + /// `tcx.item_bounds` for any applicable bounds. 
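The doc comment above describes matching a predicate against the bounds declared on its self type (e.g. the item bounds of an associated type). In surface Rust the situation looks like this: the `Clone` obligation on `T::Output` is discharged by the bound on the associated type itself, not by naming any concrete impl.

```rust
trait Producer {
    type Output: Clone; // the "item bound" that candidate assembly can use
    fn produce(&self) -> Self::Output;
}

fn duplicate<T: Producer>(p: &T) -> (T::Output, T::Output) {
    let v = p.produce();
    // `T::Output: Clone` holds because of the associated-type bound above.
    (v.clone(), v)
}

struct Ints;

impl Producer for Ints {
    type Output = i32;
    fn produce(&self) -> i32 {
        42
    }
}

fn main() {
    assert_eq!(duplicate(&Ints), (42, 42));
}
```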
+ #[instrument(level = "debug", skip(self))] + fn match_projection_obligation_against_definition_bounds( + &mut self, + obligation: &TraitObligation<'tcx>, + ) -> smallvec::SmallVec<[usize; 2]> { + let poly_trait_predicate = self.infcx().resolve_vars_if_possible(obligation.predicate); + let placeholder_trait_predicate = + self.infcx().replace_bound_vars_with_placeholders(poly_trait_predicate); + debug!(?placeholder_trait_predicate); + + let tcx = self.infcx.tcx; + let (def_id, substs) = match *placeholder_trait_predicate.trait_ref.self_ty().kind() { + ty::Projection(ref data) => (data.item_def_id, data.substs), + ty::Opaque(def_id, substs) => (def_id, substs), + _ => { + span_bug!( + obligation.cause.span, + "match_projection_obligation_against_definition_bounds() called \ + but self-ty is not a projection: {:?}", + placeholder_trait_predicate.trait_ref.self_ty() + ); + } + }; + let bounds = tcx.bound_item_bounds(def_id).subst(tcx, substs); + + // The bounds returned by `item_bounds` may contain duplicates after + // normalization, so try to deduplicate when possible to avoid + // unnecessary ambiguity. + let mut distinct_normalized_bounds = FxHashSet::default(); + + let matching_bounds = bounds + .iter() + .enumerate() + .filter_map(|(idx, bound)| { + let bound_predicate = bound.kind(); + if let ty::PredicateKind::Trait(pred) = bound_predicate.skip_binder() { + let bound = bound_predicate.rebind(pred.trait_ref); + if self.infcx.probe(|_| { + match self.match_normalize_trait_ref( + obligation, + bound, + placeholder_trait_predicate.trait_ref, + ) { + Ok(None) => true, + Ok(Some(normalized_trait)) + if distinct_normalized_bounds.insert(normalized_trait) => + { + true + } + _ => false, + } + }) { + return Some(idx); + } + } + None + }) + .collect(); + + debug!(?matching_bounds); + matching_bounds + } + + /// Equates the trait in `obligation` with trait bound. If the two traits + /// can be equated and the normalized trait bound doesn't contain inference + /// variables or placeholders, the normalized bound is returned. + fn match_normalize_trait_ref( + &mut self, + obligation: &TraitObligation<'tcx>, + trait_bound: ty::PolyTraitRef<'tcx>, + placeholder_trait_ref: ty::TraitRef<'tcx>, + ) -> Result>, ()> { + debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars()); + if placeholder_trait_ref.def_id != trait_bound.def_id() { + // Avoid unnecessary normalization + return Err(()); + } + + let Normalized { value: trait_bound, obligations: _ } = ensure_sufficient_stack(|| { + project::normalize_with_depth( + self, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth + 1, + trait_bound, + ) + }); + self.infcx + .at(&obligation.cause, obligation.param_env) + .define_opaque_types(false) + .sup(ty::Binder::dummy(placeholder_trait_ref), trait_bound) + .map(|InferOk { obligations: _, value: () }| { + // This method is called within a probe, so we can't have + // inference variables and placeholders escape. 
+ if !trait_bound.needs_infer() && !trait_bound.has_placeholders() { + Some(trait_bound) + } else { + None + } + }) + .map_err(|_| ()) + } + + fn where_clause_may_apply<'o>( + &mut self, + stack: &TraitObligationStack<'o, 'tcx>, + where_clause_trait_ref: ty::PolyTraitRef<'tcx>, + ) -> Result { + self.evaluation_probe(|this| { + match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) { + Ok(obligations) => this.evaluate_predicates_recursively(stack.list(), obligations), + Err(()) => Ok(EvaluatedToErr), + } + }) + } + + /// Return `Yes` if the obligation's predicate type applies to the env_predicate, and + /// `No` if it does not. Return `Ambiguous` in the case that the projection type is a GAT, + /// and applying this env_predicate constrains any of the obligation's GAT substitutions. + /// + /// This behavior is a somewhat of a hack to prevent over-constraining inference variables + /// in cases like #91762. + pub(super) fn match_projection_projections( + &mut self, + obligation: &ProjectionTyObligation<'tcx>, + env_predicate: PolyProjectionPredicate<'tcx>, + potentially_unnormalized_candidates: bool, + ) -> ProjectionMatchesProjection { + let mut nested_obligations = Vec::new(); + let infer_predicate = self.infcx.replace_bound_vars_with_fresh_vars( + obligation.cause.span, + LateBoundRegionConversionTime::HigherRankedType, + env_predicate, + ); + let infer_projection = if potentially_unnormalized_candidates { + ensure_sufficient_stack(|| { + project::normalize_with_depth_to( + self, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth + 1, + infer_predicate.projection_ty, + &mut nested_obligations, + ) + }) + } else { + infer_predicate.projection_ty + }; + + let is_match = self + .infcx + .at(&obligation.cause, obligation.param_env) + .define_opaque_types(false) + .sup(obligation.predicate, infer_projection) + .map_or(false, |InferOk { obligations, value: () }| { + self.evaluate_predicates_recursively( + TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()), + nested_obligations.into_iter().chain(obligations), + ) + .map_or(false, |res| res.may_apply()) + }); + + if is_match { + let generics = self.tcx().generics_of(obligation.predicate.item_def_id); + // FIXME(generic-associated-types): Addresses aggressive inference in #92917. + // If this type is a GAT, and of the GAT substs resolve to something new, + // that means that we must have newly inferred something about the GAT. + // We should give up in that case. + if !generics.params.is_empty() + && obligation.predicate.substs[generics.parent_count..] + .iter() + .any(|&p| p.has_infer_types_or_consts() && self.infcx.shallow_resolve(p) != p) + { + ProjectionMatchesProjection::Ambiguous + } else { + ProjectionMatchesProjection::Yes + } + } else { + ProjectionMatchesProjection::No + } + } + + /////////////////////////////////////////////////////////////////////////// + // WINNOW + // + // Winnowing is the process of attempting to resolve ambiguity by + // probing further. During the winnowing process, we unify all + // type variables and then we also attempt to evaluate recursive + // bounds to see if they are satisfied. + + /// Returns `true` if `victim` should be dropped in favor of + /// `other`. Generally speaking we will drop duplicate + /// candidates and prefer where-clause candidates. + /// + /// See the comment for "SelectionCandidate" for more details. 
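`match_projection_projections` above matches a projection obligation against a projection predicate already present in the environment. In surface Rust, that environment predicate comes from a where clause such as `I: Iterator<Item = u32>`, and uses of `I::Item` are resolved against it rather than against any impl:

```rust
fn sum_items<I>(iter: I) -> u32
where
    I: Iterator<Item = u32>, // env predicate: <I as Iterator>::Item == u32
{
    let mut total = 0;
    for x in iter {
        total += x; // `x` has the projected type `u32`
    }
    total
}

fn main() {
    assert_eq!(sum_items(vec![1u32, 2, 3].into_iter()), 6);
}
```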
+ fn candidate_should_be_dropped_in_favor_of( + &mut self, + victim: &EvaluatedCandidate<'tcx>, + other: &EvaluatedCandidate<'tcx>, + needs_infer: bool, + ) -> bool { + if victim.candidate == other.candidate { + return true; + } + + // Check if a bound would previously have been removed when normalizing + // the param_env so that it can be given the lowest priority. See + // #50825 for the motivation for this. + let is_global = |cand: &ty::PolyTraitPredicate<'tcx>| { + cand.is_global() && !cand.has_late_bound_regions() + }; + + // (*) Prefer `BuiltinCandidate { has_nested: false }`, `PointeeCandidate`, + // `DiscriminantKindCandidate`, and `ConstDestructCandidate` to anything else. + // + // This is a fix for #53123 and prevents winnowing from accidentally extending the + // lifetime of a variable. + match (&other.candidate, &victim.candidate) { + (_, AutoImplCandidate(..)) | (AutoImplCandidate(..), _) => { + bug!( + "default implementations shouldn't be recorded \ + when there are other valid candidates" + ); + } + + // FIXME(@jswrenn): this should probably be more sophisticated + (TransmutabilityCandidate, _) | (_, TransmutabilityCandidate) => false, + + // (*) + ( + BuiltinCandidate { has_nested: false } + | DiscriminantKindCandidate + | PointeeCandidate + | ConstDestructCandidate(_), + _, + ) => true, + ( + _, + BuiltinCandidate { has_nested: false } + | DiscriminantKindCandidate + | PointeeCandidate + | ConstDestructCandidate(_), + ) => false, + + (ParamCandidate(other), ParamCandidate(victim)) => { + let same_except_bound_vars = other.skip_binder().trait_ref + == victim.skip_binder().trait_ref + && other.skip_binder().constness == victim.skip_binder().constness + && other.skip_binder().polarity == victim.skip_binder().polarity + && !other.skip_binder().trait_ref.has_escaping_bound_vars(); + if same_except_bound_vars { + // See issue #84398. In short, we can generate multiple ParamCandidates which are + // the same except for unused bound vars. Just pick the one with the fewest bound vars + // or the current one if tied (they should both evaluate to the same answer). This is + // probably best characterized as a "hack", since we might prefer to just do our + // best to *not* create essentially duplicate candidates in the first place. + other.bound_vars().len() <= victim.bound_vars().len() + } else if other.skip_binder().trait_ref == victim.skip_binder().trait_ref + && victim.skip_binder().constness == ty::BoundConstness::NotConst + && other.skip_binder().polarity == victim.skip_binder().polarity + { + // Drop otherwise equivalent non-const candidates in favor of const candidates. + true + } else { + false + } + } + + // Drop otherwise equivalent non-const fn pointer candidates + (FnPointerCandidate { .. }, FnPointerCandidate { is_const: false }) => true, + + // Global bounds from the where clause should be ignored + // here (see issue #50825). Otherwise, we have a where + // clause so don't go around looking for impls. + // Arbitrarily give param candidates priority + // over projection and object candidates. + ( + ParamCandidate(ref cand), + ImplCandidate(..) + | ClosureCandidate + | GeneratorCandidate + | FnPointerCandidate { .. } + | BuiltinObjectCandidate + | BuiltinUnsizeCandidate + | TraitUpcastingUnsizeCandidate(_) + | BuiltinCandidate { .. } + | TraitAliasCandidate(..) 
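One of the central winnowing rules above is that where-clause (`ParamCandidate`) bounds are preferred over impls and most other candidates. A small surface-Rust example of why that is the right call inside a generic function: the only justification for `T: Greet` in the body is the where clause, even though a concrete impl also exists.

```rust
trait Greet {
    fn greet(&self) -> &'static str;
}

impl Greet for u8 {
    fn greet(&self) -> &'static str {
        "hello from u8"
    }
}

fn takes_anything<T: Greet>(x: T) -> &'static str {
    // Selection resolves `T: Greet` to the where-clause bound here; the `u8`
    // impl only becomes relevant at the call site, once `T` is instantiated.
    x.greet()
}

fn main() {
    assert_eq!(takes_anything(7u8), "hello from u8");
}
```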
+ | ObjectCandidate(_) + | ProjectionCandidate(_), + ) => !is_global(cand), + (ObjectCandidate(_) | ProjectionCandidate(_), ParamCandidate(ref cand)) => { + // Prefer these to a global where-clause bound + // (see issue #50825). + is_global(cand) + } + ( + ImplCandidate(_) + | ClosureCandidate + | GeneratorCandidate + | FnPointerCandidate { .. } + | BuiltinObjectCandidate + | BuiltinUnsizeCandidate + | TraitUpcastingUnsizeCandidate(_) + | BuiltinCandidate { has_nested: true } + | TraitAliasCandidate(..), + ParamCandidate(ref cand), + ) => { + // Prefer these to a global where-clause bound + // (see issue #50825). + is_global(cand) && other.evaluation.must_apply_modulo_regions() + } + + (ProjectionCandidate(i), ProjectionCandidate(j)) + | (ObjectCandidate(i), ObjectCandidate(j)) => { + // Arbitrarily pick the lower numbered candidate for backwards + // compatibility reasons. Don't let this affect inference. + i < j && !needs_infer + } + (ObjectCandidate(_), ProjectionCandidate(_)) + | (ProjectionCandidate(_), ObjectCandidate(_)) => { + bug!("Have both object and projection candidate") + } + + // Arbitrarily give projection and object candidates priority. + ( + ObjectCandidate(_) | ProjectionCandidate(_), + ImplCandidate(..) + | ClosureCandidate + | GeneratorCandidate + | FnPointerCandidate { .. } + | BuiltinObjectCandidate + | BuiltinUnsizeCandidate + | TraitUpcastingUnsizeCandidate(_) + | BuiltinCandidate { .. } + | TraitAliasCandidate(..), + ) => true, + + ( + ImplCandidate(..) + | ClosureCandidate + | GeneratorCandidate + | FnPointerCandidate { .. } + | BuiltinObjectCandidate + | BuiltinUnsizeCandidate + | TraitUpcastingUnsizeCandidate(_) + | BuiltinCandidate { .. } + | TraitAliasCandidate(..), + ObjectCandidate(_) | ProjectionCandidate(_), + ) => false, + + (&ImplCandidate(other_def), &ImplCandidate(victim_def)) => { + // See if we can toss out `victim` based on specialization. + // This requires us to know *for sure* that the `other` impl applies + // i.e., `EvaluatedToOk`. + // + // FIXME(@lcnr): Using `modulo_regions` here seems kind of scary + // to me but is required for `std` to compile, so I didn't change it + // for now. + let tcx = self.tcx(); + if other.evaluation.must_apply_modulo_regions() { + if tcx.specializes((other_def, victim_def)) { + return true; + } + } + + if other.evaluation.must_apply_considering_regions() { + match tcx.impls_are_allowed_to_overlap(other_def, victim_def) { + Some(ty::ImplOverlapKind::Permitted { marker: true }) => { + // Subtle: If the predicate we are evaluating has inference + // variables, do *not* allow discarding candidates due to + // marker trait impls. + // + // Without this restriction, we could end up accidentally + // constraining inference variables based on an arbitrarily + // chosen trait impl. + // + // Imagine we have the following code: + // + // ```rust + // #[marker] trait MyTrait {} + // impl MyTrait for u8 {} + // impl MyTrait for bool {} + // ``` + // + // And we are evaluating the predicate `<_#0t as MyTrait>`. + // + // During selection, we will end up with one candidate for each + // impl of `MyTrait`. If we were to discard one impl in favor + // of the other, we would be left with one candidate, causing + // us to "successfully" select the predicate, unifying + // _#0t with (for example) `u8`. + // + // However, we have no reason to believe that this unification + // is correct - we've essentially just picked an arbitrary + // *possibility* for _#0t, and required that this be the *only* + // possibility. 
+ // + // Eventually, we will either: + // 1) Unify all inference variables in the predicate through + // some other means (e.g. type-checking of a function). We will + // then be in a position to drop marker trait candidates + // without constraining inference variables (since there are + // none left to constrain) + // 2) Be left with some unconstrained inference variables. We + // will then correctly report an inference error, since the + // existence of multiple marker trait impls tells us nothing + // about which one should actually apply. + !needs_infer + } + Some(_) => true, + None => false, + } + } else { + false + } + } + + // Everything else is ambiguous + ( + ImplCandidate(_) + | ClosureCandidate + | GeneratorCandidate + | FnPointerCandidate { .. } + | BuiltinObjectCandidate + | BuiltinUnsizeCandidate + | TraitUpcastingUnsizeCandidate(_) + | BuiltinCandidate { has_nested: true } + | TraitAliasCandidate(..), + ImplCandidate(_) + | ClosureCandidate + | GeneratorCandidate + | FnPointerCandidate { .. } + | BuiltinObjectCandidate + | BuiltinUnsizeCandidate + | TraitUpcastingUnsizeCandidate(_) + | BuiltinCandidate { has_nested: true } + | TraitAliasCandidate(..), + ) => false, + } + } + + fn sized_conditions( + &mut self, + obligation: &TraitObligation<'tcx>, + ) -> BuiltinImplConditions<'tcx> { + use self::BuiltinImplConditions::{Ambiguous, None, Where}; + + // NOTE: binder moved to (*) + let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty()); + + match self_ty.kind() { + ty::Infer(ty::IntVar(_) | ty::FloatVar(_)) + | ty::Uint(_) + | ty::Int(_) + | ty::Bool + | ty::Float(_) + | ty::FnDef(..) + | ty::FnPtr(_) + | ty::RawPtr(..) + | ty::Char + | ty::Ref(..) + | ty::Generator(..) + | ty::GeneratorWitness(..) + | ty::Array(..) + | ty::Closure(..) + | ty::Never + | ty::Error(_) => { + // safe for everything + Where(ty::Binder::dummy(Vec::new())) + } + + ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None, + + ty::Tuple(tys) => Where( + obligation.predicate.rebind(tys.last().map_or_else(Vec::new, |&last| vec![last])), + ), + + ty::Adt(def, substs) => { + let sized_crit = def.sized_constraint(self.tcx()); + // (*) binder moved here + Where(obligation.predicate.rebind({ + sized_crit + .0 + .iter() + .map(|ty| sized_crit.rebind(*ty).subst(self.tcx(), substs)) + .collect() + })) + } + + ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => None, + ty::Infer(ty::TyVar(_)) => Ambiguous, + + ty::Placeholder(..) + | ty::Bound(..) + | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => { + bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty); + } + } + } + + fn copy_clone_conditions( + &mut self, + obligation: &TraitObligation<'tcx>, + ) -> BuiltinImplConditions<'tcx> { + // NOTE: binder moved to (*) + let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty()); + + use self::BuiltinImplConditions::{Ambiguous, None, Where}; + + match *self_ty.kind() { + ty::Infer(ty::IntVar(_)) + | ty::Infer(ty::FloatVar(_)) + | ty::FnDef(..) + | ty::FnPtr(_) + | ty::Error(_) => Where(ty::Binder::dummy(Vec::new())), + + ty::Uint(_) + | ty::Int(_) + | ty::Bool + | ty::Float(_) + | ty::Char + | ty::RawPtr(..) + | ty::Never + | ty::Ref(_, _, hir::Mutability::Not) + | ty::Array(..) => { + // Implementations provided in libcore + None + } + + ty::Dynamic(..) + | ty::Str + | ty::Slice(..) + | ty::Generator(..) + | ty::GeneratorWitness(..) + | ty::Foreign(..) 
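`sized_conditions` above encodes, among other things, that a tuple is `Sized` when its last element is, and that an ADT's `Sized`-ness hinges on its "sized constraint" (essentially its tail field). A surface-Rust illustration:

```rust
struct Wrapper<T: ?Sized> {
    len: usize,
    tail: T, // the sized constraint of `Wrapper<T>` is just `T`
}

fn needs_sized<T: Sized>() {}

fn main() {
    needs_sized::<Wrapper<[u8; 4]>>(); // ok: the tail field is sized
    needs_sized::<(u8, Vec<u8>, u32)>(); // ok: only the last tuple element matters
    // needs_sized::<Wrapper<[u8]>>(); // would not compile: the tail `[u8]` is unsized
}
```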
+ | ty::Ref(_, _, hir::Mutability::Mut) => None, + + ty::Tuple(tys) => { + // (*) binder moved here + Where(obligation.predicate.rebind(tys.iter().collect())) + } + + ty::Closure(_, substs) => { + // (*) binder moved here + let ty = self.infcx.shallow_resolve(substs.as_closure().tupled_upvars_ty()); + if let ty::Infer(ty::TyVar(_)) = ty.kind() { + // Not yet resolved. + Ambiguous + } else { + Where(obligation.predicate.rebind(substs.as_closure().upvar_tys().collect())) + } + } + + ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Opaque(..) => { + // Fallback to whatever user-defined impls exist in this case. + None + } + + ty::Infer(ty::TyVar(_)) => { + // Unbound type variable. Might or might not have + // applicable impls and so forth, depending on what + // those type variables wind up being bound to. + Ambiguous + } + + ty::Placeholder(..) + | ty::Bound(..) + | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => { + bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty); + } + } + } + + /// For default impls, we need to break apart a type into its + /// "constituent types" -- meaning, the types that it contains. + /// + /// Here are some (simple) examples: + /// + /// ```ignore (illustrative) + /// (i32, u32) -> [i32, u32] + /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32] + /// Bar where struct Bar { x: T, y: u32 } -> [i32, u32] + /// Zed where enum Zed { A(T), B(u32) } -> [i32, u32] + /// ``` + fn constituent_types_for_ty( + &self, + t: ty::Binder<'tcx, Ty<'tcx>>, + ) -> ty::Binder<'tcx, Vec>> { + match *t.skip_binder().kind() { + ty::Uint(_) + | ty::Int(_) + | ty::Bool + | ty::Float(_) + | ty::FnDef(..) + | ty::FnPtr(_) + | ty::Str + | ty::Error(_) + | ty::Infer(ty::IntVar(_) | ty::FloatVar(_)) + | ty::Never + | ty::Char => ty::Binder::dummy(Vec::new()), + + ty::Placeholder(..) + | ty::Dynamic(..) + | ty::Param(..) + | ty::Foreign(..) + | ty::Projection(..) + | ty::Bound(..) + | ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => { + bug!("asked to assemble constituent types of unexpected type: {:?}", t); + } + + ty::RawPtr(ty::TypeAndMut { ty: element_ty, .. }) | ty::Ref(_, element_ty, _) => { + t.rebind(vec![element_ty]) + } + + ty::Array(element_ty, _) | ty::Slice(element_ty) => t.rebind(vec![element_ty]), + + ty::Tuple(ref tys) => { + // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet + t.rebind(tys.iter().collect()) + } + + ty::Closure(_, ref substs) => { + let ty = self.infcx.shallow_resolve(substs.as_closure().tupled_upvars_ty()); + t.rebind(vec![ty]) + } + + ty::Generator(_, ref substs, _) => { + let ty = self.infcx.shallow_resolve(substs.as_generator().tupled_upvars_ty()); + let witness = substs.as_generator().witness(); + t.rebind([ty].into_iter().chain(iter::once(witness)).collect()) + } + + ty::GeneratorWitness(types) => { + debug_assert!(!types.has_escaping_bound_vars()); + types.map_bound(|types| types.to_vec()) + } + + // For `PhantomData`, we pass `T`. + ty::Adt(def, substs) if def.is_phantom_data() => t.rebind(substs.types().collect()), + + ty::Adt(def, substs) => { + t.rebind(def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect()) + } + + ty::Opaque(def_id, substs) => { + // We can resolve the `impl Trait` to its concrete type, + // which enforces a DAG between the functions requiring + // the auto trait bounds in question. 
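`constituent_types_for_ty` above feeds default (auto trait) impls: an auto trait like `Send` holds for an ADT exactly when it holds for the types of its fields. A concrete illustration:

```rust
use std::rc::Rc;

struct AllSend {
    a: i32,
    b: Vec<String>,
}
// `AllSend: Send` because both constituent types, `i32` and `Vec<String>`, are `Send`.

struct NotSend {
    shared: Rc<u8>, // `Rc<u8>` is not `Send`, so neither is `NotSend`
}

fn assert_send<T: Send>() {}

fn main() {
    assert_send::<AllSend>();
    // assert_send::<NotSend>(); // would not compile: constituent `Rc<u8>` is not Send
}
```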
+ t.rebind(vec![self.tcx().bound_type_of(def_id).subst(self.tcx(), substs)]) + } + } + } + + fn collect_predicates_for_types( + &mut self, + param_env: ty::ParamEnv<'tcx>, + cause: ObligationCause<'tcx>, + recursion_depth: usize, + trait_def_id: DefId, + types: ty::Binder<'tcx, Vec>>, + ) -> Vec> { + // Because the types were potentially derived from + // higher-ranked obligations they may reference late-bound + // regions. For example, `for<'a> Foo<&'a i32> : Copy` would + // yield a type like `for<'a> &'a i32`. In general, we + // maintain the invariant that we never manipulate bound + // regions, so we have to process these bound regions somehow. + // + // The strategy is to: + // + // 1. Instantiate those regions to placeholder regions (e.g., + // `for<'a> &'a i32` becomes `&0 i32`. + // 2. Produce something like `&'0 i32 : Copy` + // 3. Re-bind the regions back to `for<'a> &'a i32 : Copy` + + types + .as_ref() + .skip_binder() // binder moved -\ + .iter() + .flat_map(|ty| { + let ty: ty::Binder<'tcx, Ty<'tcx>> = types.rebind(*ty); // <----/ + + let placeholder_ty = self.infcx.replace_bound_vars_with_placeholders(ty); + let Normalized { value: normalized_ty, mut obligations } = + ensure_sufficient_stack(|| { + project::normalize_with_depth( + self, + param_env, + cause.clone(), + recursion_depth, + placeholder_ty, + ) + }); + let placeholder_obligation = predicate_for_trait_def( + self.tcx(), + param_env, + cause.clone(), + trait_def_id, + recursion_depth, + normalized_ty, + &[], + ); + obligations.push(placeholder_obligation); + obligations + }) + .collect() + } + + /////////////////////////////////////////////////////////////////////////// + // Matching + // + // Matching is a common path used for both evaluation and + // confirmation. It basically unifies types that appear in impls + // and traits. This does affect the surrounding environment; + // therefore, when used during evaluation, match routines must be + // run inside of a `probe()` so that their side-effects are + // contained. 
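The binder handling in `collect_predicates_for_types` above exists because constituent types can be higher-ranked, e.g. something like `for<'a> &'a i32: Copy`. The surface-Rust analogue of such a bound, which the compiler checks by instantiating the lifetime with a placeholder, looks like this (a loose illustration, not the compiler-internal form):

```rust
fn call_with_ref<F>(f: F) -> i32
where
    F: for<'a> Fn(&'a i32) -> i32, // a higher-ranked (binder-wrapped) bound
{
    let x = 7;
    f(&x)
}

fn main() {
    assert_eq!(call_with_ref(|r| r + 1), 8);
}
```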
+ + fn rematch_impl( + &mut self, + impl_def_id: DefId, + obligation: &TraitObligation<'tcx>, + ) -> Normalized<'tcx, SubstsRef<'tcx>> { + let impl_trait_ref = self.tcx().bound_impl_trait_ref(impl_def_id).unwrap(); + match self.match_impl(impl_def_id, impl_trait_ref, obligation) { + Ok(substs) => substs, + Err(()) => { + self.infcx.tcx.sess.delay_span_bug( + obligation.cause.span, + &format!( + "Impl {:?} was matchable against {:?} but now is not", + impl_def_id, obligation + ), + ); + let value = self.infcx.fresh_substs_for_item(obligation.cause.span, impl_def_id); + let err = self.tcx().ty_error(); + let value = value.fold_with(&mut BottomUpFolder { + tcx: self.tcx(), + ty_op: |_| err, + lt_op: |l| l, + ct_op: |c| c, + }); + Normalized { value, obligations: vec![] } + } + } + } + + #[tracing::instrument(level = "debug", skip(self))] + fn match_impl( + &mut self, + impl_def_id: DefId, + impl_trait_ref: EarlyBinder>, + obligation: &TraitObligation<'tcx>, + ) -> Result>, ()> { + let placeholder_obligation = + self.infcx().replace_bound_vars_with_placeholders(obligation.predicate); + let placeholder_obligation_trait_ref = placeholder_obligation.trait_ref; + + let impl_substs = self.infcx.fresh_substs_for_item(obligation.cause.span, impl_def_id); + + let impl_trait_ref = impl_trait_ref.subst(self.tcx(), impl_substs); + + debug!(?impl_trait_ref); + + let Normalized { value: impl_trait_ref, obligations: mut nested_obligations } = + ensure_sufficient_stack(|| { + project::normalize_with_depth( + self, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth + 1, + impl_trait_ref, + ) + }); + + debug!(?impl_trait_ref, ?placeholder_obligation_trait_ref); + + let cause = ObligationCause::new( + obligation.cause.span, + obligation.cause.body_id, + ObligationCauseCode::MatchImpl(obligation.cause.clone(), impl_def_id), + ); + + let InferOk { obligations, .. } = self + .infcx + .at(&cause, obligation.param_env) + .define_opaque_types(false) + .eq(placeholder_obligation_trait_ref, impl_trait_ref) + .map_err(|e| debug!("match_impl: failed eq_trait_refs due to `{}`", e))?; + nested_obligations.extend(obligations); + + if !self.intercrate + && self.tcx().impl_polarity(impl_def_id) == ty::ImplPolarity::Reservation + { + debug!("match_impl: reservation impls only apply in intercrate mode"); + return Err(()); + } + + debug!(?impl_substs, ?nested_obligations, "match_impl: success"); + Ok(Normalized { value: impl_substs, obligations: nested_obligations }) + } + + fn fast_reject_trait_refs( + &mut self, + obligation: &TraitObligation<'tcx>, + impl_trait_ref: &ty::TraitRef<'tcx>, + ) -> bool { + // We can avoid creating type variables and doing the full + // substitution if we find that any of the input types, when + // simplified, do not match. + let drcx = DeepRejectCtxt { treat_obligation_params: TreatParams::AsPlaceholder }; + iter::zip(obligation.predicate.skip_binder().trait_ref.substs, impl_trait_ref.substs) + .any(|(obl, imp)| !drcx.generic_args_may_unify(obl, imp)) + } + + /// Normalize `where_clause_trait_ref` and try to match it against + /// `obligation`. If successful, return any predicates that + /// result from the normalization. + fn match_where_clause_trait_ref( + &mut self, + obligation: &TraitObligation<'tcx>, + where_clause_trait_ref: ty::PolyTraitRef<'tcx>, + ) -> Result>, ()> { + self.match_poly_trait_ref(obligation, where_clause_trait_ref) + } + + /// Returns `Ok` if `poly_trait_ref` being true implies that the + /// obligation is satisfied. 
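`fast_reject_trait_refs` above avoids full unification by first comparing cheap structural summaries of the types. A toy version of that idea, with an illustrative `SimplifiedType` standing in for rustc's real simplified-type machinery:

```rust
#[derive(PartialEq, Debug)]
enum SimplifiedType {
    Int,
    Float,
    Adt(&'static str),
    Unknown, // e.g. an inference variable: could still unify with anything
}

fn may_unify(obligation: &SimplifiedType, impl_ty: &SimplifiedType) -> bool {
    match (obligation, impl_ty) {
        // An unknown on either side means we cannot reject quickly.
        (SimplifiedType::Unknown, _) | (_, SimplifiedType::Unknown) => true,
        (a, b) => a == b,
    }
}

fn main() {
    assert!(!may_unify(&SimplifiedType::Int, &SimplifiedType::Adt("Vec")));
    assert!(may_unify(&SimplifiedType::Unknown, &SimplifiedType::Float));
}
```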
+ #[instrument(skip(self), level = "debug")] + fn match_poly_trait_ref( + &mut self, + obligation: &TraitObligation<'tcx>, + poly_trait_ref: ty::PolyTraitRef<'tcx>, + ) -> Result>, ()> { + self.infcx + .at(&obligation.cause, obligation.param_env) + // We don't want predicates for opaque types to just match all other types, + // if there is an obligation on the opaque type, then that obligation must be met + // opaquely. Otherwise we'd match any obligation to the opaque type and then error + // out later. + .define_opaque_types(false) + .sup(obligation.predicate.to_poly_trait_ref(), poly_trait_ref) + .map(|InferOk { obligations, .. }| obligations) + .map_err(|_| ()) + } + + /////////////////////////////////////////////////////////////////////////// + // Miscellany + + fn match_fresh_trait_refs( + &self, + previous: ty::PolyTraitPredicate<'tcx>, + current: ty::PolyTraitPredicate<'tcx>, + param_env: ty::ParamEnv<'tcx>, + ) -> bool { + let mut matcher = ty::_match::Match::new(self.tcx(), param_env); + matcher.relate(previous, current).is_ok() + } + + fn push_stack<'o>( + &mut self, + previous_stack: TraitObligationStackList<'o, 'tcx>, + obligation: &'o TraitObligation<'tcx>, + ) -> TraitObligationStack<'o, 'tcx> { + let fresh_trait_pred = obligation.predicate.fold_with(&mut self.freshener); + + let dfn = previous_stack.cache.next_dfn(); + let depth = previous_stack.depth() + 1; + TraitObligationStack { + obligation, + fresh_trait_pred, + reached_depth: Cell::new(depth), + previous: previous_stack, + dfn, + depth, + } + } + + #[instrument(skip(self), level = "debug")] + fn closure_trait_ref_unnormalized( + &mut self, + obligation: &TraitObligation<'tcx>, + substs: SubstsRef<'tcx>, + ) -> ty::PolyTraitRef<'tcx> { + let closure_sig = substs.as_closure().sig(); + + debug!(?closure_sig); + + // (1) Feels icky to skip the binder here, but OTOH we know + // that the self-type is an unboxed closure type and hence is + // in fact unparameterized (or at least does not reference any + // regions bound in the obligation). Still probably some + // refactoring could make this nicer. + closure_trait_ref_and_return_type( + self.tcx(), + obligation.predicate.def_id(), + obligation.predicate.skip_binder().self_ty(), // (1) + closure_sig, + util::TupleArgumentsFlag::No, + ) + .map_bound(|(trait_ref, _)| trait_ref) + } + + fn generator_trait_ref_unnormalized( + &mut self, + obligation: &TraitObligation<'tcx>, + substs: SubstsRef<'tcx>, + ) -> ty::PolyTraitRef<'tcx> { + let gen_sig = substs.as_generator().poly_sig(); + + // (1) Feels icky to skip the binder here, but OTOH we know + // that the self-type is an generator type and hence is + // in fact unparameterized (or at least does not reference any + // regions bound in the obligation). Still probably some + // refactoring could make this nicer. + + super::util::generator_trait_ref_and_outputs( + self.tcx(), + obligation.predicate.def_id(), + obligation.predicate.skip_binder().self_ty(), // (1) + gen_sig, + ) + .map_bound(|(trait_ref, ..)| trait_ref) + } + + /// Returns the obligations that are implied by instantiating an + /// impl or trait. The obligations are substituted and fully + /// normalized. This is used when confirming an impl or default + /// impl. 
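`closure_trait_ref_unnormalized` above builds a closure's `Fn*` trait reference from its inferred signature. In surface Rust, that is why the closure below satisfies `Fn(i32) -> i32` without any explicit impl anywhere:

```rust
fn apply<F: Fn(i32) -> i32>(f: F, x: i32) -> i32 {
    f(x)
}

fn main() {
    let captured = 10;
    let add = move |x: i32| x + captured; // inferred signature: fn(i32) -> i32
    assert_eq!(apply(add, 5), 15);
}
```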
+ #[tracing::instrument(level = "debug", skip(self, cause, param_env))] + fn impl_or_trait_obligations( + &mut self, + cause: &ObligationCause<'tcx>, + recursion_depth: usize, + param_env: ty::ParamEnv<'tcx>, + def_id: DefId, // of impl or trait + substs: SubstsRef<'tcx>, // for impl or trait + parent_trait_pred: ty::Binder<'tcx, ty::TraitPredicate<'tcx>>, + ) -> Vec> { + let tcx = self.tcx(); + + // To allow for one-pass evaluation of the nested obligation, + // each predicate must be preceded by the obligations required + // to normalize it. + // for example, if we have: + // impl, V: Iterator> Foo for V + // the impl will have the following predicates: + // ::Item = U, + // U: Iterator, U: Sized, + // V: Iterator, V: Sized, + // ::Item: Copy + // When we substitute, say, `V => IntoIter, U => $0`, the last + // obligation will normalize to `<$0 as Iterator>::Item = $1` and + // `$1: Copy`, so we must ensure the obligations are emitted in + // that order. + let predicates = tcx.bound_predicates_of(def_id); + debug!(?predicates); + assert_eq!(predicates.0.parent, None); + let mut obligations = Vec::with_capacity(predicates.0.predicates.len()); + for (predicate, span) in predicates.0.predicates { + let span = *span; + let cause = cause.clone().derived_cause(parent_trait_pred, |derived| { + ImplDerivedObligation(Box::new(ImplDerivedObligationCause { + derived, + impl_def_id: def_id, + span, + })) + }); + let predicate = normalize_with_depth_to( + self, + param_env, + cause.clone(), + recursion_depth, + predicates.rebind(*predicate).subst(tcx, substs), + &mut obligations, + ); + obligations.push(Obligation { cause, recursion_depth, param_env, predicate }); + } + + obligations + } +} + +impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> { + fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> { + TraitObligationStackList::with(self) + } + + fn cache(&self) -> &'o ProvisionalEvaluationCache<'tcx> { + self.previous.cache + } + + fn iter(&'o self) -> TraitObligationStackList<'o, 'tcx> { + self.list() + } + + /// Indicates that attempting to evaluate this stack entry + /// required accessing something from the stack at depth `reached_depth`. + fn update_reached_depth(&self, reached_depth: usize) { + assert!( + self.depth >= reached_depth, + "invoked `update_reached_depth` with something under this stack: \ + self.depth={} reached_depth={}", + self.depth, + reached_depth, + ); + debug!(reached_depth, "update_reached_depth"); + let mut p = self; + while reached_depth < p.depth { + debug!(?p.fresh_trait_pred, "update_reached_depth: marking as cycle participant"); + p.reached_depth.set(p.reached_depth.get().min(reached_depth)); + p = p.previous.head.unwrap(); + } + } +} + +/// The "provisional evaluation cache" is used to store intermediate cache results +/// when solving auto traits. Auto traits are unusual in that they can support +/// cycles. So, for example, a "proof tree" like this would be ok: +/// +/// - `Foo: Send` :- +/// - `Bar: Send` :- +/// - `Foo: Send` -- cycle, but ok +/// - `Baz: Send` +/// +/// Here, to prove `Foo: Send`, we have to prove `Bar: Send` and +/// `Baz: Send`. Proving `Bar: Send` in turn required `Foo: Send`. +/// For non-auto traits, this cycle would be an error, but for auto traits (because +/// they are coinductive) it is considered ok. +/// +/// However, there is a complication: at the point where we have +/// "proven" `Bar: Send`, we have in fact only proven it +/// *provisionally*. In particular, we proved that `Bar: Send` +/// *under the assumption* that `Foo: Send`. 
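The proof tree sketched in the comment above corresponds to ordinary Rust types with mutually recursive fields. Because auto traits are coinductive, the `Foo: Send` / `Bar: Send` cycle is accepted:

```rust
struct Foo {
    bar: Option<Box<Bar>>,
}

struct Bar {
    foo: Option<Box<Foo>>, // cycles back to `Foo: Send`
    baz: Vec<u8>,          // the additional `Baz: Send`-style obligation
}

fn assert_send<T: Send>() {}

fn main() {
    // Proving `Foo: Send` needs `Bar: Send`, which needs `Foo: Send` again;
    // the cycle is fine for auto traits.
    assert_send::<Foo>();
    assert_send::<Bar>();
}
```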
But what if we later +/// find out this assumption is wrong? Specifically, we could +/// encounter some kind of error proving `Baz: Send`. In that case, +/// `Bar: Send` didn't turn out to be true. +/// +/// In Issue #60010, we found a bug in rustc where it would cache +/// these intermediate results. This was fixed in #60444 by disabling +/// *all* caching for things involved in a cycle -- in our example, +/// that would mean we don't cache that `Bar: Send`. But this led +/// to large slowdowns. +/// +/// Specifically, imagine this scenario, where proving `Baz: Send` +/// first requires proving `Bar: Send` (which is true: +/// +/// - `Foo: Send` :- +/// - `Bar: Send` :- +/// - `Foo: Send` -- cycle, but ok +/// - `Baz: Send` +/// - `Bar: Send` -- would be nice for this to be a cache hit! +/// - `*const T: Send` -- but what if we later encounter an error? +/// +/// The *provisional evaluation cache* resolves this issue. It stores +/// cache results that we've proven but which were involved in a cycle +/// in some way. We track the minimal stack depth (i.e., the +/// farthest from the top of the stack) that we are dependent on. +/// The idea is that the cache results within are all valid -- so long as +/// none of the nodes in between the current node and the node at that minimum +/// depth result in an error (in which case the cached results are just thrown away). +/// +/// During evaluation, we consult this provisional cache and rely on +/// it. Accessing a cached value is considered equivalent to accessing +/// a result at `reached_depth`, so it marks the *current* solution as +/// provisional as well. If an error is encountered, we toss out any +/// provisional results added from the subtree that encountered the +/// error. When we pop the node at `reached_depth` from the stack, we +/// can commit all the things that remain in the provisional cache. +struct ProvisionalEvaluationCache<'tcx> { + /// next "depth first number" to issue -- just a counter + dfn: Cell, + + /// Map from cache key to the provisionally evaluated thing. + /// The cache entries contain the result but also the DFN in which they + /// were added. The DFN is used to clear out values on failure. + /// + /// Imagine we have a stack like: + /// + /// - `A B C` and we add a cache for the result of C (DFN 2) + /// - Then we have a stack `A B D` where `D` has DFN 3 + /// - We try to solve D by evaluating E: `A B D E` (DFN 4) + /// - `E` generates various cache entries which have cyclic dependencies on `B` + /// - `A B D E F` and so forth + /// - the DFN of `F` for example would be 5 + /// - then we determine that `E` is in error -- we will then clear + /// all cache values whose DFN is >= 4 -- in this case, that + /// means the cached value for `F`. + map: RefCell, ProvisionalEvaluation>>, + + /// The stack of args that we assume to be true because a `WF(arg)` predicate + /// is on the stack above (and because of wellformedness is coinductive). + /// In an "ideal" world, this would share a stack with trait predicates in + /// `TraitObligationStack`. However, trait predicates are *much* hotter than + /// `WellFormed` predicates, and it's very likely that the additional matches + /// will have a perf effect. The value here is the well-formed `GenericArg` + /// and the depth of the trait predicate *above* that well-formed predicate. + wf_args: RefCell, usize)>>, +} + +/// A cache value for the provisional cache: contains the depth-first +/// number (DFN) and result. 
+#[derive(Copy, Clone, Debug)] +struct ProvisionalEvaluation { + from_dfn: usize, + reached_depth: usize, + result: EvaluationResult, +} + +impl<'tcx> Default for ProvisionalEvaluationCache<'tcx> { + fn default() -> Self { + Self { dfn: Cell::new(0), map: Default::default(), wf_args: Default::default() } + } +} + +impl<'tcx> ProvisionalEvaluationCache<'tcx> { + /// Get the next DFN in sequence (basically a counter). + fn next_dfn(&self) -> usize { + let result = self.dfn.get(); + self.dfn.set(result + 1); + result + } + + /// Check the provisional cache for any result for + /// `fresh_trait_ref`. If there is a hit, then you must consider + /// it an access to the stack slots at depth + /// `reached_depth` (from the returned value). + fn get_provisional( + &self, + fresh_trait_pred: ty::PolyTraitPredicate<'tcx>, + ) -> Option { + debug!( + ?fresh_trait_pred, + "get_provisional = {:#?}", + self.map.borrow().get(&fresh_trait_pred), + ); + Some(*self.map.borrow().get(&fresh_trait_pred)?) + } + + /// Insert a provisional result into the cache. The result came + /// from the node with the given DFN. It accessed a minimum depth + /// of `reached_depth` to compute. It evaluated `fresh_trait_pred` + /// and resulted in `result`. + fn insert_provisional( + &self, + from_dfn: usize, + reached_depth: usize, + fresh_trait_pred: ty::PolyTraitPredicate<'tcx>, + result: EvaluationResult, + ) { + debug!(?from_dfn, ?fresh_trait_pred, ?result, "insert_provisional"); + + let mut map = self.map.borrow_mut(); + + // Subtle: when we complete working on the DFN `from_dfn`, anything + // that remains in the provisional cache must be dependent on some older + // stack entry than `from_dfn`. We have to update their depth with our transitive + // depth in that case or else it would be referring to some popped note. + // + // Example: + // A (reached depth 0) + // ... + // B // depth 1 -- reached depth = 0 + // C // depth 2 -- reached depth = 1 (should be 0) + // B + // A // depth 0 + // D (reached depth 1) + // C (cache -- reached depth = 2) + for (_k, v) in &mut *map { + if v.from_dfn >= from_dfn { + v.reached_depth = reached_depth.min(v.reached_depth); + } + } + + map.insert(fresh_trait_pred, ProvisionalEvaluation { from_dfn, reached_depth, result }); + } + + /// Invoked when the node with dfn `dfn` does not get a successful + /// result. This will clear out any provisional cache entries + /// that were added since `dfn` was created. This is because the + /// provisional entries are things which must assume that the + /// things on the stack at the time of their creation succeeded -- + /// since the failing node is presently at the top of the stack, + /// these provisional entries must either depend on it or some + /// ancestor of it. + fn on_failure(&self, dfn: usize) { + debug!(?dfn, "on_failure"); + self.map.borrow_mut().retain(|key, eval| { + if !eval.from_dfn >= dfn { + debug!("on_failure: removing {:?}", key); + false + } else { + true + } + }); + } + + /// Invoked when the node at depth `depth` completed without + /// depending on anything higher in the stack (if that completion + /// was a failure, then `on_failure` should have been invoked + /// already). + /// + /// Note that we may still have provisional cache items remaining + /// in the cache when this is done. For example, if there is a + /// cycle: + /// + /// * A depends on... + /// * B depends on A + /// * C depends on... + /// * D depends on C + /// * ... 
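The DFN bookkeeping described above can be compressed into a small standalone sketch: each evaluation gets a depth-first number, and when a node fails, every provisional entry recorded at or after that DFN is discarded. The `Cache` and `Provisional` types here are illustrative only and deliberately ignore `reached_depth`:

```rust
use std::collections::HashMap;

struct Provisional {
    from_dfn: usize,
    result: bool,
}

struct Cache {
    dfn_counter: usize,
    map: HashMap<&'static str, Provisional>,
}

impl Cache {
    fn new() -> Self {
        Cache { dfn_counter: 0, map: HashMap::new() }
    }

    /// Hand out the next depth-first number (just a counter).
    fn next_dfn(&mut self) -> usize {
        let dfn = self.dfn_counter;
        self.dfn_counter += 1;
        dfn
    }

    fn insert_provisional(&mut self, key: &'static str, from_dfn: usize, result: bool) {
        self.map.insert(key, Provisional { from_dfn, result });
    }

    /// Drop everything recorded since `dfn` started, mirroring the intent of
    /// `on_failure` above.
    fn on_failure(&mut self, dfn: usize) {
        self.map.retain(|_k, eval| eval.from_dfn < dfn);
    }
}

fn main() {
    let mut cache = Cache::new();
    let a = cache.next_dfn(); // DFN 0
    let b = cache.next_dfn(); // DFN 1
    cache.insert_provisional("Bar: Send", a, true);
    cache.insert_provisional("Baz: Send", b, true);
    cache.on_failure(b); // the node with DFN 1 failed
    assert!(cache.map.contains_key("Bar: Send"));
    assert!(!cache.map.contains_key("Baz: Send"));
}
```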
+ /// + /// Then as we complete the C node we will have a provisional cache + /// with results for A, B, C, and D. This method would clear out + /// the C and D results, but leave A and B provisional. + /// + /// This is determined based on the DFN: we remove any provisional + /// results created since `dfn` started (e.g., in our example, dfn + /// would be 2, representing the C node, and hence we would + /// remove the result for D, which has DFN 3, but not the results for + /// A and B, which have DFNs 0 and 1 respectively). + /// + /// Note that we *do not* attempt to cache these cycle participants + /// in the evaluation cache. Doing so would require carefully computing + /// the correct `DepNode` to store in the cache entry: + /// cycle participants may implicitly depend on query results + /// related to other participants in the cycle, due to our logic + /// which examines the evaluation stack. + /// + /// We used to try to perform this caching, + /// but it lead to multiple incremental compilation ICEs + /// (see #92987 and #96319), and was very hard to understand. + /// Fortunately, removing the caching didn't seem to + /// have a performance impact in practice. + fn on_completion(&self, dfn: usize) { + debug!(?dfn, "on_completion"); + + for (fresh_trait_pred, eval) in + self.map.borrow_mut().drain_filter(|_k, eval| eval.from_dfn >= dfn) + { + debug!(?fresh_trait_pred, ?eval, "on_completion"); + } + } +} + +#[derive(Copy, Clone)] +struct TraitObligationStackList<'o, 'tcx> { + cache: &'o ProvisionalEvaluationCache<'tcx>, + head: Option<&'o TraitObligationStack<'o, 'tcx>>, +} + +impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> { + fn empty(cache: &'o ProvisionalEvaluationCache<'tcx>) -> TraitObligationStackList<'o, 'tcx> { + TraitObligationStackList { cache, head: None } + } + + fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> { + TraitObligationStackList { cache: r.cache(), head: Some(r) } + } + + fn head(&self) -> Option<&'o TraitObligationStack<'o, 'tcx>> { + self.head + } + + fn depth(&self) -> usize { + if let Some(head) = self.head { head.depth } else { 0 } + } +} + +impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> { + type Item = &'o TraitObligationStack<'o, 'tcx>; + + fn next(&mut self) -> Option<&'o TraitObligationStack<'o, 'tcx>> { + let o = self.head?; + *self = o.previous; + Some(o) + } +} + +impl<'o, 'tcx> fmt::Debug for TraitObligationStack<'o, 'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "TraitObligationStack({:?})", self.obligation) + } +} + +pub enum ProjectionMatchesProjection { + Yes, + Ambiguous, + No, +} -- cgit v1.2.3