author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:11:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:13:23 +0000
commit     20431706a863f92cb37dc512fef6e48d192aaf2c (patch)
tree       2867f13f5fd5437ba628c67d7f87309ccadcd286 /compiler/rustc_hir_typeck
parent     Releasing progress-linux version 1.65.0+dfsg1-2~progress7.99u1. (diff)
Merging upstream version 1.66.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_hir_typeck')
-rw-r--r--  compiler/rustc_hir_typeck/Cargo.toml | 28
-rw-r--r--  compiler/rustc_hir_typeck/src/_match.rs | 560
-rw-r--r--  compiler/rustc_hir_typeck/src/autoderef.rs | 78
-rw-r--r--  compiler/rustc_hir_typeck/src/callee.rs | 831
-rw-r--r--  compiler/rustc_hir_typeck/src/cast.rs | 1105
-rw-r--r--  compiler/rustc_hir_typeck/src/check.rs | 324
-rw-r--r--  compiler/rustc_hir_typeck/src/closure.rs | 824
-rw-r--r--  compiler/rustc_hir_typeck/src/coercion.rs | 1950
-rw-r--r--  compiler/rustc_hir_typeck/src/demand.rs | 1454
-rw-r--r--  compiler/rustc_hir_typeck/src/diverges.rs | 78
-rw-r--r--  compiler/rustc_hir_typeck/src/errors.rs | 126
-rw-r--r--  compiler/rustc_hir_typeck/src/expectation.rs | 122
-rw-r--r--  compiler/rustc_hir_typeck/src/expr.rs | 2896
-rw-r--r--  compiler/rustc_hir_typeck/src/expr_use_visitor.rs | 908
-rw-r--r--  compiler/rustc_hir_typeck/src/fallback.rs | 398
-rw-r--r--  compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs | 1540
-rw-r--r--  compiler/rustc_hir_typeck/src/fn_ctxt/arg_matrix.rs | 383
-rw-r--r--  compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs | 2236
-rw-r--r--  compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs | 312
-rw-r--r--  compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs | 1250
-rw-r--r--  compiler/rustc_hir_typeck/src/gather_locals.rs | 161
-rw-r--r--  compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs | 563
-rw-r--r--  compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_propagate.rs | 92
-rw-r--r--  compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_visualize.rs | 91
-rw-r--r--  compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs | 309
-rw-r--r--  compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs | 241
-rw-r--r--  compiler/rustc_hir_typeck/src/generator_interior/mod.rs | 647
-rw-r--r--  compiler/rustc_hir_typeck/src/inherited.rs | 213
-rw-r--r--  compiler/rustc_hir_typeck/src/intrinsicck.rs | 108
-rw-r--r--  compiler/rustc_hir_typeck/src/lib.rs | 507
-rw-r--r--  compiler/rustc_hir_typeck/src/mem_categorization.rs | 786
-rw-r--r--  compiler/rustc_hir_typeck/src/method/confirm.rs | 594
-rw-r--r--  compiler/rustc_hir_typeck/src/method/mod.rs | 625
-rw-r--r--  compiler/rustc_hir_typeck/src/method/prelude2021.rs | 415
-rw-r--r--  compiler/rustc_hir_typeck/src/method/probe.rs | 1926
-rw-r--r--  compiler/rustc_hir_typeck/src/method/suggest.rs | 2605
-rw-r--r--  compiler/rustc_hir_typeck/src/op.rs | 994
-rw-r--r--  compiler/rustc_hir_typeck/src/pat.rs | 2185
-rw-r--r--  compiler/rustc_hir_typeck/src/place_op.rs | 451
-rw-r--r--  compiler/rustc_hir_typeck/src/rvalue_scopes.rs | 83
-rw-r--r--  compiler/rustc_hir_typeck/src/upvar.rs | 2274
-rw-r--r--  compiler/rustc_hir_typeck/src/writeback.rs | 807
42 files changed, 34080 insertions, 0 deletions
diff --git a/compiler/rustc_hir_typeck/Cargo.toml b/compiler/rustc_hir_typeck/Cargo.toml
new file mode 100644
index 000000000..093f9bb84
--- /dev/null
+++ b/compiler/rustc_hir_typeck/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+name = "rustc_hir_typeck"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+tracing = "0.1"
+rustc_ast = { path = "../rustc_ast" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_graphviz = { path = "../rustc_graphviz" }
+rustc_index = { path = "../rustc_index" }
+rustc_infer = { path = "../rustc_infer" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_hir_analysis = { path = "../rustc_hir_analysis" }
+rustc_hir_pretty = { path = "../rustc_hir_pretty" }
+rustc_lint = { path = "../rustc_lint" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
+rustc_type_ir = { path = "../rustc_type_ir" }
diff --git a/compiler/rustc_hir_typeck/src/_match.rs b/compiler/rustc_hir_typeck/src/_match.rs
new file mode 100644
index 000000000..2b15d4dcd
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/_match.rs
@@ -0,0 +1,560 @@
+use crate::coercion::{AsCoercionSite, CoerceMany};
+use crate::{Diverges, Expectation, FnCtxt, Needs};
+use rustc_errors::{Applicability, MultiSpan};
+use rustc_hir::{self as hir, ExprKind};
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::traits::Obligation;
+use rustc_middle::ty::{self, ToPredicate, Ty};
+use rustc_span::Span;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
+use rustc_trait_selection::traits::{
+ IfExpressionCause, MatchExpressionArmCause, ObligationCause, ObligationCauseCode,
+};
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ #[instrument(skip(self), level = "debug", ret)]
+ pub fn check_match(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ scrut: &'tcx hir::Expr<'tcx>,
+ arms: &'tcx [hir::Arm<'tcx>],
+ orig_expected: Expectation<'tcx>,
+ match_src: hir::MatchSource,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+
+ let acrb = arms_contain_ref_bindings(arms);
+ let scrutinee_ty = self.demand_scrutinee_type(scrut, acrb, arms.is_empty());
+ debug!(?scrutinee_ty);
+
+ // If there are no arms, that is a diverging match; a special case.
+ if arms.is_empty() {
+ self.diverges.set(self.diverges.get() | Diverges::always(expr.span));
+ return tcx.types.never;
+ }
+
+ self.warn_arms_when_scrutinee_diverges(arms);
+
+ // Otherwise, we have to union together the types that the arms produce and so forth.
+ let scrut_diverges = self.diverges.replace(Diverges::Maybe);
+
+ // #55810: Type check patterns first so we get types for all bindings.
+ let scrut_span = scrut.span.find_ancestor_inside(expr.span).unwrap_or(scrut.span);
+ for arm in arms {
+ self.check_pat_top(&arm.pat, scrutinee_ty, Some(scrut_span), true);
+ }
+
+ // Now typecheck the blocks.
+ //
+ // The result of the match is the common supertype of all the
+ // arms. Start out the value as bottom, since it's the, well,
+        // bottom of the type lattice, and we'll be moving up the lattice as
+ // we process each arm. (Note that any match with 0 arms is matching
+ // on any empty type and is therefore unreachable; should the flow
+ // of execution reach it, we will panic, so bottom is an appropriate
+ // type in that case)
+ let mut all_arms_diverge = Diverges::WarnedAlways;
+
+ let expected = orig_expected.adjust_for_branches(self);
+ debug!(?expected);
+
+ let mut coercion = {
+ let coerce_first = match expected {
+ // We don't coerce to `()` so that if the match expression is a
+                // statement its branches can have any consistent type. That allows
+ // us to give better error messages (pointing to a usually better
+ // arm for inconsistent arms or to the whole match when a `()` type
+ // is required).
+ Expectation::ExpectHasType(ety) if ety != self.tcx.mk_unit() => ety,
+ _ => self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: expr.span,
+ }),
+ };
+ CoerceMany::with_coercion_sites(coerce_first, arms)
+ };
+
+ let mut other_arms = vec![]; // Used only for diagnostics.
+ let mut prior_arm = None;
+ for arm in arms {
+ if let Some(g) = &arm.guard {
+ self.diverges.set(Diverges::Maybe);
+ match g {
+ hir::Guard::If(e) => {
+ self.check_expr_has_type_or_error(e, tcx.types.bool, |_| {});
+ }
+ hir::Guard::IfLet(l) => {
+ self.check_expr_let(l);
+ }
+ };
+ }
+
+ self.diverges.set(Diverges::Maybe);
+
+ let arm_ty = self.check_expr_with_expectation(&arm.body, expected);
+ all_arms_diverge &= self.diverges.get();
+
+ let opt_suggest_box_span = prior_arm.and_then(|(_, prior_arm_ty, _)| {
+ self.opt_suggest_box_span(prior_arm_ty, arm_ty, orig_expected)
+ });
+
+ let (arm_block_id, arm_span) = if let hir::ExprKind::Block(blk, _) = arm.body.kind {
+ (Some(blk.hir_id), self.find_block_span(blk))
+ } else {
+ (None, arm.body.span)
+ };
+
+ let (span, code) = match prior_arm {
+ // The reason for the first arm to fail is not that the match arms diverge,
+ // but rather that there's a prior obligation that doesn't hold.
+ None => (arm_span, ObligationCauseCode::BlockTailExpression(arm.body.hir_id)),
+ Some((prior_arm_block_id, prior_arm_ty, prior_arm_span)) => (
+ expr.span,
+ ObligationCauseCode::MatchExpressionArm(Box::new(MatchExpressionArmCause {
+ arm_block_id,
+ arm_span,
+ arm_ty,
+ prior_arm_block_id,
+ prior_arm_ty,
+ prior_arm_span,
+ scrut_span: scrut.span,
+ source: match_src,
+ prior_arms: other_arms.clone(),
+ scrut_hir_id: scrut.hir_id,
+ opt_suggest_box_span,
+ })),
+ ),
+ };
+ let cause = self.cause(span, code);
+
+ // This is the moral equivalent of `coercion.coerce(self, cause, arm.body, arm_ty)`.
+ // We use it this way to be able to expand on the potential error and detect when a
+ // `match` tail statement could be a tail expression instead. If so, we suggest
+ // removing the stray semicolon.
+ coercion.coerce_inner(
+ self,
+ &cause,
+ Some(&arm.body),
+ arm_ty,
+ Some(&mut |err| {
+ let Some(ret) = self
+ .tcx
+ .hir()
+ .find_by_def_id(self.body_id.owner.def_id)
+ .and_then(|owner| owner.fn_decl())
+ .map(|decl| decl.output.span())
+ else { return; };
+ let Expectation::IsLast(stmt) = orig_expected else {
+ return
+ };
+ let can_coerce_to_return_ty = match self.ret_coercion.as_ref() {
+ Some(ret_coercion) if self.in_tail_expr => {
+ let ret_ty = ret_coercion.borrow().expected_ty();
+ let ret_ty = self.inh.infcx.shallow_resolve(ret_ty);
+ self.can_coerce(arm_ty, ret_ty)
+ && prior_arm.map_or(true, |(_, t, _)| self.can_coerce(t, ret_ty))
+ // The match arms need to unify for the case of `impl Trait`.
+ && !matches!(ret_ty.kind(), ty::Opaque(..))
+ }
+ _ => false,
+ };
+ if !can_coerce_to_return_ty {
+ return;
+ }
+
+ let semi_span = expr.span.shrink_to_hi().with_hi(stmt.hi());
+ let mut ret_span: MultiSpan = semi_span.into();
+ ret_span.push_span_label(
+ expr.span,
+ "this could be implicitly returned but it is a statement, not a \
+ tail expression",
+ );
+ ret_span
+ .push_span_label(ret, "the `match` arms can conform to this return type");
+ ret_span.push_span_label(
+ semi_span,
+ "the `match` is a statement because of this semicolon, consider \
+ removing it",
+ );
+ err.span_note(
+ ret_span,
+ "you might have meant to return the `match` expression",
+ );
+ err.tool_only_span_suggestion(
+ semi_span,
+ "remove this semicolon",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }),
+ false,
+ );
+
+ other_arms.push(arm_span);
+ if other_arms.len() > 5 {
+ other_arms.remove(0);
+ }
+
+ prior_arm = Some((arm_block_id, arm_ty, arm_span));
+ }
+
+ // If all of the arms in the `match` diverge,
+ // and we're dealing with an actual `match` block
+        // (as opposed to a `match` desugared from something else),
+ // we can emit a better note. Rather than pointing
+ // at a diverging expression in an arbitrary arm,
+ // we can point at the entire `match` expression
+ if let (Diverges::Always { .. }, hir::MatchSource::Normal) = (all_arms_diverge, match_src) {
+ all_arms_diverge = Diverges::Always {
+ span: expr.span,
+ custom_note: Some(
+ "any code following this `match` expression is unreachable, as all arms diverge",
+ ),
+ };
+ }
+
+ // We won't diverge unless the scrutinee or all arms diverge.
+ self.diverges.set(scrut_diverges | all_arms_diverge);
+
+ coercion.complete(self)
+ }
+
+ /// When the previously checked expression (the scrutinee) diverges,
+ /// warn the user about the match arms being unreachable.
+ fn warn_arms_when_scrutinee_diverges(&self, arms: &'tcx [hir::Arm<'tcx>]) {
+ for arm in arms {
+ self.warn_if_unreachable(arm.body.hir_id, arm.body.span, "arm");
+ }
+ }
+
+ /// Handle the fallback arm of a desugared if(-let) like a missing else.
+ ///
+ /// Returns `true` if there was an error forcing the coercion to the `()` type.
+ pub(super) fn if_fallback_coercion<T>(
+ &self,
+ span: Span,
+ then_expr: &'tcx hir::Expr<'tcx>,
+ coercion: &mut CoerceMany<'tcx, '_, T>,
+ ) -> bool
+ where
+ T: AsCoercionSite,
+ {
+ // If this `if` expr is the parent's function return expr,
+ // the cause of the type coercion is the return type, point at it. (#25228)
+ let ret_reason = self.maybe_get_coercion_reason(then_expr.hir_id, span);
+ let cause = self.cause(span, ObligationCauseCode::IfExpressionWithNoElse);
+ let mut error = false;
+ coercion.coerce_forced_unit(
+ self,
+ &cause,
+ &mut |err| {
+ if let Some((span, msg)) = &ret_reason {
+ err.span_label(*span, msg);
+ } else if let ExprKind::Block(block, _) = &then_expr.kind
+ && let Some(expr) = &block.expr
+ {
+ err.span_label(expr.span, "found here");
+ }
+ err.note("`if` expressions without `else` evaluate to `()`");
+ err.help("consider adding an `else` block that evaluates to the expected type");
+ error = true;
+ },
+ false,
+ );
+ error
+ }
+
+ fn maybe_get_coercion_reason(&self, hir_id: hir::HirId, sp: Span) -> Option<(Span, String)> {
+ let node = {
+ let rslt = self.tcx.hir().get_parent_node(self.tcx.hir().get_parent_node(hir_id));
+ self.tcx.hir().get(rslt)
+ };
+ if let hir::Node::Block(block) = node {
+ // check that the body's parent is an fn
+ let parent = self
+ .tcx
+ .hir()
+ .get(self.tcx.hir().get_parent_node(self.tcx.hir().get_parent_node(block.hir_id)));
+ if let (Some(expr), hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(..), .. })) =
+ (&block.expr, parent)
+ {
+ // check that the `if` expr without `else` is the fn body's expr
+ if expr.span == sp {
+ return self.get_fn_decl(hir_id).and_then(|(fn_decl, _)| {
+ let span = fn_decl.output.span();
+ let snippet = self.tcx.sess.source_map().span_to_snippet(span).ok()?;
+ Some((span, format!("expected `{snippet}` because of this return type")))
+ });
+ }
+ }
+ }
+ if let hir::Node::Local(hir::Local { ty: Some(_), pat, .. }) = node {
+ return Some((pat.span, "expected because of this assignment".to_string()));
+ }
+ None
+ }
+
+ pub(crate) fn if_cause(
+ &self,
+ span: Span,
+ cond_span: Span,
+ then_expr: &'tcx hir::Expr<'tcx>,
+ else_expr: &'tcx hir::Expr<'tcx>,
+ then_ty: Ty<'tcx>,
+ else_ty: Ty<'tcx>,
+ opt_suggest_box_span: Option<Span>,
+ ) -> ObligationCause<'tcx> {
+ let mut outer_span = if self.tcx.sess.source_map().is_multiline(span) {
+ // The `if`/`else` isn't in one line in the output, include some context to make it
+ // clear it is an if/else expression:
+ // ```
+ // LL | let x = if true {
+ // | _____________-
+ // LL || 10i32
+ // || ----- expected because of this
+ // LL || } else {
+ // LL || 10u32
+ // || ^^^^^ expected `i32`, found `u32`
+ // LL || };
+ // ||_____- `if` and `else` have incompatible types
+ // ```
+ Some(span)
+ } else {
+ // The entire expression is in one line, only point at the arms
+ // ```
+ // LL | let x = if true { 10i32 } else { 10u32 };
+ // | ----- ^^^^^ expected `i32`, found `u32`
+ // | |
+ // | expected because of this
+ // ```
+ None
+ };
+
+ let (error_sp, else_id) = if let ExprKind::Block(block, _) = &else_expr.kind {
+ let block = block.innermost_block();
+
+ // Avoid overlapping spans that aren't as readable:
+ // ```
+ // 2 | let x = if true {
+ // | _____________-
+ // 3 | | 3
+ // | | - expected because of this
+ // 4 | | } else {
+ // | |____________^
+ // 5 | ||
+ // 6 | || };
+ // | || ^
+ // | ||_____|
+ // | |______if and else have incompatible types
+ // | expected integer, found `()`
+ // ```
+ // by not pointing at the entire expression:
+ // ```
+ // 2 | let x = if true {
+ // | ------- `if` and `else` have incompatible types
+ // 3 | 3
+ // | - expected because of this
+ // 4 | } else {
+ // | ____________^
+ // 5 | |
+ // 6 | | };
+ // | |_____^ expected integer, found `()`
+ // ```
+ if block.expr.is_none() && block.stmts.is_empty()
+ && let Some(outer_span) = &mut outer_span
+ && let Some(cond_span) = cond_span.find_ancestor_inside(*outer_span)
+ {
+ *outer_span = outer_span.with_hi(cond_span.hi())
+ }
+
+ (self.find_block_span(block), block.hir_id)
+ } else {
+ (else_expr.span, else_expr.hir_id)
+ };
+
+ let then_id = if let ExprKind::Block(block, _) = &then_expr.kind {
+ let block = block.innermost_block();
+ // Exclude overlapping spans
+ if block.expr.is_none() && block.stmts.is_empty() {
+ outer_span = None;
+ }
+ block.hir_id
+ } else {
+ then_expr.hir_id
+ };
+
+ // Finally construct the cause:
+ self.cause(
+ error_sp,
+ ObligationCauseCode::IfExpression(Box::new(IfExpressionCause {
+ else_id,
+ then_id,
+ then_ty,
+ else_ty,
+ outer_span,
+ opt_suggest_box_span,
+ })),
+ )
+ }
+
+ pub(super) fn demand_scrutinee_type(
+ &self,
+ scrut: &'tcx hir::Expr<'tcx>,
+ contains_ref_bindings: Option<hir::Mutability>,
+ no_arms: bool,
+ ) -> Ty<'tcx> {
+ // Not entirely obvious: if matches may create ref bindings, we want to
+ // use the *precise* type of the scrutinee, *not* some supertype, as
+ // the "scrutinee type" (issue #23116).
+ //
+ // arielb1 [writes here in this comment thread][c] that there
+ // is certainly *some* potential danger, e.g., for an example
+ // like:
+ //
+ // [c]: https://github.com/rust-lang/rust/pull/43399#discussion_r130223956
+ //
+ // ```
+ // let Foo(x) = f()[0];
+ // ```
+ //
+ // Then if the pattern matches by reference, we want to match
+ // `f()[0]` as a lexpr, so we can't allow it to be
+ // coerced. But if the pattern matches by value, `f()[0]` is
+ // still syntactically a lexpr, but we *do* want to allow
+ // coercions.
+ //
+ // However, *likely* we are ok with allowing coercions to
+ // happen if there are no explicit ref mut patterns - all
+ // implicit ref mut patterns must occur behind a reference, so
+ // they will have the "correct" variance and lifetime.
+ //
+ // This does mean that the following pattern would be legal:
+ //
+ // ```
+ // struct Foo(Bar);
+ // struct Bar(u32);
+ // impl Deref for Foo {
+ // type Target = Bar;
+ // fn deref(&self) -> &Bar { &self.0 }
+ // }
+ // impl DerefMut for Foo {
+ // fn deref_mut(&mut self) -> &mut Bar { &mut self.0 }
+ // }
+ // fn foo(x: &mut Foo) {
+ // {
+ // let Bar(z): &mut Bar = x;
+ // *z = 42;
+ // }
+        //     assert_eq!(x.0.0, 42);
+ // }
+ // ```
+ //
+ // FIXME(tschottdorf): don't call contains_explicit_ref_binding, which
+ // is problematic as the HIR is being scraped, but ref bindings may be
+ // implicit after #42640. We need to make sure that pat_adjustments
+ // (once introduced) is populated by the time we get here.
+ //
+ // See #44848.
+ if let Some(m) = contains_ref_bindings {
+ self.check_expr_with_needs(scrut, Needs::maybe_mut_place(m))
+ } else if no_arms {
+ self.check_expr(scrut)
+ } else {
+ // ...but otherwise we want to use any supertype of the
+ // scrutinee. This is sort of a workaround, see note (*) in
+ // `check_pat` for some details.
+ let scrut_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: scrut.span,
+ });
+ self.check_expr_has_type_or_error(scrut, scrut_ty, |_| {});
+ scrut_ty
+ }
+ }
+
+ /// When we have a `match` as a tail expression in a `fn` with a returned `impl Trait`
+ /// we check if the different arms would work with boxed trait objects instead and
+ /// provide a structured suggestion in that case.
+ pub(crate) fn opt_suggest_box_span(
+ &self,
+ first_ty: Ty<'tcx>,
+ second_ty: Ty<'tcx>,
+ orig_expected: Expectation<'tcx>,
+ ) -> Option<Span> {
+ // FIXME(compiler-errors): This really shouldn't need to be done during the
+ // "good" path of typeck, but here we are.
+ match orig_expected {
+ Expectation::ExpectHasType(expected) => {
+ let TypeVariableOrigin {
+ span,
+ kind: TypeVariableOriginKind::OpaqueTypeInference(rpit_def_id),
+ ..
+ } = self.type_var_origin(expected)? else { return None; };
+
+ let sig = *self
+ .typeck_results
+ .borrow()
+ .liberated_fn_sigs()
+ .get(hir::HirId::make_owner(self.body_id.owner.def_id))?;
+
+ let substs = sig.output().walk().find_map(|arg| {
+ if let ty::GenericArgKind::Type(ty) = arg.unpack()
+ && let ty::Opaque(def_id, substs) = *ty.kind()
+ && def_id == rpit_def_id
+ {
+ Some(substs)
+ } else {
+ None
+ }
+ })?;
+ let opaque_ty = self.tcx.mk_opaque(rpit_def_id, substs);
+
+ if !self.can_coerce(first_ty, expected) || !self.can_coerce(second_ty, expected) {
+ return None;
+ }
+
+ for ty in [first_ty, second_ty] {
+ for (pred, _) in self
+ .tcx
+ .bound_explicit_item_bounds(rpit_def_id)
+ .subst_iter_copied(self.tcx, substs)
+ {
+ let pred = match pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(mut trait_pred) => {
+ assert_eq!(trait_pred.trait_ref.self_ty(), opaque_ty);
+ trait_pred.trait_ref.substs =
+ self.tcx.mk_substs_trait(ty, &trait_pred.trait_ref.substs[1..]);
+ pred.kind().rebind(trait_pred).to_predicate(self.tcx)
+ }
+ ty::PredicateKind::Projection(mut proj_pred) => {
+ assert_eq!(proj_pred.projection_ty.self_ty(), opaque_ty);
+ proj_pred.projection_ty.substs = self
+ .tcx
+ .mk_substs_trait(ty, &proj_pred.projection_ty.substs[1..]);
+ pred.kind().rebind(proj_pred).to_predicate(self.tcx)
+ }
+ _ => continue,
+ };
+ if !self.predicate_must_hold_modulo_regions(&Obligation::new(
+ ObligationCause::misc(span, self.body_id),
+ self.param_env,
+ pred,
+ )) {
+ return None;
+ }
+ }
+ }
+
+ Some(span)
+ }
+ _ => None,
+ }
+ }
+}
+
+fn arms_contain_ref_bindings<'tcx>(arms: &'tcx [hir::Arm<'tcx>]) -> Option<hir::Mutability> {
+ arms.iter().filter_map(|a| a.pat.contains_explicit_ref_binding()).max_by_key(|m| match *m {
+ hir::Mutability::Mut => 1,
+ hir::Mutability::Not => 0,
+ })
+}
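The `opt_suggest_box_span` logic above drives the diagnostic for a `match` used as the tail of a function returning `impl Trait` when the arms produce different concrete types. A minimal user-level sketch of that scenario and of the boxed form the suggestion points to (illustrative only; `Celsius` and `Fahrenheit` are made-up types, not taken from this commit):

use std::fmt::Display;

struct Celsius(f64);
struct Fahrenheit(f64);

impl Display for Celsius {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}°C", self.0)
    }
}

impl Display for Fahrenheit {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}°F", self.0)
    }
}

// With a `-> impl Display` return type, the two arms would have incompatible
// concrete types; the structured suggestion is to box each arm instead.
// This is the accepted, boxed version:
fn describe(metric: bool, v: f64) -> Box<dyn Display> {
    match metric {
        true => Box::new(Celsius(v)),
        false => Box::new(Fahrenheit(v)),
    }
}

fn main() {
    println!("{}", describe(true, 21.5));
}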
diff --git a/compiler/rustc_hir_typeck/src/autoderef.rs b/compiler/rustc_hir_typeck/src/autoderef.rs
new file mode 100644
index 000000000..59c366ad7
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/autoderef.rs
@@ -0,0 +1,78 @@
+//! Some helper functions for `AutoDeref`
+use super::method::MethodCallee;
+use super::{FnCtxt, PlaceOp};
+
+use rustc_infer::infer::InferOk;
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, OverloadedDeref};
+use rustc_middle::ty::{self, Ty};
+use rustc_span::Span;
+use rustc_trait_selection::autoderef::{Autoderef, AutoderefKind};
+
+use std::iter;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn autoderef(&'a self, span: Span, base_ty: Ty<'tcx>) -> Autoderef<'a, 'tcx> {
+ Autoderef::new(self, self.param_env, self.body_id, span, base_ty, span)
+ }
+
+ /// Like `autoderef`, but provides a custom `Span` to use for calls to
+ /// an overloaded `Deref` operator
+ pub fn autoderef_overloaded_span(
+ &'a self,
+ span: Span,
+ base_ty: Ty<'tcx>,
+ overloaded_span: Span,
+ ) -> Autoderef<'a, 'tcx> {
+ Autoderef::new(self, self.param_env, self.body_id, span, base_ty, overloaded_span)
+ }
+
+ pub fn try_overloaded_deref(
+ &self,
+ span: Span,
+ base_ty: Ty<'tcx>,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ self.try_overloaded_place_op(span, base_ty, &[], PlaceOp::Deref)
+ }
+
+ /// Returns the adjustment steps.
+ pub fn adjust_steps(&self, autoderef: &Autoderef<'a, 'tcx>) -> Vec<Adjustment<'tcx>> {
+ self.register_infer_ok_obligations(self.adjust_steps_as_infer_ok(autoderef))
+ }
+
+ pub fn adjust_steps_as_infer_ok(
+ &self,
+ autoderef: &Autoderef<'a, 'tcx>,
+ ) -> InferOk<'tcx, Vec<Adjustment<'tcx>>> {
+ let mut obligations = vec![];
+ let steps = autoderef.steps();
+ let targets =
+ steps.iter().skip(1).map(|&(ty, _)| ty).chain(iter::once(autoderef.final_ty(false)));
+ let steps: Vec<_> = steps
+ .iter()
+ .map(|&(source, kind)| {
+ if let AutoderefKind::Overloaded = kind {
+ self.try_overloaded_deref(autoderef.span(), source).and_then(
+ |InferOk { value: method, obligations: o }| {
+ obligations.extend(o);
+ if let ty::Ref(region, _, mutbl) = *method.sig.output().kind() {
+ Some(OverloadedDeref {
+ region,
+ mutbl,
+ span: autoderef.overloaded_span(),
+ })
+ } else {
+ None
+ }
+ },
+ )
+ } else {
+ None
+ }
+ })
+ .zip(targets)
+ .map(|(autoderef, target)| Adjustment { kind: Adjust::Deref(autoderef), target })
+ .collect();
+
+ InferOk { obligations, value: steps }
+ }
+}
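For orientation, the adjustments built by `adjust_steps_as_infer_ok` correspond to the deref chain that method resolution walks on a receiver. A rough sketch with a made-up smart pointer (`Wrapper` is hypothetical, not part of this commit):

use std::ops::Deref;

// A made-up wrapper whose only deref target is `String`.
struct Wrapper(String);

impl Deref for Wrapper {
    type Target = String;
    fn deref(&self) -> &String {
        &self.0
    }
}

fn main() {
    let w = Wrapper(String::from("hello"));
    // `chars` is defined on `str`, so method resolution derefs
    // Wrapper -> String -> str; each overloaded `Deref` step is recorded,
    // roughly as in `adjust_steps_as_infer_ok` above, as an
    // `Adjustment { kind: Adjust::Deref(Some(OverloadedDeref { .. })), .. }`.
    assert_eq!(w.chars().count(), 5);
}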
diff --git a/compiler/rustc_hir_typeck/src/callee.rs b/compiler/rustc_hir_typeck/src/callee.rs
new file mode 100644
index 000000000..1b33f2f02
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/callee.rs
@@ -0,0 +1,831 @@
+use super::method::probe::{IsSuggestion, Mode, ProbeScope};
+use super::method::MethodCallee;
+use super::{Expectation, FnCtxt, TupleArgumentsFlag};
+
+use crate::type_error_struct;
+use rustc_ast::util::parser::PREC_POSTFIX;
+use rustc_errors::{struct_span_err, Applicability, Diagnostic, StashKey};
+use rustc_hir as hir;
+use rustc_hir::def::{self, Namespace, Res};
+use rustc_hir::def_id::DefId;
+use rustc_infer::{
+ infer,
+ traits::{self, Obligation},
+};
+use rustc_infer::{
+ infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind},
+ traits::ObligationCause,
+};
+use rustc_middle::ty::adjustment::{
+ Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability,
+};
+use rustc_middle::ty::SubstsRef;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitable};
+use rustc_span::def_id::LocalDefId;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+use rustc_target::spec::abi;
+use rustc_trait_selection::autoderef::Autoderef;
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::traits::error_reporting::DefIdOrName;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
+
+use std::iter;
+
+/// Checks that it is legal to call methods of the trait corresponding
+/// to `trait_id` (this only cares about the trait, not the specific
+/// method that is called).
+pub fn check_legal_trait_for_method_call(
+ tcx: TyCtxt<'_>,
+ span: Span,
+ receiver: Option<Span>,
+ expr_span: Span,
+ trait_id: DefId,
+) {
+ if tcx.lang_items().drop_trait() == Some(trait_id) {
+ let mut err = struct_span_err!(tcx.sess, span, E0040, "explicit use of destructor method");
+ err.span_label(span, "explicit destructor calls not allowed");
+
+ let (sp, suggestion) = receiver
+ .and_then(|s| tcx.sess.source_map().span_to_snippet(s).ok())
+ .filter(|snippet| !snippet.is_empty())
+ .map(|snippet| (expr_span, format!("drop({snippet})")))
+ .unwrap_or_else(|| (span, "drop".to_string()));
+
+ err.span_suggestion(
+ sp,
+ "consider using `drop` function",
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+
+ err.emit();
+ }
+}
+
+#[derive(Debug)]
+enum CallStep<'tcx> {
+ Builtin(Ty<'tcx>),
+ DeferredClosure(LocalDefId, ty::FnSig<'tcx>),
+ /// E.g., enum variant constructors.
+ Overloaded(MethodCallee<'tcx>),
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn check_call(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let original_callee_ty = match &callee_expr.kind {
+ hir::ExprKind::Path(hir::QPath::Resolved(..) | hir::QPath::TypeRelative(..)) => self
+ .check_expr_with_expectation_and_args(
+ callee_expr,
+ Expectation::NoExpectation,
+ arg_exprs,
+ ),
+ _ => self.check_expr(callee_expr),
+ };
+
+ let expr_ty = self.structurally_resolved_type(call_expr.span, original_callee_ty);
+
+ let mut autoderef = self.autoderef(callee_expr.span, expr_ty);
+ let mut result = None;
+ while result.is_none() && autoderef.next().is_some() {
+ result = self.try_overloaded_call_step(call_expr, callee_expr, arg_exprs, &autoderef);
+ }
+ self.register_predicates(autoderef.into_obligations());
+
+ let output = match result {
+ None => {
+ // this will report an error since original_callee_ty is not a fn
+ self.confirm_builtin_call(
+ call_expr,
+ callee_expr,
+ original_callee_ty,
+ arg_exprs,
+ expected,
+ )
+ }
+
+ Some(CallStep::Builtin(callee_ty)) => {
+ self.confirm_builtin_call(call_expr, callee_expr, callee_ty, arg_exprs, expected)
+ }
+
+ Some(CallStep::DeferredClosure(def_id, fn_sig)) => {
+ self.confirm_deferred_closure_call(call_expr, arg_exprs, expected, def_id, fn_sig)
+ }
+
+ Some(CallStep::Overloaded(method_callee)) => {
+ self.confirm_overloaded_call(call_expr, arg_exprs, expected, method_callee)
+ }
+ };
+
+ // we must check that return type of called functions is WF:
+ self.register_wf_obligation(output.into(), call_expr.span, traits::WellFormed(None));
+
+ output
+ }
+
+ fn try_overloaded_call_step(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ autoderef: &Autoderef<'a, 'tcx>,
+ ) -> Option<CallStep<'tcx>> {
+ let adjusted_ty =
+ self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false));
+ debug!(
+ "try_overloaded_call_step(call_expr={:?}, adjusted_ty={:?})",
+ call_expr, adjusted_ty
+ );
+
+ // If the callee is a bare function or a closure, then we're all set.
+ match *adjusted_ty.kind() {
+ ty::FnDef(..) | ty::FnPtr(_) => {
+ let adjustments = self.adjust_steps(autoderef);
+ self.apply_adjustments(callee_expr, adjustments);
+ return Some(CallStep::Builtin(adjusted_ty));
+ }
+
+ ty::Closure(def_id, substs) => {
+ let def_id = def_id.expect_local();
+
+ // Check whether this is a call to a closure where we
+ // haven't yet decided on whether the closure is fn vs
+ // fnmut vs fnonce. If so, we have to defer further processing.
+ if self.closure_kind(substs).is_none() {
+ let closure_sig = substs.as_closure().sig();
+ let closure_sig = self.replace_bound_vars_with_fresh_vars(
+ call_expr.span,
+ infer::FnCall,
+ closure_sig,
+ );
+ let adjustments = self.adjust_steps(autoderef);
+ self.record_deferred_call_resolution(
+ def_id,
+ DeferredCallResolution {
+ call_expr,
+ callee_expr,
+ adjusted_ty,
+ adjustments,
+ fn_sig: closure_sig,
+ closure_substs: substs,
+ },
+ );
+ return Some(CallStep::DeferredClosure(def_id, closure_sig));
+ }
+ }
+
+ // Hack: we know that there are traits implementing Fn for &F
+ // where F:Fn and so forth. In the particular case of types
+ // like `x: &mut FnMut()`, if there is a call `x()`, we would
+ // normally translate to `FnMut::call_mut(&mut x, ())`, but
+ // that winds up requiring `mut x: &mut FnMut()`. A little
+ // over the top. The simplest fix by far is to just ignore
+ // this case and deref again, so we wind up with
+ // `FnMut::call_mut(&mut *x, ())`.
+ ty::Ref(..) if autoderef.step_count() == 0 => {
+ return None;
+ }
+
+ ty::Error(_) => {
+ return None;
+ }
+
+ _ => {}
+ }
+
+ // Now, we look for the implementation of a Fn trait on the object's type.
+ // We first do it with the explicit instruction to look for an impl of
+ // `Fn<Tuple>`, with the tuple `Tuple` having an arity corresponding
+ // to the number of call parameters.
+ // If that fails (or_else branch), we try again without specifying the
+        // shape of the tuple (hence the None). This allows us to detect that an
+        // Fn trait is implemented, and to use this information for diagnostics.
+ self.try_overloaded_call_traits(call_expr, adjusted_ty, Some(arg_exprs))
+ .or_else(|| self.try_overloaded_call_traits(call_expr, adjusted_ty, None))
+ .map(|(autoref, method)| {
+ let mut adjustments = self.adjust_steps(autoderef);
+ adjustments.extend(autoref);
+ self.apply_adjustments(callee_expr, adjustments);
+ CallStep::Overloaded(method)
+ })
+ }
+
+ fn try_overloaded_call_traits(
+ &self,
+ call_expr: &hir::Expr<'_>,
+ adjusted_ty: Ty<'tcx>,
+ opt_arg_exprs: Option<&'tcx [hir::Expr<'tcx>]>,
+ ) -> Option<(Option<Adjustment<'tcx>>, MethodCallee<'tcx>)> {
+ // Try the options that are least restrictive on the caller first.
+ for (opt_trait_def_id, method_name, borrow) in [
+ (self.tcx.lang_items().fn_trait(), Ident::with_dummy_span(sym::call), true),
+ (self.tcx.lang_items().fn_mut_trait(), Ident::with_dummy_span(sym::call_mut), true),
+ (self.tcx.lang_items().fn_once_trait(), Ident::with_dummy_span(sym::call_once), false),
+ ] {
+ let Some(trait_def_id) = opt_trait_def_id else { continue };
+
+ let opt_input_types = opt_arg_exprs.map(|arg_exprs| {
+ [self.tcx.mk_tup(arg_exprs.iter().map(|e| {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: e.span,
+ })
+ }))]
+ });
+ let opt_input_types = opt_input_types.as_ref().map(AsRef::as_ref);
+
+ if let Some(ok) = self.lookup_method_in_trait(
+ call_expr.span,
+ method_name,
+ trait_def_id,
+ adjusted_ty,
+ opt_input_types,
+ ) {
+ let method = self.register_infer_ok_obligations(ok);
+ let mut autoref = None;
+ if borrow {
+ // Check for &self vs &mut self in the method signature. Since this is either
+ // the Fn or FnMut trait, it should be one of those.
+ let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].kind() else {
+ // The `fn`/`fn_mut` lang item is ill-formed, which should have
+ // caused an error elsewhere.
+ self.tcx
+ .sess
+ .delay_span_bug(call_expr.span, "input to call/call_mut is not a ref?");
+ return None;
+ };
+
+ let mutbl = match mutbl {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+ // For initial two-phase borrow
+ // deployment, conservatively omit
+ // overloaded function call ops.
+ allow_two_phase_borrow: AllowTwoPhase::No,
+ },
+ };
+ autoref = Some(Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
+ target: method.sig.inputs()[0],
+ });
+ }
+ return Some((autoref, method));
+ }
+ }
+
+ None
+ }
+
+    /// Give an appropriate suggestion when encountering `||{/* not callable */}()`, where the
+    /// likely intention is to call the closure; suggest `(||{})()`. (#55851)
+ fn identify_bad_closure_def_and_call(
+ &self,
+ err: &mut Diagnostic,
+ hir_id: hir::HirId,
+ callee_node: &hir::ExprKind<'_>,
+ callee_span: Span,
+ ) {
+ let hir = self.tcx.hir();
+ let parent_hir_id = hir.get_parent_node(hir_id);
+ let parent_node = hir.get(parent_hir_id);
+ if let (
+ hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(&hir::Closure { fn_decl_span, body, .. }),
+ ..
+ }),
+ hir::ExprKind::Block(..),
+ ) = (parent_node, callee_node)
+ {
+ let fn_decl_span = if hir.body(body).generator_kind
+ == Some(hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Closure))
+ {
+ // Actually need to unwrap a few more layers of HIR to get to
+ // the _real_ closure...
+ let async_closure = hir.get_parent_node(hir.get_parent_node(parent_hir_id));
+ if let hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(&hir::Closure { fn_decl_span, .. }),
+ ..
+ }) = hir.get(async_closure)
+ {
+ fn_decl_span
+ } else {
+ return;
+ }
+ } else {
+ fn_decl_span
+ };
+
+ let start = fn_decl_span.shrink_to_lo();
+ let end = callee_span.shrink_to_hi();
+ err.multipart_suggestion(
+ "if you meant to create this closure and immediately call it, surround the \
+ closure with parentheses",
+ vec![(start, "(".to_string()), (end, ")".to_string())],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+    /// Give an appropriate suggestion when encountering `[("a", 0) ("b", 1)]`, where the
+ /// likely intention is to create an array containing tuples.
+ fn maybe_suggest_bad_array_definition(
+ &self,
+ err: &mut Diagnostic,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ ) -> bool {
+ let hir_id = self.tcx.hir().get_parent_node(call_expr.hir_id);
+ let parent_node = self.tcx.hir().get(hir_id);
+ if let (
+ hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Array(_), .. }),
+ hir::ExprKind::Tup(exp),
+ hir::ExprKind::Call(_, args),
+ ) = (parent_node, &callee_expr.kind, &call_expr.kind)
+ && args.len() == exp.len()
+ {
+ let start = callee_expr.span.shrink_to_hi();
+ err.span_suggestion(
+ start,
+ "consider separating array elements with a comma",
+ ",",
+ Applicability::MaybeIncorrect,
+ );
+ return true;
+ }
+ false
+ }
+
+ fn confirm_builtin_call(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ callee_ty: Ty<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let (fn_sig, def_id) = match *callee_ty.kind() {
+ ty::FnDef(def_id, subst) => {
+ let fn_sig = self.tcx.bound_fn_sig(def_id).subst(self.tcx, subst);
+
+ // Unit testing: function items annotated with
+ // `#[rustc_evaluate_where_clauses]` trigger special output
+ // to let us test the trait evaluation system.
+ if self.tcx.has_attr(def_id, sym::rustc_evaluate_where_clauses) {
+ let predicates = self.tcx.predicates_of(def_id);
+ let predicates = predicates.instantiate(self.tcx, subst);
+ for (predicate, predicate_span) in
+ predicates.predicates.iter().zip(&predicates.spans)
+ {
+ let obligation = Obligation::new(
+ ObligationCause::dummy_with_span(callee_expr.span),
+ self.param_env,
+ *predicate,
+ );
+ let result = self.evaluate_obligation(&obligation);
+ self.tcx
+ .sess
+ .struct_span_err(
+ callee_expr.span,
+ &format!("evaluate({:?}) = {:?}", predicate, result),
+ )
+ .span_label(*predicate_span, "predicate")
+ .emit();
+ }
+ }
+ (fn_sig, Some(def_id))
+ }
+ ty::FnPtr(sig) => (sig, None),
+ _ => {
+ if let hir::ExprKind::Path(hir::QPath::Resolved(_, path)) = &callee_expr.kind
+ && let [segment] = path.segments
+ && let Some(mut diag) = self
+ .tcx
+ .sess
+ .diagnostic()
+ .steal_diagnostic(segment.ident.span, StashKey::CallIntoMethod)
+ {
+ // Try suggesting `foo(a)` -> `a.foo()` if possible.
+ if let Some(ty) =
+ self.suggest_call_as_method(
+ &mut diag,
+ segment,
+ arg_exprs,
+ call_expr,
+ expected
+ )
+ {
+ diag.emit();
+ return ty;
+ } else {
+ diag.emit();
+ }
+ }
+
+ self.report_invalid_callee(call_expr, callee_expr, callee_ty, arg_exprs);
+
+ // This is the "default" function signature, used in case of error.
+ // In that case, we check each argument against "error" in order to
+ // set up all the node type bindings.
+ (
+ ty::Binder::dummy(self.tcx.mk_fn_sig(
+ self.err_args(arg_exprs.len()).into_iter(),
+ self.tcx.ty_error(),
+ false,
+ hir::Unsafety::Normal,
+ abi::Abi::Rust,
+ )),
+ None,
+ )
+ }
+ };
+
+ // Replace any late-bound regions that appear in the function
+ // signature with region variables. We also have to
+ // renormalize the associated types at this point, since they
+ // previously appeared within a `Binder<>` and hence would not
+ // have been normalized before.
+ let fn_sig = self.replace_bound_vars_with_fresh_vars(call_expr.span, infer::FnCall, fn_sig);
+ let fn_sig = self.normalize_associated_types_in(call_expr.span, fn_sig);
+
+ // Call the generic checker.
+ let expected_arg_tys = self.expected_inputs_for_expected_output(
+ call_expr.span,
+ expected,
+ fn_sig.output(),
+ fn_sig.inputs(),
+ );
+ self.check_argument_types(
+ call_expr.span,
+ call_expr,
+ fn_sig.inputs(),
+ expected_arg_tys,
+ arg_exprs,
+ fn_sig.c_variadic,
+ TupleArgumentsFlag::DontTupleArguments,
+ def_id,
+ );
+
+ fn_sig.output()
+ }
+
+ /// Attempts to reinterpret `method(rcvr, args...)` as `rcvr.method(args...)`
+    /// and suggests the fix if the method probe is successful.
+ fn suggest_call_as_method(
+ &self,
+ diag: &mut Diagnostic,
+ segment: &'tcx hir::PathSegment<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ call_expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Option<Ty<'tcx>> {
+ if let [callee_expr, rest @ ..] = arg_exprs {
+ let callee_ty = self.check_expr(callee_expr);
+ // First, do a probe with `IsSuggestion(true)` to avoid emitting
+ // any strange errors. If it's successful, then we'll do a true
+ // method lookup.
+ let Ok(pick) = self
+ .probe_for_name(
+ call_expr.span,
+ Mode::MethodCall,
+ segment.ident,
+ IsSuggestion(true),
+ callee_ty,
+ call_expr.hir_id,
+ // We didn't record the in scope traits during late resolution
+ // so we need to probe AllTraits unfortunately
+ ProbeScope::AllTraits,
+ ) else {
+ return None;
+ };
+
+ let pick = self.confirm_method(
+ call_expr.span,
+ callee_expr,
+ call_expr,
+ callee_ty,
+ pick,
+ segment,
+ );
+ if pick.illegal_sized_bound.is_some() {
+ return None;
+ }
+
+ let up_to_rcvr_span = segment.ident.span.until(callee_expr.span);
+ let rest_span = callee_expr.span.shrink_to_hi().to(call_expr.span.shrink_to_hi());
+ let rest_snippet = if let Some(first) = rest.first() {
+ self.tcx
+ .sess
+ .source_map()
+ .span_to_snippet(first.span.to(call_expr.span.shrink_to_hi()))
+ } else {
+ Ok(")".to_string())
+ };
+
+ if let Ok(rest_snippet) = rest_snippet {
+ let sugg = if callee_expr.precedence().order() >= PREC_POSTFIX {
+ vec![
+ (up_to_rcvr_span, "".to_string()),
+ (rest_span, format!(".{}({rest_snippet}", segment.ident)),
+ ]
+ } else {
+ vec![
+ (up_to_rcvr_span, "(".to_string()),
+ (rest_span, format!(").{}({rest_snippet}", segment.ident)),
+ ]
+ };
+ let self_ty = self.resolve_vars_if_possible(pick.callee.sig.inputs()[0]);
+ diag.multipart_suggestion(
+ format!(
+ "use the `.` operator to call the method `{}{}` on `{self_ty}`",
+ self.tcx
+ .associated_item(pick.callee.def_id)
+ .trait_container(self.tcx)
+ .map_or_else(
+ || String::new(),
+ |trait_def_id| self.tcx.def_path_str(trait_def_id) + "::"
+ ),
+ segment.ident
+ ),
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+
+ // Let's check the method fully now
+ let return_ty = self.check_method_argument_types(
+ segment.ident.span,
+ call_expr,
+ Ok(pick.callee),
+ rest,
+ TupleArgumentsFlag::DontTupleArguments,
+ expected,
+ );
+
+ return Some(return_ty);
+ }
+ }
+
+ None
+ }
+
+ fn report_invalid_callee(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ callee_ty: Ty<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ ) {
+ let mut unit_variant = None;
+ if let hir::ExprKind::Path(qpath) = &callee_expr.kind
+ && let Res::Def(def::DefKind::Ctor(kind, def::CtorKind::Const), _)
+ = self.typeck_results.borrow().qpath_res(qpath, callee_expr.hir_id)
+ // Only suggest removing parens if there are no arguments
+ && arg_exprs.is_empty()
+ {
+ let descr = match kind {
+ def::CtorOf::Struct => "struct",
+ def::CtorOf::Variant => "enum variant",
+ };
+ let removal_span = callee_expr.span.shrink_to_hi().to(call_expr.span.shrink_to_hi());
+ unit_variant = Some((removal_span, descr, rustc_hir_pretty::qpath_to_string(qpath)));
+ }
+
+ let callee_ty = self.resolve_vars_if_possible(callee_ty);
+ let mut err = type_error_struct!(
+ self.tcx.sess,
+ callee_expr.span,
+ callee_ty,
+ E0618,
+ "expected function, found {}",
+ match &unit_variant {
+ Some((_, kind, path)) => format!("{kind} `{path}`"),
+ None => format!("`{callee_ty}`"),
+ }
+ );
+
+ self.identify_bad_closure_def_and_call(
+ &mut err,
+ call_expr.hir_id,
+ &callee_expr.kind,
+ callee_expr.span,
+ );
+
+ if let Some((removal_span, kind, path)) = &unit_variant {
+ err.span_suggestion_verbose(
+ *removal_span,
+ &format!(
+ "`{path}` is a unit {kind}, and does not take parentheses to be constructed",
+ ),
+ "",
+ Applicability::MachineApplicable,
+ );
+ }
+
+ let mut inner_callee_path = None;
+ let def = match callee_expr.kind {
+ hir::ExprKind::Path(ref qpath) => {
+ self.typeck_results.borrow().qpath_res(qpath, callee_expr.hir_id)
+ }
+ hir::ExprKind::Call(ref inner_callee, _) => {
+ // If the call spans more than one line and the callee kind is
+ // itself another `ExprCall`, that's a clue that we might just be
+ // missing a semicolon (Issue #51055)
+ let call_is_multiline = self.tcx.sess.source_map().is_multiline(call_expr.span);
+ if call_is_multiline {
+ err.span_suggestion(
+ callee_expr.span.shrink_to_hi(),
+ "consider using a semicolon here",
+ ";",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ if let hir::ExprKind::Path(ref inner_qpath) = inner_callee.kind {
+ inner_callee_path = Some(inner_qpath);
+ self.typeck_results.borrow().qpath_res(inner_qpath, inner_callee.hir_id)
+ } else {
+ Res::Err
+ }
+ }
+ _ => Res::Err,
+ };
+
+ if !self.maybe_suggest_bad_array_definition(&mut err, call_expr, callee_expr) {
+ if let Some((maybe_def, output_ty, _)) =
+ self.extract_callable_info(callee_expr, callee_ty)
+ && !self.type_is_sized_modulo_regions(self.param_env, output_ty, callee_expr.span)
+ {
+ let descr = match maybe_def {
+ DefIdOrName::DefId(def_id) => self.tcx.def_kind(def_id).descr(def_id),
+ DefIdOrName::Name(name) => name,
+ };
+ err.span_label(
+ callee_expr.span,
+ format!("this {descr} returns an unsized value `{output_ty}`, so it cannot be called")
+ );
+ if let DefIdOrName::DefId(def_id) = maybe_def
+ && let Some(def_span) = self.tcx.hir().span_if_local(def_id)
+ {
+ err.span_label(def_span, "the callable type is defined here");
+ }
+ } else {
+ err.span_label(call_expr.span, "call expression requires function");
+ }
+ }
+
+ if let Some(span) = self.tcx.hir().res_span(def) {
+ let callee_ty = callee_ty.to_string();
+ let label = match (unit_variant, inner_callee_path) {
+ (Some((_, kind, path)), _) => Some(format!("{kind} `{path}` defined here")),
+ (_, Some(hir::QPath::Resolved(_, path))) => self
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(path.span)
+ .ok()
+ .map(|p| format!("`{p}` defined here returns `{callee_ty}`")),
+ _ => {
+ match def {
+ // Emit a different diagnostic for local variables, as they are not
+ // type definitions themselves, but rather variables *of* that type.
+ Res::Local(hir_id) => Some(format!(
+ "`{}` has type `{}`",
+ self.tcx.hir().name(hir_id),
+ callee_ty
+ )),
+ Res::Def(kind, def_id) if kind.ns() == Some(Namespace::ValueNS) => {
+ Some(format!("`{}` defined here", self.tcx.def_path_str(def_id),))
+ }
+ _ => Some(format!("`{callee_ty}` defined here")),
+ }
+ }
+ };
+ if let Some(label) = label {
+ err.span_label(span, label);
+ }
+ }
+ err.emit();
+ }
+
+ fn confirm_deferred_closure_call(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ closure_def_id: LocalDefId,
+ fn_sig: ty::FnSig<'tcx>,
+ ) -> Ty<'tcx> {
+ // `fn_sig` is the *signature* of the closure being called. We
+ // don't know the full details yet (`Fn` vs `FnMut` etc), but we
+ // do know the types expected for each argument and the return
+ // type.
+
+ let expected_arg_tys = self.expected_inputs_for_expected_output(
+ call_expr.span,
+ expected,
+ fn_sig.output(),
+ fn_sig.inputs(),
+ );
+
+ self.check_argument_types(
+ call_expr.span,
+ call_expr,
+ fn_sig.inputs(),
+ expected_arg_tys,
+ arg_exprs,
+ fn_sig.c_variadic,
+ TupleArgumentsFlag::TupleArguments,
+ Some(closure_def_id.to_def_id()),
+ );
+
+ fn_sig.output()
+ }
+
+ fn confirm_overloaded_call(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ method_callee: MethodCallee<'tcx>,
+ ) -> Ty<'tcx> {
+ let output_type = self.check_method_argument_types(
+ call_expr.span,
+ call_expr,
+ Ok(method_callee),
+ arg_exprs,
+ TupleArgumentsFlag::TupleArguments,
+ expected,
+ );
+
+ self.write_method_call(call_expr.hir_id, method_callee);
+ output_type
+ }
+}
+
+#[derive(Debug)]
+pub struct DeferredCallResolution<'tcx> {
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ adjusted_ty: Ty<'tcx>,
+ adjustments: Vec<Adjustment<'tcx>>,
+ fn_sig: ty::FnSig<'tcx>,
+ closure_substs: SubstsRef<'tcx>,
+}
+
+impl<'a, 'tcx> DeferredCallResolution<'tcx> {
+ pub fn resolve(self, fcx: &FnCtxt<'a, 'tcx>) {
+ debug!("DeferredCallResolution::resolve() {:?}", self);
+
+ // we should not be invoked until the closure kind has been
+ // determined by upvar inference
+ assert!(fcx.closure_kind(self.closure_substs).is_some());
+
+ // We may now know enough to figure out fn vs fnmut etc.
+ match fcx.try_overloaded_call_traits(self.call_expr, self.adjusted_ty, None) {
+ Some((autoref, method_callee)) => {
+ // One problem is that when we get here, we are going
+ // to have a newly instantiated function signature
+ // from the call trait. This has to be reconciled with
+ // the older function signature we had before. In
+ // principle we *should* be able to fn_sigs(), but we
+ // can't because of the annoying need for a TypeTrace.
+ // (This always bites me, should find a way to
+ // refactor it.)
+ let method_sig = method_callee.sig;
+
+ debug!("attempt_resolution: method_callee={:?}", method_callee);
+
+ for (method_arg_ty, self_arg_ty) in
+ iter::zip(method_sig.inputs().iter().skip(1), self.fn_sig.inputs())
+ {
+ fcx.demand_eqtype(self.call_expr.span, *self_arg_ty, *method_arg_ty);
+ }
+
+ fcx.demand_eqtype(self.call_expr.span, method_sig.output(), self.fn_sig.output());
+
+ let mut adjustments = self.adjustments;
+ adjustments.extend(autoref);
+ fcx.apply_adjustments(self.callee_expr, adjustments);
+
+ fcx.write_method_call(self.call_expr.hir_id, method_callee);
+ }
+ None => {
+ // This can happen if `#![no_core]` is used and the `fn/fn_mut/fn_once`
+ // lang items are not defined (issue #86238).
+ let mut err = fcx.inh.tcx.sess.struct_span_err(
+ self.call_expr.span,
+ "failed to find an overloaded call trait for closure call",
+ );
+ err.help(
+ "make sure the `fn`/`fn_mut`/`fn_once` lang items are defined \
+ and have associated `call`/`call_mut`/`call_once` functions",
+ );
+ err.emit();
+ }
+ }
+ }
+}
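`identify_bad_closure_def_and_call` above handles the `||{ ... }()` pitfall, where the trailing `()` is parsed as a call of the block inside the closure body rather than a call of the closure itself. A small sketch of the working form that the suggestion points to (illustrative only):

fn main() {
    // `let x = || { 1 + 1 }();` defines a closure whose body is `{ 1 + 1 }()`,
    // i.e. an attempt to call the block's value, which is not callable, so
    // typeck reports "expected function" and suggests parenthesizing the closure.
    //
    // The parenthesized form defines the closure and immediately calls it:
    let x = (|| 1 + 1)();
    assert_eq!(x, 2);
}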
diff --git a/compiler/rustc_hir_typeck/src/cast.rs b/compiler/rustc_hir_typeck/src/cast.rs
new file mode 100644
index 000000000..d1dab0540
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/cast.rs
@@ -0,0 +1,1105 @@
+//! Code for type-checking cast expressions.
+//!
+//! A cast `e as U` is valid if one of the following holds:
+//! * `e` has type `T` and `T` coerces to `U`; *coercion-cast*
+//! * `e` has type `*T`, `U` is `*U_0`, and either `U_0: Sized` or
+//! pointer_kind(`T`) = pointer_kind(`U_0`); *ptr-ptr-cast*
+//! * `e` has type `*T` and `U` is a numeric type, while `T: Sized`; *ptr-addr-cast*
+//! * `e` is an integer and `U` is `*U_0`, while `U_0: Sized`; *addr-ptr-cast*
+//! * `e` has type `T` and `T` and `U` are any numeric types; *numeric-cast*
+//! * `e` is a C-like enum and `U` is an integer type; *enum-cast*
+//! * `e` has type `bool` or `char` and `U` is an integer; *prim-int-cast*
+//! * `e` has type `u8` and `U` is `char`; *u8-char-cast*
+//! * `e` has type `&[T; n]` and `U` is `*const T`; *array-ptr-cast*
+//! * `e` is a function pointer type and `U` has type `*T`,
+//! while `T: Sized`; *fptr-ptr-cast*
+//! * `e` is a function pointer type and `U` is an integer; *fptr-addr-cast*
+//!
+//! where `&.T` and `*T` are references of either mutability,
+//! and where pointer_kind(`T`) is the kind of the unsize info
+//! in `T` - the vtable for a trait definition (e.g., `fmt::Display` or
+//! `Iterator`, not `Iterator<Item=u8>`) or a length (or `()` if `T: Sized`).
+//!
+//! Note that lengths are not adjusted when casting raw slices -
+//! `T: *const [u16] as *const [u8]` creates a slice that only includes
+//! half of the original memory.
+//!
+//! Casting is not transitive, that is, even if `e as U1 as U2` is a valid
+//! expression, `e as U2` is not necessarily so (in fact it will only be valid if
+//! `U1` coerces to `U2`).
+
+use super::FnCtxt;
+
+use crate::type_error_struct;
+use rustc_errors::{struct_span_err, Applicability, DelayDm, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_middle::mir::Mutability;
+use rustc_middle::ty::adjustment::AllowTwoPhase;
+use rustc_middle::ty::cast::{CastKind, CastTy};
+use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, Ty, TypeAndMut, TypeVisitable, VariantDef};
+use rustc_session::lint;
+use rustc_session::Session;
+use rustc_span::def_id::{DefId, LOCAL_CRATE};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits::error_reporting::report_object_safety_error;
+
+/// Reifies a cast check to be checked once we have full type information for
+/// a function context.
+#[derive(Debug)]
+pub struct CastCheck<'tcx> {
+ /// The expression whose value is being casted
+ expr: &'tcx hir::Expr<'tcx>,
+ /// The source type for the cast expression
+ expr_ty: Ty<'tcx>,
+ expr_span: Span,
+ /// The target type. That is, the type we are casting to.
+ cast_ty: Ty<'tcx>,
+ cast_span: Span,
+ span: Span,
+ /// whether the cast is made in a const context or not.
+ pub constness: hir::Constness,
+}
+
+/// The kind of pointer and associated metadata (thin, length or vtable) - we
+/// only allow casts between fat pointers if their metadata have the same
+/// kind.
+#[derive(Copy, Clone, PartialEq, Eq)]
+enum PointerKind<'tcx> {
+    /// No metadata attached, i.e., a pointer to a sized type or a foreign type
+ Thin,
+ /// A trait object
+ VTable(Option<DefId>),
+ /// Slice
+ Length,
+ /// The unsize info of this projection
+ OfProjection(&'tcx ty::ProjectionTy<'tcx>),
+ /// The unsize info of this opaque ty
+ OfOpaque(DefId, SubstsRef<'tcx>),
+ /// The unsize info of this parameter
+ OfParam(&'tcx ty::ParamTy),
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+    /// Returns the kind of unsize information of `t`, or `None`
+    /// if `t` is unknown.
+ fn pointer_kind(
+ &self,
+ t: Ty<'tcx>,
+ span: Span,
+ ) -> Result<Option<PointerKind<'tcx>>, ErrorGuaranteed> {
+ debug!("pointer_kind({:?}, {:?})", t, span);
+
+ let t = self.resolve_vars_if_possible(t);
+
+ if let Some(reported) = t.error_reported() {
+ return Err(reported);
+ }
+
+ if self.type_is_sized_modulo_regions(self.param_env, t, span) {
+ return Ok(Some(PointerKind::Thin));
+ }
+
+ Ok(match *t.kind() {
+ ty::Slice(_) | ty::Str => Some(PointerKind::Length),
+ ty::Dynamic(ref tty, _, ty::Dyn) => Some(PointerKind::VTable(tty.principal_def_id())),
+ ty::Adt(def, substs) if def.is_struct() => match def.non_enum_variant().fields.last() {
+ None => Some(PointerKind::Thin),
+ Some(f) => {
+ let field_ty = self.field_ty(span, f, substs);
+ self.pointer_kind(field_ty, span)?
+ }
+ },
+ ty::Tuple(fields) => match fields.last() {
+ None => Some(PointerKind::Thin),
+ Some(&f) => self.pointer_kind(f, span)?,
+ },
+
+ // Pointers to foreign types are thin, despite being unsized
+ ty::Foreign(..) => Some(PointerKind::Thin),
+ // We should really try to normalize here.
+ ty::Projection(ref pi) => Some(PointerKind::OfProjection(pi)),
+ ty::Opaque(def_id, substs) => Some(PointerKind::OfOpaque(def_id, substs)),
+ ty::Param(ref p) => Some(PointerKind::OfParam(p)),
+ // Insufficient type information.
+ ty::Placeholder(..) | ty::Bound(..) | ty::Infer(_) => None,
+
+ ty::Bool
+ | ty::Char
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(_)
+ | ty::Array(..)
+ | ty::GeneratorWitness(..)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(..)
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::Adt(..)
+ | ty::Never
+ | ty::Dynamic(_, _, ty::DynStar)
+ | ty::Error(_) => {
+ let reported = self
+ .tcx
+ .sess
+ .delay_span_bug(span, &format!("`{:?}` should be sized but is not?", t));
+ return Err(reported);
+ }
+ })
+ }
+}
+
+#[derive(Copy, Clone)]
+pub enum CastError {
+ ErrorGuaranteed,
+
+ CastToBool,
+ CastToChar,
+ DifferingKinds,
+ /// Cast of thin to fat raw ptr (e.g., `*const () as *const [u8]`).
+ SizedUnsizedCast,
+ IllegalCast,
+ NeedDeref,
+ NeedViaPtr,
+ NeedViaThinPtr,
+ NeedViaInt,
+ NonScalar,
+ UnknownExprPtrKind,
+ UnknownCastPtrKind,
+ /// Cast of int to (possibly) fat raw pointer.
+ ///
+ /// Argument is the specific name of the metadata in plain words, such as "a vtable"
+ /// or "a length". If this argument is None, then the metadata is unknown, for example,
+ /// when we're typechecking a type parameter with a ?Sized bound.
+ IntToFatCast(Option<&'static str>),
+ ForeignNonExhaustiveAdt,
+}
+
+impl From<ErrorGuaranteed> for CastError {
+ fn from(_: ErrorGuaranteed) -> Self {
+ CastError::ErrorGuaranteed
+ }
+}
+
+fn make_invalid_casting_error<'a, 'tcx>(
+ sess: &'a Session,
+ span: Span,
+ expr_ty: Ty<'tcx>,
+ cast_ty: Ty<'tcx>,
+ fcx: &FnCtxt<'a, 'tcx>,
+) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ type_error_struct!(
+ sess,
+ span,
+ expr_ty,
+ E0606,
+ "casting `{}` as `{}` is invalid",
+ fcx.ty_to_string(expr_ty),
+ fcx.ty_to_string(cast_ty)
+ )
+}
+
+impl<'a, 'tcx> CastCheck<'tcx> {
+ pub fn new(
+ fcx: &FnCtxt<'a, 'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ expr_ty: Ty<'tcx>,
+ cast_ty: Ty<'tcx>,
+ cast_span: Span,
+ span: Span,
+ constness: hir::Constness,
+ ) -> Result<CastCheck<'tcx>, ErrorGuaranteed> {
+ let expr_span = expr.span.find_ancestor_inside(span).unwrap_or(expr.span);
+ let check = CastCheck { expr, expr_ty, expr_span, cast_ty, cast_span, span, constness };
+
+ // For better error messages, check for some obviously unsized
+ // cases now. We do a more thorough check at the end, once
+ // inference is more completely known.
+ match cast_ty.kind() {
+ ty::Dynamic(_, _, ty::Dyn) | ty::Slice(..) => {
+ let reported = check.report_cast_to_unsized_type(fcx);
+ Err(reported)
+ }
+ _ => Ok(check),
+ }
+ }
+
+ fn report_cast_error(&self, fcx: &FnCtxt<'a, 'tcx>, e: CastError) {
+ match e {
+ CastError::ErrorGuaranteed => {
+ // an error has already been reported
+ }
+ CastError::NeedDeref => {
+ let error_span = self.span;
+ let mut err = make_invalid_casting_error(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ self.cast_ty,
+ fcx,
+ );
+ let cast_ty = fcx.ty_to_string(self.cast_ty);
+ err.span_label(
+ error_span,
+ format!("cannot cast `{}` as `{}`", fcx.ty_to_string(self.expr_ty), cast_ty),
+ );
+ if let Ok(snippet) = fcx.sess().source_map().span_to_snippet(self.expr_span) {
+ err.span_suggestion(
+ self.expr_span,
+ "dereference the expression",
+ format!("*{}", snippet),
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_help(self.expr_span, "dereference the expression with `*`");
+ }
+ err.emit();
+ }
+ CastError::NeedViaThinPtr | CastError::NeedViaPtr => {
+ let mut err = make_invalid_casting_error(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ self.cast_ty,
+ fcx,
+ );
+ if self.cast_ty.is_integral() {
+ err.help(&format!(
+ "cast through {} first",
+ match e {
+ CastError::NeedViaPtr => "a raw pointer",
+ CastError::NeedViaThinPtr => "a thin pointer",
+ _ => bug!(),
+ }
+ ));
+ }
+ err.emit();
+ }
+ CastError::NeedViaInt => {
+ make_invalid_casting_error(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ self.cast_ty,
+ fcx,
+ )
+ .help(&format!(
+ "cast through {} first",
+ match e {
+ CastError::NeedViaInt => "an integer",
+ _ => bug!(),
+ }
+ ))
+ .emit();
+ }
+ CastError::IllegalCast => {
+ make_invalid_casting_error(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ self.cast_ty,
+ fcx,
+ )
+ .emit();
+ }
+ CastError::DifferingKinds => {
+ make_invalid_casting_error(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ self.cast_ty,
+ fcx,
+ )
+ .note("vtable kinds may not match")
+ .emit();
+ }
+ CastError::CastToBool => {
+ let mut err =
+ struct_span_err!(fcx.tcx.sess, self.span, E0054, "cannot cast as `bool`");
+
+ if self.expr_ty.is_numeric() {
+ match fcx.tcx.sess.source_map().span_to_snippet(self.expr_span) {
+ Ok(snippet) => {
+ err.span_suggestion(
+ self.span,
+ "compare with zero instead",
+ format!("{snippet} != 0"),
+ Applicability::MachineApplicable,
+ );
+ }
+ Err(_) => {
+ err.span_help(self.span, "compare with zero instead");
+ }
+ }
+ } else {
+ err.span_label(self.span, "unsupported cast");
+ }
+
+ err.emit();
+ }
+ CastError::CastToChar => {
+ let mut err = type_error_struct!(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ E0604,
+ "only `u8` can be cast as `char`, not `{}`",
+ self.expr_ty
+ );
+ err.span_label(self.span, "invalid cast");
+ if self.expr_ty.is_numeric() {
+ if self.expr_ty == fcx.tcx.types.u32 {
+ match fcx.tcx.sess.source_map().span_to_snippet(self.expr.span) {
+ Ok(snippet) => err.span_suggestion(
+ self.span,
+ "try `char::from_u32` instead",
+ format!("char::from_u32({snippet})"),
+ Applicability::MachineApplicable,
+ ),
+
+ Err(_) => err.span_help(self.span, "try `char::from_u32` instead"),
+ };
+ } else if self.expr_ty == fcx.tcx.types.i8 {
+ err.span_help(self.span, "try casting from `u8` instead");
+ } else {
+ err.span_help(self.span, "try `char::from_u32` instead (via a `u32`)");
+ };
+ }
+ err.emit();
+ }
+ CastError::NonScalar => {
+ let mut err = type_error_struct!(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ E0605,
+ "non-primitive cast: `{}` as `{}`",
+ self.expr_ty,
+ fcx.ty_to_string(self.cast_ty)
+ );
+ let mut sugg = None;
+ let mut sugg_mutref = false;
+ if let ty::Ref(reg, cast_ty, mutbl) = *self.cast_ty.kind() {
+ if let ty::RawPtr(TypeAndMut { ty: expr_ty, .. }) = *self.expr_ty.kind()
+ && fcx
+ .try_coerce(
+ self.expr,
+ fcx.tcx.mk_ref(
+ fcx.tcx.lifetimes.re_erased,
+ TypeAndMut { ty: expr_ty, mutbl },
+ ),
+ self.cast_ty,
+ AllowTwoPhase::No,
+ None,
+ )
+ .is_ok()
+ {
+ sugg = Some((format!("&{}*", mutbl.prefix_str()), cast_ty == expr_ty));
+ } else if let ty::Ref(expr_reg, expr_ty, expr_mutbl) = *self.expr_ty.kind()
+ && expr_mutbl == Mutability::Not
+ && mutbl == Mutability::Mut
+ && fcx
+ .try_coerce(
+ self.expr,
+ fcx.tcx.mk_ref(
+ expr_reg,
+ TypeAndMut { ty: expr_ty, mutbl: Mutability::Mut },
+ ),
+ self.cast_ty,
+ AllowTwoPhase::No,
+ None,
+ )
+ .is_ok()
+ {
+ sugg_mutref = true;
+ }
+
+ if !sugg_mutref
+ && sugg == None
+ && fcx
+ .try_coerce(
+ self.expr,
+ fcx.tcx.mk_ref(reg, TypeAndMut { ty: self.expr_ty, mutbl }),
+ self.cast_ty,
+ AllowTwoPhase::No,
+ None,
+ )
+ .is_ok()
+ {
+ sugg = Some((format!("&{}", mutbl.prefix_str()), false));
+ }
+ } else if let ty::RawPtr(TypeAndMut { mutbl, .. }) = *self.cast_ty.kind()
+ && fcx
+ .try_coerce(
+ self.expr,
+ fcx.tcx.mk_ref(
+ fcx.tcx.lifetimes.re_erased,
+ TypeAndMut { ty: self.expr_ty, mutbl },
+ ),
+ self.cast_ty,
+ AllowTwoPhase::No,
+ None,
+ )
+ .is_ok()
+ {
+ sugg = Some((format!("&{}", mutbl.prefix_str()), false));
+ }
+ if sugg_mutref {
+ err.span_label(self.span, "invalid cast");
+ err.span_note(self.expr_span, "this reference is immutable");
+ err.span_note(self.cast_span, "trying to cast to a mutable reference type");
+ } else if let Some((sugg, remove_cast)) = sugg {
+ err.span_label(self.span, "invalid cast");
+
+ let has_parens = fcx
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(self.expr_span)
+ .map_or(false, |snip| snip.starts_with('('));
+
+ // Very crude check to see whether the expression must be wrapped
+ // in parentheses for the suggestion to work (issue #89497).
+ // Can/should be extended in the future.
+ let needs_parens =
+ !has_parens && matches!(self.expr.kind, hir::ExprKind::Cast(..));
+
+ let mut suggestion = vec![(self.expr_span.shrink_to_lo(), sugg)];
+ if needs_parens {
+ suggestion[0].1 += "(";
+ suggestion.push((self.expr_span.shrink_to_hi(), ")".to_string()));
+ }
+ if remove_cast {
+ suggestion.push((
+ self.expr_span.shrink_to_hi().to(self.cast_span),
+ String::new(),
+ ));
+ }
+
+ err.multipart_suggestion_verbose(
+ "consider borrowing the value",
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else if !matches!(
+ self.cast_ty.kind(),
+ ty::FnDef(..) | ty::FnPtr(..) | ty::Closure(..)
+ ) {
+ let mut label = true;
+ // Check `impl From<self.expr_ty> for self.cast_ty {}` for accurate suggestion:
+ if let Ok(snippet) = fcx.tcx.sess.source_map().span_to_snippet(self.expr_span)
+ && let Some(from_trait) = fcx.tcx.get_diagnostic_item(sym::From)
+ {
+ let ty = fcx.resolve_vars_if_possible(self.cast_ty);
+ // Erase regions to avoid panic in `prove_value` when calling
+ // `type_implements_trait`.
+ let ty = fcx.tcx.erase_regions(ty);
+ let expr_ty = fcx.resolve_vars_if_possible(self.expr_ty);
+ let expr_ty = fcx.tcx.erase_regions(expr_ty);
+ let ty_params = fcx.tcx.mk_substs_trait(expr_ty, &[]);
+ if fcx
+ .infcx
+ .type_implements_trait(from_trait, ty, ty_params, fcx.param_env)
+ .must_apply_modulo_regions()
+ {
+ label = false;
+ err.span_suggestion(
+ self.span,
+ "consider using the `From` trait instead",
+ format!("{}::from({})", self.cast_ty, snippet),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ let msg = "an `as` expression can only be used to convert between primitive \
+ types or to coerce to a specific trait object";
+ if label {
+ err.span_label(self.span, msg);
+ } else {
+ err.note(msg);
+ }
+ } else {
+ err.span_label(self.span, "invalid cast");
+ }
+ err.emit();
+ }
+ CastError::SizedUnsizedCast => {
+ use rustc_hir_analysis::structured_errors::{
+ SizedUnsizedCast, StructuredDiagnostic,
+ };
+
+ SizedUnsizedCast {
+ sess: &fcx.tcx.sess,
+ span: self.span,
+ expr_ty: self.expr_ty,
+ cast_ty: fcx.ty_to_string(self.cast_ty),
+ }
+ .diagnostic()
+ .emit();
+ }
+ CastError::IntToFatCast(known_metadata) => {
+ let mut err = struct_span_err!(
+ fcx.tcx.sess,
+ self.cast_span,
+ E0606,
+ "cannot cast `{}` to a pointer that {} wide",
+ fcx.ty_to_string(self.expr_ty),
+ if known_metadata.is_some() { "is" } else { "may be" }
+ );
+
+ err.span_label(
+ self.cast_span,
+ format!(
+ "creating a `{}` requires both an address and {}",
+ self.cast_ty,
+ known_metadata.unwrap_or("type-specific metadata"),
+ ),
+ );
+
+ if fcx.tcx.sess.is_nightly_build() {
+ err.span_label(
+ self.expr_span,
+ "consider casting this expression to `*const ()`, \
+ then using `core::ptr::from_raw_parts`",
+ );
+ }
+
+ err.emit();
+ }
+ CastError::UnknownCastPtrKind | CastError::UnknownExprPtrKind => {
+ let unknown_cast_to = match e {
+ CastError::UnknownCastPtrKind => true,
+ CastError::UnknownExprPtrKind => false,
+ _ => bug!(),
+ };
+ let mut err = struct_span_err!(
+ fcx.tcx.sess,
+ if unknown_cast_to { self.cast_span } else { self.span },
+ E0641,
+ "cannot cast {} a pointer of an unknown kind",
+ if unknown_cast_to { "to" } else { "from" }
+ );
+ if unknown_cast_to {
+ err.span_label(self.cast_span, "needs more type information");
+ err.note(
+ "the type information given here is insufficient to check whether \
+ the pointer cast is valid",
+ );
+ } else {
+ err.span_label(
+ self.span,
+ "the type information given here is insufficient to check whether \
+ the pointer cast is valid",
+ );
+ }
+ err.emit();
+ }
+ CastError::ForeignNonExhaustiveAdt => {
+ make_invalid_casting_error(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ self.cast_ty,
+ fcx,
+ )
+ .note("cannot cast an enum with a non-exhaustive variant when it's defined in another crate")
+ .emit();
+ }
+ }
+ }
+
+ fn report_cast_to_unsized_type(&self, fcx: &FnCtxt<'a, 'tcx>) -> ErrorGuaranteed {
+ if let Some(reported) =
+ self.cast_ty.error_reported().or_else(|| self.expr_ty.error_reported())
+ {
+ return reported;
+ }
+
+ let tstr = fcx.ty_to_string(self.cast_ty);
+ let mut err = type_error_struct!(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ E0620,
+ "cast to unsized type: `{}` as `{}`",
+ fcx.resolve_vars_if_possible(self.expr_ty),
+ tstr
+ );
+ match self.expr_ty.kind() {
+ ty::Ref(_, _, mt) => {
+ let mtstr = mt.prefix_str();
+ if self.cast_ty.is_trait() {
+ match fcx.tcx.sess.source_map().span_to_snippet(self.cast_span) {
+ Ok(s) => {
+ err.span_suggestion(
+ self.cast_span,
+ "try casting to a reference instead",
+ format!("&{}{}", mtstr, s),
+ Applicability::MachineApplicable,
+ );
+ }
+ Err(_) => {
+ let msg = &format!("did you mean `&{}{}`?", mtstr, tstr);
+ err.span_help(self.cast_span, msg);
+ }
+ }
+ } else {
+ let msg =
+ &format!("consider using an implicit coercion to `&{mtstr}{tstr}` instead");
+ err.span_help(self.span, msg);
+ }
+ }
+ ty::Adt(def, ..) if def.is_box() => {
+ match fcx.tcx.sess.source_map().span_to_snippet(self.cast_span) {
+ Ok(s) => {
+ err.span_suggestion(
+ self.cast_span,
+ "you can cast to a `Box` instead",
+ format!("Box<{s}>"),
+ Applicability::MachineApplicable,
+ );
+ }
+ Err(_) => {
+ err.span_help(
+ self.cast_span,
+ &format!("you might have meant `Box<{tstr}>`"),
+ );
+ }
+ }
+ }
+ _ => {
+ err.span_help(self.expr_span, "consider using a box or reference as appropriate");
+ }
+ }
+ err.emit()
+ }
+
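+ /// Lints casts that are already covered by a coercion, e.g. (illustrative)
+ /// `1i32 as i32` for `trivial_numeric_casts` and `&x as &i32` (with `x: i32`)
+ /// for `trivial_casts`.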
+ fn trivial_cast_lint(&self, fcx: &FnCtxt<'a, 'tcx>) {
+ let t_cast = self.cast_ty;
+ let t_expr = self.expr_ty;
+ let type_asc_or =
+ if fcx.tcx.features().type_ascription { "type ascription or " } else { "" };
+ let (adjective, lint) = if t_cast.is_numeric() && t_expr.is_numeric() {
+ ("numeric ", lint::builtin::TRIVIAL_NUMERIC_CASTS)
+ } else {
+ ("", lint::builtin::TRIVIAL_CASTS)
+ };
+ fcx.tcx.struct_span_lint_hir(
+ lint,
+ self.expr.hir_id,
+ self.span,
+ DelayDm(|| {
+ format!(
+ "trivial {}cast: `{}` as `{}`",
+ adjective,
+ fcx.ty_to_string(t_expr),
+ fcx.ty_to_string(t_cast)
+ )
+ }),
+ |lint| {
+ lint.help(format!(
+ "cast can be replaced by coercion; this might \
+ require {type_asc_or}a temporary variable"
+ ))
+ },
+ );
+ }
+
+ #[instrument(skip(fcx), level = "debug")]
+ pub fn check(mut self, fcx: &FnCtxt<'a, 'tcx>) {
+ self.expr_ty = fcx.structurally_resolved_type(self.expr_span, self.expr_ty);
+ self.cast_ty = fcx.structurally_resolved_type(self.cast_span, self.cast_ty);
+
+ debug!("check_cast({}, {:?} as {:?})", self.expr.hir_id, self.expr_ty, self.cast_ty);
+
+ if !fcx.type_is_sized_modulo_regions(fcx.param_env, self.cast_ty, self.span)
+ && !self.cast_ty.has_infer_types()
+ {
+ self.report_cast_to_unsized_type(fcx);
+ } else if self.expr_ty.references_error() || self.cast_ty.references_error() {
+ // No sense in giving duplicate error messages
+ } else {
+ match self.try_coercion_cast(fcx) {
+ Ok(()) => {
+ self.trivial_cast_lint(fcx);
+ debug!(" -> CoercionCast");
+ fcx.typeck_results.borrow_mut().set_coercion_cast(self.expr.hir_id.local_id);
+ }
+ Err(ty::error::TypeError::ObjectUnsafeCoercion(did)) => {
+ self.report_object_unsafe_cast(&fcx, did);
+ }
+ Err(_) => {
+ match self.do_check(fcx) {
+ Ok(k) => {
+ debug!(" -> {:?}", k);
+ }
+ Err(e) => self.report_cast_error(fcx, e),
+ };
+ }
+ };
+ }
+ }
+
+ fn report_object_unsafe_cast(&self, fcx: &FnCtxt<'a, 'tcx>, did: DefId) {
+ let violations = fcx.tcx.object_safety_violations(did);
+ let mut err = report_object_safety_error(fcx.tcx, self.cast_span, did, violations);
+ err.note(&format!("required by cast to type '{}'", fcx.ty_to_string(self.cast_ty)));
+ err.emit();
+ }
+
+ /// Checks a cast, and reports an error if one exists. In some cases, this
+ /// can return `Ok` and create type errors in the fcx rather than returning
+ /// directly. Coercion-casts are handled in `check` instead of here.
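+ ///
+ /// An illustrative sketch (not exhaustive, editor examples) of how a few
+ /// casts are classified by the match below:
+ ///
+ /// ```ignore (illustrative)
+ /// b'a' as char;                  // u8 -> char: CastKind::U8CharCast
+ /// 1u32 as f64;                   // int -> float: CastKind::NumericCast
+ /// 0usize as *const u8;           // int -> thin ptr: CastKind::AddrPtrCast
+ /// (&0u8 as *const u8) as usize;  // thin ptr -> int: CastKind::PtrAddrCast
+ /// ```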
+ pub fn do_check(&self, fcx: &FnCtxt<'a, 'tcx>) -> Result<CastKind, CastError> {
+ use rustc_middle::ty::cast::CastTy::*;
+ use rustc_middle::ty::cast::IntTy::*;
+
+ let (t_from, t_cast) = match (CastTy::from_ty(self.expr_ty), CastTy::from_ty(self.cast_ty))
+ {
+ (Some(t_from), Some(t_cast)) => (t_from, t_cast),
+ // Function item types may need to be reified before casts.
+ (None, Some(t_cast)) => {
+ match *self.expr_ty.kind() {
+ ty::FnDef(..) => {
+ // Attempt a coercion to a fn pointer type.
+ let f = fcx.normalize_associated_types_in(
+ self.expr_span,
+ self.expr_ty.fn_sig(fcx.tcx),
+ );
+ let res = fcx.try_coerce(
+ self.expr,
+ self.expr_ty,
+ fcx.tcx.mk_fn_ptr(f),
+ AllowTwoPhase::No,
+ None,
+ );
+ if let Err(TypeError::IntrinsicCast) = res {
+ return Err(CastError::IllegalCast);
+ }
+ if res.is_err() {
+ return Err(CastError::NonScalar);
+ }
+ (FnPtr, t_cast)
+ }
+ // Special case some errors for references, and check for
+ // array-ptr-casts. `Ref` is not a CastTy because the cast
+ // is split into a coercion to a pointer type, followed by
+ // a cast.
+ ty::Ref(_, inner_ty, mutbl) => {
+ return match t_cast {
+ Int(_) | Float => match *inner_ty.kind() {
+ ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(_)) => {
+ Err(CastError::NeedDeref)
+ }
+ _ => Err(CastError::NeedViaPtr),
+ },
+ // array-ptr-cast
+ Ptr(mt) => {
+ self.check_ref_cast(fcx, TypeAndMut { mutbl, ty: inner_ty }, mt)
+ }
+ _ => Err(CastError::NonScalar),
+ };
+ }
+ _ => return Err(CastError::NonScalar),
+ }
+ }
+ _ => return Err(CastError::NonScalar),
+ };
+
+ if let ty::Adt(adt_def, _) = *self.expr_ty.kind() {
+ if adt_def.did().krate != LOCAL_CRATE {
+ if adt_def.variants().iter().any(VariantDef::is_field_list_non_exhaustive) {
+ return Err(CastError::ForeignNonExhaustiveAdt);
+ }
+ }
+ }
+
+ match (t_from, t_cast) {
+ // These types have invariants! can't cast into them.
+ (_, Int(CEnum) | FnPtr) => Err(CastError::NonScalar),
+
+ // * -> Bool
+ (_, Int(Bool)) => Err(CastError::CastToBool),
+
+ // * -> Char
+ (Int(U(ty::UintTy::U8)), Int(Char)) => Ok(CastKind::U8CharCast), // u8-char-cast
+ (_, Int(Char)) => Err(CastError::CastToChar),
+
+ // prim -> float,ptr
+ (Int(Bool) | Int(CEnum) | Int(Char), Float) => Err(CastError::NeedViaInt),
+
+ (Int(Bool) | Int(CEnum) | Int(Char) | Float, Ptr(_)) | (Ptr(_) | FnPtr, Float) => {
+ Err(CastError::IllegalCast)
+ }
+
+ // ptr -> *
+ (Ptr(m_e), Ptr(m_c)) => self.check_ptr_ptr_cast(fcx, m_e, m_c), // ptr-ptr-cast
+
+ // ptr-addr-cast
+ (Ptr(m_expr), Int(t_c)) => {
+ self.lossy_provenance_ptr2int_lint(fcx, t_c);
+ self.check_ptr_addr_cast(fcx, m_expr)
+ }
+ (FnPtr, Int(_)) => {
+ // FIXME(#95489): there should eventually be a lint for these casts
+ Ok(CastKind::FnPtrAddrCast)
+ }
+ // addr-ptr-cast
+ (Int(_), Ptr(mt)) => {
+ self.fuzzy_provenance_int2ptr_lint(fcx);
+ self.check_addr_ptr_cast(fcx, mt)
+ }
+ // fn-ptr-cast
+ (FnPtr, Ptr(mt)) => self.check_fptr_ptr_cast(fcx, mt),
+
+ // prim -> prim
+ (Int(CEnum), Int(_)) => {
+ self.cenum_impl_drop_lint(fcx);
+ Ok(CastKind::EnumCast)
+ }
+ (Int(Char) | Int(Bool), Int(_)) => Ok(CastKind::PrimIntCast),
+
+ (Int(_) | Float, Int(_) | Float) => Ok(CastKind::NumericCast),
+
+ (_, DynStar) | (DynStar, _) => {
+ if fcx.tcx.features().dyn_star {
+ bug!("should be handled by `try_coerce`")
+ } else {
+ Err(CastError::IllegalCast)
+ }
+ }
+ }
+ }
+
+ fn check_ptr_ptr_cast(
+ &self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ m_expr: ty::TypeAndMut<'tcx>,
+ m_cast: ty::TypeAndMut<'tcx>,
+ ) -> Result<CastKind, CastError> {
+ debug!("check_ptr_ptr_cast m_expr={:?} m_cast={:?}", m_expr, m_cast);
+ // ptr-ptr cast. vtables must match.
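+ // For illustration (not exhaustive), with the rules below:
+ // `*const u8 as *const i8` -- thin to thin: Ok(PtrPtrCast)
+ // `*const [u8] as *const u8` -- fat to thin: Ok(PtrPtrCast)
+ // `*const u8 as *const [u8]` -- thin to fat: Err(SizedUnsizedCast)
+ // `*const [u8] as *const dyn std::fmt::Debug` -- length vs. vtable: Err(DifferingKinds)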
+
+ let expr_kind = fcx.pointer_kind(m_expr.ty, self.span)?;
+ let cast_kind = fcx.pointer_kind(m_cast.ty, self.span)?;
+
+ let Some(cast_kind) = cast_kind else {
+ // We can't cast if target pointer kind is unknown
+ return Err(CastError::UnknownCastPtrKind);
+ };
+
+ // Cast to thin pointer is OK
+ if cast_kind == PointerKind::Thin {
+ return Ok(CastKind::PtrPtrCast);
+ }
+
+ let Some(expr_kind) = expr_kind else {
+ // We can't cast to fat pointer if source pointer kind is unknown
+ return Err(CastError::UnknownExprPtrKind);
+ };
+
+ // thin -> fat? report invalid cast (don't complain about vtable kinds)
+ if expr_kind == PointerKind::Thin {
+ return Err(CastError::SizedUnsizedCast);
+ }
+
+ // vtable kinds must match
+ if cast_kind == expr_kind {
+ Ok(CastKind::PtrPtrCast)
+ } else {
+ Err(CastError::DifferingKinds)
+ }
+ }
+
+ fn check_fptr_ptr_cast(
+ &self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ m_cast: ty::TypeAndMut<'tcx>,
+ ) -> Result<CastKind, CastError> {
+ // fptr-ptr cast. must be to thin ptr
+
+ match fcx.pointer_kind(m_cast.ty, self.span)? {
+ None => Err(CastError::UnknownCastPtrKind),
+ Some(PointerKind::Thin) => Ok(CastKind::FnPtrPtrCast),
+ _ => Err(CastError::IllegalCast),
+ }
+ }
+
+ fn check_ptr_addr_cast(
+ &self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ m_expr: ty::TypeAndMut<'tcx>,
+ ) -> Result<CastKind, CastError> {
+ // ptr-addr cast. must be from thin ptr
+
+ match fcx.pointer_kind(m_expr.ty, self.span)? {
+ None => Err(CastError::UnknownExprPtrKind),
+ Some(PointerKind::Thin) => Ok(CastKind::PtrAddrCast),
+ _ => Err(CastError::NeedViaThinPtr),
+ }
+ }
+
+ fn check_ref_cast(
+ &self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ m_expr: ty::TypeAndMut<'tcx>,
+ m_cast: ty::TypeAndMut<'tcx>,
+ ) -> Result<CastKind, CastError> {
+ // array-ptr-cast: allow mut-to-mut, mut-to-const, const-to-const
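+ // For illustration: `&mut [0u8; 4] as *mut u8` and `&[0u8; 4] as *const u8` are
+ // accepted here, while `&[0u8; 4] as *mut u8` (const-to-mut) falls through to
+ // `Err(CastError::IllegalCast)` below.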
+ if m_expr.mutbl == hir::Mutability::Mut || m_cast.mutbl == hir::Mutability::Not {
+ if let ty::Array(ety, _) = m_expr.ty.kind() {
+ // Due to the limitations of LLVM global constants,
+ // region pointers end up pointing at copies of
+ // vector elements instead of the original values.
+ // To allow raw pointers to work correctly, we
+ // need to special-case obtaining a raw pointer
+ // from a region pointer to a vector.
+
+ // Coerce to a raw pointer so that we generate AddressOf in MIR.
+ let array_ptr_type = fcx.tcx.mk_ptr(m_expr);
+ fcx.try_coerce(self.expr, self.expr_ty, array_ptr_type, AllowTwoPhase::No, None)
+ .unwrap_or_else(|_| {
+ bug!(
+ "could not cast from reference to array to pointer to array ({:?} to {:?})",
+ self.expr_ty,
+ array_ptr_type,
+ )
+ });
+
+ // this will report a type mismatch if needed
+ fcx.demand_eqtype(self.span, *ety, m_cast.ty);
+ return Ok(CastKind::ArrayPtrCast);
+ }
+ }
+
+ Err(CastError::IllegalCast)
+ }
+
+ fn check_addr_ptr_cast(
+ &self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ m_cast: TypeAndMut<'tcx>,
+ ) -> Result<CastKind, CastError> {
+ // addr-ptr cast. pointer must be thin.
+ match fcx.pointer_kind(m_cast.ty, self.span)? {
+ None => Err(CastError::UnknownCastPtrKind),
+ Some(PointerKind::Thin) => Ok(CastKind::AddrPtrCast),
+ Some(PointerKind::VTable(_)) => Err(CastError::IntToFatCast(Some("a vtable"))),
+ Some(PointerKind::Length) => Err(CastError::IntToFatCast(Some("a length"))),
+ Some(
+ PointerKind::OfProjection(_)
+ | PointerKind::OfOpaque(_, _)
+ | PointerKind::OfParam(_),
+ ) => Err(CastError::IntToFatCast(None)),
+ }
+ }
+
+ fn try_coercion_cast(&self, fcx: &FnCtxt<'a, 'tcx>) -> Result<(), ty::error::TypeError<'tcx>> {
+ match fcx.try_coerce(self.expr, self.expr_ty, self.cast_ty, AllowTwoPhase::No, None) {
+ Ok(_) => Ok(()),
+ Err(err) => Err(err),
+ }
+ }
+
+ fn cenum_impl_drop_lint(&self, fcx: &FnCtxt<'a, 'tcx>) {
+ if let ty::Adt(d, _) = self.expr_ty.kind()
+ && d.has_dtor(fcx.tcx)
+ {
+ fcx.tcx.struct_span_lint_hir(
+ lint::builtin::CENUM_IMPL_DROP_CAST,
+ self.expr.hir_id,
+ self.span,
+ DelayDm(|| format!(
+ "cannot cast enum `{}` into integer `{}` because it implements `Drop`",
+ self.expr_ty, self.cast_ty
+ )),
+ |lint| {
+ lint
+ },
+ );
+ }
+ }
+
+ fn lossy_provenance_ptr2int_lint(&self, fcx: &FnCtxt<'a, 'tcx>, t_c: ty::cast::IntTy) {
+ fcx.tcx.struct_span_lint_hir(
+ lint::builtin::LOSSY_PROVENANCE_CASTS,
+ self.expr.hir_id,
+ self.span,
+ DelayDm(|| format!(
+ "under strict provenance it is considered bad style to cast pointer `{}` to integer `{}`",
+ self.expr_ty, self.cast_ty
+ )),
+ |lint| {
+ let msg = "use `.addr()` to obtain the address of a pointer";
+
+ let expr_prec = self.expr.precedence().order();
+ let needs_parens = expr_prec < rustc_ast::util::parser::PREC_POSTFIX;
+
+ let scalar_cast = match t_c {
+ ty::cast::IntTy::U(ty::UintTy::Usize) => String::new(),
+ _ => format!(" as {}", self.cast_ty),
+ };
+
+ let cast_span = self.expr_span.shrink_to_hi().to(self.cast_span);
+
+ if needs_parens {
+ let suggestions = vec![
+ (self.expr_span.shrink_to_lo(), String::from("(")),
+ (cast_span, format!(").addr(){scalar_cast}")),
+ ];
+
+ lint.multipart_suggestion(msg, suggestions, Applicability::MaybeIncorrect);
+ } else {
+ lint.span_suggestion(
+ cast_span,
+ msg,
+ format!(".addr(){scalar_cast}"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ lint.help(
+ "if you can't comply with strict provenance and need to expose the pointer \
+ provenance you can use `.expose_addr()` instead"
+ );
+
+ lint
+ },
+ );
+ }
+
+ fn fuzzy_provenance_int2ptr_lint(&self, fcx: &FnCtxt<'a, 'tcx>) {
+ fcx.tcx.struct_span_lint_hir(
+ lint::builtin::FUZZY_PROVENANCE_CASTS,
+ self.expr.hir_id,
+ self.span,
+ DelayDm(|| format!(
+ "strict provenance disallows casting integer `{}` to pointer `{}`",
+ self.expr_ty, self.cast_ty
+ )),
+ |lint| {
+ let msg = "use `.with_addr()` to adjust a valid pointer in the same allocation, to this address";
+ let suggestions = vec![
+ (self.expr_span.shrink_to_lo(), String::from("(...).with_addr(")),
+ (self.expr_span.shrink_to_hi().to(self.cast_span), String::from(")")),
+ ];
+
+ lint.multipart_suggestion(msg, suggestions, Applicability::MaybeIncorrect);
+ lint.help(
+ "if you can't comply with strict provenance and don't have a pointer with \
+ the correct provenance you can use `std::ptr::from_exposed_addr()` instead"
+ );
+
+ lint
+ },
+ );
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/check.rs b/compiler/rustc_hir_typeck/src/check.rs
new file mode 100644
index 000000000..7f76364e1
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/check.rs
@@ -0,0 +1,324 @@
+use crate::coercion::CoerceMany;
+use crate::gather_locals::GatherLocalsVisitor;
+use crate::{FnCtxt, Inherited};
+use crate::{GeneratorTypes, UnsafetyState};
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{ImplicitSelfKind, ItemKind, Node};
+use rustc_hir_analysis::check::fn_maybe_err;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::RegionVariableOrigin;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::def_id::LocalDefId;
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits;
+use std::cell::RefCell;
+
+/// Helper used for fns and closures. Does the grungy work of checking a function
+/// body and returns the function context used for that purpose, since in the case of a fn item
+/// there is still a bit more to do.
+///
+/// * ...
+/// * inherited: other fields inherited from the enclosing fn (if any)
+#[instrument(skip(inherited, body), level = "debug")]
+pub(super) fn check_fn<'a, 'tcx>(
+ inherited: &'a Inherited<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ fn_sig: ty::FnSig<'tcx>,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ fn_id: hir::HirId,
+ body: &'tcx hir::Body<'tcx>,
+ can_be_generator: Option<hir::Movability>,
+ return_type_pre_known: bool,
+) -> (FnCtxt<'a, 'tcx>, Option<GeneratorTypes<'tcx>>) {
+ // Create the function context. This is either derived from scratch or,
+ // in the case of closures, based on the outer context.
+ let mut fcx = FnCtxt::new(inherited, param_env, body.value.hir_id);
+ fcx.ps.set(UnsafetyState::function(fn_sig.unsafety, fn_id));
+ fcx.return_type_pre_known = return_type_pre_known;
+
+ let tcx = fcx.tcx;
+ let hir = tcx.hir();
+
+ let declared_ret_ty = fn_sig.output();
+
+ let ret_ty =
+ fcx.register_infer_ok_obligations(fcx.infcx.replace_opaque_types_with_inference_vars(
+ declared_ret_ty,
+ body.value.hir_id,
+ decl.output.span(),
+ param_env,
+ ));
+ // If we replaced declared_ret_ty with infer vars, then we must be inferring
+ // an opaque type, so set a flag so we can improve diagnostics.
+ fcx.return_type_has_opaque = ret_ty != declared_ret_ty;
+
+ fcx.ret_coercion = Some(RefCell::new(CoerceMany::new(ret_ty)));
+
+ let span = body.value.span;
+
+ fn_maybe_err(tcx, span, fn_sig.abi);
+
+ if fn_sig.abi == Abi::RustCall {
+ let expected_args = if let ImplicitSelfKind::None = decl.implicit_self { 1 } else { 2 };
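+ // For illustration: a method like `extern "rust-call" fn call(self, args: (u32, u32))`
+ // has an implicit `self`, so two inputs are expected and the last must be a tuple;
+ // a free `extern "rust-call" fn f(args: (u32,))` expects a single tuple input.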
+
+ let err = || {
+ let item = match tcx.hir().get(fn_id) {
+ Node::Item(hir::Item { kind: ItemKind::Fn(header, ..), .. }) => Some(header),
+ Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(header, ..), ..
+ }) => Some(header),
+ Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(header, ..),
+ ..
+ }) => Some(header),
+ // Closures are RustCall, but they tuple their arguments, so shouldn't be checked
+ Node::Expr(hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => None,
+ node => bug!("Item being checked wasn't a function/closure: {:?}", node),
+ };
+
+ if let Some(header) = item {
+ tcx.sess.span_err(header.span, "functions with the \"rust-call\" ABI must take a single non-self argument that is a tuple");
+ }
+ };
+
+ if fn_sig.inputs().len() != expected_args {
+ err()
+ } else {
+ // FIXME(CraftSpider) Add a check on parameter expansion, so we don't just make the ICE happen later on
+ // This will probably require wide-scale changes to support a TupleKind obligation
+ // We can't resolve this without knowing the type of the param
+ if !matches!(fn_sig.inputs()[expected_args - 1].kind(), ty::Tuple(_) | ty::Param(_)) {
+ err()
+ }
+ }
+ }
+
+ if body.generator_kind.is_some() && can_be_generator.is_some() {
+ let yield_ty = fcx
+ .next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span });
+ fcx.require_type_is_sized(yield_ty, span, traits::SizedYieldType);
+
+ // Resume type defaults to `()` if the generator has no argument.
+ let resume_ty = fn_sig.inputs().get(0).copied().unwrap_or_else(|| tcx.mk_unit());
+
+ fcx.resume_yield_tys = Some((resume_ty, yield_ty));
+ }
+
+ GatherLocalsVisitor::new(&fcx).visit_body(body);
+
+ // C-variadic fns also have a `VaList` input that's not listed in `fn_sig`
+ // (as it's created inside the body itself, not passed in from outside).
+ let maybe_va_list = if fn_sig.c_variadic {
+ let span = body.params.last().unwrap().span;
+ let va_list_did = tcx.require_lang_item(LangItem::VaList, Some(span));
+ let region = fcx.next_region_var(RegionVariableOrigin::MiscVariable(span));
+
+ Some(tcx.bound_type_of(va_list_did).subst(tcx, &[region.into()]))
+ } else {
+ None
+ };
+
+ // Add formal parameters.
+ let inputs_hir = hir.fn_decl_by_hir_id(fn_id).map(|decl| &decl.inputs);
+ let inputs_fn = fn_sig.inputs().iter().copied();
+ for (idx, (param_ty, param)) in inputs_fn.chain(maybe_va_list).zip(body.params).enumerate() {
+ // Check the pattern.
+ let ty_span = try { inputs_hir?.get(idx)?.span };
+ fcx.check_pat_top(&param.pat, param_ty, ty_span, false);
+
+ // Check that argument is Sized.
+ // The check for a non-trivial pattern is a hack to avoid duplicate warnings
+ // for simple cases like `fn foo(x: Trait)`,
+ // where we would error once on the parameter as a whole, and once on the binding `x`.
+ if param.pat.simple_ident().is_none() && !tcx.features().unsized_fn_params {
+ fcx.require_type_is_sized(param_ty, param.pat.span, traits::SizedArgumentType(ty_span));
+ }
+
+ fcx.write_ty(param.hir_id, param_ty);
+ }
+
+ inherited.typeck_results.borrow_mut().liberated_fn_sigs_mut().insert(fn_id, fn_sig);
+
+ fcx.in_tail_expr = true;
+ if let ty::Dynamic(..) = declared_ret_ty.kind() {
+ // FIXME: We need to verify that the return type is `Sized` after the return expression has
+ // been evaluated so that we have types available for all the nodes being returned, but that
+ // requires the coerced evaluated type to be stored. Moving `check_return_expr` before this
+ // causes unsized errors caused by the `declared_ret_ty` to point at the return expression,
+ // while keeping the current ordering we will ignore the tail expression's type because we
+ // don't know it yet. We can't do `check_expr_kind` while keeping `check_return_expr`
+ // because we will trigger "unreachable expression" lints unconditionally.
+ // Because of all of this, we perform a crude check for the simplest `!Sized`
+ // case that a newcomer might hit, returning a bare trait, and in that case we populate
+ // the tail expression's type so that the suggestion will be correct, but ignore all other
+ // possible cases.
+ fcx.check_expr(&body.value);
+ fcx.require_type_is_sized(declared_ret_ty, decl.output.span(), traits::SizedReturnType);
+ } else {
+ fcx.require_type_is_sized(declared_ret_ty, decl.output.span(), traits::SizedReturnType);
+ fcx.check_return_expr(&body.value, false);
+ }
+ fcx.in_tail_expr = false;
+
+ // We insert the deferred_generator_interiors entry after visiting the body.
+ // This ensures that all nested generators appear before the entry of this generator.
+ // resolve_generator_interiors relies on this property.
+ let gen_ty = if let (Some(_), Some(gen_kind)) = (can_be_generator, body.generator_kind) {
+ let interior = fcx
+ .next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span });
+ fcx.deferred_generator_interiors.borrow_mut().push((body.id(), interior, gen_kind));
+
+ let (resume_ty, yield_ty) = fcx.resume_yield_tys.unwrap();
+ Some(GeneratorTypes {
+ resume_ty,
+ yield_ty,
+ interior,
+ movability: can_be_generator.unwrap(),
+ })
+ } else {
+ None
+ };
+
+ // Finalize the return check by taking the LUB of the return types
+ // we saw and assigning it to the expected return type. This isn't
+ // really expected to fail, since the coercions would have failed
+ // earlier when trying to find a LUB.
+ let coercion = fcx.ret_coercion.take().unwrap().into_inner();
+ let mut actual_return_ty = coercion.complete(&fcx);
+ debug!("actual_return_ty = {:?}", actual_return_ty);
+ if let ty::Dynamic(..) = declared_ret_ty.kind() {
+ // We have special-cased the case where the function is declared
+ // `-> dyn Foo` and we don't actually relate it to the
+ // `fcx.ret_coercion`, so just substitute a type variable.
+ actual_return_ty =
+ fcx.next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::DynReturnFn, span });
+ debug!("actual_return_ty replaced with {:?}", actual_return_ty);
+ }
+
+ // HACK(oli-obk, compiler-errors): We should be comparing this against
+ // `declared_ret_ty`, but then anything uninferred would be inferred to
+ // the opaque type itself. That again would cause writeback to assume
+ // we have a recursive call site and do the sadly stabilized fallback to `()`.
+ fcx.demand_suptype(span, ret_ty, actual_return_ty);
+
+ // Check that a function marked as `#[panic_handler]` has signature `fn(&PanicInfo) -> !`
+ if let Some(panic_impl_did) = tcx.lang_items().panic_impl()
+ && panic_impl_did == hir.local_def_id(fn_id).to_def_id()
+ {
+ check_panic_info_fn(tcx, panic_impl_did.expect_local(), fn_sig, decl, declared_ret_ty);
+ }
+
+ // Check that a function marked as `#[alloc_error_handler]` has signature `fn(Layout) -> !`
+ if let Some(alloc_error_handler_did) = tcx.lang_items().oom()
+ && alloc_error_handler_did == hir.local_def_id(fn_id).to_def_id()
+ {
+ check_alloc_error_fn(tcx, alloc_error_handler_did.expect_local(), fn_sig, decl, declared_ret_ty);
+ }
+
+ (fcx, gen_ty)
+}
+
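+/// A minimal illustrative sketch of the shape this check accepts:
+///
+/// ```ignore (illustrative)
+/// #[panic_handler]
+/// fn panic(info: &core::panic::PanicInfo<'_>) -> ! { loop {} }
+/// ```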
+fn check_panic_info_fn(
+ tcx: TyCtxt<'_>,
+ fn_id: LocalDefId,
+ fn_sig: ty::FnSig<'_>,
+ decl: &hir::FnDecl<'_>,
+ declared_ret_ty: Ty<'_>,
+) {
+ let Some(panic_info_did) = tcx.lang_items().panic_info() else {
+ tcx.sess.err("language item required, but not found: `panic_info`");
+ return;
+ };
+
+ if *declared_ret_ty.kind() != ty::Never {
+ tcx.sess.span_err(decl.output.span(), "return type should be `!`");
+ }
+
+ let inputs = fn_sig.inputs();
+ if inputs.len() != 1 {
+ tcx.sess.span_err(tcx.def_span(fn_id), "function should have one argument");
+ return;
+ }
+
+ let arg_is_panic_info = match *inputs[0].kind() {
+ ty::Ref(region, ty, mutbl) => match *ty.kind() {
+ ty::Adt(ref adt, _) => {
+ adt.did() == panic_info_did && mutbl == hir::Mutability::Not && !region.is_static()
+ }
+ _ => false,
+ },
+ _ => false,
+ };
+
+ if !arg_is_panic_info {
+ tcx.sess.span_err(decl.inputs[0].span, "argument should be `&PanicInfo`");
+ }
+
+ let DefKind::Fn = tcx.def_kind(fn_id) else {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "should be a function");
+ return;
+ };
+
+ let generic_counts = tcx.generics_of(fn_id).own_counts();
+ if generic_counts.types != 0 {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "should have no type parameters");
+ }
+ if generic_counts.consts != 0 {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "should have no const parameters");
+ }
+}
+
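+/// A minimal illustrative sketch of the signature this check expects:
+///
+/// ```ignore (illustrative)
+/// #[alloc_error_handler]
+/// fn handle_alloc_error(layout: core::alloc::Layout) -> ! { loop {} }
+/// ```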
+fn check_alloc_error_fn(
+ tcx: TyCtxt<'_>,
+ fn_id: LocalDefId,
+ fn_sig: ty::FnSig<'_>,
+ decl: &hir::FnDecl<'_>,
+ declared_ret_ty: Ty<'_>,
+) {
+ let Some(alloc_layout_did) = tcx.lang_items().alloc_layout() else {
+ tcx.sess.err("language item required, but not found: `alloc_layout`");
+ return;
+ };
+
+ if *declared_ret_ty.kind() != ty::Never {
+ tcx.sess.span_err(decl.output.span(), "return type should be `!`");
+ }
+
+ let inputs = fn_sig.inputs();
+ if inputs.len() != 1 {
+ tcx.sess.span_err(tcx.def_span(fn_id), "function should have one argument");
+ return;
+ }
+
+ let arg_is_alloc_layout = match inputs[0].kind() {
+ ty::Adt(ref adt, _) => adt.did() == alloc_layout_did,
+ _ => false,
+ };
+
+ if !arg_is_alloc_layout {
+ tcx.sess.span_err(decl.inputs[0].span, "argument should be `Layout`");
+ }
+
+ let DefKind::Fn = tcx.def_kind(fn_id) else {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "`#[alloc_error_handler]` should be a function");
+ return;
+ };
+
+ let generic_counts = tcx.generics_of(fn_id).own_counts();
+ if generic_counts.types != 0 {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "`#[alloc_error_handler]` function should have no type parameters");
+ }
+ if generic_counts.consts != 0 {
+ let span = tcx.def_span(fn_id);
+ tcx.sess
+ .span_err(span, "`#[alloc_error_handler]` function should have no const parameters");
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/closure.rs b/compiler/rustc_hir_typeck/src/closure.rs
new file mode 100644
index 000000000..a5a45f75e
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/closure.rs
@@ -0,0 +1,824 @@
+//! Code for type-checking closure expressions.
+
+use super::{check_fn, Expectation, FnCtxt, GeneratorTypes};
+
+use hir::def::DefKind;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir_analysis::astconv::AstConv;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::LateBoundRegionConversionTime;
+use rustc_infer::infer::{InferOk, InferResult};
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::source_map::Span;
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits::error_reporting::ArgKind;
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
+use std::cmp;
+use std::iter;
+
+/// What signature do we *expect* the closure to have from context?
+#[derive(Debug)]
+struct ExpectedSig<'tcx> {
+ /// Span that gave us this expectation, if we know that.
+ cause_span: Option<Span>,
+ sig: ty::PolyFnSig<'tcx>,
+}
+
+struct ClosureSignatures<'tcx> {
+ /// The signature users of the closure see.
+ bound_sig: ty::PolyFnSig<'tcx>,
+ /// The signature within the function body.
+ /// This mostly differs in the sense that lifetimes are now early bound and any
+ /// opaque types from the signature expectation are overridden in case there are
+ /// explicit hidden types written by the user in the closure signature.
+ liberated_sig: ty::FnSig<'tcx>,
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ #[instrument(skip(self, expr, _capture, decl, body_id), level = "debug")]
+ pub fn check_expr_closure(
+ &self,
+ expr: &hir::Expr<'_>,
+ _capture: hir::CaptureBy,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ body_id: hir::BodyId,
+ gen: Option<hir::Movability>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ trace!("decl = {:#?}", decl);
+ trace!("expr = {:#?}", expr);
+
+ // It's always helpful for inference if we know the kind of
+ // closure sooner rather than later, so first examine the expected
+ // type, and see if we can glean a closure kind from there.
+ let (expected_sig, expected_kind) = match expected.to_option(self) {
+ Some(ty) => self.deduce_expectations_from_expected_type(ty),
+ None => (None, None),
+ };
+ let body = self.tcx.hir().body(body_id);
+ self.check_closure(expr, expected_kind, decl, body, gen, expected_sig)
+ }
+
+ #[instrument(skip(self, expr, body, decl), level = "debug", ret)]
+ fn check_closure(
+ &self,
+ expr: &hir::Expr<'_>,
+ opt_kind: Option<ty::ClosureKind>,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ body: &'tcx hir::Body<'tcx>,
+ gen: Option<hir::Movability>,
+ expected_sig: Option<ExpectedSig<'tcx>>,
+ ) -> Ty<'tcx> {
+ trace!("decl = {:#?}", decl);
+ let expr_def_id = self.tcx.hir().local_def_id(expr.hir_id);
+ debug!(?expr_def_id);
+
+ let ClosureSignatures { bound_sig, liberated_sig } =
+ self.sig_of_closure(expr.hir_id, expr_def_id.to_def_id(), decl, body, expected_sig);
+
+ debug!(?bound_sig, ?liberated_sig);
+
+ let return_type_pre_known = !liberated_sig.output().is_ty_infer();
+
+ let generator_types = check_fn(
+ self,
+ self.param_env.without_const(),
+ liberated_sig,
+ decl,
+ expr.hir_id,
+ body,
+ gen,
+ return_type_pre_known,
+ )
+ .1;
+
+ let parent_substs = InternalSubsts::identity_for_item(
+ self.tcx,
+ self.tcx.typeck_root_def_id(expr_def_id.to_def_id()),
+ );
+
+ let tupled_upvars_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::ClosureSynthetic,
+ span: self.tcx.hir().span(expr.hir_id),
+ });
+
+ if let Some(GeneratorTypes { resume_ty, yield_ty, interior, movability }) = generator_types
+ {
+ let generator_substs = ty::GeneratorSubsts::new(
+ self.tcx,
+ ty::GeneratorSubstsParts {
+ parent_substs,
+ resume_ty,
+ yield_ty,
+ return_ty: liberated_sig.output(),
+ witness: interior,
+ tupled_upvars_ty,
+ },
+ );
+
+ return self.tcx.mk_generator(
+ expr_def_id.to_def_id(),
+ generator_substs.substs,
+ movability,
+ );
+ }
+
+ // Tuple up the arguments and insert the resulting function type into
+ // the `closures` table.
+ let sig = bound_sig.map_bound(|sig| {
+ self.tcx.mk_fn_sig(
+ iter::once(self.tcx.intern_tup(sig.inputs())),
+ sig.output(),
+ sig.c_variadic,
+ sig.unsafety,
+ sig.abi,
+ )
+ });
+
+ debug!(?sig, ?opt_kind);
+
+ let closure_kind_ty = match opt_kind {
+ Some(kind) => kind.to_ty(self.tcx),
+
+ // Create a type variable (for now) to represent the closure kind.
+ // It will be unified during the upvar inference phase (`upvar.rs`)
+ None => self.next_ty_var(TypeVariableOrigin {
+ // FIXME(eddyb) distinguish closure kind inference variables from the rest.
+ kind: TypeVariableOriginKind::ClosureSynthetic,
+ span: expr.span,
+ }),
+ };
+
+ let closure_substs = ty::ClosureSubsts::new(
+ self.tcx,
+ ty::ClosureSubstsParts {
+ parent_substs,
+ closure_kind_ty,
+ closure_sig_as_fn_ptr_ty: self.tcx.mk_fn_ptr(sig),
+ tupled_upvars_ty,
+ },
+ );
+
+ self.tcx.mk_closure(expr_def_id.to_def_id(), closure_substs.substs)
+ }
+
+ /// Given the expected type, figures out what it can about this closure we
+ /// are about to type check:
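+ /// for example (illustrative), an expected type of `impl Fn(u32) -> bool`
+ /// yields an expected signature of `fn(u32) -> bool` and an expected kind of
+ /// `ClosureKind::Fn`.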
+ #[instrument(skip(self), level = "debug")]
+ fn deduce_expectations_from_expected_type(
+ &self,
+ expected_ty: Ty<'tcx>,
+ ) -> (Option<ExpectedSig<'tcx>>, Option<ty::ClosureKind>) {
+ match *expected_ty.kind() {
+ ty::Opaque(def_id, substs) => {
+ let bounds = self.tcx.bound_explicit_item_bounds(def_id);
+ let sig =
+ bounds.subst_iter_copied(self.tcx, substs).find_map(|(pred, span)| match pred
+ .kind()
+ .skip_binder()
+ {
+ ty::PredicateKind::Projection(proj_predicate) => self
+ .deduce_sig_from_projection(
+ Some(span),
+ pred.kind().rebind(proj_predicate),
+ ),
+ _ => None,
+ });
+
+ let kind = bounds
+ .0
+ .iter()
+ .filter_map(|(pred, _)| match pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(tp) => {
+ self.tcx.fn_trait_kind_from_lang_item(tp.def_id())
+ }
+ _ => None,
+ })
+ .fold(None, |best, cur| Some(best.map_or(cur, |best| cmp::min(best, cur))));
+ trace!(?sig, ?kind);
+ (sig, kind)
+ }
+ ty::Dynamic(ref object_type, ..) => {
+ let sig = object_type.projection_bounds().find_map(|pb| {
+ let pb = pb.with_self_ty(self.tcx, self.tcx.types.trait_object_dummy_self);
+ self.deduce_sig_from_projection(None, pb)
+ });
+ let kind = object_type
+ .principal_def_id()
+ .and_then(|did| self.tcx.fn_trait_kind_from_lang_item(did));
+ (sig, kind)
+ }
+ ty::Infer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid),
+ ty::FnPtr(sig) => {
+ let expected_sig = ExpectedSig { cause_span: None, sig };
+ (Some(expected_sig), Some(ty::ClosureKind::Fn))
+ }
+ _ => (None, None),
+ }
+ }
+
+ fn deduce_expectations_from_obligations(
+ &self,
+ expected_vid: ty::TyVid,
+ ) -> (Option<ExpectedSig<'tcx>>, Option<ty::ClosureKind>) {
+ let expected_sig =
+ self.obligations_for_self_ty(expected_vid).find_map(|(_, obligation)| {
+ debug!(?obligation.predicate);
+
+ let bound_predicate = obligation.predicate.kind();
+ if let ty::PredicateKind::Projection(proj_predicate) =
+ obligation.predicate.kind().skip_binder()
+ {
+ // Given a Projection predicate, we can potentially infer
+ // the complete signature.
+ self.deduce_sig_from_projection(
+ Some(obligation.cause.span),
+ bound_predicate.rebind(proj_predicate),
+ )
+ } else {
+ None
+ }
+ });
+
+ // Even if we can't infer the full signature, we may be able to
+ // infer the kind. This can occur when we elaborate a predicate
+ // like `F : Fn<A>`. Note that due to subtyping we could encounter
+ // many viable options, so pick the most restrictive.
+ let expected_kind = self
+ .obligations_for_self_ty(expected_vid)
+ .filter_map(|(tr, _)| self.tcx.fn_trait_kind_from_lang_item(tr.def_id()))
+ .fold(None, |best, cur| Some(best.map_or(cur, |best| cmp::min(best, cur))));
+
+ (expected_sig, expected_kind)
+ }
+
+ /// Given a projection like "<F as Fn(X)>::Output == Y", we can deduce
+ /// everything we need to know about a closure or generator.
+ ///
+ /// The `cause_span` should be the span that caused us to
+ /// have this expected signature, or `None` if we can't readily
+ /// know that.
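+ ///
+ /// For example (illustrative), a projection predicate
+ /// `<F as FnOnce<(u32,)>>::Output == bool` yields the signature
+ /// `fn(u32) -> bool`.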
+ #[instrument(level = "debug", skip(self, cause_span), ret)]
+ fn deduce_sig_from_projection(
+ &self,
+ cause_span: Option<Span>,
+ projection: ty::PolyProjectionPredicate<'tcx>,
+ ) -> Option<ExpectedSig<'tcx>> {
+ let tcx = self.tcx;
+
+ let trait_def_id = projection.trait_def_id(tcx);
+
+ let is_fn = tcx.fn_trait_kind_from_lang_item(trait_def_id).is_some();
+ let gen_trait = tcx.require_lang_item(LangItem::Generator, cause_span);
+ let is_gen = gen_trait == trait_def_id;
+ if !is_fn && !is_gen {
+ debug!("not fn or generator");
+ return None;
+ }
+
+ if is_gen {
+ // Check that we deduce the signature from the `<_ as std::ops::Generator>::Return`
+ // associated item and not yield.
+ let return_assoc_item = self.tcx.associated_item_def_ids(gen_trait)[1];
+ if return_assoc_item != projection.projection_def_id() {
+ debug!("not return assoc item of generator");
+ return None;
+ }
+ }
+
+ let input_tys = if is_fn {
+ let arg_param_ty = projection.skip_binder().projection_ty.substs.type_at(1);
+ let arg_param_ty = self.resolve_vars_if_possible(arg_param_ty);
+ debug!(?arg_param_ty);
+
+ match arg_param_ty.kind() {
+ &ty::Tuple(tys) => tys,
+ _ => return None,
+ }
+ } else {
+ // Generators with a `()` resume type may be defined with 0 or 1 explicit arguments,
+ // else they must have exactly 1 argument. For now though, just give up in this case.
+ return None;
+ };
+
+ // Since this is a return parameter type it is safe to unwrap.
+ let ret_param_ty = projection.skip_binder().term.ty().unwrap();
+ let ret_param_ty = self.resolve_vars_if_possible(ret_param_ty);
+ debug!(?ret_param_ty);
+
+ let sig = projection.rebind(self.tcx.mk_fn_sig(
+ input_tys.iter(),
+ ret_param_ty,
+ false,
+ hir::Unsafety::Normal,
+ Abi::Rust,
+ ));
+
+ Some(ExpectedSig { cause_span, sig })
+ }
+
+ fn sig_of_closure(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ expected_sig: Option<ExpectedSig<'tcx>>,
+ ) -> ClosureSignatures<'tcx> {
+ if let Some(e) = expected_sig {
+ self.sig_of_closure_with_expectation(hir_id, expr_def_id, decl, body, e)
+ } else {
+ self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body)
+ }
+ }
+
+ /// If there is no expected signature, then we will convert the
+ /// types that the user gave into a signature.
+ #[instrument(skip(self, hir_id, expr_def_id, decl, body), level = "debug")]
+ fn sig_of_closure_no_expectation(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ ) -> ClosureSignatures<'tcx> {
+ let bound_sig = self.supplied_sig_of_closure(hir_id, expr_def_id, decl, body);
+
+ self.closure_sigs(expr_def_id, body, bound_sig)
+ }
+
+ /// Invoked to compute the signature of a closure expression. This
+ /// combines any user-provided type annotations (e.g., `|x: u32|
+ /// -> u32 { .. }`) with the expected signature.
+ ///
+ /// The approach is as follows:
+ ///
+ /// - Let `S` be the (higher-ranked) signature that we derive from the user's annotations.
+ /// - Let `E` be the (higher-ranked) signature that we derive from the expectations, if any.
+ /// - If we have no expectation `E`, then the signature of the closure is `S`.
+ /// - Otherwise, the signature of the closure is `E`. Moreover:
+ /// - Skolemize the late-bound regions in `E`, yielding `E'`.
+ /// - Instantiate all the late-bound regions bound in the closure within `S`
+ /// with fresh (existential) variables, yielding `S'`
+ /// - Require that `E' = S'`
+ /// - We could use some kind of subtyping relationship here,
+ /// I imagine, but equality is easier and works fine for
+ /// our purposes.
+ ///
+ /// The key intuition here is that the user's types must be valid
+ /// from "the inside" of the closure, but the expectation
+ /// ultimately drives the overall signature.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore (illustrative)
+ /// fn with_closure<F>(_: F)
+ /// where F: Fn(&u32) -> &u32 { .. }
+ ///
+ /// with_closure(|x: &u32| { ... })
+ /// ```
+ ///
+ /// Here:
+ /// - E would be `fn(&u32) -> &u32`.
+ /// - S would be `fn(&u32) -> ?T`, with the return type left to be inferred.
+ /// - E' is `&'!0 u32 -> &'!0 u32`
+ /// - S' is `&'?0 u32 -> ?T`
+ ///
+ /// S' can be unified with E' with `['?0 = '!0, ?T = &'!0 u32]`.
+ ///
+ /// # Arguments
+ ///
+ /// - `expr_def_id`: the `DefId` of the closure expression
+ /// - `decl`: the HIR declaration of the closure
+ /// - `body`: the body of the closure
+ /// - `expected_sig`: the expected signature (if any). Note that
+ /// this is missing a binder: that is, there may be late-bound
+ /// regions with depth 1, which are bound then by the closure.
+ #[instrument(skip(self, hir_id, expr_def_id, decl, body), level = "debug")]
+ fn sig_of_closure_with_expectation(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ expected_sig: ExpectedSig<'tcx>,
+ ) -> ClosureSignatures<'tcx> {
+ // Watch out for some surprises and just ignore the
+ // expectation if things don't seem to match up with what we
+ // expect.
+ if expected_sig.sig.c_variadic() != decl.c_variadic {
+ return self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body);
+ } else if expected_sig.sig.skip_binder().inputs_and_output.len() != decl.inputs.len() + 1 {
+ return self.sig_of_closure_with_mismatched_number_of_arguments(
+ expr_def_id,
+ decl,
+ body,
+ expected_sig,
+ );
+ }
+
+ // Create a `PolyFnSig`. Note the oddity that late bound
+ // regions appearing free in `expected_sig` are now bound up
+ // in this binder we are creating.
+ assert!(!expected_sig.sig.skip_binder().has_vars_bound_above(ty::INNERMOST));
+ let bound_sig = expected_sig.sig.map_bound(|sig| {
+ self.tcx.mk_fn_sig(
+ sig.inputs().iter().cloned(),
+ sig.output(),
+ sig.c_variadic,
+ hir::Unsafety::Normal,
+ Abi::RustCall,
+ )
+ });
+
+ // `deduce_expectations_from_expected_type` introduces
+ // late-bound lifetimes defined elsewhere, which we now
+ // anonymize away, so as not to confuse the user.
+ let bound_sig = self.tcx.anonymize_late_bound_regions(bound_sig);
+
+ let closure_sigs = self.closure_sigs(expr_def_id, body, bound_sig);
+
+ // Up till this point, we have ignored the annotations that the user
+ // gave. This function will check that they unify successfully.
+ // Along the way, it also writes out entries for types that the user
+ // wrote into our typeck results, which are then later used by the privacy
+ // check.
+ match self.merge_supplied_sig_with_expectation(
+ hir_id,
+ expr_def_id,
+ decl,
+ body,
+ closure_sigs,
+ ) {
+ Ok(infer_ok) => self.register_infer_ok_obligations(infer_ok),
+ Err(_) => self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body),
+ }
+ }
+
+ fn sig_of_closure_with_mismatched_number_of_arguments(
+ &self,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ expected_sig: ExpectedSig<'tcx>,
+ ) -> ClosureSignatures<'tcx> {
+ let hir = self.tcx.hir();
+ let expr_map_node = hir.get_if_local(expr_def_id).unwrap();
+ let expected_args: Vec<_> = expected_sig
+ .sig
+ .skip_binder()
+ .inputs()
+ .iter()
+ .map(|ty| ArgKind::from_expected_ty(*ty, None))
+ .collect();
+ let (closure_span, found_args) = match self.get_fn_like_arguments(expr_map_node) {
+ Some((sp, args)) => (Some(sp), args),
+ None => (None, Vec::new()),
+ };
+ let expected_span =
+ expected_sig.cause_span.unwrap_or_else(|| hir.span_if_local(expr_def_id).unwrap());
+ self.report_arg_count_mismatch(
+ expected_span,
+ closure_span,
+ expected_args,
+ found_args,
+ true,
+ )
+ .emit();
+
+ let error_sig = self.error_sig_of_closure(decl);
+
+ self.closure_sigs(expr_def_id, body, error_sig)
+ }
+
+ /// Enforce the user's types against the expectation. See
+ /// `sig_of_closure_with_expectation` for details on the overall
+ /// strategy.
+ #[instrument(level = "debug", skip(self, hir_id, expr_def_id, decl, body, expected_sigs))]
+ fn merge_supplied_sig_with_expectation(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ mut expected_sigs: ClosureSignatures<'tcx>,
+ ) -> InferResult<'tcx, ClosureSignatures<'tcx>> {
+ // Get the signature S that the user gave.
+ //
+ // (See comment on `sig_of_closure_with_expectation` for the
+ // meaning of these letters.)
+ let supplied_sig = self.supplied_sig_of_closure(hir_id, expr_def_id, decl, body);
+
+ debug!(?supplied_sig);
+
+ // FIXME(#45727): As discussed in [this comment][c1], naively
+ // forcing equality here actually results in suboptimal error
+ // messages in some cases. For now, if there would have been
+ // an obvious error, we fall back to declaring the type of the
+ // closure to be the one the user gave, which allows other
+ // error message code to trigger.
+ //
+ // However, I think [there is potential to do even better
+ // here][c2], since in *this* code we have the precise span of
+ // the type parameter in question in hand when we report the
+ // error.
+ //
+ // [c1]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341089706
+ // [c2]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341096796
+ self.commit_if_ok(|_| {
+ let mut all_obligations = vec![];
+ let inputs: Vec<_> = iter::zip(
+ decl.inputs,
+ supplied_sig.inputs().skip_binder(), // binder moved to (*) below
+ )
+ .map(|(hir_ty, &supplied_ty)| {
+ // Instantiate (this part of..) S to S', i.e., with fresh variables.
+ self.replace_bound_vars_with_fresh_vars(
+ hir_ty.span,
+ LateBoundRegionConversionTime::FnCall,
+ // (*) binder moved to here
+ supplied_sig.inputs().rebind(supplied_ty),
+ )
+ })
+ .collect();
+
+ // The liberated version of this signature should be a subtype
+ // of the liberated form of the expectation.
+ for ((hir_ty, &supplied_ty), expected_ty) in iter::zip(
+ iter::zip(decl.inputs, &inputs),
+ expected_sigs.liberated_sig.inputs(), // `liberated_sig` is E'.
+ ) {
+ // Check that E' = S'.
+ let cause = self.misc(hir_ty.span);
+ let InferOk { value: (), obligations } =
+ self.at(&cause, self.param_env).eq(*expected_ty, supplied_ty)?;
+ all_obligations.extend(obligations);
+ }
+
+ let supplied_output_ty = self.replace_bound_vars_with_fresh_vars(
+ decl.output.span(),
+ LateBoundRegionConversionTime::FnCall,
+ supplied_sig.output(),
+ );
+ let cause = &self.misc(decl.output.span());
+ let InferOk { value: (), obligations } = self
+ .at(cause, self.param_env)
+ .eq(expected_sigs.liberated_sig.output(), supplied_output_ty)?;
+ all_obligations.extend(obligations);
+
+ let inputs = inputs.into_iter().map(|ty| self.resolve_vars_if_possible(ty));
+
+ expected_sigs.liberated_sig = self.tcx.mk_fn_sig(
+ inputs,
+ supplied_output_ty,
+ expected_sigs.liberated_sig.c_variadic,
+ hir::Unsafety::Normal,
+ Abi::RustCall,
+ );
+
+ Ok(InferOk { value: expected_sigs, obligations: all_obligations })
+ })
+ }
+
+ /// If there is no expected signature, then we will convert the
+ /// types that the user gave into a signature.
+ ///
+ /// Also, record this closure signature for later.
+ #[instrument(skip(self, decl, body), level = "debug", ret)]
+ fn supplied_sig_of_closure(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ ) -> ty::PolyFnSig<'tcx> {
+ let astconv: &dyn AstConv<'_> = self;
+
+ trace!("decl = {:#?}", decl);
+ debug!(?body.generator_kind);
+
+ let bound_vars = self.tcx.late_bound_vars(hir_id);
+
+ // First, convert the types that the user supplied (if any).
+ let supplied_arguments = decl.inputs.iter().map(|a| astconv.ast_ty_to_ty(a));
+ let supplied_return = match decl.output {
+ hir::FnRetTy::Return(ref output) => astconv.ast_ty_to_ty(&output),
+ hir::FnRetTy::DefaultReturn(_) => match body.generator_kind {
+ // In the case of the async block that we create for a function body,
+ // we expect the return type of the block to match that of the enclosing
+ // function.
+ Some(hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Fn)) => {
+ debug!("closure is async fn body");
+ self.deduce_future_output_from_obligations(expr_def_id, body.id().hir_id)
+ .unwrap_or_else(|| {
+ // AFAIK, deducing the future output
+ // always succeeds *except* in error cases
+ // like #65159. I'd like to return Error
+ // here, but I can't because I can't
+ // easily (and locally) prove that we
+ // *have* reported an
+ // error. --nikomatsakis
+ astconv.ty_infer(None, decl.output.span())
+ })
+ }
+
+ _ => astconv.ty_infer(None, decl.output.span()),
+ },
+ };
+
+ let result = ty::Binder::bind_with_vars(
+ self.tcx.mk_fn_sig(
+ supplied_arguments,
+ supplied_return,
+ decl.c_variadic,
+ hir::Unsafety::Normal,
+ Abi::RustCall,
+ ),
+ bound_vars,
+ );
+ // Astconv can't normalize inputs or outputs with escaping bound vars,
+ // so normalize them here, after we've wrapped them in a binder.
+ let result = self.normalize_associated_types_in(self.tcx.hir().span(hir_id), result);
+
+ let c_result = self.inh.infcx.canonicalize_response(result);
+ self.typeck_results.borrow_mut().user_provided_sigs.insert(expr_def_id, c_result);
+
+ result
+ }
+
+ /// Invoked when we are translating the generator that results
+ /// from desugaring an `async fn`. Returns the "sugared" return
+ /// type of the `async fn` -- that is, the return type that the
+ /// user specified. The "desugared" return type is an `impl
+ /// Future<Output = T>`, so we do this by searching through the
+ /// obligations to extract the `T`.
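+    ///
+    /// For example (illustrative; `fetch` is a made-up function), given
+    ///
+    /// ```ignore (illustrative)
+    /// async fn fetch(key: u32) -> String { String::new() }
+    /// ```
+    ///
+    /// the desugared return type is `impl Future<Output = String>`, and this
+    /// function recovers the sugared `String` from a pending
+    /// `<_ as Future>::Output = String` obligation.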
+ #[instrument(skip(self), level = "debug", ret)]
+ fn deduce_future_output_from_obligations(
+ &self,
+ expr_def_id: DefId,
+ body_id: hir::HirId,
+ ) -> Option<Ty<'tcx>> {
+ let ret_coercion = self.ret_coercion.as_ref().unwrap_or_else(|| {
+ span_bug!(self.tcx.def_span(expr_def_id), "async fn generator outside of a fn")
+ });
+
+ let ret_ty = ret_coercion.borrow().expected_ty();
+ let ret_ty = self.inh.infcx.shallow_resolve(ret_ty);
+
+ let get_future_output = |predicate: ty::Predicate<'tcx>, span| {
+ // Search for a pending obligation like
+ //
+ // `<R as Future>::Output = T`
+ //
+ // where R is the return type we are expecting. This type `T`
+ // will be our output.
+ let bound_predicate = predicate.kind();
+ if let ty::PredicateKind::Projection(proj_predicate) = bound_predicate.skip_binder() {
+ self.deduce_future_output_from_projection(
+ span,
+ bound_predicate.rebind(proj_predicate),
+ )
+ } else {
+ None
+ }
+ };
+
+ let output_ty = match *ret_ty.kind() {
+ ty::Infer(ty::TyVar(ret_vid)) => {
+ self.obligations_for_self_ty(ret_vid).find_map(|(_, obligation)| {
+ get_future_output(obligation.predicate, obligation.cause.span)
+ })?
+ }
+ ty::Opaque(def_id, substs) => self
+ .tcx
+ .bound_explicit_item_bounds(def_id)
+ .subst_iter_copied(self.tcx, substs)
+ .find_map(|(p, s)| get_future_output(p, s))?,
+ ty::Error(_) => return None,
+ ty::Projection(proj)
+ if self.tcx.def_kind(proj.item_def_id) == DefKind::ImplTraitPlaceholder =>
+ {
+ self.tcx
+ .bound_explicit_item_bounds(proj.item_def_id)
+ .subst_iter_copied(self.tcx, proj.substs)
+ .find_map(|(p, s)| get_future_output(p, s))?
+ }
+ _ => span_bug!(
+ self.tcx.def_span(expr_def_id),
+ "async fn generator return type not an inference variable: {ret_ty}"
+ ),
+ };
+
+        // async fns that have opaque types in their return type need to redo the
+        // conversion to inference variables, as they fetch the still-opaque version
+        // from the signature.
+ let InferOk { value: output_ty, obligations } = self
+ .replace_opaque_types_with_inference_vars(
+ output_ty,
+ body_id,
+ self.tcx.def_span(expr_def_id),
+ self.param_env,
+ );
+ self.register_predicates(obligations);
+
+ Some(output_ty)
+ }
+
+ /// Given a projection like
+ ///
+ /// `<X as Future>::Output = T`
+ ///
+ /// where `X` is some type that has no late-bound regions, returns
+ /// `Some(T)`. If the projection is for some other trait, returns
+ /// `None`.
+ fn deduce_future_output_from_projection(
+ &self,
+ cause_span: Span,
+ predicate: ty::PolyProjectionPredicate<'tcx>,
+ ) -> Option<Ty<'tcx>> {
+ debug!("deduce_future_output_from_projection(predicate={:?})", predicate);
+
+ // We do not expect any bound regions in our predicate, so
+ // skip past the bound vars.
+ let Some(predicate) = predicate.no_bound_vars() else {
+ debug!("deduce_future_output_from_projection: has late-bound regions");
+ return None;
+ };
+
+ // Check that this is a projection from the `Future` trait.
+ let trait_def_id = predicate.projection_ty.trait_def_id(self.tcx);
+ let future_trait = self.tcx.require_lang_item(LangItem::Future, Some(cause_span));
+ if trait_def_id != future_trait {
+ debug!("deduce_future_output_from_projection: not a future");
+ return None;
+ }
+
+ // The `Future` trait has only one associated item, `Output`,
+ // so check that this is what we see.
+ let output_assoc_item = self.tcx.associated_item_def_ids(future_trait)[0];
+ if output_assoc_item != predicate.projection_ty.item_def_id {
+ span_bug!(
+ cause_span,
+ "projecting associated item `{:?}` from future, which is not Output `{:?}`",
+ predicate.projection_ty.item_def_id,
+ output_assoc_item,
+ );
+ }
+
+ // Extract the type from the projection. Note that there can
+ // be no bound variables in this type because the "self type"
+ // does not have any regions in it.
+ let output_ty = self.resolve_vars_if_possible(predicate.term);
+ debug!("deduce_future_output_from_projection: output_ty={:?}", output_ty);
+        // This is a projection on the `Future` trait, so it will always be a type.
+ Some(output_ty.ty().unwrap())
+ }
+
+    /// Converts the types that the user supplied, so that any errors in them
+    /// are reported, but returns a signature where all parameters are of type
+    /// `TyErr`.
+ fn error_sig_of_closure(&self, decl: &hir::FnDecl<'_>) -> ty::PolyFnSig<'tcx> {
+ let astconv: &dyn AstConv<'_> = self;
+
+ let supplied_arguments = decl.inputs.iter().map(|a| {
+ // Convert the types that the user supplied (if any), but ignore them.
+ astconv.ast_ty_to_ty(a);
+ self.tcx.ty_error()
+ });
+
+ if let hir::FnRetTy::Return(ref output) = decl.output {
+ astconv.ast_ty_to_ty(&output);
+ }
+
+ let result = ty::Binder::dummy(self.tcx.mk_fn_sig(
+ supplied_arguments,
+ self.tcx.ty_error(),
+ decl.c_variadic,
+ hir::Unsafety::Normal,
+ Abi::RustCall,
+ ));
+
+ debug!("supplied_sig_of_closure: result={:?}", result);
+
+ result
+ }
+
+ fn closure_sigs(
+ &self,
+ expr_def_id: DefId,
+ body: &hir::Body<'_>,
+ bound_sig: ty::PolyFnSig<'tcx>,
+ ) -> ClosureSignatures<'tcx> {
+ let liberated_sig = self.tcx().liberate_late_bound_regions(expr_def_id, bound_sig);
+ let liberated_sig = self.inh.normalize_associated_types_in(
+ body.value.span,
+ body.value.hir_id,
+ self.param_env,
+ liberated_sig,
+ );
+ ClosureSignatures { bound_sig, liberated_sig }
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/coercion.rs b/compiler/rustc_hir_typeck/src/coercion.rs
new file mode 100644
index 000000000..86597a703
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/coercion.rs
@@ -0,0 +1,1950 @@
+//! # Type Coercion
+//!
+//! Under certain circumstances we will coerce from one type to another,
+//! for example by auto-borrowing. This occurs in situations where the
+//! compiler has a firm 'expected type' that was supplied by the user,
+//! and where the actual type is similar to that expected type in purpose
+//! but not in representation (so actual subtyping is inappropriate).
+//!
+//! ## Reborrowing
+//!
+//! Note that if we are expecting a reference, we will *reborrow*
+//! even if the argument provided was already a reference. This is
+//! useful for freezing mut things (that is, when the expected type is &T
+//! but you have &mut T) and also for avoiding the linearity
+//! of mut things (when the expected is &mut T and you have &mut T). See
+//! the various `src/test/ui/coerce/*.rs` tests for
+//! examples of where this is useful.
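+//!
+//! For example (an illustrative sketch; `print_len` is a made-up helper):
+//!
+//! ```ignore (illustrative)
+//! fn print_len(v: &Vec<i32>) { println!("{}", v.len()); }
+//!
+//! let r = &mut vec![1, 2, 3];
+//! print_len(r); // passed as `&*r`: the `&mut` is reborrowed, not moved
+//! r.push(4);    // so `r` is still usable afterwards
+//! ```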
+//!
+//! ## Subtle note
+//!
+//! When inferring the generic arguments of functions, the argument
+//! order is relevant, which can lead to the following edge case:
+//!
+//! ```ignore (illustrative)
+//! fn foo<T>(a: T, b: T) {
+//! // ...
+//! }
+//!
+//! foo(&7i32, &mut 7i32);
+//! // This compiles, as we first infer `T` to be `&i32`,
+//! // and then coerce `&mut 7i32` to `&7i32`.
+//!
+//! foo(&mut 7i32, &7i32);
+//! // This does not compile, as we first infer `T` to be `&mut i32`
+//! // and are then unable to coerce `&7i32` to `&mut i32`.
+//! ```
+
+use crate::FnCtxt;
+use rustc_errors::{
+ struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, MultiSpan,
+};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::Expr;
+use rustc_hir_analysis::astconv::AstConv;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::{Coercion, InferOk, InferResult};
+use rustc_infer::traits::{Obligation, TraitEngine, TraitEngineExt};
+use rustc_middle::lint::in_external_macro;
+use rustc_middle::ty::adjustment::{
+ Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, PointerCast,
+};
+use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::relate::RelateResult;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, ToPredicate, Ty, TypeAndMut};
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::sym;
+use rustc_span::{self, BytePos, DesugaringKind, Span};
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
+use rustc_trait_selection::traits::{self, ObligationCause, ObligationCauseCode};
+
+use smallvec::{smallvec, SmallVec};
+use std::ops::Deref;
+
+struct Coerce<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ cause: ObligationCause<'tcx>,
+ use_lub: bool,
+ /// Determines whether or not allow_two_phase_borrow is set on any
+ /// autoref adjustments we create while coercing. We don't want to
+ /// allow deref coercions to create two-phase borrows, at least initially,
+ /// but we do need two-phase borrows for function argument reborrows.
+ /// See #47489 and #48598
+ /// See docs on the "AllowTwoPhase" type for a more detailed discussion
+ allow_two_phase: AllowTwoPhase,
+}
+
+impl<'a, 'tcx> Deref for Coerce<'a, 'tcx> {
+ type Target = FnCtxt<'a, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.fcx
+ }
+}
+
+type CoerceResult<'tcx> = InferResult<'tcx, (Vec<Adjustment<'tcx>>, Ty<'tcx>)>;
+
+struct CollectRetsVisitor<'tcx> {
+ ret_exprs: Vec<&'tcx hir::Expr<'tcx>>,
+}
+
+impl<'tcx> Visitor<'tcx> for CollectRetsVisitor<'tcx> {
+ fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
+ if let hir::ExprKind::Ret(_) = expr.kind {
+ self.ret_exprs.push(expr);
+ }
+ intravisit::walk_expr(self, expr);
+ }
+}
+
+/// Coercing a mutable reference to an immutable one works, while
+/// coercing `&T` to `&mut T` is forbidden.
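+///
+/// For example (illustrative):
+///
+/// ```ignore (illustrative)
+/// let x = &mut 5i32;
+/// let y: &i32 = x;         // ok: `&mut i32` coerces to `&i32`
+/// // let z: &mut i32 = &5; // rejected: `&i32` does not coerce to `&mut i32`
+/// ```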
+fn coerce_mutbls<'tcx>(
+ from_mutbl: hir::Mutability,
+ to_mutbl: hir::Mutability,
+) -> RelateResult<'tcx, ()> {
+ match (from_mutbl, to_mutbl) {
+ (hir::Mutability::Mut, hir::Mutability::Mut | hir::Mutability::Not)
+ | (hir::Mutability::Not, hir::Mutability::Not) => Ok(()),
+ (hir::Mutability::Not, hir::Mutability::Mut) => Err(TypeError::Mutability),
+ }
+}
+
+/// Do not require any adjustments, i.e. coerce `x -> x`.
+fn identity(_: Ty<'_>) -> Vec<Adjustment<'_>> {
+ vec![]
+}
+
+fn simple<'tcx>(kind: Adjust<'tcx>) -> impl FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>> {
+ move |target| vec![Adjustment { kind, target }]
+}
+
+/// This always returns `Ok(...)`.
+fn success<'tcx>(
+ adj: Vec<Adjustment<'tcx>>,
+ target: Ty<'tcx>,
+ obligations: traits::PredicateObligations<'tcx>,
+) -> CoerceResult<'tcx> {
+ Ok(InferOk { value: (adj, target), obligations })
+}
+
+impl<'f, 'tcx> Coerce<'f, 'tcx> {
+ fn new(
+ fcx: &'f FnCtxt<'f, 'tcx>,
+ cause: ObligationCause<'tcx>,
+ allow_two_phase: AllowTwoPhase,
+ ) -> Self {
+ Coerce { fcx, cause, allow_two_phase, use_lub: false }
+ }
+
+ fn unify(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> {
+ debug!("unify(a: {:?}, b: {:?}, use_lub: {})", a, b, self.use_lub);
+ self.commit_if_ok(|_| {
+ if self.use_lub {
+ self.at(&self.cause, self.fcx.param_env).lub(b, a)
+ } else {
+ self.at(&self.cause, self.fcx.param_env)
+ .sup(b, a)
+ .map(|InferOk { value: (), obligations }| InferOk { value: a, obligations })
+ }
+ })
+ }
+
+ /// Unify two types (using sub or lub) and produce a specific coercion.
+ fn unify_and<F>(&self, a: Ty<'tcx>, b: Ty<'tcx>, f: F) -> CoerceResult<'tcx>
+ where
+ F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
+ {
+ self.unify(a, b)
+ .and_then(|InferOk { value: ty, obligations }| success(f(ty), ty, obligations))
+ }
+
+ #[instrument(skip(self))]
+ fn coerce(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
+ // First, remove any resolved type variables (at the top level, at least):
+ let a = self.shallow_resolve(a);
+ let b = self.shallow_resolve(b);
+ debug!("Coerce.tys({:?} => {:?})", a, b);
+
+ // Just ignore error types.
+ if a.references_error() || b.references_error() {
+ return success(vec![], self.fcx.tcx.ty_error(), vec![]);
+ }
+
+ // Coercing from `!` to any type is allowed:
+ if a.is_never() {
+ return success(simple(Adjust::NeverToAny)(b), b, vec![]);
+ }
+
+ // Coercing *from* an unresolved inference variable means that
+ // we have no information about the source type. This will always
+ // ultimately fall back to some form of subtyping.
+ if a.is_ty_var() {
+ return self.coerce_from_inference_variable(a, b, identity);
+ }
+
+ // Consider coercing the subtype to a DST
+ //
+ // NOTE: this is wrapped in a `commit_if_ok` because it creates
+ // a "spurious" type variable, and we don't want to have that
+ // type variable in memory if the coercion fails.
+ let unsize = self.commit_if_ok(|_| self.coerce_unsized(a, b));
+ match unsize {
+ Ok(_) => {
+ debug!("coerce: unsize successful");
+ return unsize;
+ }
+ Err(TypeError::ObjectUnsafeCoercion(did)) => {
+ debug!("coerce: unsize not object safe");
+ return Err(TypeError::ObjectUnsafeCoercion(did));
+ }
+ Err(error) => {
+ debug!(?error, "coerce: unsize failed");
+ }
+ }
+
+ // Examine the supertype and consider auto-borrowing.
+ match *b.kind() {
+ ty::RawPtr(mt_b) => {
+ return self.coerce_unsafe_ptr(a, b, mt_b.mutbl);
+ }
+ ty::Ref(r_b, _, mutbl_b) => {
+ return self.coerce_borrowed_pointer(a, b, r_b, mutbl_b);
+ }
+ ty::Dynamic(predicates, region, ty::DynStar) if self.tcx.features().dyn_star => {
+ return self.coerce_dyn_star(a, b, predicates, region);
+ }
+ _ => {}
+ }
+
+ match *a.kind() {
+ ty::FnDef(..) => {
+ // Function items are coercible to any closure
+ // type; function pointers are not (that would
+ // require double indirection).
+ // Additionally, we permit coercion of function
+ // items to drop the unsafe qualifier.
+ self.coerce_from_fn_item(a, b)
+ }
+ ty::FnPtr(a_f) => {
+ // We permit coercion of fn pointers to drop the
+ // unsafe qualifier.
+ self.coerce_from_fn_pointer(a, a_f, b)
+ }
+ ty::Closure(closure_def_id_a, substs_a) => {
+ // Non-capturing closures are coercible to
+ // function pointers or unsafe function pointers.
+ // It cannot convert closures that require unsafe.
+ self.coerce_closure_to_fn(a, closure_def_id_a, substs_a, b)
+ }
+ _ => {
+ // Otherwise, just use unification rules.
+ self.unify_and(a, b, identity)
+ }
+ }
+ }
+
+ /// Coercing *from* an inference variable. In this case, we have no information
+ /// about the source type, so we can't really do a true coercion and we always
+ /// fall back to subtyping (`unify_and`).
+ fn coerce_from_inference_variable(
+ &self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ make_adjustments: impl FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
+ ) -> CoerceResult<'tcx> {
+ debug!("coerce_from_inference_variable(a={:?}, b={:?})", a, b);
+ assert!(a.is_ty_var() && self.shallow_resolve(a) == a);
+ assert!(self.shallow_resolve(b) == b);
+
+ if b.is_ty_var() {
+ // Two unresolved type variables: create a `Coerce` predicate.
+ let target_ty = if self.use_lub {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::LatticeVariable,
+ span: self.cause.span,
+ })
+ } else {
+ b
+ };
+
+ let mut obligations = Vec::with_capacity(2);
+ for &source_ty in &[a, b] {
+ if source_ty != target_ty {
+ obligations.push(Obligation::new(
+ self.cause.clone(),
+ self.param_env,
+ ty::Binder::dummy(ty::PredicateKind::Coerce(ty::CoercePredicate {
+ a: source_ty,
+ b: target_ty,
+ }))
+ .to_predicate(self.tcx()),
+ ));
+ }
+ }
+
+ debug!(
+ "coerce_from_inference_variable: two inference variables, target_ty={:?}, obligations={:?}",
+ target_ty, obligations
+ );
+ let adjustments = make_adjustments(target_ty);
+ InferResult::Ok(InferOk { value: (adjustments, target_ty), obligations })
+ } else {
+ // One unresolved type variable: just apply subtyping, we may be able
+ // to do something useful.
+ self.unify_and(a, b, make_adjustments)
+ }
+ }
+
+ /// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`.
+ /// To match `A` with `B`, autoderef will be performed,
+ /// calling `deref`/`deref_mut` where necessary.
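+    ///
+    /// For example (illustrative), this is the path that coerces `&String` to
+    /// `&str`: autoderef steps from `String` to `str`, and the result is then
+    /// re-borrowed immutably.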
+ fn coerce_borrowed_pointer(
+ &self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ r_b: ty::Region<'tcx>,
+ mutbl_b: hir::Mutability,
+ ) -> CoerceResult<'tcx> {
+ debug!("coerce_borrowed_pointer(a={:?}, b={:?})", a, b);
+
+ // If we have a parameter of type `&M T_a` and the value
+ // provided is `expr`, we will be adding an implicit borrow,
+ // meaning that we convert `f(expr)` to `f(&M *expr)`. Therefore,
+ // to type check, we will construct the type that `&M*expr` would
+ // yield.
+
+ let (r_a, mt_a) = match *a.kind() {
+ ty::Ref(r_a, ty, mutbl) => {
+ let mt_a = ty::TypeAndMut { ty, mutbl };
+ coerce_mutbls(mt_a.mutbl, mutbl_b)?;
+ (r_a, mt_a)
+ }
+ _ => return self.unify_and(a, b, identity),
+ };
+
+ let span = self.cause.span;
+
+ let mut first_error = None;
+ let mut r_borrow_var = None;
+ let mut autoderef = self.autoderef(span, a);
+ let mut found = None;
+
+ for (referent_ty, autoderefs) in autoderef.by_ref() {
+ if autoderefs == 0 {
+ // Don't let this pass, otherwise it would cause
+ // &T to autoref to &&T.
+ continue;
+ }
+
+ // At this point, we have deref'd `a` to `referent_ty`. So
+ // imagine we are coercing from `&'a mut Vec<T>` to `&'b mut [T]`.
+ // In the autoderef loop for `&'a mut Vec<T>`, we would get
+ // three callbacks:
+ //
+ // - `&'a mut Vec<T>` -- 0 derefs, just ignore it
+ // - `Vec<T>` -- 1 deref
+ // - `[T]` -- 2 deref
+ //
+ // At each point after the first callback, we want to
+            // check to see whether this would match our target type
+ // (`&'b mut [T]`) if we autoref'd it. We can't just
+ // compare the referent types, though, because we still
+ // have to consider the mutability. E.g., in the case
+ // we've been considering, we have an `&mut` reference, so
+ // the `T` in `[T]` needs to be unified with equality.
+ //
+ // Therefore, we construct reference types reflecting what
+ // the types will be after we do the final auto-ref and
+ // compare those. Note that this means we use the target
+ // mutability [1], since it may be that we are coercing
+ // from `&mut T` to `&U`.
+ //
+ // One fine point concerns the region that we use. We
+ // choose the region such that the region of the final
+ // type that results from `unify` will be the region we
+ // want for the autoref:
+ //
+ // - if in sub mode, that means we want to use `'b` (the
+ // region from the target reference) for both
+ // pointers [2]. This is because sub mode (somewhat
+ // arbitrarily) returns the subtype region. In the case
+ // where we are coercing to a target type, we know we
+ // want to use that target type region (`'b`) because --
+ // for the program to type-check -- it must be the
+ // smaller of the two.
+ // - One fine point. It may be surprising that we can
+ // use `'b` without relating `'a` and `'b`. The reason
+ // that this is ok is that what we produce is
+ // effectively a `&'b *x` expression (if you could
+ // annotate the region of a borrow), and regionck has
+ // code that adds edges from the region of a borrow
+ // (`'b`, here) into the regions in the borrowed
+ // expression (`*x`, here). (Search for "link".)
+ // - if in lub mode, things can get fairly complicated. The
+ // easiest thing is just to make a fresh
+ // region variable [4], which effectively means we defer
+ // the decision to region inference (and regionck, which will add
+ // some more edges to this variable). However, this can wind up
+ // creating a crippling number of variables in some cases --
+ // e.g., #32278 -- so we optimize one particular case [3].
+ // Let me try to explain with some examples:
+ // - The "running example" above represents the simple case,
+ // where we have one `&` reference at the outer level and
+ // ownership all the rest of the way down. In this case,
+ // we want `LUB('a, 'b)` as the resulting region.
+ // - However, if there are nested borrows, that region is
+ // too strong. Consider a coercion from `&'a &'x Rc<T>` to
+ // `&'b T`. In this case, `'a` is actually irrelevant.
+            //       The pointer we want is `LUB('x, 'b)`. If we choose `LUB('a, 'b)`
+ // we get spurious errors (`ui/regions-lub-ref-ref-rc.rs`).
+ // (The errors actually show up in borrowck, typically, because
+ // this extra edge causes the region `'a` to be inferred to something
+ // too big, which then results in borrowck errors.)
+ // - We could track the innermost shared reference, but there is already
+ // code in regionck that has the job of creating links between
+ // the region of a borrow and the regions in the thing being
+ // borrowed (here, `'a` and `'x`), and it knows how to handle
+ // all the various cases. So instead we just make a region variable
+ // and let regionck figure it out.
+ let r = if !self.use_lub {
+ r_b // [2] above
+ } else if autoderefs == 1 {
+ r_a // [3] above
+ } else {
+ if r_borrow_var.is_none() {
+ // create var lazily, at most once
+ let coercion = Coercion(span);
+ let r = self.next_region_var(coercion);
+ r_borrow_var = Some(r); // [4] above
+ }
+ r_borrow_var.unwrap()
+ };
+ let derefd_ty_a = self.tcx.mk_ref(
+ r,
+ TypeAndMut {
+ ty: referent_ty,
+ mutbl: mutbl_b, // [1] above
+ },
+ );
+ match self.unify(derefd_ty_a, b) {
+ Ok(ok) => {
+ found = Some(ok);
+ break;
+ }
+ Err(err) => {
+ if first_error.is_none() {
+ first_error = Some(err);
+ }
+ }
+ }
+ }
+
+ // Extract type or return an error. We return the first error
+ // we got, which should be from relating the "base" type
+ // (e.g., in example above, the failure from relating `Vec<T>`
+ // to the target type), since that should be the least
+ // confusing.
+ let Some(InferOk { value: ty, mut obligations }) = found else {
+ let err = first_error.expect("coerce_borrowed_pointer had no error");
+ debug!("coerce_borrowed_pointer: failed with err = {:?}", err);
+ return Err(err);
+ };
+
+ if ty == a && mt_a.mutbl == hir::Mutability::Not && autoderef.step_count() == 1 {
+ // As a special case, if we would produce `&'a *x`, that's
+ // a total no-op. We end up with the type `&'a T` just as
+ // we started with. In that case, just skip it
+ // altogether. This is just an optimization.
+ //
+ // Note that for `&mut`, we DO want to reborrow --
+ // otherwise, this would be a move, which might be an
+ // error. For example `foo(self.x)` where `self` and
+            // `self.x` both have `&mut` type would be a move of
+ // `self.x`, but we auto-coerce it to `foo(&mut *self.x)`,
+ // which is a borrow.
+ assert_eq!(mutbl_b, hir::Mutability::Not); // can only coerce &T -> &U
+ return success(vec![], ty, obligations);
+ }
+
+ let InferOk { value: mut adjustments, obligations: o } =
+ self.adjust_steps_as_infer_ok(&autoderef);
+ obligations.extend(o);
+ obligations.extend(autoderef.into_obligations());
+
+ // Now apply the autoref. We have to extract the region out of
+ // the final ref type we got.
+ let ty::Ref(r_borrow, _, _) = ty.kind() else {
+ span_bug!(span, "expected a ref type, got {:?}", ty);
+ };
+ let mutbl = match mutbl_b {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => {
+ AutoBorrowMutability::Mut { allow_two_phase_borrow: self.allow_two_phase }
+ }
+ };
+ adjustments.push(Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*r_borrow, mutbl)),
+ target: ty,
+ });
+
+ debug!("coerce_borrowed_pointer: succeeded ty={:?} adjustments={:?}", ty, adjustments);
+
+ success(adjustments, ty, obligations)
+ }
+
+ // &[T; n] or &mut [T; n] -> &[T]
+ // or &mut [T; n] -> &mut [T]
+ // or &Concrete -> &Trait, etc.
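+    //
+    // For example (illustrative):
+    //
+    //     let arr = [1u8, 2, 3];
+    //     let s: &[u8] = &arr;                // `&[u8; 3]` -> `&[u8]`
+    //     let d: &dyn std::fmt::Debug = &1u8; // `&u8` -> `&dyn Debug`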
+ #[instrument(skip(self), level = "debug")]
+ fn coerce_unsized(&self, mut source: Ty<'tcx>, mut target: Ty<'tcx>) -> CoerceResult<'tcx> {
+ source = self.shallow_resolve(source);
+ target = self.shallow_resolve(target);
+ debug!(?source, ?target);
+
+ // These 'if' statements require some explanation.
+ // The `CoerceUnsized` trait is special - it is only
+ // possible to write `impl CoerceUnsized<B> for A` where
+ // A and B have 'matching' fields. This rules out the following
+ // two types of blanket impls:
+ //
+ // `impl<T> CoerceUnsized<T> for SomeType`
+ // `impl<T> CoerceUnsized<SomeType> for T`
+ //
+ // Both of these trigger a special `CoerceUnsized`-related error (E0376)
+ //
+ // We can take advantage of this fact to avoid performing unnecessary work.
+ // If either `source` or `target` is a type variable, then any applicable impl
+ // would need to be generic over the self-type (`impl<T> CoerceUnsized<SomeType> for T`)
+ // or generic over the `CoerceUnsized` type parameter (`impl<T> CoerceUnsized<T> for
+ // SomeType`).
+ //
+ // However, these are exactly the kinds of impls which are forbidden by
+ // the compiler! Therefore, we can be sure that coercion will always fail
+ // when either the source or target type is a type variable. This allows us
+ // to skip performing any trait selection, and immediately bail out.
+ if source.is_ty_var() {
+ debug!("coerce_unsized: source is a TyVar, bailing out");
+ return Err(TypeError::Mismatch);
+ }
+ if target.is_ty_var() {
+ debug!("coerce_unsized: target is a TyVar, bailing out");
+ return Err(TypeError::Mismatch);
+ }
+
+ let traits =
+ (self.tcx.lang_items().unsize_trait(), self.tcx.lang_items().coerce_unsized_trait());
+ let (Some(unsize_did), Some(coerce_unsized_did)) = traits else {
+ debug!("missing Unsize or CoerceUnsized traits");
+ return Err(TypeError::Mismatch);
+ };
+
+ // Note, we want to avoid unnecessary unsizing. We don't want to coerce to
+ // a DST unless we have to. This currently comes out in the wash since
+ // we can't unify [T] with U. But to properly support DST, we need to allow
+ // that, at which point we will need extra checks on the target here.
+
+ // Handle reborrows before selecting `Source: CoerceUnsized<Target>`.
+ let reborrow = match (source.kind(), target.kind()) {
+ (&ty::Ref(_, ty_a, mutbl_a), &ty::Ref(_, _, mutbl_b)) => {
+ coerce_mutbls(mutbl_a, mutbl_b)?;
+
+ let coercion = Coercion(self.cause.span);
+ let r_borrow = self.next_region_var(coercion);
+ let mutbl = match mutbl_b {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+ // We don't allow two-phase borrows here, at least for initial
+ // implementation. If it happens that this coercion is a function argument,
+ // the reborrow in coerce_borrowed_ptr will pick it up.
+ allow_two_phase_borrow: AllowTwoPhase::No,
+ },
+ };
+ Some((
+ Adjustment { kind: Adjust::Deref(None), target: ty_a },
+ Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(r_borrow, mutbl)),
+ target: self
+ .tcx
+ .mk_ref(r_borrow, ty::TypeAndMut { mutbl: mutbl_b, ty: ty_a }),
+ },
+ ))
+ }
+ (&ty::Ref(_, ty_a, mt_a), &ty::RawPtr(ty::TypeAndMut { mutbl: mt_b, .. })) => {
+ coerce_mutbls(mt_a, mt_b)?;
+
+ Some((
+ Adjustment { kind: Adjust::Deref(None), target: ty_a },
+ Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::RawPtr(mt_b)),
+ target: self.tcx.mk_ptr(ty::TypeAndMut { mutbl: mt_b, ty: ty_a }),
+ },
+ ))
+ }
+ _ => None,
+ };
+ let coerce_source = reborrow.as_ref().map_or(source, |&(_, ref r)| r.target);
+
+ // Setup either a subtyping or a LUB relationship between
+ // the `CoerceUnsized` target type and the expected type.
+ // We only have the latter, so we use an inference variable
+ // for the former and let type inference do the rest.
+ let origin = TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: self.cause.span,
+ };
+ let coerce_target = self.next_ty_var(origin);
+ let mut coercion = self.unify_and(coerce_target, target, |target| {
+ let unsize = Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), target };
+ match reborrow {
+ None => vec![unsize],
+ Some((ref deref, ref autoref)) => vec![deref.clone(), autoref.clone(), unsize],
+ }
+ })?;
+
+ let mut selcx = traits::SelectionContext::new(self);
+
+ // Create an obligation for `Source: CoerceUnsized<Target>`.
+ let cause = ObligationCause::new(
+ self.cause.span,
+ self.body_id,
+ ObligationCauseCode::Coercion { source, target },
+ );
+
+ // Use a FIFO queue for this custom fulfillment procedure.
+ //
+ // A Vec (or SmallVec) is not a natural choice for a queue. However,
+ // this code path is hot, and this queue usually has a max length of 1
+ // and almost never more than 3. By using a SmallVec we avoid an
+ // allocation, at the (very small) cost of (occasionally) having to
+ // shift subsequent elements down when removing the front element.
+ let mut queue: SmallVec<[_; 4]> = smallvec![traits::predicate_for_trait_def(
+ self.tcx,
+ self.fcx.param_env,
+ cause,
+ coerce_unsized_did,
+ 0,
+ coerce_source,
+ &[coerce_target.into()]
+ )];
+
+ let mut has_unsized_tuple_coercion = false;
+ let mut has_trait_upcasting_coercion = None;
+
+ // Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid
+ // emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where
+ // inference might unify those two inner type variables later.
+ let traits = [coerce_unsized_did, unsize_did];
+ while !queue.is_empty() {
+ let obligation = queue.remove(0);
+ debug!("coerce_unsized resolve step: {:?}", obligation);
+ let bound_predicate = obligation.predicate.kind();
+ let trait_pred = match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(trait_pred) if traits.contains(&trait_pred.def_id()) => {
+ if unsize_did == trait_pred.def_id() {
+ let self_ty = trait_pred.self_ty();
+ let unsize_ty = trait_pred.trait_ref.substs[1].expect_ty();
+ if let (ty::Dynamic(ref data_a, ..), ty::Dynamic(ref data_b, ..)) =
+ (self_ty.kind(), unsize_ty.kind())
+ && data_a.principal_def_id() != data_b.principal_def_id()
+ {
+ debug!("coerce_unsized: found trait upcasting coercion");
+ has_trait_upcasting_coercion = Some((self_ty, unsize_ty));
+ }
+ if let ty::Tuple(..) = unsize_ty.kind() {
+ debug!("coerce_unsized: found unsized tuple coercion");
+ has_unsized_tuple_coercion = true;
+ }
+ }
+ bound_predicate.rebind(trait_pred)
+ }
+ _ => {
+ coercion.obligations.push(obligation);
+ continue;
+ }
+ };
+ match selcx.select(&obligation.with(trait_pred)) {
+ // Uncertain or unimplemented.
+ Ok(None) => {
+ if trait_pred.def_id() == unsize_did {
+ let trait_pred = self.resolve_vars_if_possible(trait_pred);
+ let self_ty = trait_pred.skip_binder().self_ty();
+ let unsize_ty = trait_pred.skip_binder().trait_ref.substs[1].expect_ty();
+ debug!("coerce_unsized: ambiguous unsize case for {:?}", trait_pred);
+ match (&self_ty.kind(), &unsize_ty.kind()) {
+ (ty::Infer(ty::TyVar(v)), ty::Dynamic(..))
+ if self.type_var_is_sized(*v) =>
+ {
+ debug!("coerce_unsized: have sized infer {:?}", v);
+ coercion.obligations.push(obligation);
+ // `$0: Unsize<dyn Trait>` where we know that `$0: Sized`, try going
+ // for unsizing.
+ }
+ _ => {
+ // Some other case for `$0: Unsize<Something>`. Note that we
+ // hit this case even if `Something` is a sized type, so just
+ // don't do the coercion.
+ debug!("coerce_unsized: ambiguous unsize");
+ return Err(TypeError::Mismatch);
+ }
+ }
+ } else {
+ debug!("coerce_unsized: early return - ambiguous");
+ return Err(TypeError::Mismatch);
+ }
+ }
+ Err(traits::Unimplemented) => {
+ debug!("coerce_unsized: early return - can't prove obligation");
+ return Err(TypeError::Mismatch);
+ }
+
+ // Object safety violations or miscellaneous.
+ Err(err) => {
+ self.err_ctxt().report_selection_error(
+ obligation.clone(),
+ &obligation,
+ &err,
+ false,
+ );
+ // Treat this like an obligation and follow through
+ // with the unsizing - the lack of a coercion should
+ // be silent, as it causes a type mismatch later.
+ }
+
+ Ok(Some(impl_source)) => queue.extend(impl_source.nested_obligations()),
+ }
+ }
+
+ if has_unsized_tuple_coercion && !self.tcx.features().unsized_tuple_coercion {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::unsized_tuple_coercion,
+ self.cause.span,
+ "unsized tuple coercion is not stable enough for use and is subject to change",
+ )
+ .emit();
+ }
+
+ if let Some((sub, sup)) = has_trait_upcasting_coercion
+ && !self.tcx().features().trait_upcasting
+ {
+ // Renders better when we erase regions, since they're not really the point here.
+ let (sub, sup) = self.tcx.erase_regions((sub, sup));
+ let mut err = feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::trait_upcasting,
+ self.cause.span,
+ &format!("cannot cast `{sub}` to `{sup}`, trait upcasting coercion is experimental"),
+ );
+ err.note(&format!("required when coercing `{source}` into `{target}`"));
+ err.emit();
+ }
+
+ Ok(coercion)
+ }
+
+ fn coerce_dyn_star(
+ &self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ b_region: ty::Region<'tcx>,
+ ) -> CoerceResult<'tcx> {
+ if !self.tcx.features().dyn_star {
+ return Err(TypeError::Mismatch);
+ }
+
+ if let ty::Dynamic(a_data, _, _) = a.kind()
+ && let ty::Dynamic(b_data, _, _) = b.kind()
+ {
+ if a_data.principal_def_id() == b_data.principal_def_id() {
+ return self.unify_and(a, b, |_| vec![]);
+ } else if !self.tcx().features().trait_upcasting {
+ let mut err = feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::trait_upcasting,
+ self.cause.span,
+ &format!(
+ "cannot cast `{a}` to `{b}`, trait upcasting coercion is experimental"
+ ),
+ );
+ err.emit();
+ }
+ }
+
+ // Check the obligations of the cast -- for example, when casting
+ // `usize` to `dyn* Clone + 'static`:
+ let obligations = predicates
+ .iter()
+ .map(|predicate| {
+ // For each existential predicate (e.g., `?Self: Clone`) substitute
+ // the type of the expression (e.g., `usize` in our example above)
+ // and then require that the resulting predicate (e.g., `usize: Clone`)
+ // holds (it does).
+ let predicate = predicate.with_self_ty(self.tcx, a);
+ Obligation::new(self.cause.clone(), self.param_env, predicate)
+ })
+ // Enforce the region bound (e.g., `usize: 'static`, in our example).
+ .chain([Obligation::new(
+ self.cause.clone(),
+ self.param_env,
+ self.tcx.mk_predicate(ty::Binder::dummy(ty::PredicateKind::TypeOutlives(
+ ty::OutlivesPredicate(a, b_region),
+ ))),
+ )])
+ .collect();
+
+ Ok(InferOk {
+ value: (vec![Adjustment { kind: Adjust::DynStar, target: b }], b),
+ obligations,
+ })
+ }
+
+ fn coerce_from_safe_fn<F, G>(
+ &self,
+ a: Ty<'tcx>,
+ fn_ty_a: ty::PolyFnSig<'tcx>,
+ b: Ty<'tcx>,
+ to_unsafe: F,
+ normal: G,
+ ) -> CoerceResult<'tcx>
+ where
+ F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
+ G: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
+ {
+ self.commit_if_ok(|snapshot| {
+ let result = if let ty::FnPtr(fn_ty_b) = b.kind()
+ && let (hir::Unsafety::Normal, hir::Unsafety::Unsafe) =
+ (fn_ty_a.unsafety(), fn_ty_b.unsafety())
+ {
+ let unsafe_a = self.tcx.safe_to_unsafe_fn_ty(fn_ty_a);
+ self.unify_and(unsafe_a, b, to_unsafe)
+ } else {
+ self.unify_and(a, b, normal)
+ };
+
+ // FIXME(#73154): This is a hack. Currently LUB can generate
+ // unsolvable constraints. Additionally, it returns `a`
+ // unconditionally, even when the "LUB" is `b`. In the future, we
+ // want the coerced type to be the actual supertype of these two,
+ // but for now, we want to just error to ensure we don't lock
+ // ourselves into a specific behavior with NLL.
+ self.leak_check(false, snapshot)?;
+
+ result
+ })
+ }
+
+ fn coerce_from_fn_pointer(
+ &self,
+ a: Ty<'tcx>,
+ fn_ty_a: ty::PolyFnSig<'tcx>,
+ b: Ty<'tcx>,
+ ) -> CoerceResult<'tcx> {
+        //! Attempts to coerce from a Rust function pointer type into another
+        //! function pointer type, possibly dropping the `unsafe` qualifier.
+
+ let b = self.shallow_resolve(b);
+ debug!("coerce_from_fn_pointer(a={:?}, b={:?})", a, b);
+
+ self.coerce_from_safe_fn(
+ a,
+ fn_ty_a,
+ b,
+ simple(Adjust::Pointer(PointerCast::UnsafeFnPointer)),
+ identity,
+ )
+ }
+
+ fn coerce_from_fn_item(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
+        //! Attempts to coerce from the type of a Rust function item
+        //! into a function pointer, possibly dropping the `unsafe` qualifier.
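+        //!
+        //! For example (illustrative):
+        //!
+        //! ```ignore (illustrative)
+        //! fn double(x: i32) -> i32 { x * 2 }
+        //! let f: fn(i32) -> i32 = double; // fn item reified to a fn pointer
+        //! ```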
+
+ let b = self.shallow_resolve(b);
+ let InferOk { value: b, mut obligations } =
+ self.normalize_associated_types_in_as_infer_ok(self.cause.span, b);
+ debug!("coerce_from_fn_item(a={:?}, b={:?})", a, b);
+
+ match b.kind() {
+ ty::FnPtr(b_sig) => {
+ let a_sig = a.fn_sig(self.tcx);
+ if let ty::FnDef(def_id, _) = *a.kind() {
+ // Intrinsics are not coercible to function pointers
+ if self.tcx.is_intrinsic(def_id) {
+ return Err(TypeError::IntrinsicCast);
+ }
+
+ // Safe `#[target_feature]` functions are not assignable to safe fn pointers (RFC 2396).
+
+ if b_sig.unsafety() == hir::Unsafety::Normal
+ && !self.tcx.codegen_fn_attrs(def_id).target_features.is_empty()
+ {
+ return Err(TypeError::TargetFeatureCast(def_id));
+ }
+ }
+
+ let InferOk { value: a_sig, obligations: o1 } =
+ self.normalize_associated_types_in_as_infer_ok(self.cause.span, a_sig);
+ obligations.extend(o1);
+
+ let a_fn_pointer = self.tcx.mk_fn_ptr(a_sig);
+ let InferOk { value, obligations: o2 } = self.coerce_from_safe_fn(
+ a_fn_pointer,
+ a_sig,
+ b,
+ |unsafe_ty| {
+ vec![
+ Adjustment {
+ kind: Adjust::Pointer(PointerCast::ReifyFnPointer),
+ target: a_fn_pointer,
+ },
+ Adjustment {
+ kind: Adjust::Pointer(PointerCast::UnsafeFnPointer),
+ target: unsafe_ty,
+ },
+ ]
+ },
+ simple(Adjust::Pointer(PointerCast::ReifyFnPointer)),
+ )?;
+
+ obligations.extend(o2);
+ Ok(InferOk { value, obligations })
+ }
+ _ => self.unify_and(a, b, identity),
+ }
+ }
+
+ fn coerce_closure_to_fn(
+ &self,
+ a: Ty<'tcx>,
+ closure_def_id_a: DefId,
+ substs_a: SubstsRef<'tcx>,
+ b: Ty<'tcx>,
+ ) -> CoerceResult<'tcx> {
+ //! Attempts to coerce from the type of a non-capturing closure
+ //! into a function pointer.
+ //!
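+        //! For example (illustrative):
+        //!
+        //! ```ignore (illustrative)
+        //! let f: fn(i32) -> i32 = |x| x + 1;    // ok: captures nothing
+        //! let y = 10;
+        //! // let g: fn(i32) -> i32 = |x| x + y; // rejected: captures `y`
+        //! ```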
+
+ let b = self.shallow_resolve(b);
+
+ match b.kind() {
+ // At this point we haven't done capture analysis, which means
+ // that the ClosureSubsts just contains an inference variable instead
+ // of tuple of captured types.
+ //
+ // All we care here is if any variable is being captured and not the exact paths,
+ // so we check `upvars_mentioned` for root variables being captured.
+ ty::FnPtr(fn_ty)
+ if self
+ .tcx
+ .upvars_mentioned(closure_def_id_a.expect_local())
+ .map_or(true, |u| u.is_empty()) =>
+ {
+ // We coerce the closure, which has fn type
+ // `extern "rust-call" fn((arg0,arg1,...)) -> _`
+ // to
+ // `fn(arg0,arg1,...) -> _`
+ // or
+ // `unsafe fn(arg0,arg1,...) -> _`
+ let closure_sig = substs_a.as_closure().sig();
+ let unsafety = fn_ty.unsafety();
+ let pointer_ty =
+ self.tcx.mk_fn_ptr(self.tcx.signature_unclosure(closure_sig, unsafety));
+ debug!("coerce_closure_to_fn(a={:?}, b={:?}, pty={:?})", a, b, pointer_ty);
+ self.unify_and(
+ pointer_ty,
+ b,
+ simple(Adjust::Pointer(PointerCast::ClosureFnPointer(unsafety))),
+ )
+ }
+ _ => self.unify_and(a, b, identity),
+ }
+ }
+
+ fn coerce_unsafe_ptr(
+ &self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ mutbl_b: hir::Mutability,
+ ) -> CoerceResult<'tcx> {
+ debug!("coerce_unsafe_ptr(a={:?}, b={:?})", a, b);
+
+ let (is_ref, mt_a) = match *a.kind() {
+ ty::Ref(_, ty, mutbl) => (true, ty::TypeAndMut { ty, mutbl }),
+ ty::RawPtr(mt) => (false, mt),
+ _ => return self.unify_and(a, b, identity),
+ };
+ coerce_mutbls(mt_a.mutbl, mutbl_b)?;
+
+ // Check that the types which they point at are compatible.
+ let a_unsafe = self.tcx.mk_ptr(ty::TypeAndMut { mutbl: mutbl_b, ty: mt_a.ty });
+        // Although references and unsafe ptrs have the same
+        // representation, we still register a deref + borrow adjustment so that
+        // regionck knows that the region for `a` must be valid here.
+ if is_ref {
+ self.unify_and(a_unsafe, b, |target| {
+ vec![
+ Adjustment { kind: Adjust::Deref(None), target: mt_a.ty },
+ Adjustment { kind: Adjust::Borrow(AutoBorrow::RawPtr(mutbl_b)), target },
+ ]
+ })
+ } else if mt_a.mutbl != mutbl_b {
+ self.unify_and(a_unsafe, b, simple(Adjust::Pointer(PointerCast::MutToConstPointer)))
+ } else {
+ self.unify_and(a_unsafe, b, identity)
+ }
+ }
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Attempt to coerce an expression to a type, and return the
+ /// adjusted type of the expression, if successful.
+ /// Adjustments are only recorded if the coercion succeeded.
+ /// The expressions *must not* have any pre-existing adjustments.
+ pub fn try_coerce(
+ &self,
+ expr: &hir::Expr<'_>,
+ expr_ty: Ty<'tcx>,
+ target: Ty<'tcx>,
+ allow_two_phase: AllowTwoPhase,
+ cause: Option<ObligationCause<'tcx>>,
+ ) -> RelateResult<'tcx, Ty<'tcx>> {
+ let source = self.resolve_vars_with_obligations(expr_ty);
+ debug!("coercion::try({:?}: {:?} -> {:?})", expr, source, target);
+
+ let cause =
+ cause.unwrap_or_else(|| self.cause(expr.span, ObligationCauseCode::ExprAssignable));
+ let coerce = Coerce::new(self, cause, allow_two_phase);
+ let ok = self.commit_if_ok(|_| coerce.coerce(source, target))?;
+
+ let (adjustments, _) = self.register_infer_ok_obligations(ok);
+ self.apply_adjustments(expr, adjustments);
+ Ok(if expr_ty.references_error() { self.tcx.ty_error() } else { target })
+ }
+
+ /// Same as `try_coerce()`, but without side-effects.
+ ///
+ /// Returns false if the coercion creates any obligations that result in
+ /// errors.
+ pub fn can_coerce(&self, expr_ty: Ty<'tcx>, target: Ty<'tcx>) -> bool {
+ let source = self.resolve_vars_with_obligations(expr_ty);
+ debug!("coercion::can_with_predicates({:?} -> {:?})", source, target);
+
+ let cause = self.cause(rustc_span::DUMMY_SP, ObligationCauseCode::ExprAssignable);
+ // We don't ever need two-phase here since we throw out the result of the coercion
+ let coerce = Coerce::new(self, cause, AllowTwoPhase::No);
+ self.probe(|_| {
+ let Ok(ok) = coerce.coerce(source, target) else {
+ return false;
+ };
+ let mut fcx = traits::FulfillmentContext::new_in_snapshot();
+ fcx.register_predicate_obligations(self, ok.obligations);
+ fcx.select_where_possible(&self).is_empty()
+ })
+ }
+
+    /// Given a type and a target type, this function calculates and returns
+    /// how many dereference steps are needed to achieve `expr_ty <: target`.
+    /// If that is not possible, it returns `None`.
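+    ///
+    /// For example (illustrative), going from `&&u8` to `u8` takes two
+    /// dereference steps, so this would return `Some(2)`.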
+ pub fn deref_steps(&self, expr_ty: Ty<'tcx>, target: Ty<'tcx>) -> Option<usize> {
+ let cause = self.cause(rustc_span::DUMMY_SP, ObligationCauseCode::ExprAssignable);
+ // We don't ever need two-phase here since we throw out the result of the coercion
+ let coerce = Coerce::new(self, cause, AllowTwoPhase::No);
+ coerce
+ .autoderef(rustc_span::DUMMY_SP, expr_ty)
+ .find_map(|(ty, steps)| self.probe(|_| coerce.unify(ty, target)).ok().map(|_| steps))
+ }
+
+ /// Given a type, this function will calculate and return the type given
+ /// for `<Ty as Deref>::Target` only if `Ty` also implements `DerefMut`.
+ ///
+ /// This function is for diagnostics only, since it does not register
+ /// trait or region sub-obligations. (presumably we could, but it's not
+ /// particularly important for diagnostics...)
+ pub fn deref_once_mutably_for_diagnostic(&self, expr_ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
+ self.autoderef(rustc_span::DUMMY_SP, expr_ty).nth(1).and_then(|(deref_ty, _)| {
+ self.infcx
+ .type_implements_trait(
+ self.tcx.lang_items().deref_mut_trait()?,
+ expr_ty,
+ ty::List::empty(),
+ self.param_env,
+ )
+ .may_apply()
+ .then(|| deref_ty)
+ })
+ }
+
+ /// Given some expressions, their known unified type and another expression,
+ /// tries to unify the types, potentially inserting coercions on any of the
+ /// provided expressions and returns their LUB (aka "common supertype").
+ ///
+ /// This is really an internal helper. From outside the coercion
+ /// module, you should instantiate a `CoerceMany` instance.
+ fn try_find_coercion_lub<E>(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ exprs: &[E],
+ prev_ty: Ty<'tcx>,
+ new: &hir::Expr<'_>,
+ new_ty: Ty<'tcx>,
+ ) -> RelateResult<'tcx, Ty<'tcx>>
+ where
+ E: AsCoercionSite,
+ {
+ let prev_ty = self.resolve_vars_with_obligations(prev_ty);
+ let new_ty = self.resolve_vars_with_obligations(new_ty);
+ debug!(
+ "coercion::try_find_coercion_lub({:?}, {:?}, exprs={:?} exprs)",
+ prev_ty,
+ new_ty,
+ exprs.len()
+ );
+
+ // The following check fixes #88097, where the compiler erroneously
+ // attempted to coerce a closure type to itself via a function pointer.
+ if prev_ty == new_ty {
+ return Ok(prev_ty);
+ }
+
+ // Special-case that coercion alone cannot handle:
+ // Function items or non-capturing closures of differing IDs or InternalSubsts.
+ let (a_sig, b_sig) = {
+ #[allow(rustc::usage_of_ty_tykind)]
+ let is_capturing_closure = |ty: &ty::TyKind<'tcx>| {
+ if let &ty::Closure(closure_def_id, _substs) = ty {
+ self.tcx.upvars_mentioned(closure_def_id.expect_local()).is_some()
+ } else {
+ false
+ }
+ };
+ if is_capturing_closure(prev_ty.kind()) || is_capturing_closure(new_ty.kind()) {
+ (None, None)
+ } else {
+ match (prev_ty.kind(), new_ty.kind()) {
+ (ty::FnDef(..), ty::FnDef(..)) => {
+ // Don't reify if the function types have a LUB, i.e., they
+ // are the same function and their parameters have a LUB.
+ match self
+ .commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty))
+ {
+ // We have a LUB of prev_ty and new_ty, just return it.
+ Ok(ok) => return Ok(self.register_infer_ok_obligations(ok)),
+ Err(_) => {
+ (Some(prev_ty.fn_sig(self.tcx)), Some(new_ty.fn_sig(self.tcx)))
+ }
+ }
+ }
+ (ty::Closure(_, substs), ty::FnDef(..)) => {
+ let b_sig = new_ty.fn_sig(self.tcx);
+ let a_sig = self
+ .tcx
+ .signature_unclosure(substs.as_closure().sig(), b_sig.unsafety());
+ (Some(a_sig), Some(b_sig))
+ }
+ (ty::FnDef(..), ty::Closure(_, substs)) => {
+ let a_sig = prev_ty.fn_sig(self.tcx);
+ let b_sig = self
+ .tcx
+ .signature_unclosure(substs.as_closure().sig(), a_sig.unsafety());
+ (Some(a_sig), Some(b_sig))
+ }
+ (ty::Closure(_, substs_a), ty::Closure(_, substs_b)) => (
+ Some(self.tcx.signature_unclosure(
+ substs_a.as_closure().sig(),
+ hir::Unsafety::Normal,
+ )),
+ Some(self.tcx.signature_unclosure(
+ substs_b.as_closure().sig(),
+ hir::Unsafety::Normal,
+ )),
+ ),
+ _ => (None, None),
+ }
+ }
+ };
+ if let (Some(a_sig), Some(b_sig)) = (a_sig, b_sig) {
+ // Intrinsics are not coercible to function pointers.
+ if a_sig.abi() == Abi::RustIntrinsic
+ || a_sig.abi() == Abi::PlatformIntrinsic
+ || b_sig.abi() == Abi::RustIntrinsic
+ || b_sig.abi() == Abi::PlatformIntrinsic
+ {
+ return Err(TypeError::IntrinsicCast);
+ }
+ // The signature must match.
+ let a_sig = self.normalize_associated_types_in(new.span, a_sig);
+ let b_sig = self.normalize_associated_types_in(new.span, b_sig);
+ let sig = self
+ .at(cause, self.param_env)
+ .trace(prev_ty, new_ty)
+ .lub(a_sig, b_sig)
+ .map(|ok| self.register_infer_ok_obligations(ok))?;
+
+ // Reify both sides and return the reified fn pointer type.
+ let fn_ptr = self.tcx.mk_fn_ptr(sig);
+ let prev_adjustment = match prev_ty.kind() {
+ ty::Closure(..) => Adjust::Pointer(PointerCast::ClosureFnPointer(a_sig.unsafety())),
+ ty::FnDef(..) => Adjust::Pointer(PointerCast::ReifyFnPointer),
+ _ => unreachable!(),
+ };
+ let next_adjustment = match new_ty.kind() {
+ ty::Closure(..) => Adjust::Pointer(PointerCast::ClosureFnPointer(b_sig.unsafety())),
+ ty::FnDef(..) => Adjust::Pointer(PointerCast::ReifyFnPointer),
+ _ => unreachable!(),
+ };
+ for expr in exprs.iter().map(|e| e.as_coercion_site()) {
+ self.apply_adjustments(
+ expr,
+ vec![Adjustment { kind: prev_adjustment.clone(), target: fn_ptr }],
+ );
+ }
+ self.apply_adjustments(new, vec![Adjustment { kind: next_adjustment, target: fn_ptr }]);
+ return Ok(fn_ptr);
+ }
+
+ // Configure a Coerce instance to compute the LUB.
+ // We don't allow two-phase borrows on any autorefs this creates since we
+ // probably aren't processing function arguments here and even if we were,
+ // they're going to get autorefed again anyway and we can apply 2-phase borrows
+ // at that time.
+ let mut coerce = Coerce::new(self, cause.clone(), AllowTwoPhase::No);
+ coerce.use_lub = true;
+
+ // First try to coerce the new expression to the type of the previous ones,
+ // but only if the new expression has no coercion already applied to it.
+ let mut first_error = None;
+ if !self.typeck_results.borrow().adjustments().contains_key(new.hir_id) {
+ let result = self.commit_if_ok(|_| coerce.coerce(new_ty, prev_ty));
+ match result {
+ Ok(ok) => {
+ let (adjustments, target) = self.register_infer_ok_obligations(ok);
+ self.apply_adjustments(new, adjustments);
+ debug!(
+ "coercion::try_find_coercion_lub: was able to coerce from new type {:?} to previous type {:?} ({:?})",
+ new_ty, prev_ty, target
+ );
+ return Ok(target);
+ }
+ Err(e) => first_error = Some(e),
+ }
+ }
+
+ // Then try to coerce the previous expressions to the type of the new one.
+ // This requires ensuring there are no coercions applied to *any* of the
+ // previous expressions, other than noop reborrows (ignoring lifetimes).
+ for expr in exprs {
+ let expr = expr.as_coercion_site();
+ let noop = match self.typeck_results.borrow().expr_adjustments(expr) {
+ &[
+ Adjustment { kind: Adjust::Deref(_), .. },
+ Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(_, mutbl_adj)), .. },
+ ] => {
+ match *self.node_ty(expr.hir_id).kind() {
+ ty::Ref(_, _, mt_orig) => {
+ let mutbl_adj: hir::Mutability = mutbl_adj.into();
+ // Reborrow that we can safely ignore, because
+ // the next adjustment can only be a Deref
+ // which will be merged into it.
+ mutbl_adj == mt_orig
+ }
+ _ => false,
+ }
+ }
+ &[Adjustment { kind: Adjust::NeverToAny, .. }] | &[] => true,
+ _ => false,
+ };
+
+ if !noop {
+ debug!(
+ "coercion::try_find_coercion_lub: older expression {:?} had adjustments, requiring LUB",
+ expr,
+ );
+
+ return self
+ .commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty))
+ .map(|ok| self.register_infer_ok_obligations(ok));
+ }
+ }
+
+ match self.commit_if_ok(|_| coerce.coerce(prev_ty, new_ty)) {
+ Err(_) => {
+ // Avoid giving strange errors on failed attempts.
+ if let Some(e) = first_error {
+ Err(e)
+ } else {
+ self.commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty))
+ .map(|ok| self.register_infer_ok_obligations(ok))
+ }
+ }
+ Ok(ok) => {
+ let (adjustments, target) = self.register_infer_ok_obligations(ok);
+ for expr in exprs {
+ let expr = expr.as_coercion_site();
+ self.apply_adjustments(expr, adjustments.clone());
+ }
+ debug!(
+ "coercion::try_find_coercion_lub: was able to coerce previous type {:?} to new type {:?} ({:?})",
+ prev_ty, new_ty, target
+ );
+ Ok(target)
+ }
+ }
+ }
+}
+
+/// CoerceMany encapsulates the pattern you should use when you have
+/// many expressions that are all getting coerced to a common
+/// type. This arises, for example, when you have a match (the result
+/// of each arm is coerced to a common type). It also arises in less
+/// obvious places, such as when you have many `break foo` expressions
+/// that target the same loop, or the various `return` expressions in
+/// a function.
+///
+/// The basic protocol is as follows:
+///
+/// - Instantiate the `CoerceMany` with an initial `expected_ty`.
+/// This will also serve as the "starting LUB". The expectation is
+/// that this type is something which all of the expressions *must*
+/// be coercible to. Use a fresh type variable if needed.
+/// - For each expression whose result is to be coerced, invoke `coerce()` with
+///   that expression and its type.
+/// - In some cases we wish to coerce "non-expressions" whose types are implicitly
+/// unit. This happens for example if you have a `break` with no expression,
+/// or an `if` with no `else`. In that case, invoke `coerce_forced_unit()`.
+/// - `coerce()` and `coerce_forced_unit()` may report errors. They hide this
+/// from you so that you don't have to worry your pretty head about it.
+/// But if an error is reported, the final type will be `err`.
+/// - Invoking `coerce()` may cause us to go and adjust the "adjustments" on
+/// previously coerced expressions.
+/// - When all done, invoke `complete()`. This will return the LUB of
+/// all your expressions.
+/// - WARNING: I don't believe this final type is guaranteed to be
+/// related to your initial `expected_ty` in any particular way,
+/// although it will typically be a subtype, so you should check it.
+/// - Invoking `complete()` may cause us to go and adjust the "adjustments" on
+/// previously coerced expressions.
+///
+/// Example:
+///
+/// ```ignore (illustrative)
+/// let mut coerce = CoerceMany::new(expected_ty);
+/// for expr in exprs {
+/// let expr_ty = fcx.check_expr_with_expectation(expr, expected);
+/// coerce.coerce(fcx, &cause, expr, expr_ty);
+/// }
+/// let final_ty = coerce.complete(fcx);
+/// ```
+pub struct CoerceMany<'tcx, 'exprs, E: AsCoercionSite> {
+ expected_ty: Ty<'tcx>,
+ final_ty: Option<Ty<'tcx>>,
+ expressions: Expressions<'tcx, 'exprs, E>,
+ pushed: usize,
+}
+
+/// The type of a `CoerceMany` that is storing up the expressions into
+/// a buffer. We use this in `check/mod.rs` for things like `break`.
+pub type DynamicCoerceMany<'tcx> = CoerceMany<'tcx, 'tcx, &'tcx hir::Expr<'tcx>>;
+
+enum Expressions<'tcx, 'exprs, E: AsCoercionSite> {
+ Dynamic(Vec<&'tcx hir::Expr<'tcx>>),
+ UpFront(&'exprs [E]),
+}
+
+impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
+ /// The usual case; collect the set of expressions dynamically.
+    /// If the full set of coercion sites is known beforehand,
+ /// consider `with_coercion_sites()` instead to avoid allocation.
+ pub fn new(expected_ty: Ty<'tcx>) -> Self {
+ Self::make(expected_ty, Expressions::Dynamic(vec![]))
+ }
+
+ /// As an optimization, you can create a `CoerceMany` with a
+ /// pre-existing slice of expressions. In this case, you are
+ /// expected to pass each element in the slice to `coerce(...)` in
+ /// order. This is used with arrays in particular to avoid
+ /// needlessly cloning the slice.
+ pub fn with_coercion_sites(expected_ty: Ty<'tcx>, coercion_sites: &'exprs [E]) -> Self {
+ Self::make(expected_ty, Expressions::UpFront(coercion_sites))
+ }
+
+ fn make(expected_ty: Ty<'tcx>, expressions: Expressions<'tcx, 'exprs, E>) -> Self {
+ CoerceMany { expected_ty, final_ty: None, expressions, pushed: 0 }
+ }
+
+ /// Returns the "expected type" with which this coercion was
+ /// constructed. This represents the "downward propagated" type
+ /// that was given to us at the start of typing whatever construct
+ /// we are typing (e.g., the match expression).
+ ///
+ /// Typically, this is used as the expected type when
+ /// type-checking each of the alternative expressions whose types
+ /// we are trying to merge.
+ pub fn expected_ty(&self) -> Ty<'tcx> {
+ self.expected_ty
+ }
+
+ /// Returns the current "merged type", representing our best-guess
+ /// at the LUB of the expressions we've seen so far (if any). This
+ /// isn't *final* until you call `self.complete()`, which will return
+ /// the merged type.
+ pub fn merged_ty(&self) -> Ty<'tcx> {
+ self.final_ty.unwrap_or(self.expected_ty)
+ }
+
+ /// Indicates that the value generated by `expression`, which is
+ /// of type `expression_ty`, is one of the possibilities that we
+ /// could coerce from. This will record `expression`, and later
+ /// calls to `coerce` may come back and add adjustments and things
+ /// if necessary.
+ pub fn coerce<'a>(
+ &mut self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ cause: &ObligationCause<'tcx>,
+ expression: &'tcx hir::Expr<'tcx>,
+ expression_ty: Ty<'tcx>,
+ ) {
+ self.coerce_inner(fcx, cause, Some(expression), expression_ty, None, false)
+ }
+
+ /// Indicates that one of the inputs is a "forced unit". This
+ /// occurs in a case like `if foo { ... };`, where the missing else
+ /// generates a "forced unit". Another example is a `loop { break;
+ /// }`, where the `break` has no argument expression. We treat
+ /// these cases slightly differently for error-reporting
+ /// purposes. Note that these tend to correspond to cases where
+ /// the `()` expression is implicit in the source, and hence we do
+ /// not take an expression argument.
+ ///
+    /// The `augment_error` closure gives you a chance to extend the error
+    /// message, in case one results (e.g., we use this to suggest
+    /// removing a `;`).
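+    ///
+    /// For example (illustrative; `cond` is some `bool`):
+    ///
+    /// ```ignore (illustrative)
+    /// let x: u32 = if cond { 1 }; // error: the missing `else` is a forced `()`
+    /// ```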
+ pub fn coerce_forced_unit<'a>(
+ &mut self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ cause: &ObligationCause<'tcx>,
+ augment_error: &mut dyn FnMut(&mut Diagnostic),
+ label_unit_as_expected: bool,
+ ) {
+ self.coerce_inner(
+ fcx,
+ cause,
+ None,
+ fcx.tcx.mk_unit(),
+ Some(augment_error),
+ label_unit_as_expected,
+ )
+ }
+
+ /// The inner coercion "engine". If `expression` is `None`, this
+    /// is a forced-unit case, and hence `expression_ty` must be
+    /// the unit type `()`.
+ #[instrument(skip(self, fcx, augment_error, label_expression_as_expected), level = "debug")]
+ pub(crate) fn coerce_inner<'a>(
+ &mut self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ cause: &ObligationCause<'tcx>,
+ expression: Option<&'tcx hir::Expr<'tcx>>,
+ mut expression_ty: Ty<'tcx>,
+ augment_error: Option<&mut dyn FnMut(&mut Diagnostic)>,
+ label_expression_as_expected: bool,
+ ) {
+ // Incorporate whatever type inference information we have
+ // until now; in principle we might also want to process
+ // pending obligations, but doing so should only improve
+ // compatibility (hopefully that is true) by helping us
+ // uncover never types better.
+ if expression_ty.is_ty_var() {
+ expression_ty = fcx.infcx.shallow_resolve(expression_ty);
+ }
+
+ // If we see any error types, just propagate that error
+ // upwards.
+ if expression_ty.references_error() || self.merged_ty().references_error() {
+ self.final_ty = Some(fcx.tcx.ty_error());
+ return;
+ }
+
+ // Handle the actual type unification etc.
+ let result = if let Some(expression) = expression {
+ if self.pushed == 0 {
+ // Special-case the first expression we are coercing.
+ // To be honest, I'm not entirely sure why we do this.
+                // We don't allow two-phase borrows; see the comment in `try_find_coercion_lub` for why.
+ fcx.try_coerce(
+ expression,
+ expression_ty,
+ self.expected_ty,
+ AllowTwoPhase::No,
+ Some(cause.clone()),
+ )
+ } else {
+ match self.expressions {
+ Expressions::Dynamic(ref exprs) => fcx.try_find_coercion_lub(
+ cause,
+ exprs,
+ self.merged_ty(),
+ expression,
+ expression_ty,
+ ),
+ Expressions::UpFront(ref coercion_sites) => fcx.try_find_coercion_lub(
+ cause,
+ &coercion_sites[0..self.pushed],
+ self.merged_ty(),
+ expression,
+ expression_ty,
+ ),
+ }
+ }
+ } else {
+            // This is a hack for cases where we default to `()` because
+            // the expression, etc., has been omitted from the source. An
+ // example is an `if let` without an else:
+ //
+ // if let Some(x) = ... { }
+ //
+ // we wind up with a second match arm that is like `_ =>
+ // ()`. That is the case we are considering here. We take
+ // a different path to get the right "expected, found"
+ // message and so forth (and because we know that
+ // `expression_ty` will be unit).
+ //
+ // Another example is `break` with no argument expression.
+ assert!(expression_ty.is_unit(), "if let hack without unit type");
+ fcx.at(cause, fcx.param_env)
+ .eq_exp(label_expression_as_expected, expression_ty, self.merged_ty())
+ .map(|infer_ok| {
+ fcx.register_infer_ok_obligations(infer_ok);
+ expression_ty
+ })
+ };
+
+ debug!(?result);
+ match result {
+ Ok(v) => {
+ self.final_ty = Some(v);
+ if let Some(e) = expression {
+ match self.expressions {
+ Expressions::Dynamic(ref mut buffer) => buffer.push(e),
+ Expressions::UpFront(coercion_sites) => {
+ // if the user gave us an array to validate, check that we got
+ // the next expression in the list, as expected
+ assert_eq!(
+ coercion_sites[self.pushed].as_coercion_site().hir_id,
+ e.hir_id
+ );
+ }
+ }
+ self.pushed += 1;
+ }
+ }
+ Err(coercion_error) => {
+ // Mark that we've failed to coerce the types here to suppress
+ // any superfluous errors we might encounter while trying to
+ // emit or provide suggestions on how to fix the initial error.
+ fcx.set_tainted_by_errors();
+ let (expected, found) = if label_expression_as_expected {
+ // In the case where this is a "forced unit", like
+ // `break`, we want to call the `()` "expected"
+ // since it is implied by the syntax.
+                    // (Note: not all force-units work this way.)
+ (expression_ty, self.merged_ty())
+ } else {
+ // Otherwise, the "expected" type for error
+ // reporting is the current unification type,
+ // which is basically the LUB of the expressions
+ // we've seen so far (combined with the expected
+ // type)
+ (self.merged_ty(), expression_ty)
+ };
+ let (expected, found) = fcx.resolve_vars_if_possible((expected, found));
+
+ let mut err;
+ let mut unsized_return = false;
+ let mut visitor = CollectRetsVisitor { ret_exprs: vec![] };
+ match *cause.code() {
+ ObligationCauseCode::ReturnNoExpression => {
+ err = struct_span_err!(
+ fcx.tcx.sess,
+ cause.span,
+ E0069,
+ "`return;` in a function whose return type is not `()`"
+ );
+ err.span_label(cause.span, "return type is not `()`");
+ }
+ ObligationCauseCode::BlockTailExpression(blk_id) => {
+ let parent_id = fcx.tcx.hir().get_parent_node(blk_id);
+ err = self.report_return_mismatched_types(
+ cause,
+ expected,
+ found,
+ coercion_error.clone(),
+ fcx,
+ parent_id,
+ expression,
+ Some(blk_id),
+ );
+ if !fcx.tcx.features().unsized_locals {
+ unsized_return = self.is_return_ty_unsized(fcx, blk_id);
+ }
+                        if let Some(expression) = expression
+                            && let hir::ExprKind::Loop(loop_blk, ..) = expression.kind {
+                            intravisit::walk_block(&mut visitor, loop_blk);
+                        }
+ }
+ ObligationCauseCode::ReturnValue(id) => {
+ err = self.report_return_mismatched_types(
+ cause,
+ expected,
+ found,
+ coercion_error.clone(),
+ fcx,
+ id,
+ expression,
+ None,
+ );
+ if !fcx.tcx.features().unsized_locals {
+ let id = fcx.tcx.hir().get_parent_node(id);
+ unsized_return = self.is_return_ty_unsized(fcx, id);
+ }
+ }
+ _ => {
+ err = fcx.err_ctxt().report_mismatched_types(
+ cause,
+ expected,
+ found,
+ coercion_error.clone(),
+ );
+ }
+ }
+
+ if let Some(augment_error) = augment_error {
+ augment_error(&mut err);
+ }
+
+ let is_insufficiently_polymorphic =
+ matches!(coercion_error, TypeError::RegionsInsufficientlyPolymorphic(..));
+
+ if !is_insufficiently_polymorphic && let Some(expr) = expression {
+ fcx.emit_coerce_suggestions(
+ &mut err,
+ expr,
+ found,
+ expected,
+ None,
+ Some(coercion_error),
+ );
+ }
+
+ if visitor.ret_exprs.len() > 0 && let Some(expr) = expression {
+ self.note_unreachable_loop_return(&mut err, &expr, &visitor.ret_exprs);
+ }
+ err.emit_unless(unsized_return);
+
+ self.final_ty = Some(fcx.tcx.ty_error());
+ }
+ }
+ }
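+
+    /// Notes that the `loop`/`for` in tail position might run zero times, in
+    /// which case none of the `return` expressions inside it would execute.
+    /// A minimal sketch of code this note fires on:
+    ///
+    /// ```compile_fail,E0308
+    /// fn first(v: &[i32]) -> i32 {
+    ///     for &x in v {
+    ///         return x;
+    ///     }
+    /// }
+    /// ```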
+ fn note_unreachable_loop_return(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'tcx>,
+ ret_exprs: &Vec<&'tcx hir::Expr<'tcx>>,
+ ) {
+        let hir::ExprKind::Loop(_, _, _, loop_span) = expr.kind else { return; };
+ let mut span: MultiSpan = vec![loop_span].into();
+ span.push_span_label(loop_span, "this might have zero elements to iterate on");
+ const MAXITER: usize = 3;
+ let iter = ret_exprs.iter().take(MAXITER);
+ for ret_expr in iter {
+ span.push_span_label(
+ ret_expr.span,
+ "if the loop doesn't execute, this value would never get returned",
+ );
+ }
+ err.span_note(
+ span,
+ "the function expects a value to always be returned, but loops might run zero times",
+ );
+ if MAXITER < ret_exprs.len() {
+ err.note(&format!(
+ "if the loop doesn't execute, {} other values would never get returned",
+ ret_exprs.len() - MAXITER
+ ));
+ }
+ err.help(
+ "return a value for the case when the loop has zero elements to iterate on, or \
+ consider changing the return type to account for that possibility",
+ );
+ }
+
+ fn report_return_mismatched_types<'a>(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ty_err: TypeError<'tcx>,
+ fcx: &FnCtxt<'a, 'tcx>,
+ id: hir::HirId,
+ expression: Option<&'tcx hir::Expr<'tcx>>,
+ blk_id: Option<hir::HirId>,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let mut err = fcx.err_ctxt().report_mismatched_types(cause, expected, found, ty_err);
+
+ let mut pointing_at_return_type = false;
+ let mut fn_output = None;
+
+ let parent_id = fcx.tcx.hir().get_parent_node(id);
+ let parent = fcx.tcx.hir().get(parent_id);
+ if let Some(expr) = expression
+ && let hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Closure(&hir::Closure { body, .. }), .. }) = parent
+ && !matches!(fcx.tcx.hir().body(body).value.kind, hir::ExprKind::Block(..))
+ {
+ fcx.suggest_missing_semicolon(&mut err, expr, expected, true);
+ }
+ // Verify that this is a tail expression of a function, otherwise the
+ // label pointing out the cause for the type coercion will be wrong
+ // as prior return coercions would not be relevant (#57664).
+ let fn_decl = if let (Some(expr), Some(blk_id)) = (expression, blk_id) {
+ pointing_at_return_type =
+ fcx.suggest_mismatched_types_on_tail(&mut err, expr, expected, found, blk_id);
+ if let (Some(cond_expr), true, false) = (
+ fcx.tcx.hir().get_if_cause(expr.hir_id),
+ expected.is_unit(),
+ pointing_at_return_type,
+ )
+ // If the block is from an external macro or try (`?`) desugaring, then
+ // do not suggest adding a semicolon, because there's nowhere to put it.
+ // See issues #81943 and #87051.
+ && matches!(
+ cond_expr.span.desugaring_kind(),
+ None | Some(DesugaringKind::WhileLoop)
+ ) && !in_external_macro(fcx.tcx.sess, cond_expr.span)
+ && !matches!(
+ cond_expr.kind,
+ hir::ExprKind::Match(.., hir::MatchSource::TryDesugar)
+ )
+ {
+ err.span_label(cond_expr.span, "expected this to be `()`");
+ if expr.can_have_side_effects() {
+ fcx.suggest_semicolon_at_end(cond_expr.span, &mut err);
+ }
+ }
+ fcx.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main))
+ } else {
+ fcx.get_fn_decl(parent_id)
+ };
+
+ if let Some((fn_decl, can_suggest)) = fn_decl {
+ if blk_id.is_none() {
+ pointing_at_return_type |= fcx.suggest_missing_return_type(
+ &mut err,
+ &fn_decl,
+ expected,
+ found,
+ can_suggest,
+ fcx.tcx.hir().get_parent_item(id).into(),
+ );
+ }
+ if !pointing_at_return_type {
+ fn_output = Some(&fn_decl.output); // `impl Trait` return type
+ }
+ }
+
+ let parent_id = fcx.tcx.hir().get_parent_item(id);
+ let parent_item = fcx.tcx.hir().get_by_def_id(parent_id.def_id);
+
+ if let (Some(expr), Some(_), Some((fn_decl, _, _))) =
+ (expression, blk_id, fcx.get_node_fn_decl(parent_item))
+ {
+ fcx.suggest_missing_break_or_return_expr(
+ &mut err,
+ expr,
+ fn_decl,
+ expected,
+ found,
+ id,
+ parent_id.into(),
+ );
+ }
+
+ let ret_coercion_span = fcx.ret_coercion_span.get();
+
+ if let Some(sp) = ret_coercion_span
+ // If the closure has an explicit return type annotation, or if
+ // the closure's return type has been inferred from outside
+ // requirements (such as an Fn* trait bound), then a type error
+ // may occur at the first return expression we see in the closure
+ // (if it conflicts with the declared return type). Skip adding a
+ // note in this case, since it would be incorrect.
+ && !fcx.return_type_pre_known
+ {
+ err.span_note(
+ sp,
+ &format!(
+ "return type inferred to be `{}` here",
+ expected
+ ),
+ );
+ }
+
+ if let (Some(sp), Some(fn_output)) = (ret_coercion_span, fn_output) {
+ self.add_impl_trait_explanation(&mut err, cause, fcx, expected, sp, fn_output);
+ }
+
+ err
+ }
+
+ fn add_impl_trait_explanation<'a>(
+ &self,
+ err: &mut Diagnostic,
+ cause: &ObligationCause<'tcx>,
+ fcx: &FnCtxt<'a, 'tcx>,
+ expected: Ty<'tcx>,
+ sp: Span,
+ fn_output: &hir::FnRetTy<'_>,
+ ) {
+ let return_sp = fn_output.span();
+ err.span_label(return_sp, "expected because this return type...");
+ err.span_label(
+ sp,
+ format!("...is found to be `{}` here", fcx.resolve_vars_with_obligations(expected)),
+ );
+ let impl_trait_msg = "for information on `impl Trait`, see \
+ <https://doc.rust-lang.org/book/ch10-02-traits.html\
+ #returning-types-that-implement-traits>";
+ let trait_obj_msg = "for information on trait objects, see \
+ <https://doc.rust-lang.org/book/ch17-02-trait-objects.html\
+ #using-trait-objects-that-allow-for-values-of-different-types>";
+ err.note("to return `impl Trait`, all returned values must be of the same type");
+ err.note(impl_trait_msg);
+ let snippet = fcx
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(return_sp)
+ .unwrap_or_else(|_| "dyn Trait".to_string());
+ let mut snippet_iter = snippet.split_whitespace();
+ let has_impl = snippet_iter.next().map_or(false, |s| s == "impl");
+ // Only suggest `Box<dyn Trait>` if `Trait` in `impl Trait` is object safe.
+ let mut is_object_safe = false;
+ if let hir::FnRetTy::Return(ty) = fn_output
+ // Get the return type.
+ && let hir::TyKind::OpaqueDef(..) = ty.kind
+ {
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(fcx, ty);
+ // Get the `impl Trait`'s `DefId`.
+ if let ty::Opaque(def_id, _) = ty.kind()
+ // Get the `impl Trait`'s `Item` so that we can get its trait bounds and
+ // get the `Trait`'s `DefId`.
+ && let hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds, .. }) =
+ fcx.tcx.hir().expect_item(def_id.expect_local()).kind
+ {
+                // Are all of this `impl Trait`'s traits object safe?
+ is_object_safe = bounds.iter().all(|bound| {
+ bound
+ .trait_ref()
+ .and_then(|t| t.trait_def_id())
+ .map_or(false, |def_id| {
+ fcx.tcx.object_safety_violations(def_id).is_empty()
+ })
+ })
+ }
+ };
+ if has_impl {
+ if is_object_safe {
+ err.multipart_suggestion(
+ "you could change the return type to be a boxed trait object",
+ vec![
+ (return_sp.with_hi(return_sp.lo() + BytePos(4)), "Box<dyn".to_string()),
+ (return_sp.shrink_to_hi(), ">".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ let sugg = [sp, cause.span]
+ .into_iter()
+ .flat_map(|sp| {
+ [
+ (sp.shrink_to_lo(), "Box::new(".to_string()),
+ (sp.shrink_to_hi(), ")".to_string()),
+ ]
+ .into_iter()
+ })
+ .collect::<Vec<_>>();
+ err.multipart_suggestion(
+ "if you change the return type to expect trait objects, box the returned \
+ expressions",
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.help(&format!(
+ "if the trait `{}` were object safe, you could return a boxed trait object",
+ &snippet[5..]
+ ));
+ }
+ err.note(trait_obj_msg);
+ }
+ err.help("you could instead create a new `enum` with a variant for each returned type");
+ }
+
+ fn is_return_ty_unsized<'a>(&self, fcx: &FnCtxt<'a, 'tcx>, blk_id: hir::HirId) -> bool {
+ if let Some((fn_decl, _)) = fcx.get_fn_decl(blk_id)
+ && let hir::FnRetTy::Return(ty) = fn_decl.output
+ && let ty = <dyn AstConv<'_>>::ast_ty_to_ty(fcx, ty)
+ && let ty::Dynamic(..) = ty.kind()
+ {
+ return true;
+ }
+ false
+ }
+
+ pub fn complete<'a>(self, fcx: &FnCtxt<'a, 'tcx>) -> Ty<'tcx> {
+ if let Some(final_ty) = self.final_ty {
+ final_ty
+ } else {
+ // If we only had inputs that were of type `!` (or no
+ // inputs at all), then the final type is `!`.
+ assert_eq!(self.pushed, 0);
+ fcx.tcx.types.never
+ }
+ }
+}
+
+/// Something that can be converted into an expression to which we can
+/// apply a coercion.
+pub trait AsCoercionSite {
+ fn as_coercion_site(&self) -> &hir::Expr<'_>;
+}
+
+impl AsCoercionSite for hir::Expr<'_> {
+ fn as_coercion_site(&self) -> &hir::Expr<'_> {
+ self
+ }
+}
+
+impl<'a, T> AsCoercionSite for &'a T
+where
+ T: AsCoercionSite,
+{
+ fn as_coercion_site(&self) -> &hir::Expr<'_> {
+ (**self).as_coercion_site()
+ }
+}
+
+impl AsCoercionSite for ! {
+ fn as_coercion_site(&self) -> &hir::Expr<'_> {
+ unreachable!()
+ }
+}
+
+impl AsCoercionSite for hir::Arm<'_> {
+ fn as_coercion_site(&self) -> &hir::Expr<'_> {
+ &self.body
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/demand.rs b/compiler/rustc_hir_typeck/src/demand.rs
new file mode 100644
index 000000000..16febfc46
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/demand.rs
@@ -0,0 +1,1454 @@
+use crate::FnCtxt;
+use rustc_ast::util::parser::PREC_POSTFIX;
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{is_range_literal, Node};
+use rustc_infer::infer::InferOk;
+use rustc_middle::lint::in_external_macro;
+use rustc_middle::middle::stability::EvalResult;
+use rustc_middle::ty::adjustment::AllowTwoPhase;
+use rustc_middle::ty::error::{ExpectedFound, TypeError};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, Article, AssocItem, Ty, TypeAndMut};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::{BytePos, Span};
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::traits::ObligationCause;
+
+use super::method::probe;
+
+use std::iter;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn emit_coerce_suggestions(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'tcx>,
+ expr_ty: Ty<'tcx>,
+ expected: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ error: Option<TypeError<'tcx>>,
+ ) {
+ self.annotate_expected_due_to_let_ty(err, expr, error);
+
+ // Use `||` to give these suggestions a precedence
+ let _ = self.suggest_missing_parentheses(err, expr)
+ || self.suggest_deref_ref_or_into(err, expr, expected, expr_ty, expected_ty_expr)
+ || self.suggest_compatible_variants(err, expr, expected, expr_ty)
+ || self.suggest_non_zero_new_unwrap(err, expr, expected, expr_ty)
+ || self.suggest_calling_boxed_future_when_appropriate(err, expr, expected, expr_ty)
+ || self.suggest_no_capture_closure(err, expected, expr_ty)
+ || self.suggest_boxing_when_appropriate(err, expr, expected, expr_ty)
+ || self.suggest_block_to_brackets_peeling_refs(err, expr, expr_ty, expected)
+ || self.suggest_copied_or_cloned(err, expr, expr_ty, expected)
+ || self.suggest_into(err, expr, expr_ty, expected);
+
+ self.note_type_is_not_clone(err, expected, expr_ty, expr);
+ self.note_need_for_fn_pointer(err, expected, expr_ty);
+ self.note_internal_mutation_in_method(err, expr, expected, expr_ty);
+ }
+
+ // Requires that the two types unify, and prints an error message if
+ // they don't.
+ pub fn demand_suptype(&self, sp: Span, expected: Ty<'tcx>, actual: Ty<'tcx>) {
+ if let Some(mut e) = self.demand_suptype_diag(sp, expected, actual) {
+ e.emit();
+ }
+ }
+
+ pub fn demand_suptype_diag(
+ &self,
+ sp: Span,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ self.demand_suptype_with_origin(&self.misc(sp), expected, actual)
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub fn demand_suptype_with_origin(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ match self.at(cause, self.param_env).sup(expected, actual) {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations);
+ None
+ }
+ Err(e) => Some(self.err_ctxt().report_mismatched_types(&cause, expected, actual, e)),
+ }
+ }
+
+ pub fn demand_eqtype(&self, sp: Span, expected: Ty<'tcx>, actual: Ty<'tcx>) {
+ if let Some(mut err) = self.demand_eqtype_diag(sp, expected, actual) {
+ err.emit();
+ }
+ }
+
+ pub fn demand_eqtype_diag(
+ &self,
+ sp: Span,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ self.demand_eqtype_with_origin(&self.misc(sp), expected, actual)
+ }
+
+ pub fn demand_eqtype_with_origin(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ match self.at(cause, self.param_env).eq(expected, actual) {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations);
+ None
+ }
+ Err(e) => Some(self.err_ctxt().report_mismatched_types(cause, expected, actual, e)),
+ }
+ }
+
+ pub fn demand_coerce(
+ &self,
+ expr: &hir::Expr<'tcx>,
+ checked_ty: Ty<'tcx>,
+ expected: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ allow_two_phase: AllowTwoPhase,
+ ) -> Ty<'tcx> {
+ let (ty, err) =
+ self.demand_coerce_diag(expr, checked_ty, expected, expected_ty_expr, allow_two_phase);
+ if let Some(mut err) = err {
+ err.emit();
+ }
+ ty
+ }
+
+ /// Checks that the type of `expr` can be coerced to `expected`.
+ ///
+ /// N.B., this code relies on `self.diverges` to be accurate. In particular, assignments to `!`
+ /// will be permitted if the diverges flag is currently "always".
+ #[instrument(level = "debug", skip(self, expr, expected_ty_expr, allow_two_phase))]
+ pub fn demand_coerce_diag(
+ &self,
+ expr: &hir::Expr<'tcx>,
+ checked_ty: Ty<'tcx>,
+ expected: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ allow_two_phase: AllowTwoPhase,
+ ) -> (Ty<'tcx>, Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>>) {
+ let expected = self.resolve_vars_with_obligations(expected);
+
+ let e = match self.try_coerce(expr, checked_ty, expected, allow_two_phase, None) {
+ Ok(ty) => return (ty, None),
+ Err(e) => e,
+ };
+
+ self.set_tainted_by_errors();
+ let expr = expr.peel_drop_temps();
+ let cause = self.misc(expr.span);
+ let expr_ty = self.resolve_vars_with_obligations(checked_ty);
+ let mut err = self.err_ctxt().report_mismatched_types(&cause, expected, expr_ty, e.clone());
+
+ let is_insufficiently_polymorphic =
+ matches!(e, TypeError::RegionsInsufficientlyPolymorphic(..));
+
+ // FIXME(#73154): For now, we do leak check when coercing function
+ // pointers in typeck, instead of only during borrowck. This can lead
+ // to these `RegionsInsufficientlyPolymorphic` errors that aren't helpful.
+ if !is_insufficiently_polymorphic {
+ self.emit_coerce_suggestions(
+ &mut err,
+ expr,
+ expr_ty,
+ expected,
+ expected_ty_expr,
+ Some(e),
+ );
+ }
+
+ (expected, Some(err))
+ }
+
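+    /// Points at the type in `let <pat>: <ty> = <init>;` (or at the
+    /// assigned-to place) as the reason a type was expected. A sketch of
+    /// the case it labels:
+    ///
+    /// ```compile_fail,E0308
+    /// let x: u32 = "hello"; // the `u32` annotation is labeled "expected due to this"
+    /// ```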
+ fn annotate_expected_due_to_let_ty(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ error: Option<TypeError<'_>>,
+ ) {
+ let parent = self.tcx.hir().get_parent_node(expr.hir_id);
+ match (self.tcx.hir().find(parent), error) {
+ (Some(hir::Node::Local(hir::Local { ty: Some(ty), init: Some(init), .. })), _)
+ if init.hir_id == expr.hir_id =>
+ {
+ // Point at `let` assignment type.
+ err.span_label(ty.span, "expected due to this");
+ }
+ (
+ Some(hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Assign(lhs, rhs, _), ..
+ })),
+ Some(TypeError::Sorts(ExpectedFound { expected, .. })),
+ ) if rhs.hir_id == expr.hir_id && !expected.is_closure() => {
+ // We ignore closures explicitly because we already point at them elsewhere.
+ // Point at the assigned-to binding.
+ let mut primary_span = lhs.span;
+ let mut secondary_span = lhs.span;
+ let mut post_message = "";
+ match lhs.kind {
+ hir::ExprKind::Path(hir::QPath::Resolved(
+ None,
+ hir::Path {
+ res:
+ hir::def::Res::Def(
+ hir::def::DefKind::Static(_) | hir::def::DefKind::Const,
+ def_id,
+ ),
+ ..
+ },
+ )) => {
+ if let Some(hir::Node::Item(hir::Item {
+ ident,
+ kind: hir::ItemKind::Static(ty, ..) | hir::ItemKind::Const(ty, ..),
+ ..
+ })) = self.tcx.hir().get_if_local(*def_id)
+ {
+ primary_span = ty.span;
+ secondary_span = ident.span;
+ post_message = " type";
+ }
+ }
+ hir::ExprKind::Path(hir::QPath::Resolved(
+ None,
+ hir::Path { res: hir::def::Res::Local(hir_id), .. },
+ )) => {
+ if let Some(hir::Node::Pat(pat)) = self.tcx.hir().find(*hir_id) {
+ let parent = self.tcx.hir().get_parent_node(pat.hir_id);
+ primary_span = pat.span;
+ secondary_span = pat.span;
+ match self.tcx.hir().find(parent) {
+ Some(hir::Node::Local(hir::Local { ty: Some(ty), .. })) => {
+ primary_span = ty.span;
+ post_message = " type";
+ }
+ Some(hir::Node::Local(hir::Local { init: Some(init), .. })) => {
+ primary_span = init.span;
+ post_message = " value";
+ }
+ Some(hir::Node::Param(hir::Param { ty_span, .. })) => {
+ primary_span = *ty_span;
+ post_message = " parameter type";
+ }
+ _ => {}
+ }
+ }
+ }
+ _ => {}
+ }
+
+ if primary_span != secondary_span
+ && self
+ .tcx
+ .sess
+ .source_map()
+ .is_multiline(secondary_span.shrink_to_hi().until(primary_span))
+ {
+                    // We are pointing at the binding's type or initializer value, but its pattern
+                    // is on a different line, so we point at both.
+ err.span_label(secondary_span, "expected due to the type of this binding");
+ err.span_label(primary_span, &format!("expected due to this{post_message}"));
+ } else if post_message == "" {
+ // We are pointing at either the assignment lhs or the binding def pattern.
+ err.span_label(primary_span, "expected due to the type of this binding");
+ } else {
+ // We are pointing at the binding's type or initializer value.
+ err.span_label(primary_span, &format!("expected due to this{post_message}"));
+ }
+
+ if !lhs.is_syntactic_place_expr() {
+ // We already emitted E0070 "invalid left-hand side of assignment", so we
+ // silence this.
+ err.downgrade_to_delayed_bug();
+ }
+ }
+ _ => {}
+ }
+ }
+
+ /// If the expected type is an enum (Issue #55250) with any variants whose
+ /// sole field is of the found type, suggest such variants. (Issue #42764)
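+    ///
+    /// For example, when `Option<i32>` is expected but a bare `i32` is found,
+    /// we suggest wrapping the expression in `Some`:
+    ///
+    /// ```compile_fail,E0308
+    /// fn f() -> Option<i32> {
+    ///     1 // suggestion: `Some(1)`
+    /// }
+    /// ```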
+ fn suggest_compatible_variants(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expected: Ty<'tcx>,
+ expr_ty: Ty<'tcx>,
+ ) -> bool {
+ if let ty::Adt(expected_adt, substs) = expected.kind() {
+ if let hir::ExprKind::Field(base, ident) = expr.kind {
+ let base_ty = self.typeck_results.borrow().expr_ty(base);
+ if self.can_eq(self.param_env, base_ty, expected).is_ok()
+ && let Some(base_span) = base.span.find_ancestor_inside(expr.span)
+ {
+ err.span_suggestion_verbose(
+ expr.span.with_lo(base_span.hi()),
+ format!("consider removing the tuple struct field `{ident}`"),
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ return true;
+ }
+ }
+
+ // If the expression is of type () and it's the return expression of a block,
+ // we suggest adding a separate return expression instead.
+ // (To avoid things like suggesting `Ok(while .. { .. })`.)
+ if expr_ty.is_unit() {
+ let mut id = expr.hir_id;
+ let mut parent;
+
+ // Unroll desugaring, to make sure this works for `for` loops etc.
+ loop {
+ parent = self.tcx.hir().get_parent_node(id);
+ if let Some(parent_span) = self.tcx.hir().opt_span(parent) {
+ if parent_span.find_ancestor_inside(expr.span).is_some() {
+ // The parent node is part of the same span, so is the result of the
+ // same expansion/desugaring and not the 'real' parent node.
+ id = parent;
+ continue;
+ }
+ }
+ break;
+ }
+
+ if let Some(hir::Node::Block(&hir::Block {
+ span: block_span, expr: Some(e), ..
+ })) = self.tcx.hir().find(parent)
+ {
+ if e.hir_id == id {
+ if let Some(span) = expr.span.find_ancestor_inside(block_span) {
+ let return_suggestions = if self
+ .tcx
+ .is_diagnostic_item(sym::Result, expected_adt.did())
+ {
+ vec!["Ok(())"]
+ } else if self.tcx.is_diagnostic_item(sym::Option, expected_adt.did()) {
+ vec!["None", "Some(())"]
+ } else {
+ return false;
+ };
+ if let Some(indent) =
+ self.tcx.sess.source_map().indentation_before(span.shrink_to_lo())
+ {
+ // Add a semicolon, except after `}`.
+ let semicolon =
+ match self.tcx.sess.source_map().span_to_snippet(span) {
+ Ok(s) if s.ends_with('}') => "",
+ _ => ";",
+ };
+ err.span_suggestions(
+ span.shrink_to_hi(),
+ "try adding an expression at the end of the block",
+ return_suggestions
+ .into_iter()
+ .map(|r| format!("{semicolon}\n{indent}{r}")),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ return true;
+ }
+ }
+ }
+ }
+
+ let compatible_variants: Vec<(String, _, _, Option<String>)> = expected_adt
+ .variants()
+ .iter()
+ .filter(|variant| {
+ variant.fields.len() == 1
+ })
+ .filter_map(|variant| {
+ let sole_field = &variant.fields[0];
+
+ let field_is_local = sole_field.did.is_local();
+ let field_is_accessible =
+ sole_field.vis.is_accessible_from(expr.hir_id.owner.def_id, self.tcx)
+ // Skip suggestions for unstable public fields (for example `Pin::pointer`)
+ && matches!(self.tcx.eval_stability(sole_field.did, None, expr.span, None), EvalResult::Allow | EvalResult::Unmarked);
+
+ if !field_is_local && !field_is_accessible {
+ return None;
+ }
+
+ let note_about_variant_field_privacy = (field_is_local && !field_is_accessible)
+ .then(|| format!(" (its field is private, but it's local to this crate and its privacy can be changed)"));
+
+ let sole_field_ty = sole_field.ty(self.tcx, substs);
+ if self.can_coerce(expr_ty, sole_field_ty) {
+ let variant_path =
+ with_no_trimmed_paths!(self.tcx.def_path_str(variant.def_id));
+ // FIXME #56861: DRYer prelude filtering
+ if let Some(path) = variant_path.strip_prefix("std::prelude::")
+ && let Some((_, path)) = path.split_once("::")
+ {
+ return Some((path.to_string(), variant.ctor_kind, sole_field.name, note_about_variant_field_privacy));
+ }
+ Some((variant_path, variant.ctor_kind, sole_field.name, note_about_variant_field_privacy))
+ } else {
+ None
+ }
+ })
+ .collect();
+
+ let suggestions_for = |variant: &_, ctor, field_name| {
+ let prefix = match self.maybe_get_struct_pattern_shorthand_field(expr) {
+ Some(ident) => format!("{ident}: "),
+ None => String::new(),
+ };
+
+ let (open, close) = match ctor {
+ hir::def::CtorKind::Fn => ("(".to_owned(), ")"),
+ hir::def::CtorKind::Fictive => (format!(" {{ {field_name}: "), " }"),
+
+ // unit variants don't have fields
+ hir::def::CtorKind::Const => unreachable!(),
+ };
+
+ // Suggest constructor as deep into the block tree as possible.
+ // This fixes https://github.com/rust-lang/rust/issues/101065,
+ // and also just helps make the most minimal suggestions.
+ let mut expr = expr;
+ while let hir::ExprKind::Block(block, _) = &expr.kind
+ && let Some(expr_) = &block.expr
+ {
+ expr = expr_
+ }
+
+ vec![
+ (expr.span.shrink_to_lo(), format!("{prefix}{variant}{open}")),
+ (expr.span.shrink_to_hi(), close.to_owned()),
+ ]
+ };
+
+ match &compatible_variants[..] {
+ [] => { /* No variants to format */ }
+ [(variant, ctor_kind, field_name, note)] => {
+ // Just a single matching variant.
+ err.multipart_suggestion_verbose(
+ &format!(
+ "try wrapping the expression in `{variant}`{note}",
+ note = note.as_deref().unwrap_or("")
+ ),
+ suggestions_for(&**variant, *ctor_kind, *field_name),
+ Applicability::MaybeIncorrect,
+ );
+ return true;
+ }
+ _ => {
+ // More than one matching variant.
+ err.multipart_suggestions(
+ &format!(
+ "try wrapping the expression in a variant of `{}`",
+ self.tcx.def_path_str(expected_adt.did())
+ ),
+ compatible_variants.into_iter().map(
+ |(variant, ctor_kind, field_name, _)| {
+ suggestions_for(&variant, ctor_kind, field_name)
+ },
+ ),
+ Applicability::MaybeIncorrect,
+ );
+ return true;
+ }
+ }
+ }
+
+ false
+ }
+
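+    /// If a `NonZero*` type (or `Option<NonZero*>`) is expected but the
+    /// corresponding primitive integer is provided, suggest going through
+    /// `NonZero*::new`. A minimal sketch of the case being handled:
+    ///
+    /// ```compile_fail,E0308
+    /// let n: std::num::NonZeroU32 = 1; // suggestion: `NonZeroU32::new(1).unwrap()`
+    /// ```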
+ fn suggest_non_zero_new_unwrap(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expected: Ty<'tcx>,
+ expr_ty: Ty<'tcx>,
+ ) -> bool {
+ let tcx = self.tcx;
+ let (adt, unwrap) = match expected.kind() {
+ // In case Option<NonZero*> is wanted, but * is provided, suggest calling new
+ ty::Adt(adt, substs) if tcx.is_diagnostic_item(sym::Option, adt.did()) => {
+ // Unwrap option
+ let ty::Adt(adt, _) = substs.type_at(0).kind() else { return false; };
+
+ (adt, "")
+ }
+            // In case NonZero* is wanted but * is provided, also add `.unwrap()` to satisfy the types
+ ty::Adt(adt, _) => (adt, ".unwrap()"),
+ _ => return false,
+ };
+
+ let map = [
+ (sym::NonZeroU8, tcx.types.u8),
+ (sym::NonZeroU16, tcx.types.u16),
+ (sym::NonZeroU32, tcx.types.u32),
+ (sym::NonZeroU64, tcx.types.u64),
+ (sym::NonZeroU128, tcx.types.u128),
+ (sym::NonZeroI8, tcx.types.i8),
+ (sym::NonZeroI16, tcx.types.i16),
+ (sym::NonZeroI32, tcx.types.i32),
+ (sym::NonZeroI64, tcx.types.i64),
+ (sym::NonZeroI128, tcx.types.i128),
+ ];
+
+ let Some((s, _)) = map
+ .iter()
+ .find(|&&(s, t)| self.tcx.is_diagnostic_item(s, adt.did()) && self.can_coerce(expr_ty, t))
+ else { return false; };
+
+ let path = self.tcx.def_path_str(adt.non_enum_variant().def_id);
+
+ err.multipart_suggestion(
+ format!("consider calling `{s}::new`"),
+ vec![
+ (expr.span.shrink_to_lo(), format!("{path}::new(")),
+ (expr.span.shrink_to_hi(), format!("){unwrap}")),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+
+ true
+ }
+
+ pub fn get_conversion_methods(
+ &self,
+ span: Span,
+ expected: Ty<'tcx>,
+ checked_ty: Ty<'tcx>,
+ hir_id: hir::HirId,
+ ) -> Vec<AssocItem> {
+ let methods = self.probe_for_return_type(
+ span,
+ probe::Mode::MethodCall,
+ expected,
+ checked_ty,
+ hir_id,
+ |m| {
+ self.has_only_self_parameter(m)
+ && self
+ .tcx
+ // This special internal attribute is used to permit
+ // "identity-like" conversion methods to be suggested here.
+ //
+ // FIXME (#46459 and #46460): ideally
+                        // `std::convert::Into::into` and `std::borrow::ToOwned` would
+ // also be `#[rustc_conversion_suggestion]`, if not for
+ // method-probing false-positives and -negatives (respectively).
+ //
+ // FIXME? Other potential candidate methods: `as_ref` and
+ // `as_mut`?
+ .has_attr(m.def_id, sym::rustc_conversion_suggestion)
+ },
+ );
+
+ methods
+ }
+
+ /// This function checks whether the method is not static and does not accept other parameters than `self`.
+ fn has_only_self_parameter(&self, method: &AssocItem) -> bool {
+ match method.kind {
+ ty::AssocKind::Fn => {
+ method.fn_has_self_parameter
+ && self.tcx.fn_sig(method.def_id).inputs().skip_binder().len() == 1
+ }
+ _ => false,
+ }
+ }
+
+ /// Identify some cases where `as_ref()` would be appropriate and suggest it.
+ ///
+ /// Given the following code:
+ /// ```compile_fail,E0308
+ /// struct Foo;
+ /// fn takes_ref(_: &Foo) {}
+ /// let ref opt = Some(Foo);
+ ///
+ /// opt.map(|param| takes_ref(param));
+ /// ```
+ /// Suggest using `opt.as_ref().map(|param| takes_ref(param));` instead.
+ ///
+ /// It only checks for `Option` and `Result` and won't work with
+ /// ```ignore (illustrative)
+ /// opt.map(|param| { takes_ref(param) });
+ /// ```
+ fn can_use_as_ref(&self, expr: &hir::Expr<'_>) -> Option<(Span, &'static str, String)> {
+ let hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) = expr.kind else {
+ return None;
+ };
+
+ let hir::def::Res::Local(local_id) = path.res else {
+ return None;
+ };
+
+ let local_parent = self.tcx.hir().get_parent_node(local_id);
+ let Some(Node::Param(hir::Param { hir_id: param_hir_id, .. })) = self.tcx.hir().find(local_parent) else {
+ return None;
+ };
+
+ let param_parent = self.tcx.hir().get_parent_node(*param_hir_id);
+ let Some(Node::Expr(hir::Expr {
+ hir_id: expr_hir_id,
+ kind: hir::ExprKind::Closure(hir::Closure { fn_decl: closure_fn_decl, .. }),
+ ..
+ })) = self.tcx.hir().find(param_parent) else {
+ return None;
+ };
+
+ let expr_parent = self.tcx.hir().get_parent_node(*expr_hir_id);
+ let hir = self.tcx.hir().find(expr_parent);
+ let closure_params_len = closure_fn_decl.inputs.len();
+ let (
+ Some(Node::Expr(hir::Expr {
+ kind: hir::ExprKind::MethodCall(method_path, receiver, ..),
+ ..
+ })),
+ 1,
+ ) = (hir, closure_params_len) else {
+ return None;
+ };
+
+ let self_ty = self.typeck_results.borrow().expr_ty(receiver);
+ let name = method_path.ident.name;
+ let is_as_ref_able = match self_ty.peel_refs().kind() {
+ ty::Adt(def, _) => {
+ (self.tcx.is_diagnostic_item(sym::Option, def.did())
+ || self.tcx.is_diagnostic_item(sym::Result, def.did()))
+ && (name == sym::map || name == sym::and_then)
+ }
+ _ => false,
+ };
+ match (is_as_ref_able, self.sess().source_map().span_to_snippet(method_path.ident.span)) {
+ (true, Ok(src)) => {
+ let suggestion = format!("as_ref().{}", src);
+ Some((method_path.ident.span, "consider using `as_ref` instead", suggestion))
+ }
+ _ => None,
+ }
+ }
+
+ pub(crate) fn maybe_get_struct_pattern_shorthand_field(
+ &self,
+ expr: &hir::Expr<'_>,
+ ) -> Option<Symbol> {
+ let hir = self.tcx.hir();
+ let local = match expr {
+ hir::Expr {
+ kind:
+ hir::ExprKind::Path(hir::QPath::Resolved(
+ None,
+ hir::Path {
+ res: hir::def::Res::Local(_),
+ segments: [hir::PathSegment { ident, .. }],
+ ..
+ },
+ )),
+ ..
+ } => Some(ident),
+ _ => None,
+ }?;
+
+ match hir.find(hir.get_parent_node(expr.hir_id))? {
+ Node::ExprField(field) => {
+ if field.ident.name == local.name && field.is_shorthand {
+ return Some(local.name);
+ }
+ }
+ _ => {}
+ }
+
+ None
+ }
+
+    /// If the given expression is a block with a trailing expression, return that expression.
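+    /// For example, given `{ foo(); bar }`, this returns the `bar` expression.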
+ pub(crate) fn maybe_get_block_expr(
+ &self,
+ expr: &hir::Expr<'tcx>,
+ ) -> Option<&'tcx hir::Expr<'tcx>> {
+ match expr {
+ hir::Expr { kind: hir::ExprKind::Block(block, ..), .. } => block.expr,
+ _ => None,
+ }
+ }
+
+ /// Returns whether the given expression is an `else if`.
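+    ///
+    /// That is, given `if a {} else if b {}`, this returns `true` for the
+    /// inner `if b {}` expression.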
+ pub(crate) fn is_else_if_block(&self, expr: &hir::Expr<'_>) -> bool {
+ if let hir::ExprKind::If(..) = expr.kind {
+ let parent_id = self.tcx.hir().get_parent_node(expr.hir_id);
+ if let Some(Node::Expr(hir::Expr {
+ kind: hir::ExprKind::If(_, _, Some(else_expr)),
+ ..
+ })) = self.tcx.hir().find(parent_id)
+ {
+ return else_expr.hir_id == expr.hir_id;
+ }
+ }
+ false
+ }
+
+ /// This function is used to determine potential "simple" improvements or users' errors and
+ /// provide them useful help. For example:
+ ///
+ /// ```compile_fail,E0308
+ /// fn some_fn(s: &str) {}
+ ///
+ /// let x = "hey!".to_owned();
+ /// some_fn(x); // error
+ /// ```
+ ///
+ /// No need to find every potential function which could make a coercion to transform a
+ /// `String` into a `&str` since a `&` would do the trick!
+ ///
+    /// In addition to this check, it also checks the mutability of the references involved. If
+    /// the expected reference is mutable but the provided one isn't, we can simply suggest: "Hey,
+    /// try with `&mut`!".
+ pub fn check_ref(
+ &self,
+ expr: &hir::Expr<'tcx>,
+ checked_ty: Ty<'tcx>,
+ expected: Ty<'tcx>,
+ ) -> Option<(
+ Span,
+ String,
+ String,
+ Applicability,
+ bool, /* verbose */
+ bool, /* suggest `&` or `&mut` type annotation */
+ )> {
+ let sess = self.sess();
+ let sp = expr.span;
+
+ // If the span is from an external macro, there's no suggestion we can make.
+ if in_external_macro(sess, sp) {
+ return None;
+ }
+
+ let sm = sess.source_map();
+
+ let replace_prefix = |s: &str, old: &str, new: &str| {
+ s.strip_prefix(old).map(|stripped| new.to_string() + stripped)
+ };
+
+ // `ExprKind::DropTemps` is semantically irrelevant for these suggestions.
+ let expr = expr.peel_drop_temps();
+
+ match (&expr.kind, expected.kind(), checked_ty.kind()) {
+ (_, &ty::Ref(_, exp, _), &ty::Ref(_, check, _)) => match (exp.kind(), check.kind()) {
+ (&ty::Str, &ty::Array(arr, _) | &ty::Slice(arr)) if arr == self.tcx.types.u8 => {
+ if let hir::ExprKind::Lit(_) = expr.kind
+ && let Ok(src) = sm.span_to_snippet(sp)
+ && replace_prefix(&src, "b\"", "\"").is_some()
+ {
+ let pos = sp.lo() + BytePos(1);
+ return Some((
+ sp.with_hi(pos),
+ "consider removing the leading `b`".to_string(),
+ String::new(),
+ Applicability::MachineApplicable,
+ true,
+ false,
+ ));
+ }
+ }
+ (&ty::Array(arr, _) | &ty::Slice(arr), &ty::Str) if arr == self.tcx.types.u8 => {
+ if let hir::ExprKind::Lit(_) = expr.kind
+ && let Ok(src) = sm.span_to_snippet(sp)
+ && replace_prefix(&src, "\"", "b\"").is_some()
+ {
+ return Some((
+ sp.shrink_to_lo(),
+ "consider adding a leading `b`".to_string(),
+ "b".to_string(),
+ Applicability::MachineApplicable,
+ true,
+ false,
+ ));
+ }
+ }
+ _ => {}
+ },
+ (_, &ty::Ref(_, _, mutability), _) => {
+ // Check if it can work when put into a ref. For example:
+ //
+ // ```
+ // fn bar(x: &mut i32) {}
+ //
+ // let x = 0u32;
+ // bar(&x); // error, expected &mut
+ // ```
+ let ref_ty = match mutability {
+ hir::Mutability::Mut => {
+ self.tcx.mk_mut_ref(self.tcx.mk_region(ty::ReStatic), checked_ty)
+ }
+ hir::Mutability::Not => {
+ self.tcx.mk_imm_ref(self.tcx.mk_region(ty::ReStatic), checked_ty)
+ }
+ };
+ if self.can_coerce(ref_ty, expected) {
+ let mut sugg_sp = sp;
+ if let hir::ExprKind::MethodCall(ref segment, receiver, args, _) = expr.kind {
+ let clone_trait =
+ self.tcx.require_lang_item(LangItem::Clone, Some(segment.ident.span));
+ if args.is_empty()
+ && self.typeck_results.borrow().type_dependent_def_id(expr.hir_id).map(
+ |did| {
+ let ai = self.tcx.associated_item(did);
+ ai.trait_container(self.tcx) == Some(clone_trait)
+ },
+ ) == Some(true)
+ && segment.ident.name == sym::clone
+ {
+ // If this expression had a clone call when suggesting borrowing
+ // we want to suggest removing it because it'd now be unnecessary.
+ sugg_sp = receiver.span;
+ }
+ }
+ if let Ok(src) = sm.span_to_snippet(sugg_sp) {
+ let needs_parens = match expr.kind {
+ // parenthesize if needed (Issue #46756)
+ hir::ExprKind::Cast(_, _) | hir::ExprKind::Binary(_, _, _) => true,
+ // parenthesize borrows of range literals (Issue #54505)
+ _ if is_range_literal(expr) => true,
+ _ => false,
+ };
+
+ if let Some(sugg) = self.can_use_as_ref(expr) {
+ return Some((
+ sugg.0,
+ sugg.1.to_string(),
+ sugg.2,
+ Applicability::MachineApplicable,
+ false,
+ false,
+ ));
+ }
+
+ let prefix = match self.maybe_get_struct_pattern_shorthand_field(expr) {
+ Some(ident) => format!("{ident}: "),
+ None => String::new(),
+ };
+
+ if let Some(hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Assign(..),
+ ..
+ })) = self.tcx.hir().find(self.tcx.hir().get_parent_node(expr.hir_id))
+ {
+ if mutability == hir::Mutability::Mut {
+ // Suppressing this diagnostic, we'll properly print it in `check_expr_assign`
+ return None;
+ }
+ }
+
+ let sugg_expr = if needs_parens { format!("({src})") } else { src };
+ return Some(match mutability {
+ hir::Mutability::Mut => (
+ sp,
+ "consider mutably borrowing here".to_string(),
+ format!("{prefix}&mut {sugg_expr}"),
+ Applicability::MachineApplicable,
+ false,
+ false,
+ ),
+ hir::Mutability::Not => (
+ sp,
+ "consider borrowing here".to_string(),
+ format!("{prefix}&{sugg_expr}"),
+ Applicability::MachineApplicable,
+ false,
+ false,
+ ),
+ });
+ }
+ }
+ }
+ (
+ hir::ExprKind::AddrOf(hir::BorrowKind::Ref, _, ref expr),
+ _,
+ &ty::Ref(_, checked, _),
+ ) if self.can_sub(self.param_env, checked, expected).is_ok() => {
+ // We have `&T`, check if what was expected was `T`. If so,
+ // we may want to suggest removing a `&`.
+ if sm.is_imported(expr.span) {
+ // Go through the spans from which this span was expanded,
+ // and find the one that's pointing inside `sp`.
+ //
+ // E.g. for `&format!("")`, where we want the span to the
+ // `format!()` invocation instead of its expansion.
+ if let Some(call_span) =
+ iter::successors(Some(expr.span), |s| s.parent_callsite())
+ .find(|&s| sp.contains(s))
+ && sm.is_span_accessible(call_span)
+ {
+ return Some((
+ sp.with_hi(call_span.lo()),
+ "consider removing the borrow".to_string(),
+ String::new(),
+ Applicability::MachineApplicable,
+ true,
+ true
+ ));
+ }
+ return None;
+ }
+ if sp.contains(expr.span)
+ && sm.is_span_accessible(expr.span)
+ {
+ return Some((
+ sp.with_hi(expr.span.lo()),
+ "consider removing the borrow".to_string(),
+ String::new(),
+ Applicability::MachineApplicable,
+ true,
+ true,
+ ));
+ }
+ }
+ (
+ _,
+ &ty::RawPtr(TypeAndMut { ty: ty_b, mutbl: mutbl_b }),
+ &ty::Ref(_, ty_a, mutbl_a),
+ ) => {
+ if let Some(steps) = self.deref_steps(ty_a, ty_b)
+                // Only suggest this if dereferencing is actually needed.
+ && steps > 0
+                // The pointer type implements the `Copy` trait, so the suggestion is always valid.
+ && let Ok(src) = sm.span_to_snippet(sp)
+ {
+ let derefs = "*".repeat(steps);
+ if let Some((span, src, applicability)) = match mutbl_b {
+ hir::Mutability::Mut => {
+ let new_prefix = "&mut ".to_owned() + &derefs;
+ match mutbl_a {
+ hir::Mutability::Mut => {
+ replace_prefix(&src, "&mut ", &new_prefix).map(|_| {
+ let pos = sp.lo() + BytePos(5);
+ let sp = sp.with_lo(pos).with_hi(pos);
+ (sp, derefs, Applicability::MachineApplicable)
+ })
+ }
+ hir::Mutability::Not => {
+ replace_prefix(&src, "&", &new_prefix).map(|_| {
+ let pos = sp.lo() + BytePos(1);
+ let sp = sp.with_lo(pos).with_hi(pos);
+ (
+ sp,
+ format!("mut {derefs}"),
+ Applicability::Unspecified,
+ )
+ })
+ }
+ }
+ }
+ hir::Mutability::Not => {
+ let new_prefix = "&".to_owned() + &derefs;
+ match mutbl_a {
+ hir::Mutability::Mut => {
+ replace_prefix(&src, "&mut ", &new_prefix).map(|_| {
+ let lo = sp.lo() + BytePos(1);
+ let hi = sp.lo() + BytePos(5);
+ let sp = sp.with_lo(lo).with_hi(hi);
+ (sp, derefs, Applicability::MachineApplicable)
+ })
+ }
+ hir::Mutability::Not => {
+ replace_prefix(&src, "&", &new_prefix).map(|_| {
+ let pos = sp.lo() + BytePos(1);
+ let sp = sp.with_lo(pos).with_hi(pos);
+ (sp, derefs, Applicability::MachineApplicable)
+ })
+ }
+ }
+ }
+ } {
+ return Some((
+ span,
+ "consider dereferencing".to_string(),
+ src,
+ applicability,
+ true,
+ false,
+ ));
+ }
+ }
+ }
+ _ if sp == expr.span => {
+ if let Some(mut steps) = self.deref_steps(checked_ty, expected) {
+ let mut expr = expr.peel_blocks();
+ let mut prefix_span = expr.span.shrink_to_lo();
+ let mut remove = String::new();
+
+ // Try peeling off any existing `&` and `&mut` to reach our target type
+ while steps > 0 {
+ if let hir::ExprKind::AddrOf(_, mutbl, inner) = expr.kind {
+ // If the expression has `&`, removing it would fix the error
+ prefix_span = prefix_span.with_hi(inner.span.lo());
+ expr = inner;
+ remove += match mutbl {
+ hir::Mutability::Not => "&",
+ hir::Mutability::Mut => "&mut ",
+ };
+ steps -= 1;
+ } else {
+ break;
+ }
+ }
+ // If we've reached our target type with just removing `&`, then just print now.
+ if steps == 0 {
+ return Some((
+ prefix_span,
+ format!("consider removing the `{}`", remove.trim()),
+ String::new(),
+ // Do not remove `&&` to get to bool, because it might be something like
+                        // `{ a } && b`, for which we have a separate fixup suggestion that is
+                        // more likely correct...
+ if remove.trim() == "&&" && expected == self.tcx.types.bool {
+ Applicability::MaybeIncorrect
+ } else {
+ Applicability::MachineApplicable
+ },
+ true,
+ false,
+ ));
+ }
+
+ // For this suggestion to make sense, the type would need to be `Copy`,
+ // or we have to be moving out of a `Box<T>`
+ if self.type_is_copy_modulo_regions(self.param_env, expected, sp)
+ // FIXME(compiler-errors): We can actually do this if the checked_ty is
+ // `steps` layers of boxes, not just one, but this is easier and most likely.
+ || (checked_ty.is_box() && steps == 1)
+ {
+ let deref_kind = if checked_ty.is_box() {
+ "unboxing the value"
+ } else if checked_ty.is_region_ptr() {
+ "dereferencing the borrow"
+ } else {
+ "dereferencing the type"
+ };
+
+ // Suggest removing `&` if we have removed any, otherwise suggest just
+ // dereferencing the remaining number of steps.
+ let message = if remove.is_empty() {
+ format!("consider {deref_kind}")
+ } else {
+ format!(
+ "consider removing the `{}` and {} instead",
+ remove.trim(),
+ deref_kind
+ )
+ };
+
+ let prefix = match self.maybe_get_struct_pattern_shorthand_field(expr) {
+ Some(ident) => format!("{ident}: "),
+ None => String::new(),
+ };
+
+ let (span, suggestion) = if self.is_else_if_block(expr) {
+ // Don't suggest nonsense like `else *if`
+ return None;
+ } else if let Some(expr) = self.maybe_get_block_expr(expr) {
+                        // The prefix should be empty here.
+ (expr.span.shrink_to_lo(), "*".to_string())
+ } else {
+ (prefix_span, format!("{}{}", prefix, "*".repeat(steps)))
+ };
+
+ return Some((
+ span,
+ message,
+ suggestion,
+ Applicability::MachineApplicable,
+ true,
+ false,
+ ));
+ }
+ }
+ }
+ _ => {}
+ }
+ None
+ }
+
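+    /// Suggests a numeric conversion (`.into()`, `Type::from(..)`,
+    /// `.try_into().unwrap()`, or changing a literal's suffix) when two
+    /// numeric types are mismatched. A minimal sketch of the kind of code
+    /// this targets:
+    ///
+    /// ```compile_fail,E0308
+    /// let x: u8 = 5;
+    /// let y: u32 = x; // suggestion: `x.into()`
+    /// ```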
+ pub fn check_for_cast(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ checked_ty: Ty<'tcx>,
+ expected_ty: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ ) -> bool {
+ if self.tcx.sess.source_map().is_imported(expr.span) {
+ // Ignore if span is from within a macro.
+ return false;
+ }
+
+ let Ok(src) = self.tcx.sess.source_map().span_to_snippet(expr.span) else {
+ return false;
+ };
+
+        // Check whether casting this expression to a given numeric type would be appropriate in
+        // case of a type mismatch.
+        //
+        // We want to minimize the number of casting operations that are suggested, as casting can
+        // be a lossy operation with potentially bad side effects, so we only suggest it when
+        // encountering an expression that indicates that the original type couldn't be directly
+        // changed.
+ //
+ // For now, don't suggest casting with `as`.
+ let can_cast = false;
+
+ let mut sugg = vec![];
+
+ if let Some(hir::Node::ExprField(field)) =
+ self.tcx.hir().find(self.tcx.hir().get_parent_node(expr.hir_id))
+ {
+ // `expr` is a literal field for a struct, only suggest if appropriate
+ if field.is_shorthand {
+ // This is a field literal
+ sugg.push((field.ident.span.shrink_to_lo(), format!("{}: ", field.ident)));
+ } else {
+ // Likely a field was meant, but this field wasn't found. Do not suggest anything.
+ return false;
+ }
+ };
+
+ if let hir::ExprKind::Call(path, args) = &expr.kind
+ && let (hir::ExprKind::Path(hir::QPath::TypeRelative(base_ty, path_segment)), 1) =
+ (&path.kind, args.len())
+ // `expr` is a conversion like `u32::from(val)`, do not suggest anything (#63697).
+ && let (hir::TyKind::Path(hir::QPath::Resolved(None, base_ty_path)), sym::from) =
+ (&base_ty.kind, path_segment.ident.name)
+ {
+ if let Some(ident) = &base_ty_path.segments.iter().map(|s| s.ident).next() {
+ match ident.name {
+ sym::i128
+ | sym::i64
+ | sym::i32
+ | sym::i16
+ | sym::i8
+ | sym::u128
+ | sym::u64
+ | sym::u32
+ | sym::u16
+ | sym::u8
+ | sym::isize
+ | sym::usize
+ if base_ty_path.segments.len() == 1 =>
+ {
+ return false;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ let msg = format!(
+ "you can convert {} `{}` to {} `{}`",
+ checked_ty.kind().article(),
+ checked_ty,
+ expected_ty.kind().article(),
+ expected_ty,
+ );
+ let cast_msg = format!(
+ "you can cast {} `{}` to {} `{}`",
+ checked_ty.kind().article(),
+ checked_ty,
+ expected_ty.kind().article(),
+ expected_ty,
+ );
+ let lit_msg = format!(
+ "change the type of the numeric literal from `{checked_ty}` to `{expected_ty}`",
+ );
+
+ let close_paren = if expr.precedence().order() < PREC_POSTFIX {
+ sugg.push((expr.span.shrink_to_lo(), "(".to_string()));
+ ")"
+ } else {
+ ""
+ };
+
+ let mut cast_suggestion = sugg.clone();
+ cast_suggestion.push((expr.span.shrink_to_hi(), format!("{close_paren} as {expected_ty}")));
+ let mut into_suggestion = sugg.clone();
+ into_suggestion.push((expr.span.shrink_to_hi(), format!("{close_paren}.into()")));
+ let mut suffix_suggestion = sugg.clone();
+ suffix_suggestion.push((
+ if matches!(
+ (&expected_ty.kind(), &checked_ty.kind()),
+ (ty::Int(_) | ty::Uint(_), ty::Float(_))
+ ) {
+ // Remove fractional part from literal, for example `42.0f32` into `42`
+ let src = src.trim_end_matches(&checked_ty.to_string());
+ let len = src.split('.').next().unwrap().len();
+ expr.span.with_lo(expr.span.lo() + BytePos(len as u32))
+ } else {
+ let len = src.trim_end_matches(&checked_ty.to_string()).len();
+ expr.span.with_lo(expr.span.lo() + BytePos(len as u32))
+ },
+ if expr.precedence().order() < PREC_POSTFIX {
+                // Re-add the `)`
+ format!("{expected_ty})")
+ } else {
+ expected_ty.to_string()
+ },
+ ));
+ let literal_is_ty_suffixed = |expr: &hir::Expr<'_>| {
+ if let hir::ExprKind::Lit(lit) = &expr.kind { lit.node.is_suffixed() } else { false }
+ };
+ let is_negative_int =
+ |expr: &hir::Expr<'_>| matches!(expr.kind, hir::ExprKind::Unary(hir::UnOp::Neg, ..));
+ let is_uint = |ty: Ty<'_>| matches!(ty.kind(), ty::Uint(..));
+
+ let in_const_context = self.tcx.hir().is_inside_const_context(expr.hir_id);
+
+ let suggest_fallible_into_or_lhs_from =
+ |err: &mut Diagnostic, exp_to_found_is_fallible: bool| {
+ // If we know the expression the expected type is derived from, we might be able
+ // to suggest a widening conversion rather than a narrowing one (which may
+ // panic). For example, given x: u8 and y: u32, if we know the span of "x",
+ // x > y
+ // can be given the suggestion "u32::from(x) > y" rather than
+ // "x > y.try_into().unwrap()".
+ let lhs_expr_and_src = expected_ty_expr.and_then(|expr| {
+ self.tcx
+ .sess
+ .source_map()
+ .span_to_snippet(expr.span)
+ .ok()
+ .map(|src| (expr, src))
+ });
+ let (msg, suggestion) = if let (Some((lhs_expr, lhs_src)), false) =
+ (lhs_expr_and_src, exp_to_found_is_fallible)
+ {
+ let msg = format!(
+ "you can convert `{lhs_src}` from `{expected_ty}` to `{checked_ty}`, matching the type of `{src}`",
+ );
+ let suggestion = vec![
+ (lhs_expr.span.shrink_to_lo(), format!("{checked_ty}::from(")),
+ (lhs_expr.span.shrink_to_hi(), ")".to_string()),
+ ];
+ (msg, suggestion)
+ } else {
+ let msg = format!("{msg} and panic if the converted value doesn't fit");
+ let mut suggestion = sugg.clone();
+ suggestion.push((
+ expr.span.shrink_to_hi(),
+ format!("{close_paren}.try_into().unwrap()"),
+ ));
+ (msg, suggestion)
+ };
+ err.multipart_suggestion_verbose(
+ &msg,
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+ };
+
+ let suggest_to_change_suffix_or_into =
+ |err: &mut Diagnostic,
+ found_to_exp_is_fallible: bool,
+ exp_to_found_is_fallible: bool| {
+ let exp_is_lhs =
+ expected_ty_expr.map(|e| self.tcx.hir().is_lhs(e.hir_id)).unwrap_or(false);
+
+ if exp_is_lhs {
+ return;
+ }
+
+ let always_fallible = found_to_exp_is_fallible
+ && (exp_to_found_is_fallible || expected_ty_expr.is_none());
+ let msg = if literal_is_ty_suffixed(expr) {
+ &lit_msg
+ } else if always_fallible && (is_negative_int(expr) && is_uint(expected_ty)) {
+ // We now know that converting either the lhs or rhs is fallible. Before we
+ // suggest a fallible conversion, check if the value can never fit in the
+ // expected type.
+ let msg = format!("`{src}` cannot fit into type `{expected_ty}`");
+ err.note(&msg);
+ return;
+ } else if in_const_context {
+ // Do not recommend `into` or `try_into` in const contexts.
+ return;
+ } else if found_to_exp_is_fallible {
+ return suggest_fallible_into_or_lhs_from(err, exp_to_found_is_fallible);
+ } else {
+ &msg
+ };
+ let suggestion = if literal_is_ty_suffixed(expr) {
+ suffix_suggestion.clone()
+ } else {
+ into_suggestion.clone()
+ };
+ err.multipart_suggestion_verbose(msg, suggestion, Applicability::MachineApplicable);
+ };
+
+ match (&expected_ty.kind(), &checked_ty.kind()) {
+ (&ty::Int(ref exp), &ty::Int(ref found)) => {
+ let (f2e_is_fallible, e2f_is_fallible) = match (exp.bit_width(), found.bit_width())
+ {
+ (Some(exp), Some(found)) if exp < found => (true, false),
+ (Some(exp), Some(found)) if exp > found => (false, true),
+ (None, Some(8 | 16)) => (false, true),
+ (Some(8 | 16), None) => (true, false),
+ (None, _) | (_, None) => (true, true),
+ _ => (false, false),
+ };
+ suggest_to_change_suffix_or_into(err, f2e_is_fallible, e2f_is_fallible);
+ true
+ }
+ (&ty::Uint(ref exp), &ty::Uint(ref found)) => {
+ let (f2e_is_fallible, e2f_is_fallible) = match (exp.bit_width(), found.bit_width())
+ {
+ (Some(exp), Some(found)) if exp < found => (true, false),
+ (Some(exp), Some(found)) if exp > found => (false, true),
+ (None, Some(8 | 16)) => (false, true),
+ (Some(8 | 16), None) => (true, false),
+ (None, _) | (_, None) => (true, true),
+ _ => (false, false),
+ };
+ suggest_to_change_suffix_or_into(err, f2e_is_fallible, e2f_is_fallible);
+ true
+ }
+ (&ty::Int(exp), &ty::Uint(found)) => {
+ let (f2e_is_fallible, e2f_is_fallible) = match (exp.bit_width(), found.bit_width())
+ {
+ (Some(exp), Some(found)) if found < exp => (false, true),
+ (None, Some(8)) => (false, true),
+ _ => (true, true),
+ };
+ suggest_to_change_suffix_or_into(err, f2e_is_fallible, e2f_is_fallible);
+ true
+ }
+ (&ty::Uint(exp), &ty::Int(found)) => {
+ let (f2e_is_fallible, e2f_is_fallible) = match (exp.bit_width(), found.bit_width())
+ {
+ (Some(exp), Some(found)) if found > exp => (true, false),
+ (Some(8), None) => (true, false),
+ _ => (true, true),
+ };
+ suggest_to_change_suffix_or_into(err, f2e_is_fallible, e2f_is_fallible);
+ true
+ }
+ (&ty::Float(ref exp), &ty::Float(ref found)) => {
+ if found.bit_width() < exp.bit_width() {
+ suggest_to_change_suffix_or_into(err, false, true);
+ } else if literal_is_ty_suffixed(expr) {
+ err.multipart_suggestion_verbose(
+ &lit_msg,
+ suffix_suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else if can_cast {
+ // Missing try_into implementation for `f64` to `f32`
+ err.multipart_suggestion_verbose(
+ &format!("{cast_msg}, producing the closest possible value"),
+ cast_suggestion,
+ Applicability::MaybeIncorrect, // lossy conversion
+ );
+ }
+ true
+ }
+ (&ty::Uint(_) | &ty::Int(_), &ty::Float(_)) => {
+ if literal_is_ty_suffixed(expr) {
+ err.multipart_suggestion_verbose(
+ &lit_msg,
+ suffix_suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else if can_cast {
+ // Missing try_into implementation for `{float}` to `{integer}`
+ err.multipart_suggestion_verbose(
+ &format!("{msg}, rounding the float towards zero"),
+ cast_suggestion,
+ Applicability::MaybeIncorrect, // lossy conversion
+ );
+ }
+ true
+ }
+ (&ty::Float(ref exp), &ty::Uint(ref found)) => {
+ // if `found` is `None` (meaning found is `usize`), don't suggest `.into()`
+ if exp.bit_width() > found.bit_width().unwrap_or(256) {
+ err.multipart_suggestion_verbose(
+ &format!(
+ "{msg}, producing the floating point representation of the integer",
+ ),
+ into_suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else if literal_is_ty_suffixed(expr) {
+ err.multipart_suggestion_verbose(
+ &lit_msg,
+ suffix_suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else {
+ // Missing try_into implementation for `{integer}` to `{float}`
+ err.multipart_suggestion_verbose(
+ &format!(
+ "{cast_msg}, producing the floating point representation of the integer, \
+ rounded if necessary",
+ ),
+ cast_suggestion,
+ Applicability::MaybeIncorrect, // lossy conversion
+ );
+ }
+ true
+ }
+ (&ty::Float(ref exp), &ty::Int(ref found)) => {
+ // if `found` is `None` (meaning found is `isize`), don't suggest `.into()`
+ if exp.bit_width() > found.bit_width().unwrap_or(256) {
+ err.multipart_suggestion_verbose(
+ &format!(
+ "{}, producing the floating point representation of the integer",
+ &msg,
+ ),
+ into_suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else if literal_is_ty_suffixed(expr) {
+ err.multipart_suggestion_verbose(
+ &lit_msg,
+ suffix_suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else {
+ // Missing try_into implementation for `{integer}` to `{float}`
+ err.multipart_suggestion_verbose(
+ &format!(
+ "{}, producing the floating point representation of the integer, \
+ rounded if necessary",
+ &msg,
+ ),
+ cast_suggestion,
+ Applicability::MaybeIncorrect, // lossy conversion
+ );
+ }
+ true
+ }
+ (
+ &ty::Uint(ty::UintTy::U32 | ty::UintTy::U64 | ty::UintTy::U128)
+ | &ty::Int(ty::IntTy::I32 | ty::IntTy::I64 | ty::IntTy::I128),
+ &ty::Char,
+ ) => {
+ err.multipart_suggestion_verbose(
+ &format!("{cast_msg}, since a `char` always occupies 4 bytes"),
+ cast_suggestion,
+ Applicability::MachineApplicable,
+ );
+ true
+ }
+ _ => false,
+ }
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/diverges.rs b/compiler/rustc_hir_typeck/src/diverges.rs
new file mode 100644
index 000000000..963a93a95
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/diverges.rs
@@ -0,0 +1,78 @@
+use rustc_span::source_map::DUMMY_SP;
+use rustc_span::{self, Span};
+use std::{cmp, ops};
+
+/// Tracks whether executing a node may exit normally (versus
+/// return/break/panic, which "diverge", leaving dead code in their
+/// wake). Tracked semi-automatically (through type variables marked
+/// as diverging), with some manual adjustments for control-flow
+/// primitives (approximating a CFG).
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub enum Diverges {
+ /// Potentially unknown, some cases converge,
+ /// others require a CFG to determine them.
+ Maybe,
+
+ /// Definitely known to diverge and therefore
+ /// not reach the next sibling or its parent.
+ Always {
+ /// The `Span` points to the expression
+ /// that caused us to diverge
+ /// (e.g. `return`, `break`, etc).
+ span: Span,
+ /// In some cases (e.g. a `match` expression
+ /// where all arms diverge), we may be
+ /// able to provide a more informative
+ /// message to the user.
+ /// If this is `None`, a default message
+ /// will be generated, which is suitable
+ /// for most cases.
+ custom_note: Option<&'static str>,
+ },
+
+ /// Same as `Always` but with a reachability
+ /// warning already emitted.
+ WarnedAlways,
+}
+
+// Convenience impls for combining `Diverges`.
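+//
+// The derived ordering is `Maybe < Always < WarnedAlways`, so `a & b` (the
+// minimum) yields "diverges only if both operands diverge" (e.g. the two arms
+// of an `if`/`else`), while `a | b` (the maximum) yields "diverges if either
+// operand diverges" (e.g. two expressions evaluated in sequence).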
+
+impl ops::BitAnd for Diverges {
+ type Output = Self;
+ fn bitand(self, other: Self) -> Self {
+ cmp::min(self, other)
+ }
+}
+
+impl ops::BitOr for Diverges {
+ type Output = Self;
+ fn bitor(self, other: Self) -> Self {
+ cmp::max(self, other)
+ }
+}
+
+impl ops::BitAndAssign for Diverges {
+ fn bitand_assign(&mut self, other: Self) {
+ *self = *self & other;
+ }
+}
+
+impl ops::BitOrAssign for Diverges {
+ fn bitor_assign(&mut self, other: Self) {
+ *self = *self | other;
+ }
+}
+
+impl Diverges {
+ /// Creates a `Diverges::Always` with the provided `span` and the default note message.
+ pub(super) fn always(span: Span) -> Diverges {
+ Diverges::Always { span, custom_note: None }
+ }
+
+ pub(super) fn is_always(self) -> bool {
+ // Enum comparison ignores the
+ // contents of fields, so we just
+ // fill them in with garbage here.
+ self >= Diverges::Always { span: DUMMY_SP, custom_note: None }
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs
new file mode 100644
index 000000000..175037f9b
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/errors.rs
@@ -0,0 +1,126 @@
+//! Errors emitted by `rustc_hir_analysis`.
+use rustc_macros::{Diagnostic, Subdiagnostic};
+use rustc_middle::ty::Ty;
+use rustc_span::{symbol::Ident, Span};
+
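+// Each `#[derive(Diagnostic)]` struct below describes one error: the
+// `#[diag(..)]` slug names its Fluent message (and optional error code), the
+// struct fields are interpolated into that message, and `#[primary_span]` /
+// `#[label]` attach spans. They are emitted via `sess.emit_err(..)`, e.g.
+// `self.tcx.sess.emit_err(AddressOfTemporaryTaken { span: oprnd.span })`.
+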
+#[derive(Diagnostic)]
+#[diag(hir_analysis_field_multiply_specified_in_initializer, code = "E0062")]
+pub struct FieldMultiplySpecifiedInInitializer {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(previous_use_label)]
+ pub prev_span: Span,
+ pub ident: Ident,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_return_stmt_outside_of_fn_body, code = "E0572")]
+pub struct ReturnStmtOutsideOfFnBody {
+ #[primary_span]
+ pub span: Span,
+ #[label(encl_body_label)]
+ pub encl_body_span: Option<Span>,
+ #[label(encl_fn_label)]
+ pub encl_fn_span: Option<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_yield_expr_outside_of_generator, code = "E0627")]
+pub struct YieldExprOutsideOfGenerator {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_struct_expr_non_exhaustive, code = "E0639")]
+pub struct StructExprNonExhaustive {
+ #[primary_span]
+ pub span: Span,
+ pub what: &'static str,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_method_call_on_unknown_type, code = "E0699")]
+pub struct MethodCallOnUnknownType {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_functional_record_update_on_non_struct, code = "E0436")]
+pub struct FunctionalRecordUpdateOnNonStruct {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_address_of_temporary_taken, code = "E0745")]
+pub struct AddressOfTemporaryTaken {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Subdiagnostic)]
+pub enum AddReturnTypeSuggestion {
+ #[suggestion(
+ hir_analysis_add_return_type_add,
+ code = "-> {found} ",
+ applicability = "machine-applicable"
+ )]
+ Add {
+ #[primary_span]
+ span: Span,
+ found: String,
+ },
+ #[suggestion(
+ hir_analysis_add_return_type_missing_here,
+ code = "-> _ ",
+ applicability = "has-placeholders"
+ )]
+ MissingHere {
+ #[primary_span]
+ span: Span,
+ },
+}
+
+#[derive(Subdiagnostic)]
+pub enum ExpectedReturnTypeLabel<'tcx> {
+ #[label(hir_analysis_expected_default_return_type)]
+ Unit {
+ #[primary_span]
+ span: Span,
+ },
+ #[label(hir_analysis_expected_return_type)]
+ Other {
+ #[primary_span]
+ span: Span,
+ expected: Ty<'tcx>,
+ },
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_missing_parentheses_in_range, code = "E0689")]
+pub struct MissingParentheseInRange {
+ #[primary_span]
+ #[label(hir_analysis_missing_parentheses_in_range)]
+ pub span: Span,
+ pub ty_str: String,
+ pub method_name: String,
+ #[subdiagnostic]
+ pub add_missing_parentheses: Option<AddMissingParenthesesInRange>,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion_verbose(
+ hir_analysis_add_missing_parentheses_in_range,
+ applicability = "maybe-incorrect"
+)]
+pub struct AddMissingParenthesesInRange {
+ pub func_name: String,
+ #[suggestion_part(code = "(")]
+ pub left: Span,
+ #[suggestion_part(code = ")")]
+ pub right: Span,
+}
diff --git a/compiler/rustc_hir_typeck/src/expectation.rs b/compiler/rustc_hir_typeck/src/expectation.rs
new file mode 100644
index 000000000..e9e810344
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/expectation.rs
@@ -0,0 +1,122 @@
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::ty::{self, Ty};
+use rustc_span::{self, Span};
+
+use super::Expectation::*;
+use super::FnCtxt;
+
+/// When type-checking an expression, we propagate downward
+/// whatever type hint we are able in the form of an `Expectation`.
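+///
+/// For example, in `let x: u8 = { 5 };` the block is checked with
+/// `ExpectHasType(u8)`; propagating that hint downward is what lets the bare
+/// literal `5` be inferred as `u8` instead of defaulting to `i32`.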
+#[derive(Copy, Clone, Debug)]
+pub enum Expectation<'tcx> {
+ /// We know nothing about what type this expression should have.
+ NoExpectation,
+
+ /// This expression should have the type given (or some subtype).
+ ExpectHasType(Ty<'tcx>),
+
+ /// This expression will be cast to the `Ty`.
+ ExpectCastableToType(Ty<'tcx>),
+
+ /// This rvalue expression will be wrapped in `&` or `Box` and coerced
+ /// to `&Ty` or `Box<Ty>`, respectively. `Ty` is `[A]` or `Trait`.
+ ExpectRvalueLikeUnsized(Ty<'tcx>),
+
+ IsLast(Span),
+}
+
+impl<'a, 'tcx> Expectation<'tcx> {
+ // Disregard "castable to" expectations because they
+ // can lead us astray. Consider for example `if cond
+ // {22} else {c} as u8` -- if we propagate the
+ // "castable to u8" constraint to 22, it will pick the
+ // type 22u8, which is overly constrained (c might not
+ // be a u8). In effect, the problem is that the
+ // "castable to" expectation is not the tightest thing
+ // we can say, so we want to drop it in this case.
+ // The tightest thing we can say is "must unify with
+ // else branch". Note that in the case of a "has type"
+ // constraint, this limitation does not hold.
+
+ // If the expected type is just a type variable, then don't use
+ // an expected type. Otherwise, we might write parts of the type
+ // when checking the 'then' block which are incompatible with the
+ // 'else' branch.
+ pub(super) fn adjust_for_branches(&self, fcx: &FnCtxt<'a, 'tcx>) -> Expectation<'tcx> {
+ match *self {
+ ExpectHasType(ety) => {
+ let ety = fcx.shallow_resolve(ety);
+ if !ety.is_ty_var() { ExpectHasType(ety) } else { NoExpectation }
+ }
+ ExpectRvalueLikeUnsized(ety) => ExpectRvalueLikeUnsized(ety),
+ _ => NoExpectation,
+ }
+ }
+
+ /// Provides an expectation for an rvalue expression given an *optional*
+ /// hint, which is not required for type safety (the resulting type might
+ /// be checked higher up, as is the case with `&expr` and `box expr`), but
+ /// is useful in determining the concrete type.
+ ///
+ /// The primary use case is where the expected type is a fat pointer,
+ /// like `&[isize]`. For example, consider the following statement:
+ ///
+ /// let x: &[isize] = &[1, 2, 3];
+ ///
+ /// In this case, the expected type for the `&[1, 2, 3]` expression is
+ /// `&[isize]`. If however we were to say that `[1, 2, 3]` has the
+ /// expectation `ExpectHasType([isize])`, that would be too strong --
+ /// `[1, 2, 3]` does not have the type `[isize]` but rather `[isize; 3]`.
+ /// It is only the `&[1, 2, 3]` expression as a whole that can be coerced
+ /// to the type `&[isize]`. Therefore, we propagate this more limited hint,
+ /// which still is useful, because it informs integer literals and the like.
+ /// See the test case `test/ui/coerce-expect-unsized.rs` and #20169
+    /// for examples of where this comes up.
+ pub(super) fn rvalue_hint(fcx: &FnCtxt<'a, 'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> {
+ match fcx.tcx.struct_tail_without_normalization(ty).kind() {
+ ty::Slice(_) | ty::Str | ty::Dynamic(..) => ExpectRvalueLikeUnsized(ty),
+ _ => ExpectHasType(ty),
+ }
+ }
+
+ // Resolves `expected` by a single level if it is a variable. If
+ // there is no expected type or resolution is not possible (e.g.,
+ // no constraints yet present), just returns `self`.
+ fn resolve(self, fcx: &FnCtxt<'a, 'tcx>) -> Expectation<'tcx> {
+ match self {
+ NoExpectation => NoExpectation,
+ ExpectCastableToType(t) => ExpectCastableToType(fcx.resolve_vars_if_possible(t)),
+ ExpectHasType(t) => ExpectHasType(fcx.resolve_vars_if_possible(t)),
+ ExpectRvalueLikeUnsized(t) => ExpectRvalueLikeUnsized(fcx.resolve_vars_if_possible(t)),
+ IsLast(sp) => IsLast(sp),
+ }
+ }
+
+ pub(super) fn to_option(self, fcx: &FnCtxt<'a, 'tcx>) -> Option<Ty<'tcx>> {
+ match self.resolve(fcx) {
+ NoExpectation | IsLast(_) => None,
+ ExpectCastableToType(ty) | ExpectHasType(ty) | ExpectRvalueLikeUnsized(ty) => Some(ty),
+ }
+ }
+
+ /// It sometimes happens that we want to turn an expectation into
+ /// a **hard constraint** (i.e., something that must be satisfied
+ /// for the program to type-check). `only_has_type` will return
+ /// such a constraint, if it exists.
+ pub(super) fn only_has_type(self, fcx: &FnCtxt<'a, 'tcx>) -> Option<Ty<'tcx>> {
+ match self {
+ ExpectHasType(ty) => Some(fcx.resolve_vars_if_possible(ty)),
+ NoExpectation | ExpectCastableToType(_) | ExpectRvalueLikeUnsized(_) | IsLast(_) => {
+ None
+ }
+ }
+ }
+
+ /// Like `only_has_type`, but instead of returning `None` if no
+ /// hard constraint exists, creates a fresh type variable.
+ pub(super) fn coercion_target_type(self, fcx: &FnCtxt<'a, 'tcx>, span: Span) -> Ty<'tcx> {
+ self.only_has_type(fcx).unwrap_or_else(|| {
+ fcx.next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span })
+ })
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs
new file mode 100644
index 000000000..9fde62a81
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/expr.rs
@@ -0,0 +1,2896 @@
+//! Type checking expressions.
+//!
+//! See `mod.rs` for more context on type checking in general.
+
+use crate::cast;
+use crate::coercion::CoerceMany;
+use crate::coercion::DynamicCoerceMany;
+use crate::errors::{AddressOfTemporaryTaken, ReturnStmtOutsideOfFnBody, StructExprNonExhaustive};
+use crate::errors::{
+ FieldMultiplySpecifiedInInitializer, FunctionalRecordUpdateOnNonStruct,
+ YieldExprOutsideOfGenerator,
+};
+use crate::fatally_break_rust;
+use crate::method::SelfSource;
+use crate::type_error_struct;
+use crate::Expectation::{self, ExpectCastableToType, ExpectHasType, NoExpectation};
+use crate::{
+ report_unexpected_variant_res, BreakableCtxt, Diverges, FnCtxt, Needs,
+ TupleArgumentsFlag::DontTupleArguments,
+};
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_errors::{
+ pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, DiagnosticId,
+ ErrorGuaranteed, StashKey,
+};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{Closure, ExprKind, HirId, QPath};
+use rustc_hir_analysis::astconv::AstConv as _;
+use rustc_hir_analysis::check::ty_kind_suggestion;
+use rustc_infer::infer;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::InferOk;
+use rustc_infer::traits::ObligationCause;
+use rustc_middle::middle::stability;
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, AllowTwoPhase};
+use rustc_middle::ty::error::TypeError::FieldMisMatch;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, AdtKind, Ty, TypeVisitable};
+use rustc_session::errors::ExprParenthesesNeeded;
+use rustc_session::parse::feature_err;
+use rustc_span::hygiene::DesugaringKind;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::source_map::{Span, Spanned};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_target::spec::abi::Abi::RustIntrinsic;
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits::{self, ObligationCauseCode};
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ fn check_expr_eq_type(&self, expr: &'tcx hir::Expr<'tcx>, expected: Ty<'tcx>) {
+ let ty = self.check_expr_with_hint(expr, expected);
+ self.demand_eqtype(expr.span, expected, ty);
+ }
+
+ pub fn check_expr_has_type_or_error(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ extend_err: impl FnMut(&mut Diagnostic),
+ ) -> Ty<'tcx> {
+ self.check_expr_meets_expectation_or_error(expr, ExpectHasType(expected), extend_err)
+ }
+
+ fn check_expr_meets_expectation_or_error(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ mut extend_err: impl FnMut(&mut Diagnostic),
+ ) -> Ty<'tcx> {
+ let expected_ty = expected.to_option(&self).unwrap_or(self.tcx.types.bool);
+ let mut ty = self.check_expr_with_expectation(expr, expected);
+
+ // While we don't allow *arbitrary* coercions here, we *do* allow
+ // coercions from ! to `expected`.
+ if ty.is_never() {
+ if let Some(adjustments) = self.typeck_results.borrow().adjustments().get(expr.hir_id) {
+ self.tcx().sess.delay_span_bug(
+ expr.span,
+ "expression with never type wound up being adjusted",
+ );
+ return if let [Adjustment { kind: Adjust::NeverToAny, target }] = &adjustments[..] {
+ target.to_owned()
+ } else {
+ self.tcx().ty_error()
+ };
+ }
+
+ let adj_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::AdjustmentType,
+ span: expr.span,
+ });
+ self.apply_adjustments(
+ expr,
+ vec![Adjustment { kind: Adjust::NeverToAny, target: adj_ty }],
+ );
+ ty = adj_ty;
+ }
+
+ if let Some(mut err) = self.demand_suptype_diag(expr.span, expected_ty, ty) {
+ let expr = expr.peel_drop_temps();
+ self.suggest_deref_ref_or_into(&mut err, expr, expected_ty, ty, None);
+ extend_err(&mut err);
+ err.emit();
+ }
+ ty
+ }
+
+ pub(super) fn check_expr_coercable_to_type(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ ) -> Ty<'tcx> {
+ let ty = self.check_expr_with_hint(expr, expected);
+ // checks don't need two phase
+ self.demand_coerce(expr, ty, expected, expected_ty_expr, AllowTwoPhase::No)
+ }
+
+ pub(super) fn check_expr_with_hint(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ ) -> Ty<'tcx> {
+ self.check_expr_with_expectation(expr, ExpectHasType(expected))
+ }
+
+ fn check_expr_with_expectation_and_needs(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ needs: Needs,
+ ) -> Ty<'tcx> {
+ let ty = self.check_expr_with_expectation(expr, expected);
+
+        // If the expression is used in a place where a mutable place is required,
+        // e.g. the LHS of an assignment, perform the conversion.
+ if let Needs::MutPlace = needs {
+ self.convert_place_derefs_to_mutable(expr);
+ }
+
+ ty
+ }
+
+ pub(super) fn check_expr(&self, expr: &'tcx hir::Expr<'tcx>) -> Ty<'tcx> {
+ self.check_expr_with_expectation(expr, NoExpectation)
+ }
+
+ pub(super) fn check_expr_with_needs(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ needs: Needs,
+ ) -> Ty<'tcx> {
+ self.check_expr_with_expectation_and_needs(expr, NoExpectation, needs)
+ }
+
+ /// Invariant:
+ /// If an expression has any sub-expressions that result in a type error,
+ /// inspecting that expression's type with `ty.references_error()` will return
+ /// true. Likewise, if an expression is known to diverge, inspecting its
+ /// type with `ty::type_is_bot` will return true (n.b.: since Rust is
+ /// strict, _|_ can appear in the type of an expression that does not,
+ /// itself, diverge: for example, fn() -> _|_.)
+ /// Note that inspecting a type's structure *directly* may expose the fact
+ /// that there are actually multiple representations for `Error`, so avoid
+ /// that when err needs to be handled differently.
+ #[instrument(skip(self, expr), level = "debug")]
+ pub(super) fn check_expr_with_expectation(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ self.check_expr_with_expectation_and_args(expr, expected, &[])
+ }
+
+ /// Same as `check_expr_with_expectation`, but allows us to pass in the arguments of a
+ /// `ExprKind::Call` when evaluating its callee when it is an `ExprKind::Path`.
+ pub(super) fn check_expr_with_expectation_and_args(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) -> Ty<'tcx> {
+ if self.tcx().sess.verbose() {
+ // make this code only run with -Zverbose because it is probably slow
+ if let Ok(lint_str) = self.tcx.sess.source_map().span_to_snippet(expr.span) {
+ if !lint_str.contains('\n') {
+ debug!("expr text: {lint_str}");
+ } else {
+ let mut lines = lint_str.lines();
+ if let Some(line0) = lines.next() {
+ let remaining_lines = lines.count();
+ debug!("expr text: {line0}");
+ debug!("expr text: ...(and {remaining_lines} more lines)");
+ }
+ }
+ }
+ }
+
+ // True if `expr` is a `Try::from_ok(())` that is a result of desugaring a try block
+ // without the final expr (e.g. `try { return; }`). We don't want to generate an
+ // unreachable_code lint for it since warnings for autogenerated code are confusing.
+ let is_try_block_generated_unit_expr = match expr.kind {
+ ExprKind::Call(_, args) if expr.span.is_desugaring(DesugaringKind::TryBlock) => {
+ args.len() == 1 && args[0].span.is_desugaring(DesugaringKind::TryBlock)
+ }
+
+ _ => false,
+ };
+
+ // Warn for expressions after diverging siblings.
+ if !is_try_block_generated_unit_expr {
+ self.warn_if_unreachable(expr.hir_id, expr.span, "expression");
+ }
+
+ // Hide the outer diverging and has_errors flags.
+ let old_diverges = self.diverges.replace(Diverges::Maybe);
+ let old_has_errors = self.has_errors.replace(false);
+
+ let ty = ensure_sufficient_stack(|| match &expr.kind {
+ hir::ExprKind::Path(
+ qpath @ hir::QPath::Resolved(..) | qpath @ hir::QPath::TypeRelative(..),
+ ) => self.check_expr_path(qpath, expr, args),
+ _ => self.check_expr_kind(expr, expected),
+ });
+
+ // Warn for non-block expressions with diverging children.
+ match expr.kind {
+ ExprKind::Block(..)
+ | ExprKind::If(..)
+ | ExprKind::Let(..)
+ | ExprKind::Loop(..)
+ | ExprKind::Match(..) => {}
+ // If `expr` is a result of desugaring the try block and is an ok-wrapped
+ // diverging expression (e.g. it arose from desugaring of `try { return }`),
+ // we skip issuing a warning because it is autogenerated code.
+ ExprKind::Call(..) if expr.span.is_desugaring(DesugaringKind::TryBlock) => {}
+ ExprKind::Call(callee, _) => self.warn_if_unreachable(expr.hir_id, callee.span, "call"),
+ ExprKind::MethodCall(segment, ..) => {
+ self.warn_if_unreachable(expr.hir_id, segment.ident.span, "call")
+ }
+ _ => self.warn_if_unreachable(expr.hir_id, expr.span, "expression"),
+ }
+
+ // Any expression that produces a value of type `!` must have diverged
+ if ty.is_never() {
+ self.diverges.set(self.diverges.get() | Diverges::always(expr.span));
+ }
+
+        // Record the type, which applies its effects.
+ // We need to do this after the warning above, so that
+ // we don't warn for the diverging expression itself.
+ self.write_ty(expr.hir_id, ty);
+
+        // Combine the diverging and has_errors flags.
+ self.diverges.set(self.diverges.get() | old_diverges);
+ self.has_errors.set(self.has_errors.get() | old_has_errors);
+
+ debug!("type of {} is...", self.tcx.hir().node_to_string(expr.hir_id));
+ debug!("... {:?}, expected is {:?}", ty, expected);
+
+ ty
+ }
+
+ #[instrument(skip(self, expr), level = "debug")]
+ fn check_expr_kind(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ trace!("expr={:#?}", expr);
+
+ let tcx = self.tcx;
+ match expr.kind {
+ ExprKind::Box(subexpr) => self.check_expr_box(subexpr, expected),
+ ExprKind::Lit(ref lit) => self.check_lit(&lit, expected),
+ ExprKind::Binary(op, lhs, rhs) => self.check_binop(expr, op, lhs, rhs, expected),
+ ExprKind::Assign(lhs, rhs, span) => {
+ self.check_expr_assign(expr, expected, lhs, rhs, span)
+ }
+ ExprKind::AssignOp(op, lhs, rhs) => {
+ self.check_binop_assign(expr, op, lhs, rhs, expected)
+ }
+ ExprKind::Unary(unop, oprnd) => self.check_expr_unary(unop, oprnd, expected, expr),
+ ExprKind::AddrOf(kind, mutbl, oprnd) => {
+ self.check_expr_addr_of(kind, mutbl, oprnd, expected, expr)
+ }
+ ExprKind::Path(QPath::LangItem(lang_item, _, hir_id)) => {
+ self.check_lang_item_path(lang_item, expr, hir_id)
+ }
+ ExprKind::Path(ref qpath) => self.check_expr_path(qpath, expr, &[]),
+ ExprKind::InlineAsm(asm) => {
+ // We defer some asm checks as we may not have resolved the input and output types yet (they may still be infer vars).
+ self.deferred_asm_checks.borrow_mut().push((asm, expr.hir_id));
+ self.check_expr_asm(asm)
+ }
+ ExprKind::Break(destination, ref expr_opt) => {
+ self.check_expr_break(destination, expr_opt.as_deref(), expr)
+ }
+ ExprKind::Continue(destination) => {
+ if destination.target_id.is_ok() {
+ tcx.types.never
+ } else {
+ // There was an error; make type-check fail.
+ tcx.ty_error()
+ }
+ }
+ ExprKind::Ret(ref expr_opt) => self.check_expr_return(expr_opt.as_deref(), expr),
+ ExprKind::Let(let_expr) => self.check_expr_let(let_expr),
+ ExprKind::Loop(body, _, source, _) => {
+ self.check_expr_loop(body, source, expected, expr)
+ }
+ ExprKind::Match(discrim, arms, match_src) => {
+ self.check_match(expr, &discrim, arms, expected, match_src)
+ }
+ ExprKind::Closure(&Closure { capture_clause, fn_decl, body, movability, .. }) => {
+ self.check_expr_closure(expr, capture_clause, &fn_decl, body, movability, expected)
+ }
+ ExprKind::Block(body, _) => self.check_block_with_expected(&body, expected),
+ ExprKind::Call(callee, args) => self.check_call(expr, &callee, args, expected),
+ ExprKind::MethodCall(segment, receiver, args, _) => {
+ self.check_method_call(expr, segment, receiver, args, expected)
+ }
+ ExprKind::Cast(e, t) => self.check_expr_cast(e, t, expr),
+ ExprKind::Type(e, t) => {
+ let ty = self.to_ty_saving_user_provided_ty(&t);
+ self.check_expr_eq_type(&e, ty);
+ ty
+ }
+ ExprKind::If(cond, then_expr, opt_else_expr) => {
+ self.check_then_else(cond, then_expr, opt_else_expr, expr.span, expected)
+ }
+ ExprKind::DropTemps(e) => self.check_expr_with_expectation(e, expected),
+ ExprKind::Array(args) => self.check_expr_array(args, expected, expr),
+ ExprKind::ConstBlock(ref anon_const) => {
+ self.check_expr_const_block(anon_const, expected, expr)
+ }
+ ExprKind::Repeat(element, ref count) => {
+ self.check_expr_repeat(element, count, expected, expr)
+ }
+ ExprKind::Tup(elts) => self.check_expr_tuple(elts, expected, expr),
+ ExprKind::Struct(qpath, fields, ref base_expr) => {
+ self.check_expr_struct(expr, expected, qpath, fields, base_expr)
+ }
+ ExprKind::Field(base, field) => self.check_field(expr, &base, field),
+ ExprKind::Index(base, idx) => self.check_expr_index(base, idx, expr),
+ ExprKind::Yield(value, ref src) => self.check_expr_yield(value, expr, src),
+ hir::ExprKind::Err => tcx.ty_error(),
+ }
+ }
+
+ fn check_expr_box(&self, expr: &'tcx hir::Expr<'tcx>, expected: Expectation<'tcx>) -> Ty<'tcx> {
+ let expected_inner = expected.to_option(self).map_or(NoExpectation, |ty| match ty.kind() {
+ ty::Adt(def, _) if def.is_box() => Expectation::rvalue_hint(self, ty.boxed_ty()),
+ _ => NoExpectation,
+ });
+ let referent_ty = self.check_expr_with_expectation(expr, expected_inner);
+ self.require_type_is_sized(referent_ty, expr.span, traits::SizedBoxType);
+ self.tcx.mk_box(referent_ty)
+ }
+
+ fn check_expr_unary(
+ &self,
+ unop: hir::UnOp,
+ oprnd: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let expected_inner = match unop {
+ hir::UnOp::Not | hir::UnOp::Neg => expected,
+ hir::UnOp::Deref => NoExpectation,
+ };
+ let mut oprnd_t = self.check_expr_with_expectation(&oprnd, expected_inner);
+
+ if !oprnd_t.references_error() {
+ oprnd_t = self.structurally_resolved_type(expr.span, oprnd_t);
+ match unop {
+ hir::UnOp::Deref => {
+ if let Some(ty) = self.lookup_derefing(expr, oprnd, oprnd_t) {
+ oprnd_t = ty;
+ } else {
+ let mut err = type_error_struct!(
+ tcx.sess,
+ expr.span,
+ oprnd_t,
+ E0614,
+ "type `{oprnd_t}` cannot be dereferenced",
+ );
+ let sp = tcx.sess.source_map().start_point(expr.span);
+ if let Some(sp) =
+ tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp)
+ {
+ err.subdiagnostic(ExprParenthesesNeeded::surrounding(*sp));
+ }
+ err.emit();
+ oprnd_t = tcx.ty_error();
+ }
+ }
+ hir::UnOp::Not => {
+ let result = self.check_user_unop(expr, oprnd_t, unop, expected_inner);
+ // If it's builtin, we can reuse the type, this helps inference.
+ if !(oprnd_t.is_integral() || *oprnd_t.kind() == ty::Bool) {
+ oprnd_t = result;
+ }
+ }
+ hir::UnOp::Neg => {
+ let result = self.check_user_unop(expr, oprnd_t, unop, expected_inner);
+ // If it's builtin, we can reuse the type, this helps inference.
+ if !oprnd_t.is_numeric() {
+ oprnd_t = result;
+ }
+ }
+ }
+ }
+ oprnd_t
+ }
+
+ fn check_expr_addr_of(
+ &self,
+ kind: hir::BorrowKind,
+ mutbl: hir::Mutability,
+ oprnd: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| {
+ match ty.kind() {
+ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
+ if oprnd.is_syntactic_place_expr() {
+ // Places may legitimately have unsized types.
+ // For example, dereferences of a fat pointer and
+ // the last field of a struct can be unsized.
+ ExpectHasType(*ty)
+ } else {
+ Expectation::rvalue_hint(self, *ty)
+ }
+ }
+ _ => NoExpectation,
+ }
+ });
+ let ty =
+ self.check_expr_with_expectation_and_needs(&oprnd, hint, Needs::maybe_mut_place(mutbl));
+
+ let tm = ty::TypeAndMut { ty, mutbl };
+ match kind {
+ _ if tm.ty.references_error() => self.tcx.ty_error(),
+ hir::BorrowKind::Raw => {
+ self.check_named_place_expr(oprnd);
+ self.tcx.mk_ptr(tm)
+ }
+ hir::BorrowKind::Ref => {
+ // Note: at this point, we cannot say what the best lifetime
+ // is to use for resulting pointer. We want to use the
+ // shortest lifetime possible so as to avoid spurious borrowck
+ // errors. Moreover, the longest lifetime will depend on the
+ // precise details of the value whose address is being taken
+ // (and how long it is valid), which we don't know yet until
+ // type inference is complete.
+ //
+ // Therefore, here we simply generate a region variable. The
+ // region inferencer will then select a suitable value.
+ // Finally, borrowck will infer the value of the region again,
+ // this time with enough precision to check that the value
+ // whose address was taken can actually be made to live as long
+ // as it needs to live.
+ let region = self.next_region_var(infer::AddrOfRegion(expr.span));
+ self.tcx.mk_ref(region, tm)
+ }
+ }
+ }
+
+ /// Does this expression refer to a place that either:
+ /// * Is based on a local or static.
+    /// * Contains a dereference.
+    ///
+    /// Note that the adjustments for the children of `expr` should already
+ /// have been resolved.
+ fn check_named_place_expr(&self, oprnd: &'tcx hir::Expr<'tcx>) {
+ let is_named = oprnd.is_place_expr(|base| {
+ // Allow raw borrows if there are any deref adjustments.
+ //
+ // const VAL: (i32,) = (0,);
+ // const REF: &(i32,) = &(0,);
+ //
+ // &raw const VAL.0; // ERROR
+ // &raw const REF.0; // OK, same as &raw const (*REF).0;
+ //
+ // This is maybe too permissive, since it allows
+ // `let u = &raw const Box::new((1,)).0`, which creates an
+ // immediately dangling raw pointer.
+ self.typeck_results
+ .borrow()
+ .adjustments()
+ .get(base.hir_id)
+ .map_or(false, |x| x.iter().any(|adj| matches!(adj.kind, Adjust::Deref(_))))
+ });
+ if !is_named {
+ self.tcx.sess.emit_err(AddressOfTemporaryTaken { span: oprnd.span });
+ }
+ }
+
+ fn check_lang_item_path(
+ &self,
+ lang_item: hir::LangItem,
+ expr: &'tcx hir::Expr<'tcx>,
+ hir_id: Option<hir::HirId>,
+ ) -> Ty<'tcx> {
+ self.resolve_lang_item_path(lang_item, expr.span, expr.hir_id, hir_id).1
+ }
+
+ pub(crate) fn check_expr_path(
+ &self,
+ qpath: &'tcx hir::QPath<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let (res, opt_ty, segs) =
+ self.resolve_ty_and_res_fully_qualified_call(qpath, expr.hir_id, expr.span);
+ let ty = match res {
+ Res::Err => {
+ self.set_tainted_by_errors();
+ tcx.ty_error()
+ }
+ Res::Def(DefKind::Ctor(_, CtorKind::Fictive), _) => {
+ report_unexpected_variant_res(tcx, res, qpath, expr.span);
+ tcx.ty_error()
+ }
+ _ => self.instantiate_value_path(segs, opt_ty, res, expr.span, expr.hir_id).0,
+ };
+
+ if let ty::FnDef(did, ..) = *ty.kind() {
+ let fn_sig = ty.fn_sig(tcx);
+ if tcx.fn_sig(did).abi() == RustIntrinsic && tcx.item_name(did) == sym::transmute {
+ let from = fn_sig.inputs().skip_binder()[0];
+ let to = fn_sig.output().skip_binder();
+ // We defer the transmute to the end of typeck, once all inference vars have
+ // been resolved or we errored. This is important as we can only check transmute
+ // on concrete types, but the output type may not be known yet (it would only
+ // be known if explicitly specified via turbofish).
+ self.deferred_transmute_checks.borrow_mut().push((from, to, expr.hir_id));
+ }
+ if !tcx.features().unsized_fn_params {
+ // We want to remove some Sized bounds from std functions,
+ // but don't want to expose the removal to stable Rust.
+ // i.e., we don't want to allow
+ //
+ // ```rust
+ // drop as fn(str);
+ // ```
+ //
+ // to work in stable even if the Sized bound on `drop` is relaxed.
+ for i in 0..fn_sig.inputs().skip_binder().len() {
+ // We just want to check sizedness, so instead of introducing
+ // placeholder lifetimes with probing, we just replace higher lifetimes
+ // with fresh vars.
+ let span = args.get(i).map(|a| a.span).unwrap_or(expr.span);
+ let input = self.replace_bound_vars_with_fresh_vars(
+ span,
+ infer::LateBoundRegionConversionTime::FnCall,
+ fn_sig.input(i),
+ );
+ self.require_type_is_sized_deferred(
+ input,
+ span,
+ traits::SizedArgumentType(None),
+ );
+ }
+ }
+ // Here we want to prevent struct constructors from returning unsized types.
+        // There were two cases where this happened: fn pointer coercion in stable Rust,
+        // and regular function calls in the presence of unsized_locals.
+ // Also, as we just want to check sizedness, instead of introducing
+ // placeholder lifetimes with probing, we just replace higher lifetimes
+ // with fresh vars.
+ let output = self.replace_bound_vars_with_fresh_vars(
+ expr.span,
+ infer::LateBoundRegionConversionTime::FnCall,
+ fn_sig.output(),
+ );
+ self.require_type_is_sized_deferred(output, expr.span, traits::SizedReturnType);
+ }
+
+ // We always require that the type provided as the value for
+ // a type parameter outlives the moment of instantiation.
+ let substs = self.typeck_results.borrow().node_substs(expr.hir_id);
+ self.add_wf_bounds(substs, expr);
+
+ ty
+ }
+
+ fn check_expr_break(
+ &self,
+ destination: hir::Destination,
+ expr_opt: Option<&'tcx hir::Expr<'tcx>>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ if let Ok(target_id) = destination.target_id {
+ let (e_ty, cause);
+ if let Some(e) = expr_opt {
+ // If this is a break with a value, we need to type-check
+ // the expression. Get an expected type from the loop context.
+ let opt_coerce_to = {
+ // We should release `enclosing_breakables` before the `check_expr_with_hint`
+ // below, so can't move this block of code to the enclosing scope and share
+ // `ctxt` with the second `enclosing_breakables` borrow below.
+ let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+ match enclosing_breakables.opt_find_breakable(target_id) {
+ Some(ctxt) => ctxt.coerce.as_ref().map(|coerce| coerce.expected_ty()),
+ None => {
+ // Avoid ICE when `break` is inside a closure (#65383).
+ return tcx.ty_error_with_message(
+ expr.span,
+ "break was outside loop, but no error was emitted",
+ );
+ }
+ }
+ };
+
+ // If the loop context is not a `loop { }`, then break with
+ // a value is illegal, and `opt_coerce_to` will be `None`.
+ // Just set expectation to error in that case.
+ let coerce_to = opt_coerce_to.unwrap_or_else(|| tcx.ty_error());
+
+ // Recurse without `enclosing_breakables` borrowed.
+ e_ty = self.check_expr_with_hint(e, coerce_to);
+ cause = self.misc(e.span);
+ } else {
+ // Otherwise, this is a break *without* a value. That's
+ // always legal, and is equivalent to `break ()`.
+ e_ty = tcx.mk_unit();
+ cause = self.misc(expr.span);
+ }
+
+ // Now that we have type-checked `expr_opt`, borrow
+ // the `enclosing_loops` field and let's coerce the
+ // type of `expr_opt` into what is expected.
+ let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+ let Some(ctxt) = enclosing_breakables.opt_find_breakable(target_id) else {
+ // Avoid ICE when `break` is inside a closure (#65383).
+ return tcx.ty_error_with_message(
+ expr.span,
+ "break was outside loop, but no error was emitted",
+ );
+ };
+
+ if let Some(ref mut coerce) = ctxt.coerce {
+ if let Some(ref e) = expr_opt {
+ coerce.coerce(self, &cause, e, e_ty);
+ } else {
+ assert!(e_ty.is_unit());
+ let ty = coerce.expected_ty();
+ coerce.coerce_forced_unit(
+ self,
+ &cause,
+ &mut |mut err| {
+ self.suggest_mismatched_types_on_tail(
+ &mut err, expr, ty, e_ty, target_id,
+ );
+ if let Some(val) = ty_kind_suggestion(ty) {
+ let label = destination
+ .label
+ .map(|l| format!(" {}", l.ident))
+ .unwrap_or_else(String::new);
+ err.span_suggestion(
+ expr.span,
+ "give it a value of the expected type",
+ format!("break{label} {val}"),
+ Applicability::HasPlaceholders,
+ );
+ }
+ },
+ false,
+ );
+ }
+ } else {
+ // If `ctxt.coerce` is `None`, we can just ignore
+ // the type of the expression. This is because
+ // either this was a break *without* a value, in
+ // which case it is always a legal type (`()`), or
+ // else an error would have been flagged by the
+ // `loops` pass for using break with an expression
+ // where you are not supposed to.
+ assert!(expr_opt.is_none() || self.tcx.sess.has_errors().is_some());
+ }
+
+ // If we encountered a `break`, then (no surprise) it may be possible to break from the
+ // loop... unless the value being returned from the loop diverges itself, e.g.
+ // `break return 5` or `break loop {}`.
+ ctxt.may_break |= !self.diverges.get().is_always();
+
+ // the type of a `break` is always `!`, since it diverges
+ tcx.types.never
+ } else {
+ // Otherwise, we failed to find the enclosing loop;
+ // this can only happen if the `break` was not
+ // inside a loop at all, which is caught by the
+ // loop-checking pass.
+ let err = self.tcx.ty_error_with_message(
+ expr.span,
+ "break was outside loop, but no error was emitted",
+ );
+
+ // We still need to assign a type to the inner expression to
+ // prevent the ICE in #43162.
+ if let Some(e) = expr_opt {
+ self.check_expr_with_hint(e, err);
+
+ // ... except when we try to 'break rust;'.
+ // ICE this expression in particular (see #43162).
+ if let ExprKind::Path(QPath::Resolved(_, path)) = e.kind {
+ if path.segments.len() == 1 && path.segments[0].ident.name == sym::rust {
+ fatally_break_rust(self.tcx.sess);
+ }
+ }
+ }
+
+ // There was an error; make type-check fail.
+ err
+ }
+ }
+
+ fn check_expr_return(
+ &self,
+ expr_opt: Option<&'tcx hir::Expr<'tcx>>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ if self.ret_coercion.is_none() {
+ let mut err = ReturnStmtOutsideOfFnBody {
+ span: expr.span,
+ encl_body_span: None,
+ encl_fn_span: None,
+ };
+
+ let encl_item_id = self.tcx.hir().get_parent_item(expr.hir_id);
+
+ if let Some(hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Fn(..),
+ span: encl_fn_span,
+ ..
+ }))
+ | Some(hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(_)),
+ span: encl_fn_span,
+ ..
+ }))
+ | Some(hir::Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(..),
+ span: encl_fn_span,
+ ..
+ })) = self.tcx.hir().find_by_def_id(encl_item_id.def_id)
+ {
+ // We are inside a function body, so reporting "return statement
+ // outside of function body" needs an explanation.
+
+ let encl_body_owner_id = self.tcx.hir().enclosing_body_owner(expr.hir_id);
+
+ // If this didn't hold, we would not have to report an error in
+ // the first place.
+ assert_ne!(encl_item_id.def_id, encl_body_owner_id);
+
+ let encl_body_id = self.tcx.hir().body_owned_by(encl_body_owner_id);
+ let encl_body = self.tcx.hir().body(encl_body_id);
+
+ err.encl_body_span = Some(encl_body.value.span);
+ err.encl_fn_span = Some(*encl_fn_span);
+ }
+
+ self.tcx.sess.emit_err(err);
+
+ if let Some(e) = expr_opt {
+ // We still have to type-check `e` (issue #86188), but calling
+ // `check_return_expr` only works inside fn bodies.
+ self.check_expr(e);
+ }
+ } else if let Some(e) = expr_opt {
+ if self.ret_coercion_span.get().is_none() {
+ self.ret_coercion_span.set(Some(e.span));
+ }
+ self.check_return_expr(e, true);
+ } else {
+ let mut coercion = self.ret_coercion.as_ref().unwrap().borrow_mut();
+ if self.ret_coercion_span.get().is_none() {
+ self.ret_coercion_span.set(Some(expr.span));
+ }
+ let cause = self.cause(expr.span, ObligationCauseCode::ReturnNoExpression);
+ if let Some((fn_decl, _)) = self.get_fn_decl(expr.hir_id) {
+ coercion.coerce_forced_unit(
+ self,
+ &cause,
+ &mut |db| {
+ let span = fn_decl.output.span();
+ if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
+ db.span_label(
+ span,
+ format!("expected `{snippet}` because of this return type"),
+ );
+ }
+ },
+ true,
+ );
+ } else {
+ coercion.coerce_forced_unit(self, &cause, &mut |_| (), true);
+ }
+ }
+ self.tcx.types.never
+ }
+
+ /// `explicit_return` is `true` if we're checking an explicit `return expr`,
+ /// and `false` if we're checking a trailing expression.
+ pub(super) fn check_return_expr(
+ &self,
+ return_expr: &'tcx hir::Expr<'tcx>,
+ explicit_return: bool,
+ ) {
+ let ret_coercion = self.ret_coercion.as_ref().unwrap_or_else(|| {
+ span_bug!(return_expr.span, "check_return_expr called outside fn body")
+ });
+
+ let ret_ty = ret_coercion.borrow().expected_ty();
+ let return_expr_ty = self.check_expr_with_hint(return_expr, ret_ty);
+ let mut span = return_expr.span;
+ // Use the span of the trailing expression for our cause,
+ // not the span of the entire function
+ if !explicit_return {
+ if let ExprKind::Block(body, _) = return_expr.kind && let Some(last_expr) = body.expr {
+ span = last_expr.span;
+ }
+ }
+ ret_coercion.borrow_mut().coerce(
+ self,
+ &self.cause(span, ObligationCauseCode::ReturnValue(return_expr.hir_id)),
+ return_expr,
+ return_expr_ty,
+ );
+
+ if self.return_type_has_opaque {
+ // Point any obligations that were registered due to opaque type
+ // inference at the return expression.
+ self.select_obligations_where_possible(false, |errors| {
+ self.point_at_return_for_opaque_ty_error(errors, span, return_expr_ty);
+ });
+ }
+ }
+
+ fn point_at_return_for_opaque_ty_error(
+ &self,
+ errors: &mut Vec<traits::FulfillmentError<'tcx>>,
+ span: Span,
+ return_expr_ty: Ty<'tcx>,
+ ) {
+ // Don't point at the whole block if it's empty
+ if span == self.tcx.hir().span(self.body_id) {
+ return;
+ }
+ for err in errors {
+ let cause = &mut err.obligation.cause;
+ if let ObligationCauseCode::OpaqueReturnType(None) = cause.code() {
+ let new_cause = ObligationCause::new(
+ cause.span,
+ cause.body_id,
+ ObligationCauseCode::OpaqueReturnType(Some((return_expr_ty, span))),
+ );
+ *cause = new_cause;
+ }
+ }
+ }
+
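+    /// Reports an error when the left-hand side of an assignment (or compound
+    /// assignment) is not a place expression, e.g. `1 + 1 = 2`. If the assignment
+    /// sits in a `while` condition (e.g. `while Some(x) = it.next() {}`), it also
+    /// suggests the `let` that was likely intended.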
+ pub(crate) fn check_lhs_assignable(
+ &self,
+ lhs: &'tcx hir::Expr<'tcx>,
+ err_code: &'static str,
+ op_span: Span,
+ adjust_err: impl FnOnce(&mut Diagnostic),
+ ) {
+ if lhs.is_syntactic_place_expr() {
+ return;
+ }
+
+ // FIXME: Make this use Diagnostic once error codes can be dynamically set.
+ let mut err = self.tcx.sess.struct_span_err_with_code(
+ op_span,
+ "invalid left-hand side of assignment",
+ DiagnosticId::Error(err_code.into()),
+ );
+ err.span_label(lhs.span, "cannot assign to this expression");
+
+ self.comes_from_while_condition(lhs.hir_id, |expr| {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_lo(),
+ "you might have meant to use pattern destructuring",
+ "let ",
+ Applicability::MachineApplicable,
+ );
+ });
+
+ adjust_err(&mut err);
+
+ err.emit();
+ }
+
+ // Check if an expression `original_expr_id` comes from the condition of a while loop,
+    // as opposed to the body of a while loop, which we can naively check by iterating
+ // parents until we find a loop...
+ pub(super) fn comes_from_while_condition(
+ &self,
+ original_expr_id: HirId,
+ then: impl FnOnce(&hir::Expr<'_>),
+ ) {
+ let mut parent = self.tcx.hir().get_parent_node(original_expr_id);
+ while let Some(node) = self.tcx.hir().find(parent) {
+ match node {
+ hir::Node::Expr(hir::Expr {
+ kind:
+ hir::ExprKind::Loop(
+ hir::Block {
+ expr:
+ Some(hir::Expr {
+ kind:
+ hir::ExprKind::Match(expr, ..) | hir::ExprKind::If(expr, ..),
+ ..
+ }),
+ ..
+ },
+ _,
+ hir::LoopSource::While,
+ _,
+ ),
+ ..
+ }) => {
+ // Check if our original expression is a child of the condition of a while loop
+ let expr_is_ancestor = std::iter::successors(Some(original_expr_id), |id| {
+ self.tcx.hir().find_parent_node(*id)
+ })
+ .take_while(|id| *id != parent)
+ .any(|id| id == expr.hir_id);
+ // if it is, then we have a situation like `while Some(0) = value.get(0) {`,
+ // where `while let` was more likely intended.
+ if expr_is_ancestor {
+ then(expr);
+ }
+ break;
+ }
+ hir::Node::Item(_)
+ | hir::Node::ImplItem(_)
+ | hir::Node::TraitItem(_)
+ | hir::Node::Crate(_) => break,
+ _ => {
+ parent = self.tcx.hir().get_parent_node(parent);
+ }
+ }
+ }
+ }
+
+ // A generic function for checking the 'then' and 'else' clauses in an 'if'
+ // or 'if-else' expression.
+ fn check_then_else(
+ &self,
+ cond_expr: &'tcx hir::Expr<'tcx>,
+ then_expr: &'tcx hir::Expr<'tcx>,
+ opt_else_expr: Option<&'tcx hir::Expr<'tcx>>,
+ sp: Span,
+ orig_expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let cond_ty = self.check_expr_has_type_or_error(cond_expr, self.tcx.types.bool, |_| {});
+
+ self.warn_if_unreachable(
+ cond_expr.hir_id,
+ then_expr.span,
+ "block in `if` or `while` expression",
+ );
+
+ let cond_diverges = self.diverges.get();
+ self.diverges.set(Diverges::Maybe);
+
+ let expected = orig_expected.adjust_for_branches(self);
+ let then_ty = self.check_expr_with_expectation(then_expr, expected);
+ let then_diverges = self.diverges.get();
+ self.diverges.set(Diverges::Maybe);
+
+ // We've already taken the expected type's preferences
+ // into account when typing the `then` branch. To figure
+ // out the initial shot at a LUB, we thus only consider
+ // `expected` if it represents a *hard* constraint
+ // (`only_has_type`); otherwise, we just go with a
+ // fresh type variable.
+ let coerce_to_ty = expected.coercion_target_type(self, sp);
+ let mut coerce: DynamicCoerceMany<'_> = CoerceMany::new(coerce_to_ty);
+
+ coerce.coerce(self, &self.misc(sp), then_expr, then_ty);
+
+ if let Some(else_expr) = opt_else_expr {
+ let else_ty = self.check_expr_with_expectation(else_expr, expected);
+ let else_diverges = self.diverges.get();
+
+ let opt_suggest_box_span = self.opt_suggest_box_span(then_ty, else_ty, orig_expected);
+ let if_cause = self.if_cause(
+ sp,
+ cond_expr.span,
+ then_expr,
+ else_expr,
+ then_ty,
+ else_ty,
+ opt_suggest_box_span,
+ );
+
+ coerce.coerce(self, &if_cause, else_expr, else_ty);
+
+ // We won't diverge unless both branches do (or the condition does).
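+            // (`&` binds tighter than `|`, so this reads as
+            // `cond_diverges | (then_diverges & else_diverges)`.)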
+ self.diverges.set(cond_diverges | then_diverges & else_diverges);
+ } else {
+ self.if_fallback_coercion(sp, then_expr, &mut coerce);
+
+ // If the condition is false we can't diverge.
+ self.diverges.set(cond_diverges);
+ }
+
+ let result_ty = coerce.complete(self);
+ if cond_ty.references_error() { self.tcx.ty_error() } else { result_ty }
+ }
+
+ /// Type check assignment expression `expr` of form `lhs = rhs`.
+ /// The expected type is `()` and is passed to the function for the purposes of diagnostics.
+ fn check_expr_assign(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ lhs: &'tcx hir::Expr<'tcx>,
+ rhs: &'tcx hir::Expr<'tcx>,
+ span: Span,
+ ) -> Ty<'tcx> {
+ let expected_ty = expected.coercion_target_type(self, expr.span);
+ if expected_ty == self.tcx.types.bool {
+ // The expected type is `bool` but this will result in `()` so we can reasonably
+ // say that the user intended to write `lhs == rhs` instead of `lhs = rhs`.
+ // The likely cause of this is `if foo = bar { .. }`.
+ let actual_ty = self.tcx.mk_unit();
+ let mut err = self.demand_suptype_diag(expr.span, expected_ty, actual_ty).unwrap();
+ let lhs_ty = self.check_expr(&lhs);
+ let rhs_ty = self.check_expr(&rhs);
+ let (applicability, eq) = if self.can_coerce(rhs_ty, lhs_ty) {
+ (Applicability::MachineApplicable, true)
+ } else if let ExprKind::Binary(
+ Spanned { node: hir::BinOpKind::And | hir::BinOpKind::Or, .. },
+ _,
+ rhs_expr,
+ ) = lhs.kind
+ {
+ // if x == 1 && y == 2 { .. }
+ // +
+ let actual_lhs_ty = self.check_expr(&rhs_expr);
+ (Applicability::MaybeIncorrect, self.can_coerce(rhs_ty, actual_lhs_ty))
+ } else if let ExprKind::Binary(
+ Spanned { node: hir::BinOpKind::And | hir::BinOpKind::Or, .. },
+ lhs_expr,
+ _,
+ ) = rhs.kind
+ {
+ // if x == 1 && y == 2 { .. }
+ // +
+ let actual_rhs_ty = self.check_expr(&lhs_expr);
+ (Applicability::MaybeIncorrect, self.can_coerce(actual_rhs_ty, lhs_ty))
+ } else {
+ (Applicability::MaybeIncorrect, false)
+ };
+ if !lhs.is_syntactic_place_expr()
+ && lhs.is_approximately_pattern()
+ && !matches!(lhs.kind, hir::ExprKind::Lit(_))
+ {
+ // Do not suggest `if let x = y` as `==` is way more likely to be the intention.
+ let hir = self.tcx.hir();
+ if let hir::Node::Expr(hir::Expr { kind: ExprKind::If { .. }, .. }) =
+ hir.get(hir.get_parent_node(hir.get_parent_node(expr.hir_id)))
+ {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_lo(),
+ "you might have meant to use pattern matching",
+ "let ",
+ applicability,
+ );
+ };
+ }
+ if eq {
+ err.span_suggestion_verbose(
+ span.shrink_to_hi(),
+ "you might have meant to compare for equality",
+ '=',
+ applicability,
+ );
+ }
+
+ // If the assignment expression itself is ill-formed, don't
+ // bother emitting another error
+ if lhs_ty.references_error() || rhs_ty.references_error() {
+ err.delay_as_bug()
+ } else {
+ err.emit();
+ }
+ return self.tcx.ty_error();
+ }
+
+ let lhs_ty = self.check_expr_with_needs(&lhs, Needs::MutPlace);
+
+ let suggest_deref_binop = |err: &mut Diagnostic, rhs_ty: Ty<'tcx>| {
+ if let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty) {
+ // Can only assign if the type is sized, so if `DerefMut` yields a type that is
+ // unsized, do not suggest dereferencing it.
+ let lhs_deref_ty_is_sized = self
+ .infcx
+ .type_implements_trait(
+ self.tcx.lang_items().sized_trait().unwrap(),
+ lhs_deref_ty,
+ ty::List::empty(),
+ self.param_env,
+ )
+ .may_apply();
+ if lhs_deref_ty_is_sized && self.can_coerce(rhs_ty, lhs_deref_ty) {
+ err.span_suggestion_verbose(
+ lhs.span.shrink_to_lo(),
+ "consider dereferencing here to assign to the mutably borrowed value",
+ "*",
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ };
+
+ // This is (basically) inlined `check_expr_coercable_to_type`, but we want
+ // to suggest an additional fixup here in `suggest_deref_binop`.
+ let rhs_ty = self.check_expr_with_hint(&rhs, lhs_ty);
+ if let (_, Some(mut diag)) =
+ self.demand_coerce_diag(rhs, rhs_ty, lhs_ty, Some(lhs), AllowTwoPhase::No)
+ {
+ suggest_deref_binop(&mut diag, rhs_ty);
+ diag.emit();
+ }
+
+ self.check_lhs_assignable(lhs, "E0070", span, |err| {
+ if let Some(rhs_ty) = self.typeck_results.borrow().expr_ty_opt(rhs) {
+ suggest_deref_binop(err, rhs_ty);
+ }
+ });
+
+ self.require_type_is_sized(lhs_ty, lhs.span, traits::AssignmentLhsSized);
+
+ if lhs_ty.references_error() || rhs_ty.references_error() {
+ self.tcx.ty_error()
+ } else {
+ self.tcx.mk_unit()
+ }
+ }
+
+ pub(super) fn check_expr_let(&self, let_expr: &'tcx hir::Let<'tcx>) -> Ty<'tcx> {
+ // for let statements, this is done in check_stmt
+ let init = let_expr.init;
+ self.warn_if_unreachable(init.hir_id, init.span, "block in `let` expression");
+ // otherwise check exactly as a let statement
+ self.check_decl(let_expr.into());
+ // but return a bool, for this is a boolean expression
+ self.tcx.types.bool
+ }
+
+ fn check_expr_loop(
+ &self,
+ body: &'tcx hir::Block<'tcx>,
+ source: hir::LoopSource,
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let coerce = match source {
+ // you can only use break with a value from a normal `loop { }`
+ hir::LoopSource::Loop => {
+ let coerce_to = expected.coercion_target_type(self, body.span);
+ Some(CoerceMany::new(coerce_to))
+ }
+
+ hir::LoopSource::While | hir::LoopSource::ForLoop => None,
+ };
+
+ let ctxt = BreakableCtxt {
+ coerce,
+ may_break: false, // Will get updated if/when we find a `break`.
+ };
+
+ let (ctxt, ()) = self.with_breakable_ctxt(expr.hir_id, ctxt, || {
+ self.check_block_no_value(&body);
+ });
+
+ if ctxt.may_break {
+ // No way to know whether it's diverging because
+ // of a `break` or an outer `break` or `return`.
+ self.diverges.set(Diverges::Maybe);
+ }
+
+ // If we permit break with a value, then result type is
+ // the LUB of the breaks (possibly ! if none); else, it
+ // is nil. This makes sense because infinite loops
+ // (which would have type !) are only possible iff we
+ // permit break with a value [1].
+ if ctxt.coerce.is_none() && !ctxt.may_break {
+ // [1]
+ self.tcx.sess.delay_span_bug(body.span, "no coercion, but loop may not break");
+ }
+ ctxt.coerce.map(|c| c.complete(self)).unwrap_or_else(|| self.tcx.mk_unit())
+ }
+
+ /// Checks a method call.
+ fn check_method_call(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ segment: &hir::PathSegment<'_>,
+ rcvr: &'tcx hir::Expr<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let rcvr_t = self.check_expr(&rcvr);
+ // no need to check for bot/err -- callee does that
+ let rcvr_t = self.structurally_resolved_type(rcvr.span, rcvr_t);
+ let span = segment.ident.span;
+
+ let method = match self.lookup_method(rcvr_t, segment, span, expr, rcvr, args) {
+ Ok(method) => {
+ // We could add a "consider `foo::<params>`" suggestion here, but I wasn't able to
+ // trigger this codepath causing `structurally_resolved_type` to emit an error.
+
+ self.write_method_call(expr.hir_id, method);
+ Ok(method)
+ }
+ Err(error) => {
+ if segment.ident.name != kw::Empty {
+ if let Some(mut err) = self.report_method_error(
+ span,
+ rcvr_t,
+ segment.ident,
+ SelfSource::MethodCall(rcvr),
+ error,
+ Some((rcvr, args)),
+ ) {
+ err.emit();
+ }
+ }
+ Err(())
+ }
+ };
+
+ // Call the generic checker.
+ self.check_method_argument_types(span, expr, method, &args, DontTupleArguments, expected)
+ }
+
+ fn check_expr_cast(
+ &self,
+ e: &'tcx hir::Expr<'tcx>,
+ t: &'tcx hir::Ty<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ // Find the type of `e`. Supply hints based on the type we are casting to,
+ // if appropriate.
+ let t_cast = self.to_ty_saving_user_provided_ty(t);
+ let t_cast = self.resolve_vars_if_possible(t_cast);
+ let t_expr = self.check_expr_with_expectation(e, ExpectCastableToType(t_cast));
+ let t_expr = self.resolve_vars_if_possible(t_expr);
+
+ // Eagerly check for some obvious errors.
+ if t_expr.references_error() || t_cast.references_error() {
+ self.tcx.ty_error()
+ } else {
+ // Defer other checks until we're done type checking.
+ let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
+ match cast::CastCheck::new(
+ self,
+ e,
+ t_expr,
+ t_cast,
+ t.span,
+ expr.span,
+ self.param_env.constness(),
+ ) {
+ Ok(cast_check) => {
+ debug!(
+ "check_expr_cast: deferring cast from {:?} to {:?}: {:?}",
+ t_cast, t_expr, cast_check,
+ );
+ deferred_cast_checks.push(cast_check);
+ t_cast
+ }
+ Err(_) => self.tcx.ty_error(),
+ }
+ }
+ }
+
+ fn check_expr_array(
+ &self,
+ args: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let element_ty = if !args.is_empty() {
+ let coerce_to = expected
+ .to_option(self)
+ .and_then(|uty| match *uty.kind() {
+ ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
+ _ => None,
+ })
+ .unwrap_or_else(|| {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: expr.span,
+ })
+ });
+ let mut coerce = CoerceMany::with_coercion_sites(coerce_to, args);
+ assert_eq!(self.diverges.get(), Diverges::Maybe);
+ for e in args {
+ let e_ty = self.check_expr_with_hint(e, coerce_to);
+ let cause = self.misc(e.span);
+ coerce.coerce(self, &cause, e, e_ty);
+ }
+ coerce.complete(self)
+ } else {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: expr.span,
+ })
+ };
+ let array_len = args.len() as u64;
+ self.suggest_array_len(expr, array_len);
+ self.tcx.mk_array(element_ty, array_len)
+ }
+
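+    /// If this array expression initializes a binding or `const` whose declared
+    /// type wrote the length as `_` (e.g. `let x: [u8; _] = [1, 2, 3];`), try to
+    /// steal the diagnostic stashed under `StashKey::UnderscoreForArrayLengths`
+    /// and suggest the concrete length computed here.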
+ fn suggest_array_len(&self, expr: &'tcx hir::Expr<'tcx>, array_len: u64) {
+ let parent_node = self.tcx.hir().parent_iter(expr.hir_id).find(|(_, node)| {
+ !matches!(node, hir::Node::Expr(hir::Expr { kind: hir::ExprKind::AddrOf(..), .. }))
+ });
+ let Some((_,
+ hir::Node::Local(hir::Local { ty: Some(ty), .. })
+ | hir::Node::Item(hir::Item { kind: hir::ItemKind::Const(ty, _), .. }))
+ ) = parent_node else {
+ return
+ };
+ if let hir::TyKind::Array(_, length) = ty.peel_refs().kind
+ && let hir::ArrayLen::Body(hir::AnonConst { hir_id, .. }) = length
+ && let Some(span) = self.tcx.hir().opt_span(hir_id)
+ {
+ match self.tcx.sess.diagnostic().steal_diagnostic(span, StashKey::UnderscoreForArrayLengths) {
+ Some(mut err) => {
+ err.span_suggestion(
+ span,
+ "consider specifying the array length",
+ array_len,
+ Applicability::MaybeIncorrect,
+ );
+ err.emit();
+ }
+ None => ()
+ }
+ }
+ }
+
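+    /// Type checks an inline `const { .. }` block by creating a fresh `FnCtxt`
+    /// for the block's own body, gathering its locals, and checking the body
+    /// expression against the outer expectation.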
+ fn check_expr_const_block(
+ &self,
+ anon_const: &'tcx hir::AnonConst,
+ expected: Expectation<'tcx>,
+ _expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let body = self.tcx.hir().body(anon_const.body);
+
+ // Create a new function context.
+ let fcx = FnCtxt::new(self, self.param_env.with_const(), body.value.hir_id);
+ crate::GatherLocalsVisitor::new(&fcx).visit_body(body);
+
+ let ty = fcx.check_expr_with_expectation(&body.value, expected);
+ fcx.require_type_is_sized(ty, body.value.span, traits::ConstSized);
+ fcx.write_ty(anon_const.hir_id, ty);
+ ty
+ }
+
+ fn check_expr_repeat(
+ &self,
+ element: &'tcx hir::Expr<'tcx>,
+ count: &'tcx hir::ArrayLen,
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let count = self.array_length_to_const(count);
+ if let Some(count) = count.try_eval_usize(tcx, self.param_env) {
+ self.suggest_array_len(expr, count);
+ }
+
+ let uty = match expected {
+ ExpectHasType(uty) => match *uty.kind() {
+ ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
+ _ => None,
+ },
+ _ => None,
+ };
+
+ let (element_ty, t) = match uty {
+ Some(uty) => {
+ self.check_expr_coercable_to_type(&element, uty, None);
+ (uty, uty)
+ }
+ None => {
+ let ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: element.span,
+ });
+ let element_ty = self.check_expr_has_type_or_error(&element, ty, |_| {});
+ (element_ty, ty)
+ }
+ };
+
+ if element_ty.references_error() {
+ return tcx.ty_error();
+ }
+
+ self.check_repeat_element_needs_copy_bound(element, count, element_ty);
+
+ tcx.mk_ty(ty::Array(t, count))
+ }
+
+ fn check_repeat_element_needs_copy_bound(
+ &self,
+ element: &hir::Expr<'_>,
+ count: ty::Const<'tcx>,
+ element_ty: Ty<'tcx>,
+ ) {
+ let tcx = self.tcx;
+ // Actual constants as the repeat element get inserted repeatedly instead of getting copied via Copy.
+ match &element.kind {
+ hir::ExprKind::ConstBlock(..) => return,
+ hir::ExprKind::Path(qpath) => {
+ let res = self.typeck_results.borrow().qpath_res(qpath, element.hir_id);
+ if let Res::Def(DefKind::Const | DefKind::AssocConst | DefKind::AnonConst, _) = res
+ {
+ return;
+ }
+ }
+ _ => {}
+ }
+ // If someone calls a const fn, they can extract that call out into a separate constant (or a const
+ // block in the future), so we check for that case to mention it in the diagnostic. Does not affect typeck.
+ let is_const_fn = match element.kind {
+ hir::ExprKind::Call(func, _args) => match *self.node_ty(func.hir_id).kind() {
+ ty::FnDef(def_id, _) => tcx.is_const_fn(def_id),
+ _ => false,
+ },
+ _ => false,
+ };
+
+ // If the length is 0, we don't create any elements, so we don't copy any. If the length is 1, we
+ // don't copy that one element, we move it. Only check for Copy if the length is larger.
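+ // e.g. `[x; 2]` requires `x: Copy`, while `[x; 1]` moves `x` and `[x; 0]` never uses it.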
+ if count.try_eval_usize(tcx, self.param_env).map_or(true, |len| len > 1) {
+ let lang_item = self.tcx.require_lang_item(LangItem::Copy, None);
+ let code = traits::ObligationCauseCode::RepeatElementCopy { is_const_fn };
+ self.require_type_meets(element_ty, element.span, code, lang_item);
+ }
+ }
+
+ fn check_expr_tuple(
+ &self,
+ elts: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let flds = expected.only_has_type(self).and_then(|ty| {
+ let ty = self.resolve_vars_with_obligations(ty);
+ match ty.kind() {
+ ty::Tuple(flds) => Some(&flds[..]),
+ _ => None,
+ }
+ });
+
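+ // Check each element against the corresponding field of the expected tuple type, if any.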
+ let elt_ts_iter = elts.iter().enumerate().map(|(i, e)| match flds {
+ Some(fs) if i < fs.len() => {
+ let ety = fs[i];
+ self.check_expr_coercable_to_type(&e, ety, None);
+ ety
+ }
+ _ => self.check_expr_with_expectation(&e, NoExpectation),
+ });
+ let tuple = self.tcx.mk_tup(elt_ts_iter);
+ if tuple.references_error() {
+ self.tcx.ty_error()
+ } else {
+ self.require_type_is_sized(tuple, expr.span, traits::TupleInitializerSized);
+ tuple
+ }
+ }
+
+ fn check_expr_struct(
+ &self,
+ expr: &hir::Expr<'_>,
+ expected: Expectation<'tcx>,
+ qpath: &QPath<'_>,
+ fields: &'tcx [hir::ExprField<'tcx>],
+ base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
+ ) -> Ty<'tcx> {
+ // Find the relevant variant
+ let Some((variant, adt_ty)) = self.check_struct_path(qpath, expr.hir_id) else {
+ self.check_struct_fields_on_error(fields, base_expr);
+ return self.tcx.ty_error();
+ };
+
+ // Prohibit struct expressions when non-exhaustive flag is set.
+ let adt = adt_ty.ty_adt_def().expect("`check_struct_path` returned non-ADT type");
+ if !adt.did().is_local() && variant.is_field_list_non_exhaustive() {
+ self.tcx
+ .sess
+ .emit_err(StructExprNonExhaustive { span: expr.span, what: adt.variant_descr() });
+ }
+
+ self.check_expr_struct_fields(
+ adt_ty,
+ expected,
+ expr.hir_id,
+ qpath.span(),
+ variant,
+ fields,
+ base_expr,
+ expr.span,
+ );
+
+ self.require_type_is_sized(adt_ty, expr.span, traits::StructInitializerSized);
+ adt_ty
+ }
+
+ fn check_expr_struct_fields(
+ &self,
+ adt_ty: Ty<'tcx>,
+ expected: Expectation<'tcx>,
+ expr_id: hir::HirId,
+ span: Span,
+ variant: &'tcx ty::VariantDef,
+ ast_fields: &'tcx [hir::ExprField<'tcx>],
+ base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
+ expr_span: Span,
+ ) {
+ let tcx = self.tcx;
+
+ let expected_inputs =
+ self.expected_inputs_for_expected_output(span, expected, adt_ty, &[adt_ty]);
+ let adt_ty_hint = if let Some(expected_inputs) = expected_inputs {
+ expected_inputs.get(0).cloned().unwrap_or(adt_ty)
+ } else {
+ adt_ty
+ };
+ // re-link the regions that `expected_inputs_for_expected_output` (EIfEO) can erase.
+ self.demand_eqtype(span, adt_ty_hint, adt_ty);
+
+ let ty::Adt(adt, substs) = adt_ty.kind() else {
+ span_bug!(span, "non-ADT passed to check_expr_struct_fields");
+ };
+ let adt_kind = adt.adt_kind();
+
+ let mut remaining_fields = variant
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(i, field)| (field.ident(tcx).normalize_to_macros_2_0(), (i, field)))
+ .collect::<FxHashMap<_, _>>();
+
+ let mut seen_fields = FxHashMap::default();
+
+ let mut error_happened = false;
+
+ // Type-check each field.
+ for (idx, field) in ast_fields.iter().enumerate() {
+ let ident = tcx.adjust_ident(field.ident, variant.def_id);
+ let field_type = if let Some((i, v_field)) = remaining_fields.remove(&ident) {
+ seen_fields.insert(ident, field.span);
+ self.write_field_index(field.hir_id, i);
+
+ // We don't look at stability attributes on
+ // struct-like enums (yet...), but it's definitely not
+ // a bug to have constructed one.
+ if adt_kind != AdtKind::Enum {
+ tcx.check_stability(v_field.did, Some(expr_id), field.span, None);
+ }
+
+ self.field_ty(field.span, v_field, substs)
+ } else {
+ error_happened = true;
+ if let Some(prev_span) = seen_fields.get(&ident) {
+ tcx.sess.emit_err(FieldMultiplySpecifiedInInitializer {
+ span: field.ident.span,
+ prev_span: *prev_span,
+ ident,
+ });
+ } else {
+ self.report_unknown_field(
+ adt_ty,
+ variant,
+ field,
+ ast_fields,
+ adt.variant_descr(),
+ expr_span,
+ );
+ }
+
+ tcx.ty_error()
+ };
+
+ // Make sure to give a type to the field even if there's
+ // an error, so we can continue type-checking.
+ let ty = self.check_expr_with_hint(&field.expr, field_type);
+ let (_, diag) =
+ self.demand_coerce_diag(&field.expr, ty, field_type, None, AllowTwoPhase::No);
+
+ if let Some(mut diag) = diag {
+ if idx == ast_fields.len() - 1 && remaining_fields.is_empty() {
+ self.suggest_fru_from_range(field, variant, substs, &mut diag);
+ }
+ diag.emit();
+ }
+ }
+
+ // Make sure the programmer specified the correct number of fields.
+ if adt_kind == AdtKind::Union {
+ if ast_fields.len() != 1 {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0784,
+ "union expressions should have exactly one field",
+ )
+ .emit();
+ }
+ }
+
+ // If check_expr_struct_fields hit an error, do not attempt to populate
+ // the fields with the base_expr. This could cause us to hit errors later
+ // when certain fields are assumed to exist that in fact do not.
+ if error_happened {
+ return;
+ }
+
+ if let Some(base_expr) = base_expr {
+ // FIXME: We are currently creating two branches here in order to maintain
+ // consistency. But they should be merged as much as possible.
+ let fru_tys = if self.tcx.features().type_changing_struct_update {
+ if adt.is_struct() {
+ // Make some fresh substitutions for our ADT type.
+ let fresh_substs = self.fresh_substs_for_item(base_expr.span, adt.did());
+ // We do subtyping on the FRU fields first, so we can
+ // learn exactly what types the base expr needs to be
+ // constrained to, so that it is compatible with the
+ // struct type we expect from the expectation value.
+ let fru_tys = variant
+ .fields
+ .iter()
+ .map(|f| {
+ let fru_ty = self.normalize_associated_types_in(
+ expr_span,
+ self.field_ty(base_expr.span, f, fresh_substs),
+ );
+ let ident = self.tcx.adjust_ident(f.ident(self.tcx), variant.def_id);
+ if let Some(_) = remaining_fields.remove(&ident) {
+ let target_ty = self.field_ty(base_expr.span, f, substs);
+ let cause = self.misc(base_expr.span);
+ match self.at(&cause, self.param_env).sup(target_ty, fru_ty) {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations)
+ }
+ Err(_) => {
+ // This should never happen, since we're just subtyping the
+ // remaining_fields, but it's fine to emit this, I guess.
+ self.err_ctxt()
+ .report_mismatched_types(
+ &cause,
+ target_ty,
+ fru_ty,
+ FieldMisMatch(variant.name, ident.name),
+ )
+ .emit();
+ }
+ }
+ }
+ self.resolve_vars_if_possible(fru_ty)
+ })
+ .collect();
+ // The use of fresh substs that we have subtyped against
+ // our base ADT type's fields allows us to guide inference
+ // along so that, e.g.
+ // ```
+ // MyStruct<'a, F1, F2, const C: usize> {
+ // f: F1,
+ // // Other fields that reference `'a`, `F2`, and `C`
+ // }
+ //
+ // let x = MyStruct {
+ // f: 1usize,
+ // ..other_struct
+ // };
+ // ```
+ // will have the `other_struct` expression constrained to
+ // `MyStruct<'a, _, F2, C>`, as opposed to just `_`...
+ // This is important to allow coercions to happen in
+ // `other_struct` itself. See `coerce-in-base-expr.rs`.
+ let fresh_base_ty = self.tcx.mk_adt(*adt, fresh_substs);
+ self.check_expr_has_type_or_error(
+ base_expr,
+ self.resolve_vars_if_possible(fresh_base_ty),
+ |_| {},
+ );
+ fru_tys
+ } else {
+ // Check the base_expr, regardless of a bad expected adt_ty, so we can get
+ // type errors on that expression, too.
+ self.check_expr(base_expr);
+ self.tcx
+ .sess
+ .emit_err(FunctionalRecordUpdateOnNonStruct { span: base_expr.span });
+ return;
+ }
+ } else {
+ self.check_expr_has_type_or_error(base_expr, adt_ty, |_| {
+ let base_ty = self.typeck_results.borrow().expr_ty(*base_expr);
+ let same_adt = match (adt_ty.kind(), base_ty.kind()) {
+ (ty::Adt(adt, _), ty::Adt(base_adt, _)) if adt == base_adt => true,
+ _ => false,
+ };
+ if self.tcx.sess.is_nightly_build() && same_adt {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::type_changing_struct_update,
+ base_expr.span,
+ "type changing struct updating is experimental",
+ )
+ .emit();
+ }
+ });
+ match adt_ty.kind() {
+ ty::Adt(adt, substs) if adt.is_struct() => variant
+ .fields
+ .iter()
+ .map(|f| {
+ self.normalize_associated_types_in(expr_span, f.ty(self.tcx, substs))
+ })
+ .collect(),
+ _ => {
+ self.tcx
+ .sess
+ .emit_err(FunctionalRecordUpdateOnNonStruct { span: base_expr.span });
+ return;
+ }
+ }
+ };
+ self.typeck_results.borrow_mut().fru_field_types_mut().insert(expr_id, fru_tys);
+ } else if adt_kind != AdtKind::Union && !remaining_fields.is_empty() {
+ debug!(?remaining_fields);
+ let private_fields: Vec<&ty::FieldDef> = variant
+ .fields
+ .iter()
+ .filter(|field| !field.vis.is_accessible_from(tcx.parent_module(expr_id), tcx))
+ .collect();
+
+ if !private_fields.is_empty() {
+ self.report_private_fields(adt_ty, span, private_fields, ast_fields);
+ } else {
+ self.report_missing_fields(
+ adt_ty,
+ span,
+ remaining_fields,
+ variant,
+ ast_fields,
+ substs,
+ );
+ }
+ }
+ }
+
+ fn check_struct_fields_on_error(
+ &self,
+ fields: &'tcx [hir::ExprField<'tcx>],
+ base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
+ ) {
+ for field in fields {
+ self.check_expr(&field.expr);
+ }
+ if let Some(base) = *base_expr {
+ self.check_expr(&base);
+ }
+ }
+
+ /// Report an error for a struct field expression when there are fields which aren't provided.
+ ///
+ /// ```text
+ /// error: missing field `you_can_use_this_field` in initializer of `foo::Foo`
+ /// --> src/main.rs:8:5
+ /// |
+ /// 8 | foo::Foo {};
+ /// | ^^^^^^^^ missing `you_can_use_this_field`
+ ///
+ /// error: aborting due to previous error
+ /// ```
+ fn report_missing_fields(
+ &self,
+ adt_ty: Ty<'tcx>,
+ span: Span,
+ remaining_fields: FxHashMap<Ident, (usize, &ty::FieldDef)>,
+ variant: &'tcx ty::VariantDef,
+ ast_fields: &'tcx [hir::ExprField<'tcx>],
+ substs: SubstsRef<'tcx>,
+ ) {
+ let len = remaining_fields.len();
+
+ let mut displayable_field_names: Vec<&str> =
+ remaining_fields.keys().map(|ident| ident.as_str()).collect();
+ // sorting &str primitives here, sort_unstable is ok
+ displayable_field_names.sort_unstable();
+
+ let mut truncated_fields_error = String::new();
+ let remaining_fields_names = match &displayable_field_names[..] {
+ [field1] => format!("`{}`", field1),
+ [field1, field2] => format!("`{field1}` and `{field2}`"),
+ [field1, field2, field3] => format!("`{field1}`, `{field2}` and `{field3}`"),
+ _ => {
+ truncated_fields_error =
+ format!(" and {} other field{}", len - 3, pluralize!(len - 3));
+ displayable_field_names
+ .iter()
+ .take(3)
+ .map(|n| format!("`{n}`"))
+ .collect::<Vec<_>>()
+ .join(", ")
+ }
+ };
+
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0063,
+ "missing field{} {}{} in initializer of `{}`",
+ pluralize!(len),
+ remaining_fields_names,
+ truncated_fields_error,
+ adt_ty
+ );
+ err.span_label(span, format!("missing {remaining_fields_names}{truncated_fields_error}"));
+
+ if let Some(last) = ast_fields.last() {
+ self.suggest_fru_from_range(last, variant, substs, &mut err);
+ }
+
+ err.emit();
+ }
+
+ /// If the last field is a range literal, but it isn't supposed to be, then they probably
+ /// meant to use functional update syntax.
+ fn suggest_fru_from_range(
+ &self,
+ last_expr_field: &hir::ExprField<'tcx>,
+ variant: &ty::VariantDef,
+ substs: SubstsRef<'tcx>,
+ err: &mut Diagnostic,
+ ) {
+ // I don't use 'is_range_literal' because only double-sided, half-open ranges count.
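+ // e.g. `Foo { x: 0..remainder }` where the programmer likely meant `Foo { x: 0, ..remainder }`.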
+ if let ExprKind::Struct(
+ QPath::LangItem(LangItem::Range, ..),
+ &[ref range_start, ref range_end],
+ _,
+ ) = last_expr_field.expr.kind
+ && let variant_field =
+ variant.fields.iter().find(|field| field.ident(self.tcx) == last_expr_field.ident)
+ && let range_def_id = self.tcx.lang_items().range_struct()
+ && variant_field
+ .and_then(|field| field.ty(self.tcx, substs).ty_adt_def())
+ .map(|adt| adt.did())
+ != range_def_id
+ {
+ let instead = self
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(range_end.expr.span)
+ .map(|s| format!(" from `{s}`"))
+ .unwrap_or_default();
+ err.span_suggestion(
+ range_start.span.shrink_to_hi(),
+ &format!("to set the remaining fields{instead}, separate the last named field with a comma"),
+ ",",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ /// Report an error for a struct field expression when there are invisible fields.
+ ///
+ /// ```text
+ /// error: cannot construct `Foo` with struct literal syntax due to private fields
+ /// --> src/main.rs:8:5
+ /// |
+ /// 8 | foo::Foo {};
+ /// | ^^^^^^^^
+ ///
+ /// error: aborting due to previous error
+ /// ```
+ fn report_private_fields(
+ &self,
+ adt_ty: Ty<'tcx>,
+ span: Span,
+ private_fields: Vec<&ty::FieldDef>,
+ used_fields: &'tcx [hir::ExprField<'tcx>],
+ ) {
+ let mut err = self.tcx.sess.struct_span_err(
+ span,
+ &format!(
+ "cannot construct `{adt_ty}` with struct literal syntax due to private fields",
+ ),
+ );
+ let (used_private_fields, remaining_private_fields): (
+ Vec<(Symbol, Span, bool)>,
+ Vec<(Symbol, Span, bool)>,
+ ) = private_fields
+ .iter()
+ .map(|field| {
+ match used_fields.iter().find(|used_field| field.name == used_field.ident.name) {
+ Some(used_field) => (field.name, used_field.span, true),
+ None => (field.name, self.tcx.def_span(field.did), false),
+ }
+ })
+ .partition(|field| field.2);
+ err.span_labels(used_private_fields.iter().map(|(_, span, _)| *span), "private field");
+ if !remaining_private_fields.is_empty() {
+ let remaining_private_fields_len = remaining_private_fields.len();
+ let names = match &remaining_private_fields
+ .iter()
+ .map(|(name, _, _)| name)
+ .collect::<Vec<_>>()[..]
+ {
+ _ if remaining_private_fields_len > 6 => String::new(),
+ [name] => format!("`{name}` "),
+ [names @ .., last] => {
+ let names = names.iter().map(|name| format!("`{name}`")).collect::<Vec<_>>();
+ format!("{} and `{last}` ", names.join(", "))
+ }
+ [] => unreachable!(),
+ };
+ err.note(format!(
+ "... and other private field{s} {names}that {were} not provided",
+ s = pluralize!(remaining_private_fields_len),
+ were = pluralize!("was", remaining_private_fields_len),
+ ));
+ }
+ err.emit();
+ }
+
+ fn report_unknown_field(
+ &self,
+ ty: Ty<'tcx>,
+ variant: &'tcx ty::VariantDef,
+ field: &hir::ExprField<'_>,
+ skip_fields: &[hir::ExprField<'_>],
+ kind_name: &str,
+ expr_span: Span,
+ ) {
+ if variant.is_recovered() {
+ self.set_tainted_by_errors();
+ return;
+ }
+ let mut err = self.err_ctxt().type_error_struct_with_diag(
+ field.ident.span,
+ |actual| match ty.kind() {
+ ty::Adt(adt, ..) if adt.is_enum() => struct_span_err!(
+ self.tcx.sess,
+ field.ident.span,
+ E0559,
+ "{} `{}::{}` has no field named `{}`",
+ kind_name,
+ actual,
+ variant.name,
+ field.ident
+ ),
+ _ => struct_span_err!(
+ self.tcx.sess,
+ field.ident.span,
+ E0560,
+ "{} `{}` has no field named `{}`",
+ kind_name,
+ actual,
+ field.ident
+ ),
+ },
+ ty,
+ );
+
+ let variant_ident_span = self.tcx.def_ident_span(variant.def_id).unwrap();
+ match variant.ctor_kind {
+ CtorKind::Fn => match ty.kind() {
+ ty::Adt(adt, ..) if adt.is_enum() => {
+ err.span_label(
+ variant_ident_span,
+ format!(
+ "`{adt}::{variant}` defined here",
+ adt = ty,
+ variant = variant.name,
+ ),
+ );
+ err.span_label(field.ident.span, "field does not exist");
+ err.span_suggestion_verbose(
+ expr_span,
+ &format!(
+ "`{adt}::{variant}` is a tuple {kind_name}, use the appropriate syntax",
+ adt = ty,
+ variant = variant.name,
+ ),
+ format!(
+ "{adt}::{variant}(/* fields */)",
+ adt = ty,
+ variant = variant.name,
+ ),
+ Applicability::HasPlaceholders,
+ );
+ }
+ _ => {
+ err.span_label(variant_ident_span, format!("`{adt}` defined here", adt = ty));
+ err.span_label(field.ident.span, "field does not exist");
+ err.span_suggestion_verbose(
+ expr_span,
+ &format!(
+ "`{adt}` is a tuple {kind_name}, use the appropriate syntax",
+ adt = ty,
+ kind_name = kind_name,
+ ),
+ format!("{adt}(/* fields */)", adt = ty),
+ Applicability::HasPlaceholders,
+ );
+ }
+ },
+ _ => {
+ // prevent all specified fields from being suggested
+ let skip_fields = skip_fields.iter().map(|x| x.ident.name);
+ if let Some(field_name) = self.suggest_field_name(
+ variant,
+ field.ident.name,
+ skip_fields.collect(),
+ expr_span,
+ ) {
+ err.span_suggestion(
+ field.ident.span,
+ "a field with a similar name exists",
+ field_name,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ match ty.kind() {
+ ty::Adt(adt, ..) => {
+ if adt.is_enum() {
+ err.span_label(
+ field.ident.span,
+ format!("`{}::{}` does not have this field", ty, variant.name),
+ );
+ } else {
+ err.span_label(
+ field.ident.span,
+ format!("`{ty}` does not have this field"),
+ );
+ }
+ let available_field_names =
+ self.available_field_names(variant, expr_span);
+ if !available_field_names.is_empty() {
+ err.note(&format!(
+ "available fields are: {}",
+ self.name_series_display(available_field_names)
+ ));
+ }
+ }
+ _ => bug!("non-ADT passed to report_unknown_field"),
+ }
+ };
+ }
+ }
+ err.emit();
+ }
+
+ // Return a hint about the closest match in field names
+ fn suggest_field_name(
+ &self,
+ variant: &'tcx ty::VariantDef,
+ field: Symbol,
+ skip: Vec<Symbol>,
+ // The span where stability will be checked
+ span: Span,
+ ) -> Option<Symbol> {
+ let names = variant
+ .fields
+ .iter()
+ .filter_map(|field| {
+ // ignore already set fields and private fields from non-local crates
+ // and unstable fields.
+ if skip.iter().any(|&x| x == field.name)
+ || (!variant.def_id.is_local() && !field.vis.is_public())
+ || matches!(
+ self.tcx.eval_stability(field.did, None, span, None),
+ stability::EvalResult::Deny { .. }
+ )
+ {
+ None
+ } else {
+ Some(field.name)
+ }
+ })
+ .collect::<Vec<Symbol>>();
+
+ find_best_match_for_name(&names, field, None)
+ }
+
+ fn available_field_names(
+ &self,
+ variant: &'tcx ty::VariantDef,
+ access_span: Span,
+ ) -> Vec<Symbol> {
+ variant
+ .fields
+ .iter()
+ .filter(|field| {
+ let def_scope = self
+ .tcx
+ .adjust_ident_and_get_scope(field.ident(self.tcx), variant.def_id, self.body_id)
+ .1;
+ field.vis.is_accessible_from(def_scope, self.tcx)
+ && !matches!(
+ self.tcx.eval_stability(field.did, None, access_span, None),
+ stability::EvalResult::Deny { .. }
+ )
+ })
+ .filter(|field| !self.tcx.is_doc_hidden(field.did))
+ .map(|field| field.name)
+ .collect()
+ }
+
+ fn name_series_display(&self, names: Vec<Symbol>) -> String {
+ // dynamic limit, to never omit just one field
+ let limit = if names.len() == 6 { 6 } else { 5 };
+ let mut display =
+ names.iter().take(limit).map(|n| format!("`{}`", n)).collect::<Vec<_>>().join(", ");
+ if names.len() > limit {
+ display = format!("{} ... and {} others", display, names.len() - limit);
+ }
+ display
+ }
+
+ // Check field access expressions
+ fn check_field(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ base: &'tcx hir::Expr<'tcx>,
+ field: Ident,
+ ) -> Ty<'tcx> {
+ debug!("check_field(expr: {:?}, base: {:?}, field: {:?})", expr, base, field);
+ let base_ty = self.check_expr(base);
+ let base_ty = self.structurally_resolved_type(base.span, base_ty);
+ let mut private_candidate = None;
+ let mut autoderef = self.autoderef(expr.span, base_ty);
+ while let Some((deref_base_ty, _)) = autoderef.next() {
+ debug!("deref_base_ty: {:?}", deref_base_ty);
+ match deref_base_ty.kind() {
+ ty::Adt(base_def, substs) if !base_def.is_enum() => {
+ debug!("struct named {:?}", deref_base_ty);
+ let (ident, def_scope) =
+ self.tcx.adjust_ident_and_get_scope(field, base_def.did(), self.body_id);
+ let fields = &base_def.non_enum_variant().fields;
+ if let Some(index) = fields
+ .iter()
+ .position(|f| f.ident(self.tcx).normalize_to_macros_2_0() == ident)
+ {
+ let field = &fields[index];
+ let field_ty = self.field_ty(expr.span, field, substs);
+ // Save the index of all fields regardless of their visibility in case
+ // of error recovery.
+ self.write_field_index(expr.hir_id, index);
+ let adjustments = self.adjust_steps(&autoderef);
+ if field.vis.is_accessible_from(def_scope, self.tcx) {
+ self.apply_adjustments(base, adjustments);
+ self.register_predicates(autoderef.into_obligations());
+
+ self.tcx.check_stability(field.did, Some(expr.hir_id), expr.span, None);
+ return field_ty;
+ }
+ private_candidate = Some((adjustments, base_def.did(), field_ty));
+ }
+ }
+ ty::Tuple(tys) => {
+ let fstr = field.as_str();
+ if let Ok(index) = fstr.parse::<usize>() {
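+ // Only accept the canonical spelling of the index: something like `01` parses
+ // to `1` but is rejected because it does not round-trip through `to_string`.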
+ if fstr == index.to_string() {
+ if let Some(&field_ty) = tys.get(index) {
+ let adjustments = self.adjust_steps(&autoderef);
+ self.apply_adjustments(base, adjustments);
+ self.register_predicates(autoderef.into_obligations());
+
+ self.write_field_index(expr.hir_id, index);
+ return field_ty;
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false));
+
+ if let Some((adjustments, did, field_ty)) = private_candidate {
+ // (#90483) apply adjustments to keep ExprUseVisitor from
+ // creating an erroneous projection.
+ self.apply_adjustments(base, adjustments);
+ self.ban_private_field_access(expr, base_ty, field, did);
+ return field_ty;
+ }
+
+ if field.name == kw::Empty {
+ } else if self.method_exists(field, base_ty, expr.hir_id, true) {
+ self.ban_take_value_of_method(expr, base_ty, field);
+ } else if !base_ty.is_primitive_ty() {
+ self.ban_nonexisting_field(field, base, expr, base_ty);
+ } else {
+ let field_name = field.to_string();
+ let mut err = type_error_struct!(
+ self.tcx().sess,
+ field.span,
+ base_ty,
+ E0610,
+ "`{base_ty}` is a primitive type and therefore doesn't have fields",
+ );
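+ // Recover field accesses on unsuffixed integer literals that were meant to be float
+ // literals, e.g. `1.e3` or `1.f32`, by suggesting a `0` after the period below.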
+ let is_valid_suffix = |field: &str| {
+ if field == "f32" || field == "f64" {
+ return true;
+ }
+ let mut chars = field.chars().peekable();
+ match chars.peek() {
+ Some('e') | Some('E') => {
+ chars.next();
+ if let Some(c) = chars.peek()
+ && !c.is_numeric() && *c != '-' && *c != '+'
+ {
+ return false;
+ }
+ while let Some(c) = chars.peek() {
+ if !c.is_numeric() {
+ break;
+ }
+ chars.next();
+ }
+ }
+ _ => (),
+ }
+ let suffix = chars.collect::<String>();
+ suffix.is_empty() || suffix == "f32" || suffix == "f64"
+ };
+ let maybe_partial_suffix = |field: &str| -> Option<&str> {
+ let first_chars = ['f', 'l'];
+ if field.len() >= 1
+ && field.to_lowercase().starts_with(first_chars)
+ && field[1..].chars().all(|c| c.is_ascii_digit())
+ {
+ if field.to_lowercase().starts_with(['f']) { Some("f32") } else { Some("f64") }
+ } else {
+ None
+ }
+ };
+ if let ty::Infer(ty::IntVar(_)) = base_ty.kind()
+ && let ExprKind::Lit(Spanned {
+ node: ast::LitKind::Int(_, ast::LitIntType::Unsuffixed),
+ ..
+ }) = base.kind
+ && !base.span.from_expansion()
+ {
+ if is_valid_suffix(&field_name) {
+ err.span_suggestion_verbose(
+ field.span.shrink_to_lo(),
+ "if intended to be a floating point literal, consider adding a `0` after the period",
+ '0',
+ Applicability::MaybeIncorrect,
+ );
+ } else if let Some(correct_suffix) = maybe_partial_suffix(&field_name) {
+ err.span_suggestion_verbose(
+ field.span,
+ format!("if intended to be a floating point literal, consider adding a `0` after the period and a `{correct_suffix}` suffix"),
+ format!("0{correct_suffix}"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ err.emit();
+ }
+
+ self.tcx().ty_error()
+ }
+
+ fn suggest_await_on_field_access(
+ &self,
+ err: &mut Diagnostic,
+ field_ident: Ident,
+ base: &'tcx hir::Expr<'tcx>,
+ ty: Ty<'tcx>,
+ ) {
+ let output_ty = match self.get_impl_future_output_ty(ty) {
+ Some(output_ty) => self.resolve_vars_if_possible(output_ty),
+ _ => return,
+ };
+ let mut add_label = true;
+ if let ty::Adt(def, _) = output_ty.skip_binder().kind() {
+ // no field access on enum type
+ if !def.is_enum() {
+ if def
+ .non_enum_variant()
+ .fields
+ .iter()
+ .any(|field| field.ident(self.tcx) == field_ident)
+ {
+ add_label = false;
+ err.span_label(
+ field_ident.span,
+ "field not available in `impl Future`, but it is available in its `Output`",
+ );
+ err.span_suggestion_verbose(
+ base.span.shrink_to_hi(),
+ "consider `await`ing on the `Future` and access the field of its `Output`",
+ ".await",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ if add_label {
+ err.span_label(field_ident.span, &format!("field not found in `{ty}`"));
+ }
+ }
+
+ fn ban_nonexisting_field(
+ &self,
+ ident: Ident,
+ base: &'tcx hir::Expr<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ base_ty: Ty<'tcx>,
+ ) {
+ debug!(
+ "ban_nonexisting_field: field={:?}, base={:?}, expr={:?}, base_ty={:?}",
+ ident, base, expr, base_ty
+ );
+ let mut err = self.no_such_field_err(ident, base_ty, base.hir_id);
+
+ match *base_ty.peel_refs().kind() {
+ ty::Array(_, len) => {
+ self.maybe_suggest_array_indexing(&mut err, expr, base, ident, len);
+ }
+ ty::RawPtr(..) => {
+ self.suggest_first_deref_field(&mut err, expr, base, ident);
+ }
+ ty::Adt(def, _) if !def.is_enum() => {
+ self.suggest_fields_on_recordish(&mut err, def, ident, expr.span);
+ }
+ ty::Param(param_ty) => {
+ self.point_at_param_definition(&mut err, param_ty);
+ }
+ ty::Opaque(_, _) => {
+ self.suggest_await_on_field_access(&mut err, ident, base, base_ty.peel_refs());
+ }
+ _ => {}
+ }
+
+ self.suggest_fn_call(&mut err, base, base_ty, |output_ty| {
+ if let ty::Adt(def, _) = output_ty.kind() && !def.is_enum() {
+ def.non_enum_variant().fields.iter().any(|field| {
+ field.ident(self.tcx) == ident
+ && field.vis.is_accessible_from(expr.hir_id.owner.def_id, self.tcx)
+ })
+ } else if let ty::Tuple(tys) = output_ty.kind()
+ && let Ok(idx) = ident.as_str().parse::<usize>()
+ {
+ idx < tys.len()
+ } else {
+ false
+ }
+ });
+
+ if ident.name == kw::Await {
+ // We know by construction that `<expr>.await` is either on Rust 2015
+ // or results in `ExprKind::Await`. Suggest switching the edition to 2018.
+ err.note("to `.await` a `Future`, switch to Rust 2018 or later");
+ err.help_use_latest_edition();
+ }
+
+ err.emit();
+ }
+
+ fn ban_private_field_access(
+ &self,
+ expr: &hir::Expr<'_>,
+ expr_t: Ty<'tcx>,
+ field: Ident,
+ base_did: DefId,
+ ) {
+ let struct_path = self.tcx().def_path_str(base_did);
+ let kind_name = self.tcx().def_kind(base_did).descr(base_did);
+ let mut err = struct_span_err!(
+ self.tcx().sess,
+ field.span,
+ E0616,
+ "field `{field}` of {kind_name} `{struct_path}` is private",
+ );
+ err.span_label(field.span, "private field");
+ // Also check if an accessible method exists, which is often what is meant.
+ if self.method_exists(field, expr_t, expr.hir_id, false) && !self.expr_in_place(expr.hir_id)
+ {
+ self.suggest_method_call(
+ &mut err,
+ &format!("a method `{field}` also exists, call it with parentheses"),
+ field,
+ expr_t,
+ expr,
+ None,
+ );
+ }
+ err.emit();
+ }
+
+ fn ban_take_value_of_method(&self, expr: &hir::Expr<'_>, expr_t: Ty<'tcx>, field: Ident) {
+ let mut err = type_error_struct!(
+ self.tcx().sess,
+ field.span,
+ expr_t,
+ E0615,
+ "attempted to take value of method `{field}` on type `{expr_t}`",
+ );
+ err.span_label(field.span, "method, not a field");
+ let expr_is_call =
+ if let hir::Node::Expr(hir::Expr { kind: ExprKind::Call(callee, _args), .. }) =
+ self.tcx.hir().get(self.tcx.hir().get_parent_node(expr.hir_id))
+ {
+ expr.hir_id == callee.hir_id
+ } else {
+ false
+ };
+ let expr_snippet =
+ self.tcx.sess.source_map().span_to_snippet(expr.span).unwrap_or_default();
+ let is_wrapped = expr_snippet.starts_with('(') && expr_snippet.ends_with(')');
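+ // Byte positions just inside the (possibly) wrapping parentheses.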
+ let after_open = expr.span.lo() + rustc_span::BytePos(1);
+ let before_close = expr.span.hi() - rustc_span::BytePos(1);
+
+ if expr_is_call && is_wrapped {
+ err.multipart_suggestion(
+ "remove wrapping parentheses to call the method",
+ vec![
+ (expr.span.with_hi(after_open), String::new()),
+ (expr.span.with_lo(before_close), String::new()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ } else if !self.expr_in_place(expr.hir_id) {
+ // Suggest call parentheses inside the wrapping parentheses
+ let span = if is_wrapped {
+ expr.span.with_lo(after_open).with_hi(before_close)
+ } else {
+ expr.span
+ };
+ self.suggest_method_call(
+ &mut err,
+ "use parentheses to call the method",
+ field,
+ expr_t,
+ expr,
+ Some(span),
+ );
+ } else if let ty::RawPtr(ty_and_mut) = expr_t.kind()
+ && let ty::Adt(adt_def, _) = ty_and_mut.ty.kind()
+ && let ExprKind::Field(base_expr, _) = expr.kind
+ && adt_def.variants().len() == 1
+ && adt_def
+ .variants()
+ .iter()
+ .next()
+ .unwrap()
+ .fields
+ .iter()
+ .any(|f| f.ident(self.tcx) == field)
+ {
+ err.multipart_suggestion(
+ "to access the field, dereference first",
+ vec![
+ (base_expr.span.shrink_to_lo(), "(*".to_string()),
+ (base_expr.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.help("methods are immutable and cannot be assigned to");
+ }
+
+ err.emit();
+ }
+
+ fn point_at_param_definition(&self, err: &mut Diagnostic, param: ty::ParamTy) {
+ let generics = self.tcx.generics_of(self.body_id.owner.to_def_id());
+ let generic_param = generics.type_param(&param, self.tcx);
+ if let ty::GenericParamDefKind::Type { synthetic: true, .. } = generic_param.kind {
+ return;
+ }
+ let param_def_id = generic_param.def_id;
+ let param_hir_id = match param_def_id.as_local() {
+ Some(x) => self.tcx.hir().local_def_id_to_hir_id(x),
+ None => return,
+ };
+ let param_span = self.tcx.hir().span(param_hir_id);
+ let param_name = self.tcx.hir().ty_param_name(param_def_id.expect_local());
+
+ err.span_label(param_span, &format!("type parameter '{param_name}' declared here"));
+ }
+
+ fn suggest_fields_on_recordish(
+ &self,
+ err: &mut Diagnostic,
+ def: ty::AdtDef<'tcx>,
+ field: Ident,
+ access_span: Span,
+ ) {
+ if let Some(suggested_field_name) =
+ self.suggest_field_name(def.non_enum_variant(), field.name, vec![], access_span)
+ {
+ err.span_suggestion(
+ field.span,
+ "a field with a similar name exists",
+ suggested_field_name,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_label(field.span, "unknown field");
+ let struct_variant_def = def.non_enum_variant();
+ let field_names = self.available_field_names(struct_variant_def, access_span);
+ if !field_names.is_empty() {
+ err.note(&format!(
+ "available fields are: {}",
+ self.name_series_display(field_names),
+ ));
+ }
+ }
+ }
+
+ fn maybe_suggest_array_indexing(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ base: &hir::Expr<'_>,
+ field: Ident,
+ len: ty::Const<'tcx>,
+ ) {
+ if let (Some(len), Ok(user_index)) =
+ (len.try_eval_usize(self.tcx, self.param_env), field.as_str().parse::<u64>())
+ && let Ok(base) = self.tcx.sess.source_map().span_to_snippet(base.span)
+ {
+ let help = "instead of using tuple indexing, use array indexing";
+ let suggestion = format!("{base}[{field}]");
+ let applicability = if len < user_index {
+ Applicability::MachineApplicable
+ } else {
+ Applicability::MaybeIncorrect
+ };
+ err.span_suggestion(expr.span, help, suggestion, applicability);
+ }
+ }
+
+ fn suggest_first_deref_field(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ base: &hir::Expr<'_>,
+ field: Ident,
+ ) {
+ if let Ok(base) = self.tcx.sess.source_map().span_to_snippet(base.span) {
+ let msg = format!("`{base}` is a raw pointer; try dereferencing it");
+ let suggestion = format!("(*{base}).{field}");
+ err.span_suggestion(expr.span, &msg, suggestion, Applicability::MaybeIncorrect);
+ }
+ }
+
+ fn no_such_field_err(
+ &self,
+ field: Ident,
+ expr_t: Ty<'tcx>,
+ id: HirId,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let span = field.span;
+ debug!("no_such_field_err(span: {:?}, field: {:?}, expr_t: {:?})", span, field, expr_t);
+
+ let mut err = type_error_struct!(
+ self.tcx().sess,
+ field.span,
+ expr_t,
+ E0609,
+ "no field `{field}` on type `{expr_t}`",
+ );
+
+ // try to add a suggestion in case the field is a nested field of a field of the Adt
+ let mod_id = self.tcx.parent_module(id).to_def_id();
+ if let Some((fields, substs)) =
+ self.get_field_candidates_considering_privacy(span, expr_t, mod_id)
+ {
+ let candidate_fields: Vec<_> = fields
+ .filter_map(|candidate_field| {
+ self.check_for_nested_field_satisfying(
+ span,
+ &|candidate_field, _| candidate_field.ident(self.tcx()) == field,
+ candidate_field,
+ substs,
+ vec![],
+ mod_id,
+ )
+ })
+ .map(|mut field_path| {
+ field_path.pop();
+ field_path
+ .iter()
+ .map(|id| id.name.to_ident_string())
+ .collect::<Vec<String>>()
+ .join(".")
+ })
+ .collect::<Vec<_>>();
+
+ let len = candidate_fields.len();
+ if len > 0 {
+ err.span_suggestions(
+ field.span.shrink_to_lo(),
+ format!(
+ "{} of the expressions' fields {} a field of the same name",
+ if len > 1 { "some" } else { "one" },
+ if len > 1 { "have" } else { "has" },
+ ),
+ candidate_fields.iter().map(|path| format!("{path}.")),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ err
+ }
+
+ pub(crate) fn get_field_candidates_considering_privacy(
+ &self,
+ span: Span,
+ base_ty: Ty<'tcx>,
+ mod_id: DefId,
+ ) -> Option<(impl Iterator<Item = &'tcx ty::FieldDef> + 'tcx, SubstsRef<'tcx>)> {
+ debug!("get_field_candidates(span: {:?}, base_t: {:?}", span, base_ty);
+
+ for (base_t, _) in self.autoderef(span, base_ty) {
+ match base_t.kind() {
+ ty::Adt(base_def, substs) if !base_def.is_enum() => {
+ let tcx = self.tcx;
+ let fields = &base_def.non_enum_variant().fields;
+ // Some structs, e.g. some that impl `Deref`, have all private fields
+ // because you're expected to deref them to access the _real_ fields.
+ // This, for example, will help us suggest accessing a field through a `Box<T>`.
+ if fields.iter().all(|field| !field.vis.is_accessible_from(mod_id, tcx)) {
+ continue;
+ }
+ return Some((
+ fields
+ .iter()
+ .filter(move |field| field.vis.is_accessible_from(mod_id, tcx))
+ // For compile-time reasons, put a limit on the number of fields we search
+ .take(100),
+ substs,
+ ));
+ }
+ _ => {}
+ }
+ }
+ None
+ }
+
+ /// This method is called after we have encountered a missing field error to recursively
+ /// search for the field
+ pub(crate) fn check_for_nested_field_satisfying(
+ &self,
+ span: Span,
+ matches: &impl Fn(&ty::FieldDef, Ty<'tcx>) -> bool,
+ candidate_field: &ty::FieldDef,
+ subst: SubstsRef<'tcx>,
+ mut field_path: Vec<Ident>,
+ mod_id: DefId,
+ ) -> Option<Vec<Ident>> {
+ debug!(
+ "check_for_nested_field_satisfying(span: {:?}, candidate_field: {:?}, field_path: {:?}",
+ span, candidate_field, field_path
+ );
+
+ if field_path.len() > 3 {
+ // For compile-time reasons and to avoid infinite recursion we only check for fields
+ // up to a depth of three
+ None
+ } else {
+ field_path.push(candidate_field.ident(self.tcx).normalize_to_macros_2_0());
+ let field_ty = candidate_field.ty(self.tcx, subst);
+ if matches(candidate_field, field_ty) {
+ return Some(field_path);
+ } else if let Some((nested_fields, subst)) =
+ self.get_field_candidates_considering_privacy(span, field_ty, mod_id)
+ {
+ // recursively search fields of `candidate_field` if it's a ty::Adt
+ for field in nested_fields {
+ if let Some(field_path) = self.check_for_nested_field_satisfying(
+ span,
+ matches,
+ field,
+ subst,
+ field_path.clone(),
+ mod_id,
+ ) {
+ return Some(field_path);
+ }
+ }
+ }
+ None
+ }
+ }
+
+ fn check_expr_index(
+ &self,
+ base: &'tcx hir::Expr<'tcx>,
+ idx: &'tcx hir::Expr<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let base_t = self.check_expr(&base);
+ let idx_t = self.check_expr(&idx);
+
+ if base_t.references_error() {
+ base_t
+ } else if idx_t.references_error() {
+ idx_t
+ } else {
+ let base_t = self.structurally_resolved_type(base.span, base_t);
+ match self.lookup_indexing(expr, base, base_t, idx, idx_t) {
+ Some((index_ty, element_ty)) => {
+ // two-phase not needed because index_ty is never mutable
+ self.demand_coerce(idx, idx_t, index_ty, None, AllowTwoPhase::No);
+ self.select_obligations_where_possible(false, |errors| {
+ self.point_at_index_if_possible(errors, idx.span)
+ });
+ element_ty
+ }
+ None => {
+ let mut err = type_error_struct!(
+ self.tcx.sess,
+ expr.span,
+ base_t,
+ E0608,
+ "cannot index into a value of type `{base_t}`",
+ );
+ // Try to give some advice about indexing tuples.
+ if let ty::Tuple(..) = base_t.kind() {
+ let mut needs_note = true;
+ // If the index is an integer, we can show the actual
+ // fixed expression:
+ if let ExprKind::Lit(ref lit) = idx.kind {
+ if let ast::LitKind::Int(i, ast::LitIntType::Unsuffixed) = lit.node {
+ let snip = self.tcx.sess.source_map().span_to_snippet(base.span);
+ if let Ok(snip) = snip {
+ err.span_suggestion(
+ expr.span,
+ "to access tuple elements, use",
+ format!("{snip}.{i}"),
+ Applicability::MachineApplicable,
+ );
+ needs_note = false;
+ }
+ }
+ }
+ if needs_note {
+ err.help(
+ "to access tuple elements, use tuple indexing \
+ syntax (e.g., `tuple.0`)",
+ );
+ }
+ }
+ err.emit();
+ self.tcx.ty_error()
+ }
+ }
+ }
+ }
+
+ fn point_at_index_if_possible(
+ &self,
+ errors: &mut Vec<traits::FulfillmentError<'tcx>>,
+ span: Span,
+ ) {
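+ // Re-point fulfillment errors for `SliceIndex` obligations at the index expression's span.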
+ for error in errors {
+ match error.obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(predicate)
+ if self.tcx.is_diagnostic_item(sym::SliceIndex, predicate.trait_ref.def_id) => {
+ }
+ _ => continue,
+ }
+ error.obligation.cause.span = span;
+ }
+ }
+
+ fn check_expr_yield(
+ &self,
+ value: &'tcx hir::Expr<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ src: &'tcx hir::YieldSource,
+ ) -> Ty<'tcx> {
+ match self.resume_yield_tys {
+ Some((resume_ty, yield_ty)) => {
+ self.check_expr_coercable_to_type(&value, yield_ty, None);
+
+ resume_ty
+ }
+ // Given that this `yield` expression was generated as a result of lowering a `.await`,
+ // we know that the yield type must be `()`; however, the context won't contain this
+ // information. Hence, we check the source of the yield expression here and check its
+ // value's type against `()` (this check should always hold).
+ None if src.is_await() => {
+ self.check_expr_coercable_to_type(&value, self.tcx.mk_unit(), None);
+ self.tcx.mk_unit()
+ }
+ _ => {
+ self.tcx.sess.emit_err(YieldExprOutsideOfGenerator { span: expr.span });
+ // Avoid expressions without types during writeback (#78653).
+ self.check_expr(value);
+ self.tcx.mk_unit()
+ }
+ }
+ }
+
+ fn check_expr_asm_operand(&self, expr: &'tcx hir::Expr<'tcx>, is_input: bool) {
+ let needs = if is_input { Needs::None } else { Needs::MutPlace };
+ let ty = self.check_expr_with_needs(expr, needs);
+ self.require_type_is_sized(ty, expr.span, traits::InlineAsmSized);
+
+ if !is_input && !expr.is_syntactic_place_expr() {
+ let mut err = self.tcx.sess.struct_span_err(expr.span, "invalid asm output");
+ err.span_label(expr.span, "cannot assign to this expression");
+ err.emit();
+ }
+
+ // If this is an input value, we require its type to be fully resolved
+ // at this point. This allows us to provide helpful coercions which help
+ // pass the type candidate list in a later pass.
+ //
+ // We don't require output types to be resolved at this point, which
+ // allows them to be inferred based on how they are used later in the
+ // function.
+ if is_input {
+ let ty = self.structurally_resolved_type(expr.span, ty);
+ match *ty.kind() {
+ ty::FnDef(..) => {
+ let fnptr_ty = self.tcx.mk_fn_ptr(ty.fn_sig(self.tcx));
+ self.demand_coerce(expr, ty, fnptr_ty, None, AllowTwoPhase::No);
+ }
+ ty::Ref(_, base_ty, mutbl) => {
+ let ptr_ty = self.tcx.mk_ptr(ty::TypeAndMut { ty: base_ty, mutbl });
+ self.demand_coerce(expr, ty, ptr_ty, None, AllowTwoPhase::No);
+ }
+ _ => {}
+ }
+ }
+ }
+
+ fn check_expr_asm(&self, asm: &'tcx hir::InlineAsm<'tcx>) -> Ty<'tcx> {
+ for (op, _op_sp) in asm.operands {
+ match op {
+ hir::InlineAsmOperand::In { expr, .. } => {
+ self.check_expr_asm_operand(expr, true);
+ }
+ hir::InlineAsmOperand::Out { expr: Some(expr), .. }
+ | hir::InlineAsmOperand::InOut { expr, .. } => {
+ self.check_expr_asm_operand(expr, false);
+ }
+ hir::InlineAsmOperand::Out { expr: None, .. } => {}
+ hir::InlineAsmOperand::SplitInOut { in_expr, out_expr, .. } => {
+ self.check_expr_asm_operand(in_expr, true);
+ if let Some(out_expr) = out_expr {
+ self.check_expr_asm_operand(out_expr, false);
+ }
+ }
+ // `AnonConst`s have their own body, which is type-checked separately.
+ // As they don't flow into the type system we don't need them to
+ // be well-formed.
+ hir::InlineAsmOperand::Const { .. } | hir::InlineAsmOperand::SymFn { .. } => {}
+ hir::InlineAsmOperand::SymStatic { .. } => {}
+ }
+ }
+ if asm.options.contains(ast::InlineAsmOptions::NORETURN) {
+ self.tcx.types.never
+ } else {
+ self.tcx.mk_unit()
+ }
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/expr_use_visitor.rs b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs
new file mode 100644
index 000000000..fce2a5888
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs
@@ -0,0 +1,908 @@
+//! A different sort of visitor for walking fn bodies. Unlike the
+//! normal visitor, which just walks the entire body in one shot, the
+//! `ExprUseVisitor` determines how expressions are being used.
+
+use std::slice::from_ref;
+
+use hir::def::DefKind;
+use hir::Expr;
+// Export these here so that Clippy can use them.
+pub use rustc_middle::hir::place::{Place, PlaceBase, PlaceWithHirId, Projection};
+
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::PatKind;
+use rustc_index::vec::Idx;
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::hir::place::ProjectionKind;
+use rustc_middle::mir::FakeReadCause;
+use rustc_middle::ty::{self, adjustment, AdtKind, Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+use ty::BorrowKind::ImmBorrow;
+
+use crate::mem_categorization as mc;
+
+/// This trait defines the callbacks you can expect to receive when
+/// employing the ExprUseVisitor.
+pub trait Delegate<'tcx> {
+ /// The value found at `place` is moved.
+ /// `diag_expr_id` is the id used for diagnostics for `place`.
+ ///
+ /// Use of a `Copy` type in a ByValue context is considered a use
+ /// by `ImmBorrow` and `borrow` is called instead. This is because
+ /// a shared borrow is the "minimum access" that would be needed
+ /// to perform a copy.
+ ///
+ /// The parameter `diag_expr_id` indicates the HIR id that ought to be used for
+ /// diagnostics. Around pattern matching such as `let pat = expr`, the diagnostic
+ /// id will be the id of the expression `expr` but the place itself will have
+ /// the id of the binding in the pattern `pat`.
+ fn consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId);
+
+ /// The value found at `place` is being borrowed with kind `bk`.
+ /// `diag_expr_id` is the id used for diagnostics (see `consume` for more details).
+ fn borrow(
+ &mut self,
+ place_with_id: &PlaceWithHirId<'tcx>,
+ diag_expr_id: hir::HirId,
+ bk: ty::BorrowKind,
+ );
+
+ /// The value found at `place` is being copied.
+ /// `diag_expr_id` is the id used for diagnostics (see `consume` for more details).
+ fn copy(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
+ // In most cases, copying data from `x` is equivalent to doing `*&x`, so by default
+ // we treat a copy of `x` as a borrow of `x`.
+ self.borrow(place_with_id, diag_expr_id, ty::BorrowKind::ImmBorrow)
+ }
+
+ /// The path at `assignee_place` is being assigned to.
+ /// `diag_expr_id` is the id used for diagnostics (see `consume` for more details).
+ fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId);
+
+ /// The path at `binding_place` is a binding that is being initialized.
+ ///
+ /// This covers cases such as `let x = 42;`
+ fn bind(&mut self, binding_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
+ // Bindings can normally be treated as a regular assignment, so by default we
+ // forward this to the mutate callback.
+ self.mutate(binding_place, diag_expr_id)
+ }
+
+ /// The `place` should be a fake read because of specified `cause`.
+ fn fake_read(
+ &mut self,
+ place_with_id: &PlaceWithHirId<'tcx>,
+ cause: FakeReadCause,
+ diag_expr_id: hir::HirId,
+ );
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+enum ConsumeMode {
+ /// reference to x where x has a type that copies
+ Copy,
+ /// reference to x where x has a type that moves
+ Move,
+}
+
+/// The ExprUseVisitor type
+///
+/// This is the code that actually walks the tree.
+pub struct ExprUseVisitor<'a, 'tcx> {
+ mc: mc::MemCategorizationContext<'a, 'tcx>,
+ body_owner: LocalDefId,
+ delegate: &'a mut dyn Delegate<'tcx>,
+}
+
+/// If the MC results in an error, it's because the type check
+/// failed (or will fail, when the error is uncovered and reported
+/// during writeback). In this case, we just ignore this part of the
+/// code.
+///
+/// Note that this macro appears similar to try!(), but, unlike try!(),
+/// it does not propagate the error.
+macro_rules! return_if_err {
+ ($inp: expr) => {
+ match $inp {
+ Ok(v) => v,
+ Err(()) => {
+ debug!("mc reported err");
+ return;
+ }
+ }
+ };
+}
+
+impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> {
+ /// Creates the ExprUseVisitor, configuring it with the various options provided:
+ ///
+ /// - `delegate` -- who receives the callbacks
+ /// - `param_env` --- parameter environment for trait lookups (esp. pertaining to `Copy`)
+ /// - `typeck_results` --- typeck results for the code being analyzed
+ pub fn new(
+ delegate: &'a mut (dyn Delegate<'tcx> + 'a),
+ infcx: &'a InferCtxt<'tcx>,
+ body_owner: LocalDefId,
+ param_env: ty::ParamEnv<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> Self {
+ ExprUseVisitor {
+ mc: mc::MemCategorizationContext::new(infcx, param_env, body_owner, typeck_results),
+ body_owner,
+ delegate,
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub fn consume_body(&mut self, body: &hir::Body<'_>) {
+ for param in body.params {
+ let param_ty = return_if_err!(self.mc.pat_ty_adjusted(param.pat));
+ debug!("consume_body: param_ty = {:?}", param_ty);
+
+ let param_place = self.mc.cat_rvalue(param.hir_id, param.pat.span, param_ty);
+
+ self.walk_irrefutable_pat(&param_place, param.pat);
+ }
+
+ self.consume_expr(&body.value);
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.mc.tcx()
+ }
+
+ fn delegate_consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
+ delegate_consume(&self.mc, self.delegate, place_with_id, diag_expr_id)
+ }
+
+ fn consume_exprs(&mut self, exprs: &[hir::Expr<'_>]) {
+ for expr in exprs {
+ self.consume_expr(expr);
+ }
+ }
+
+ pub fn consume_expr(&mut self, expr: &hir::Expr<'_>) {
+ debug!("consume_expr(expr={:?})", expr);
+
+ let place_with_id = return_if_err!(self.mc.cat_expr(expr));
+ self.delegate_consume(&place_with_id, place_with_id.hir_id);
+ self.walk_expr(expr);
+ }
+
+ fn mutate_expr(&mut self, expr: &hir::Expr<'_>) {
+ let place_with_id = return_if_err!(self.mc.cat_expr(expr));
+ self.delegate.mutate(&place_with_id, place_with_id.hir_id);
+ self.walk_expr(expr);
+ }
+
+ fn borrow_expr(&mut self, expr: &hir::Expr<'_>, bk: ty::BorrowKind) {
+ debug!("borrow_expr(expr={:?}, bk={:?})", expr, bk);
+
+ let place_with_id = return_if_err!(self.mc.cat_expr(expr));
+ self.delegate.borrow(&place_with_id, place_with_id.hir_id, bk);
+
+ self.walk_expr(expr)
+ }
+
+ fn select_from_expr(&mut self, expr: &hir::Expr<'_>) {
+ self.walk_expr(expr)
+ }
+
+ pub fn walk_expr(&mut self, expr: &hir::Expr<'_>) {
+ debug!("walk_expr(expr={:?})", expr);
+
+ self.walk_adjustment(expr);
+
+ match expr.kind {
+ hir::ExprKind::Path(_) => {}
+
+ hir::ExprKind::Type(subexpr, _) => self.walk_expr(subexpr),
+
+ hir::ExprKind::Unary(hir::UnOp::Deref, base) => {
+ // *base
+ self.select_from_expr(base);
+ }
+
+ hir::ExprKind::Field(base, _) => {
+ // base.f
+ self.select_from_expr(base);
+ }
+
+ hir::ExprKind::Index(lhs, rhs) => {
+ // lhs[rhs]
+ self.select_from_expr(lhs);
+ self.consume_expr(rhs);
+ }
+
+ hir::ExprKind::Call(callee, args) => {
+ // callee(args)
+ self.consume_expr(callee);
+ self.consume_exprs(args);
+ }
+
+ hir::ExprKind::MethodCall(.., receiver, args, _) => {
+ // callee.m(args)
+ self.consume_expr(receiver);
+ self.consume_exprs(args);
+ }
+
+ hir::ExprKind::Struct(_, fields, ref opt_with) => {
+ self.walk_struct_expr(fields, opt_with);
+ }
+
+ hir::ExprKind::Tup(exprs) => {
+ self.consume_exprs(exprs);
+ }
+
+ hir::ExprKind::If(ref cond_expr, ref then_expr, ref opt_else_expr) => {
+ self.consume_expr(cond_expr);
+ self.consume_expr(then_expr);
+ if let Some(ref else_expr) = *opt_else_expr {
+ self.consume_expr(else_expr);
+ }
+ }
+
+ hir::ExprKind::Let(hir::Let { pat, init, .. }) => {
+ self.walk_local(init, pat, None, |t| t.borrow_expr(init, ty::ImmBorrow))
+ }
+
+ hir::ExprKind::Match(ref discr, arms, _) => {
+ let discr_place = return_if_err!(self.mc.cat_expr(discr));
+ self.maybe_read_scrutinee(
+ discr,
+ discr_place.clone(),
+ arms.iter().map(|arm| arm.pat),
+ );
+
+ // treatment of the discriminant is handled while walking the arms.
+ for arm in arms {
+ self.walk_arm(&discr_place, arm);
+ }
+ }
+
+ hir::ExprKind::Array(exprs) => {
+ self.consume_exprs(exprs);
+ }
+
+ hir::ExprKind::AddrOf(_, m, ref base) => {
+ // &base
+ // make sure that the thing we are pointing to stays valid
+ // for the lifetime `scope_r` of the resulting ptr:
+ let bk = ty::BorrowKind::from_mutbl(m);
+ self.borrow_expr(base, bk);
+ }
+
+ hir::ExprKind::InlineAsm(asm) => {
+ for (op, _op_sp) in asm.operands {
+ match op {
+ hir::InlineAsmOperand::In { expr, .. } => self.consume_expr(expr),
+ hir::InlineAsmOperand::Out { expr: Some(expr), .. }
+ | hir::InlineAsmOperand::InOut { expr, .. } => {
+ self.mutate_expr(expr);
+ }
+ hir::InlineAsmOperand::SplitInOut { in_expr, out_expr, .. } => {
+ self.consume_expr(in_expr);
+ if let Some(out_expr) = out_expr {
+ self.mutate_expr(out_expr);
+ }
+ }
+ hir::InlineAsmOperand::Out { expr: None, .. }
+ | hir::InlineAsmOperand::Const { .. }
+ | hir::InlineAsmOperand::SymFn { .. }
+ | hir::InlineAsmOperand::SymStatic { .. } => {}
+ }
+ }
+ }
+
+ hir::ExprKind::Continue(..)
+ | hir::ExprKind::Lit(..)
+ | hir::ExprKind::ConstBlock(..)
+ | hir::ExprKind::Err => {}
+
+ hir::ExprKind::Loop(blk, ..) => {
+ self.walk_block(blk);
+ }
+
+ hir::ExprKind::Unary(_, lhs) => {
+ self.consume_expr(lhs);
+ }
+
+ hir::ExprKind::Binary(_, lhs, rhs) => {
+ self.consume_expr(lhs);
+ self.consume_expr(rhs);
+ }
+
+ hir::ExprKind::Block(blk, _) => {
+ self.walk_block(blk);
+ }
+
+ hir::ExprKind::Break(_, ref opt_expr) | hir::ExprKind::Ret(ref opt_expr) => {
+ if let Some(expr) = *opt_expr {
+ self.consume_expr(expr);
+ }
+ }
+
+ hir::ExprKind::Assign(lhs, rhs, _) => {
+ self.mutate_expr(lhs);
+ self.consume_expr(rhs);
+ }
+
+ hir::ExprKind::Cast(base, _) => {
+ self.consume_expr(base);
+ }
+
+ hir::ExprKind::DropTemps(expr) => {
+ self.consume_expr(expr);
+ }
+
+ hir::ExprKind::AssignOp(_, lhs, rhs) => {
+ if self.mc.typeck_results.is_method_call(expr) {
+ self.consume_expr(lhs);
+ } else {
+ self.mutate_expr(lhs);
+ }
+ self.consume_expr(rhs);
+ }
+
+ hir::ExprKind::Repeat(base, _) => {
+ self.consume_expr(base);
+ }
+
+ hir::ExprKind::Closure { .. } => {
+ self.walk_captures(expr);
+ }
+
+ hir::ExprKind::Box(ref base) => {
+ self.consume_expr(base);
+ }
+
+ hir::ExprKind::Yield(value, _) => {
+ self.consume_expr(value);
+ }
+ }
+ }
+
+ fn walk_stmt(&mut self, stmt: &hir::Stmt<'_>) {
+ match stmt.kind {
+ hir::StmtKind::Local(hir::Local { pat, init: Some(expr), els, .. }) => {
+ self.walk_local(expr, pat, *els, |_| {})
+ }
+
+ hir::StmtKind::Local(_) => {}
+
+ hir::StmtKind::Item(_) => {
+ // We don't visit nested items in this visitor,
+ // only the fn body we were given.
+ }
+
+ hir::StmtKind::Expr(ref expr) | hir::StmtKind::Semi(ref expr) => {
+ self.consume_expr(expr);
+ }
+ }
+ }
+
+ fn maybe_read_scrutinee<'t>(
+ &mut self,
+ discr: &Expr<'_>,
+ discr_place: PlaceWithHirId<'tcx>,
+ pats: impl Iterator<Item = &'t hir::Pat<'t>>,
+ ) {
+ // Matching should not always be considered a use of the place, hence
+ // discr does not necessarily need to be borrowed.
+ // We only want to borrow discr if the patterns contain something other
+ // than wildcards.
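+ // e.g. `match x { _ => {} }` need not read `x`, but `match x { 0 => {}, _ => {} }` must.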
+ let ExprUseVisitor { ref mc, body_owner: _, delegate: _ } = *self;
+ let mut needs_to_be_read = false;
+ for pat in pats {
+ return_if_err!(mc.cat_pattern(discr_place.clone(), pat, |place, pat| {
+ match &pat.kind {
+ PatKind::Binding(.., opt_sub_pat) => {
+ // If the opt_sub_pat is None, then the binding does not count as
+ // a wildcard for the purpose of borrowing discr.
+ if opt_sub_pat.is_none() {
+ needs_to_be_read = true;
+ }
+ }
+ PatKind::Path(qpath) => {
+ // A `Path` pattern is just a name like `Foo`. This is either a
+ // named constant or else it refers to an ADT variant
+
+ let res = self.mc.typeck_results.qpath_res(qpath, pat.hir_id);
+ match res {
+ Res::Def(DefKind::Const, _) | Res::Def(DefKind::AssocConst, _) => {
+ // Named constants have to be equated with the value
+ // being matched, so that's a read of the value being matched.
+ //
+ // FIXME: We don't actually need reads for ZSTs.
+ needs_to_be_read = true;
+ }
+ _ => {
+ // Otherwise, this is a struct/enum variant, and so it's
+ // only a read if we need to read the discriminant.
+ needs_to_be_read |= is_multivariant_adt(place.place.ty());
+ }
+ }
+ }
+ PatKind::TupleStruct(..) | PatKind::Struct(..) | PatKind::Tuple(..) => {
+ // For `Foo(..)`, `Foo { ... }` and `(...)` patterns, check if we are matching
+ // against a multivariant enum or struct. In that case, we have to read
+ // the discriminant. Otherwise this kind of pattern doesn't actually
+ // read anything (we'll get invoked for the `...`, which may indeed
+ // perform some reads).
+
+ let place_ty = place.place.ty();
+ needs_to_be_read |= is_multivariant_adt(place_ty);
+ }
+ PatKind::Lit(_) | PatKind::Range(..) => {
+ // If the PatKind is a Lit or a Range then we want
+ // to borrow discr.
+ needs_to_be_read = true;
+ }
+ PatKind::Or(_)
+ | PatKind::Box(_)
+ | PatKind::Slice(..)
+ | PatKind::Ref(..)
+ | PatKind::Wild => {
+ // If the PatKind is Or, Box, Slice or Ref, the decision is made later
+ // as these patterns contain subpatterns.
+ // If the PatKind is Wild, the decision is made based on the other patterns being
+ // examined
+ }
+ }
+ }));
+ }
+
+ if needs_to_be_read {
+ self.borrow_expr(discr, ty::ImmBorrow);
+ } else {
+ let closure_def_id = match discr_place.place.base {
+ PlaceBase::Upvar(upvar_id) => Some(upvar_id.closure_expr_id),
+ _ => None,
+ };
+
+ self.delegate.fake_read(
+ &discr_place,
+ FakeReadCause::ForMatchedPlace(closure_def_id),
+ discr_place.hir_id,
+ );
+
+ // We always want to walk the discriminant. We want to make sure, for instance,
+ // that the discriminant has been initialized.
+ self.walk_expr(discr);
+ }
+ }
+
+ fn walk_local<F>(
+ &mut self,
+ expr: &hir::Expr<'_>,
+ pat: &hir::Pat<'_>,
+ els: Option<&hir::Block<'_>>,
+ mut f: F,
+ ) where
+ F: FnMut(&mut Self),
+ {
+ self.walk_expr(expr);
+ let expr_place = return_if_err!(self.mc.cat_expr(expr));
+ f(self);
+ if let Some(els) = els {
+ // borrowing because we need to test the discriminant
+ self.maybe_read_scrutinee(expr, expr_place.clone(), from_ref(pat).iter());
+ self.walk_block(els)
+ }
+ self.walk_irrefutable_pat(&expr_place, &pat);
+ }
+
+ /// Indicates that the value of `blk` will be consumed, meaning either copied or moved
+ /// depending on its type.
+ fn walk_block(&mut self, blk: &hir::Block<'_>) {
+ debug!("walk_block(blk.hir_id={})", blk.hir_id);
+
+ for stmt in blk.stmts {
+ self.walk_stmt(stmt);
+ }
+
+ if let Some(ref tail_expr) = blk.expr {
+ self.consume_expr(tail_expr);
+ }
+ }
+
+ fn walk_struct_expr<'hir>(
+ &mut self,
+ fields: &[hir::ExprField<'_>],
+ opt_with: &Option<&'hir hir::Expr<'_>>,
+ ) {
+ // Consume the expressions supplying values for each field.
+ for field in fields {
+ self.consume_expr(field.expr);
+ }
+
+ let with_expr = match *opt_with {
+ Some(w) => &*w,
+ None => {
+ return;
+ }
+ };
+
+ let with_place = return_if_err!(self.mc.cat_expr(with_expr));
+
+ // Select just those fields of the `with`
+ // expression that will actually be used
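+ // For example, in `S { a: f(), ..base }` only the fields of `base` other than `a`
+ // are consumed here; `a` itself was already consumed above via `f()`.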
+ match with_place.place.ty().kind() {
+ ty::Adt(adt, substs) if adt.is_struct() => {
+ // Consume those fields of the with expression that are needed.
+ for (f_index, with_field) in adt.non_enum_variant().fields.iter().enumerate() {
+ let is_mentioned = fields.iter().any(|f| {
+ self.tcx().field_index(f.hir_id, self.mc.typeck_results) == f_index
+ });
+ if !is_mentioned {
+ let field_place = self.mc.cat_projection(
+ &*with_expr,
+ with_place.clone(),
+ with_field.ty(self.tcx(), substs),
+ ProjectionKind::Field(f_index as u32, VariantIdx::new(0)),
+ );
+ self.delegate_consume(&field_place, field_place.hir_id);
+ }
+ }
+ }
+ _ => {
+ // the base expression should always evaluate to a
+ // struct; however, when EUV is run during typeck, it
+ // may not. This will generate an error earlier in typeck,
+ // so we can just ignore it.
+ if self.tcx().sess.has_errors().is_none() {
+ span_bug!(with_expr.span, "with expression doesn't evaluate to a struct");
+ }
+ }
+ }
+
+ // walk the with expression so that complex expressions
+ // are properly handled.
+ self.walk_expr(with_expr);
+ }
+
+ /// Invoke the appropriate delegate calls for anything that gets
+ /// consumed or borrowed as part of the automatic adjustment
+ /// process.
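+ ///
+ /// For example, for a call like `v.len()` on a `Vec`, the overloaded autoderef and
+ /// the autoref recorded on `v` are both reported here as shared borrows of `v`.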
+ fn walk_adjustment(&mut self, expr: &hir::Expr<'_>) {
+ let adjustments = self.mc.typeck_results.expr_adjustments(expr);
+ let mut place_with_id = return_if_err!(self.mc.cat_expr_unadjusted(expr));
+ for adjustment in adjustments {
+ debug!("walk_adjustment expr={:?} adj={:?}", expr, adjustment);
+ match adjustment.kind {
+ adjustment::Adjust::NeverToAny
+ | adjustment::Adjust::Pointer(_)
+ | adjustment::Adjust::DynStar => {
+ // Creating a closure/fn-pointer or unsizing consumes
+ // the input and stores it into the resulting rvalue.
+ self.delegate_consume(&place_with_id, place_with_id.hir_id);
+ }
+
+ adjustment::Adjust::Deref(None) => {}
+
+ // Autoderefs for overloaded Deref calls in fact reference
+ // their receiver. That is, if we have `(*x)` where `x`
+ // is of type `Rc<T>`, then this in fact is equivalent to
+ // `x.deref()`. Since `deref()` is declared with `&self`,
+ // this is an autoref of `x`.
+ adjustment::Adjust::Deref(Some(ref deref)) => {
+ let bk = ty::BorrowKind::from_mutbl(deref.mutbl);
+ self.delegate.borrow(&place_with_id, place_with_id.hir_id, bk);
+ }
+
+ adjustment::Adjust::Borrow(ref autoref) => {
+ self.walk_autoref(expr, &place_with_id, autoref);
+ }
+ }
+ place_with_id =
+ return_if_err!(self.mc.cat_expr_adjusted(expr, place_with_id, adjustment));
+ }
+ }
+
+ /// Walks the autoref `autoref` applied to the autoderef'd
+ /// `expr`. `base_place` is the mem-categorized form of `expr`
+ /// after all relevant autoderefs have occurred.
+ fn walk_autoref(
+ &mut self,
+ expr: &hir::Expr<'_>,
+ base_place: &PlaceWithHirId<'tcx>,
+ autoref: &adjustment::AutoBorrow<'tcx>,
+ ) {
+ debug!(
+ "walk_autoref(expr.hir_id={} base_place={:?} autoref={:?})",
+ expr.hir_id, base_place, autoref
+ );
+
+ match *autoref {
+ adjustment::AutoBorrow::Ref(_, m) => {
+ self.delegate.borrow(
+ base_place,
+ base_place.hir_id,
+ ty::BorrowKind::from_mutbl(m.into()),
+ );
+ }
+
+ adjustment::AutoBorrow::RawPtr(m) => {
+ debug!("walk_autoref: expr.hir_id={} base_place={:?}", expr.hir_id, base_place);
+
+ self.delegate.borrow(base_place, base_place.hir_id, ty::BorrowKind::from_mutbl(m));
+ }
+ }
+ }
+
+ fn walk_arm(&mut self, discr_place: &PlaceWithHirId<'tcx>, arm: &hir::Arm<'_>) {
+ let closure_def_id = match discr_place.place.base {
+ PlaceBase::Upvar(upvar_id) => Some(upvar_id.closure_expr_id),
+ _ => None,
+ };
+
+ self.delegate.fake_read(
+ discr_place,
+ FakeReadCause::ForMatchedPlace(closure_def_id),
+ discr_place.hir_id,
+ );
+ self.walk_pat(discr_place, arm.pat, arm.guard.is_some());
+
+ if let Some(hir::Guard::If(e)) = arm.guard {
+ self.consume_expr(e)
+ } else if let Some(hir::Guard::IfLet(ref l)) = arm.guard {
+ self.consume_expr(l.init)
+ }
+
+ self.consume_expr(arm.body);
+ }
+
+ /// Walks a pat that occurs in isolation (i.e., top-level of fn argument or
+ /// let binding, and *not* a match arm or nested pat.)
+ fn walk_irrefutable_pat(&mut self, discr_place: &PlaceWithHirId<'tcx>, pat: &hir::Pat<'_>) {
+ let closure_def_id = match discr_place.place.base {
+ PlaceBase::Upvar(upvar_id) => Some(upvar_id.closure_expr_id),
+ _ => None,
+ };
+
+ self.delegate.fake_read(
+ discr_place,
+ FakeReadCause::ForLet(closure_def_id),
+ discr_place.hir_id,
+ );
+ self.walk_pat(discr_place, pat, false);
+ }
+
+ /// The core driver for walking a pattern
+ fn walk_pat(
+ &mut self,
+ discr_place: &PlaceWithHirId<'tcx>,
+ pat: &hir::Pat<'_>,
+ has_guard: bool,
+ ) {
+ debug!("walk_pat(discr_place={:?}, pat={:?}, has_guard={:?})", discr_place, pat, has_guard);
+
+ let tcx = self.tcx();
+ let ExprUseVisitor { ref mc, body_owner: _, ref mut delegate } = *self;
+ return_if_err!(mc.cat_pattern(discr_place.clone(), pat, |place, pat| {
+ if let PatKind::Binding(_, canonical_id, ..) = pat.kind {
+ debug!("walk_pat: binding place={:?} pat={:?}", place, pat);
+ if let Some(bm) =
+ mc.typeck_results.extract_binding_mode(tcx.sess, pat.hir_id, pat.span)
+ {
+ debug!("walk_pat: pat.hir_id={:?} bm={:?}", pat.hir_id, bm);
+
+ // pat_ty: the type of the binding being produced.
+ let pat_ty = return_if_err!(mc.node_ty(pat.hir_id));
+ debug!("walk_pat: pat_ty={:?}", pat_ty);
+
+ let def = Res::Local(canonical_id);
+ if let Ok(ref binding_place) = mc.cat_res(pat.hir_id, pat.span, pat_ty, def) {
+ delegate.bind(binding_place, binding_place.hir_id);
+ }
+
+ // Subtle: MIR desugaring introduces immutable borrows for each pattern
+ // binding when lowering pattern guards to ensure that the guard does not
+ // modify the scrutinee.
+ if has_guard {
+ delegate.borrow(place, discr_place.hir_id, ImmBorrow);
+ }
+
+ // It is also a borrow or copy/move of the value being matched.
+ // In the case of a pattern like `let pat = upvar`, don't use the span
+ // of the pattern, as this just looks confusing; instead, use the span
+ // of the discriminant.
+ match bm {
+ ty::BindByReference(m) => {
+ let bk = ty::BorrowKind::from_mutbl(m);
+ delegate.borrow(place, discr_place.hir_id, bk);
+ }
+ ty::BindByValue(..) => {
+ debug!("walk_pat binding consuming pat");
+ delegate_consume(mc, *delegate, place, discr_place.hir_id);
+ }
+ }
+ }
+ }
+ }));
+ }
+
+ /// Handle the case where the current body contains a closure.
+ ///
+ /// When the current body being handled is a closure, then we must make sure that
+ /// - The parent closure only captures Places from the nested closure that are not local to it.
+ ///
+ /// In the following example the closure `c` only captures `p.x` even though `incr`
+ /// is a capture of the nested closure:
+ ///
+ /// ```
+ /// struct P { x: i32 }
+ /// let mut p = P { x: 4 };
+ /// let c = || {
+ /// let incr = 10;
+ /// let nested = || p.x += incr;
+ /// };
+ /// ```
+ ///
+ /// - When reporting the Place back to the Delegate, ensure that the UpvarId uses the enclosing
+ /// closure as the DefId.
+ fn walk_captures(&mut self, closure_expr: &hir::Expr<'_>) {
+ fn upvar_is_local_variable<'tcx>(
+ upvars: Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>>,
+ upvar_id: hir::HirId,
+ body_owner_is_closure: bool,
+ ) -> bool {
+ upvars.map(|upvars| !upvars.contains_key(&upvar_id)).unwrap_or(body_owner_is_closure)
+ }
+
+ debug!("walk_captures({:?})", closure_expr);
+
+ let tcx = self.tcx();
+ let closure_def_id = tcx.hir().local_def_id(closure_expr.hir_id);
+ let upvars = tcx.upvars_mentioned(self.body_owner);
+
+ // For purposes of this function, generators and closures are equivalent.
+ let body_owner_is_closure =
+ matches!(tcx.hir().body_owner_kind(self.body_owner), hir::BodyOwnerKind::Closure,);
+
+ // If we have a nested closure, we want to include the fake reads present in the nested closure.
+ if let Some(fake_reads) = self.mc.typeck_results.closure_fake_reads.get(&closure_def_id) {
+ for (fake_read, cause, hir_id) in fake_reads.iter() {
+ match fake_read.base {
+ PlaceBase::Upvar(upvar_id) => {
+ if upvar_is_local_variable(
+ upvars,
+ upvar_id.var_path.hir_id,
+ body_owner_is_closure,
+ ) {
+ // The nested closure might be fake reading the current (enclosing) closure's local variables.
+ // The only places we want to fake read before creating the parent closure are the ones that
+ // are not local to it, i.e. not defined by it.
+ //
+ // ```rust,ignore(cannot-test-this-because-pseudo-code)
+ // let v1 = (0, 1);
+ // let c = || { // fake reads: v1
+ // let v2 = (0, 1);
+ // let e = || { // fake reads: v1, v2
+ // let (_, t1) = v1;
+ // let (_, t2) = v2;
+ // }
+ // }
+ // ```
+ // This check is performed when visiting the body of the outermost closure (`c`) and ensures
+ // that we don't add a fake read of v2 in c.
+ continue;
+ }
+ }
+ _ => {
+ bug!(
+ "Do not know how to get HirId out of Rvalue and StaticItem {:?}",
+ fake_read.base
+ );
+ }
+ };
+ self.delegate.fake_read(
+ &PlaceWithHirId { place: fake_read.clone(), hir_id: *hir_id },
+ *cause,
+ *hir_id,
+ );
+ }
+ }
+
+ if let Some(min_captures) = self.mc.typeck_results.closure_min_captures.get(&closure_def_id)
+ {
+ for (var_hir_id, min_list) in min_captures.iter() {
+ if upvars.map_or(body_owner_is_closure, |upvars| !upvars.contains_key(var_hir_id)) {
+ // The nested closure might be capturing the current (enclosing) closure's local variables.
+ // We check if the root variable is ever mentioned within the enclosing closure; if not,
+ // then for the current body (if it's a closure) these aren't captures and we ignore them.
+ continue;
+ }
+ for captured_place in min_list {
+ let place = &captured_place.place;
+ let capture_info = captured_place.info;
+
+ let place_base = if body_owner_is_closure {
+ // Mark the place to be captured by the enclosing closure
+ PlaceBase::Upvar(ty::UpvarId::new(*var_hir_id, self.body_owner))
+ } else {
+ // If the body owner isn't a closure then the variable must
+ // be a local variable
+ PlaceBase::Local(*var_hir_id)
+ };
+ let place_with_id = PlaceWithHirId::new(
+ capture_info.path_expr_id.unwrap_or(
+ capture_info.capture_kind_expr_id.unwrap_or(closure_expr.hir_id),
+ ),
+ place.base_ty,
+ place_base,
+ place.projections.clone(),
+ );
+
+ match capture_info.capture_kind {
+ ty::UpvarCapture::ByValue => {
+ self.delegate_consume(&place_with_id, place_with_id.hir_id);
+ }
+ ty::UpvarCapture::ByRef(upvar_borrow) => {
+ self.delegate.borrow(
+ &place_with_id,
+ place_with_id.hir_id,
+ upvar_borrow,
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+fn copy_or_move<'a, 'tcx>(
+ mc: &mc::MemCategorizationContext<'a, 'tcx>,
+ place_with_id: &PlaceWithHirId<'tcx>,
+) -> ConsumeMode {
+ if !mc.type_is_copy_modulo_regions(
+ place_with_id.place.ty(),
+ mc.tcx().hir().span(place_with_id.hir_id),
+ ) {
+ ConsumeMode::Move
+ } else {
+ ConsumeMode::Copy
+ }
+}
+
+// - If a place is used in a `ByValue` context then move it if it's not a `Copy` type.
+// - If the place is a `Copy` type, consider it an `ImmBorrow`.
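+// For example, consuming an `i32` place is reported as a copy, while consuming a
+// `String` place is reported as a move.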
+fn delegate_consume<'a, 'tcx>(
+ mc: &mc::MemCategorizationContext<'a, 'tcx>,
+ delegate: &mut (dyn Delegate<'tcx> + 'a),
+ place_with_id: &PlaceWithHirId<'tcx>,
+ diag_expr_id: hir::HirId,
+) {
+ debug!("delegate_consume(place_with_id={:?})", place_with_id);
+
+ let mode = copy_or_move(mc, place_with_id);
+
+ match mode {
+ ConsumeMode::Move => delegate.consume(place_with_id, diag_expr_id),
+ ConsumeMode::Copy => delegate.copy(place_with_id, diag_expr_id),
+ }
+}
+
+fn is_multivariant_adt(ty: Ty<'_>) -> bool {
+ if let ty::Adt(def, _) = ty.kind() {
+ // Note that if a non-exhaustive SingleVariant is defined in another crate, we need
+ // to assume that more cases will be added to the variant in the future. This means
+ // that we should handle a non-exhaustive SingleVariant the same way we would handle
+ // a MultiVariant.
+ // If the variant is not local, it must be defined in another crate.
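+ // For example, a `#[non_exhaustive]` struct or single-variant enum defined in
+ // another crate is treated here like a multi-variant ADT.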
+ let is_non_exhaustive = match def.adt_kind() {
+ AdtKind::Struct | AdtKind::Union => {
+ def.non_enum_variant().is_field_list_non_exhaustive()
+ }
+ AdtKind::Enum => def.is_variant_list_non_exhaustive(),
+ };
+ def.variants().len() > 1 || (!def.did().is_local() && is_non_exhaustive)
+ } else {
+ false
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/fallback.rs b/compiler/rustc_hir_typeck/src/fallback.rs
new file mode 100644
index 000000000..747ecb036
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/fallback.rs
@@ -0,0 +1,398 @@
+use crate::FnCtxt;
+use rustc_data_structures::{
+ fx::{FxHashMap, FxHashSet},
+ graph::WithSuccessors,
+ graph::{iterate::DepthFirstSearch, vec_graph::VecGraph},
+};
+use rustc_middle::ty::{self, Ty};
+
+impl<'tcx> FnCtxt<'_, 'tcx> {
+ /// Performs type inference fallback, returning true if any fallback
+ /// occurs.
+ pub(super) fn type_inference_fallback(&self) -> bool {
+ debug!(
+ "type-inference-fallback start obligations: {:#?}",
+ self.fulfillment_cx.borrow_mut().pending_obligations()
+ );
+
+ // All type checking constraints were added, try to fallback unsolved variables.
+ self.select_obligations_where_possible(false, |_| {});
+
+ debug!(
+ "type-inference-fallback post selection obligations: {:#?}",
+ self.fulfillment_cx.borrow_mut().pending_obligations()
+ );
+
+ // Check if we have any unsolved variables. If not, no need for fallback.
+ let unsolved_variables = self.unsolved_variables();
+ if unsolved_variables.is_empty() {
+ return false;
+ }
+
+ let diverging_fallback = self.calculate_diverging_fallback(&unsolved_variables);
+
+ let mut fallback_has_occurred = false;
+ // We do fallback in two passes, to try to generate
+ // better error messages.
+ // The first time, we do *not* replace opaque types.
+ for ty in unsolved_variables {
+ debug!("unsolved_variable = {:?}", ty);
+ fallback_has_occurred |= self.fallback_if_possible(ty, &diverging_fallback);
+ }
+
+ // We now see if we can make progress. This might cause us to
+ // unify inference variables for opaque types, since we may
+ // have unified some other type variables during the first
+ // phase of fallback. This means that we only replace
+ // inference variables with their underlying opaque types as a
+ // last resort.
+ //
+ // In code like this:
+ //
+ // ```rust
+ // type MyType = impl Copy;
+ // fn produce() -> MyType { true }
+ // fn bad_produce() -> MyType { panic!() }
+ // ```
+ //
+ // we want to unify the opaque inference variable in `bad_produce`
+ // with the diverging fallback for `panic!` (e.g. `()` or `!`).
+ // This will produce a nice error message about conflicting concrete
+ // types for `MyType`.
+ //
+ // If we had tried to fallback the opaque inference variable to `MyType`,
+ // we will generate a confusing type-check error that does not explicitly
+ // refer to opaque types.
+ self.select_obligations_where_possible(fallback_has_occurred, |_| {});
+
+ fallback_has_occurred
+ }
+
+ // Tries to apply a fallback to `ty` if it is an unsolved variable.
+ //
+ // - Unconstrained ints are replaced with `i32`.
+ //
+ // - Unconstrained floats are replaced with `f64`.
+ //
+ // - Non-numerics may get replaced with `()` or `!`, depending on
+ // how they were categorized by `calculate_diverging_fallback`
+ // (and the setting of `#![feature(never_type_fallback)]`).
+ //
+ // Fallback becomes very dubious if we have encountered
+ // type-checking errors. In that case, fallback to Error.
+ //
+ // The return value indicates whether fallback has occurred.
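+ //
+ // For instance, in `let x = 1;` with no further constraints the integer variable
+ // falls back to `i32`, and in `let f = 1.0;` the float variable falls back to `f64`.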
+ fn fallback_if_possible(
+ &self,
+ ty: Ty<'tcx>,
+ diverging_fallback: &FxHashMap<Ty<'tcx>, Ty<'tcx>>,
+ ) -> bool {
+ // Careful: we do NOT shallow-resolve `ty`. We know that `ty`
+ // is an unsolved variable, and we determine its fallback
+ // based solely on how it was created, not what other type
+ // variables it may have been unified with since then.
+ //
+ // The reason this matters is that other attempts at fallback
+ // may (in principle) conflict with this fallback, and we wish
+ // to generate a type error in that case. (However, this
+ // actually isn't true right now, because we're only using the
+ // builtin fallback rules. This would be true if we were using
+ // user-supplied fallbacks. But it's still useful to write the
+ // code to detect bugs.)
+ //
+ // (Note though that if we have a general type variable `?T`
+ // that is then unified with an integer type variable `?I`
+ // that ultimately never gets resolved to a special integral
+ // type, `?T` is not considered unsolved, but `?I` is. The
+ // same is true for float variables.)
+ let fallback = match ty.kind() {
+ _ if self.is_tainted_by_errors() => self.tcx.ty_error(),
+ ty::Infer(ty::IntVar(_)) => self.tcx.types.i32,
+ ty::Infer(ty::FloatVar(_)) => self.tcx.types.f64,
+ _ => match diverging_fallback.get(&ty) {
+ Some(&fallback_ty) => fallback_ty,
+ None => return false,
+ },
+ };
+ debug!("fallback_if_possible(ty={:?}): defaulting to `{:?}`", ty, fallback);
+
+ let span = self
+ .infcx
+ .type_var_origin(ty)
+ .map(|origin| origin.span)
+ .unwrap_or(rustc_span::DUMMY_SP);
+ self.demand_eqtype(span, ty, fallback);
+ true
+ }
+
+ /// The "diverging fallback" system is rather complicated. This is
+ /// a result of our need to balance 'do the right thing' with
+ /// backwards compatibility.
+ ///
+ /// "Diverging" type variables are variables created when we
+ /// coerce a `!` type into an unbound type variable `?X`. If they
+ /// never wind up being constrained, the "right and natural" thing
+ /// is that `?X` should "fallback" to `!`. This means that e.g. an
+ /// expression like `Some(return)` will ultimately wind up with a
+ /// type like `Option<!>` (presuming it is not assigned or
+ /// constrained to have some other type).
+ ///
+ /// However, the fallback used to be `()` (before the `!` type was
+ /// added). Moreover, there are cases where the `!` type 'leaks
+ /// out' from dead code into type variables that affect live
+ /// code. The most common case is something like this:
+ ///
+ /// ```rust
+ /// # fn foo() -> i32 { 4 }
+ /// match foo() {
+ /// 22 => Default::default(), // call this type `?D`
+ /// _ => return, // return has type `!`
+ /// } // call the type of this match `?M`
+ /// ```
+ ///
+ /// Here, coercing the type `!` into `?M` will create a diverging
+ /// type variable `?X` where `?X <: ?M`. We also have that `?D <:
+ /// ?M`. If `?M` winds up unconstrained, then `?X` will
+ /// fallback. If it falls back to `!`, then all the type variables
+ /// will wind up equal to `!` -- this includes the type `?D`
+ /// (since `!` doesn't implement `Default`, we wind up a "trait
+ /// not implemented" error in code like this). But since the
+ /// original fallback was `()`, this code used to compile with `?D
+ /// = ()`. This is somewhat surprising, since `Default::default()`
+ /// on its own would give an error because the types are
+ /// insufficiently constrained.
+ ///
+ /// Our solution to this dilemma is to modify diverging variables
+ /// so that they can *either* fallback to `!` (the default) or to
+ /// `()` (the backwards compatibility case). We decide which
+ /// fallback to use based on whether there is a coercion pattern
+ /// like this:
+ ///
+ /// ```ignore (not-rust)
+ /// ?Diverging -> ?V
+ /// ?NonDiverging -> ?V
+ /// ?V != ?NonDiverging
+ /// ```
+ ///
+ /// Here `?Diverging` represents some diverging type variable and
+ /// `?NonDiverging` represents some non-diverging type
+ /// variable. `?V` can be any type variable (diverging or not), so
+ /// long as it is not equal to `?NonDiverging`.
+ ///
+ /// Intuitively, what we are looking for is a case where a
+ /// "non-diverging" type variable (like `?M` in our example above)
+ /// is coerced *into* some variable `?V` that would otherwise
+ /// fallback to `!`. In that case, we make `?V` fallback to `!`,
+ /// along with anything that would flow into `?V`.
+ ///
+ /// The algorithm we use:
+ /// * Identify all variables that are coerced *into* by a
+ /// diverging variable. Do this by iterating over each
+ /// diverging, unsolved variable and finding all variables
+ /// reachable from there. Call that set `D`.
+ /// * Walk over all unsolved, non-diverging variables, and find
+ /// any variable that has an edge into `D`.
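+ ///
+ /// In the `match foo()` example above, the diverging variable `?X` can reach `?M`,
+ /// which is also reachable from the non-diverging `?D`, so `?X` falls back to `()`
+ /// rather than `!`, preserving the old behavior.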
+ fn calculate_diverging_fallback(
+ &self,
+ unsolved_variables: &[Ty<'tcx>],
+ ) -> FxHashMap<Ty<'tcx>, Ty<'tcx>> {
+ debug!("calculate_diverging_fallback({:?})", unsolved_variables);
+
+ let relationships = self.fulfillment_cx.borrow_mut().relationships().clone();
+
+ // Construct a coercion graph where an edge `A -> B` indicates
+ // that the type variable `A` is coerced into `B`.
+ let coercion_graph = self.create_coercion_graph();
+
+ // Extract the unsolved type inference variable vids; note that some
+ // unsolved variables are integer/float variables and are excluded.
+ let unsolved_vids = unsolved_variables.iter().filter_map(|ty| ty.ty_vid());
+
+ // Compute the diverging root vids D -- that is, the root vid of
+ // those type variables that (a) are the target of a coercion from
+ // a `!` type and (b) have not yet been solved.
+ //
+ // These variables are the ones that are targets for fallback to
+ // either `!` or `()`.
+ let diverging_roots: FxHashSet<ty::TyVid> = self
+ .diverging_type_vars
+ .borrow()
+ .iter()
+ .map(|&ty| self.shallow_resolve(ty))
+ .filter_map(|ty| ty.ty_vid())
+ .map(|vid| self.root_var(vid))
+ .collect();
+ debug!(
+ "calculate_diverging_fallback: diverging_type_vars={:?}",
+ self.diverging_type_vars.borrow()
+ );
+ debug!("calculate_diverging_fallback: diverging_roots={:?}", diverging_roots);
+
+ // Find all type variables that are reachable from a diverging
+ // type variable. These will typically default to `!`, unless
+ // we find later that they are *also* reachable from some
+ // other type variable outside this set.
+ let mut roots_reachable_from_diverging = DepthFirstSearch::new(&coercion_graph);
+ let mut diverging_vids = vec![];
+ let mut non_diverging_vids = vec![];
+ for unsolved_vid in unsolved_vids {
+ let root_vid = self.root_var(unsolved_vid);
+ debug!(
+ "calculate_diverging_fallback: unsolved_vid={:?} root_vid={:?} diverges={:?}",
+ unsolved_vid,
+ root_vid,
+ diverging_roots.contains(&root_vid),
+ );
+ if diverging_roots.contains(&root_vid) {
+ diverging_vids.push(unsolved_vid);
+ roots_reachable_from_diverging.push_start_node(root_vid);
+
+ debug!(
+ "calculate_diverging_fallback: root_vid={:?} reaches {:?}",
+ root_vid,
+ coercion_graph.depth_first_search(root_vid).collect::<Vec<_>>()
+ );
+
+ // drain the iterator to visit all nodes reachable from this node
+ roots_reachable_from_diverging.complete_search();
+ } else {
+ non_diverging_vids.push(unsolved_vid);
+ }
+ }
+
+ debug!(
+ "calculate_diverging_fallback: roots_reachable_from_diverging={:?}",
+ roots_reachable_from_diverging,
+ );
+
+ // Find all type variables N0 that are not reachable from a
+ // diverging variable, and then compute the set reachable from
+ // N0, which we call N. These are the *non-diverging* type
+ // variables. (Note that this set consists of "root variables".)
+ let mut roots_reachable_from_non_diverging = DepthFirstSearch::new(&coercion_graph);
+ for &non_diverging_vid in &non_diverging_vids {
+ let root_vid = self.root_var(non_diverging_vid);
+ if roots_reachable_from_diverging.visited(root_vid) {
+ continue;
+ }
+ roots_reachable_from_non_diverging.push_start_node(root_vid);
+ roots_reachable_from_non_diverging.complete_search();
+ }
+ debug!(
+ "calculate_diverging_fallback: roots_reachable_from_non_diverging={:?}",
+ roots_reachable_from_non_diverging,
+ );
+
+ debug!("inherited: {:#?}", self.inh.fulfillment_cx.borrow_mut().pending_obligations());
+ debug!("obligations: {:#?}", self.fulfillment_cx.borrow_mut().pending_obligations());
+ debug!("relationships: {:#?}", relationships);
+
+ // For each diverging variable, figure out whether it can
+ // reach a member of N. If so, it falls back to `()`. Else
+ // `!`.
+ let mut diverging_fallback = FxHashMap::default();
+ diverging_fallback.reserve(diverging_vids.len());
+ for &diverging_vid in &diverging_vids {
+ let diverging_ty = self.tcx.mk_ty_var(diverging_vid);
+ let root_vid = self.root_var(diverging_vid);
+ let can_reach_non_diverging = coercion_graph
+ .depth_first_search(root_vid)
+ .any(|n| roots_reachable_from_non_diverging.visited(n));
+
+ let mut relationship = ty::FoundRelationships { self_in_trait: false, output: false };
+
+ for (vid, rel) in relationships.iter() {
+ if self.root_var(*vid) == root_vid {
+ relationship.self_in_trait |= rel.self_in_trait;
+ relationship.output |= rel.output;
+ }
+ }
+
+ if relationship.self_in_trait && relationship.output {
+ // This case falls back to () to ensure that the code pattern in
+ // src/test/ui/never_type/fallback-closure-ret.rs continues to
+ // compile when never_type_fallback is enabled.
+ //
+ // This rule is not readily explainable from first principles,
+ // but is rather intended as a patchwork fix to ensure code
+ // which compiles before the stabilization of never type
+ // fallback continues to work.
+ //
+ // Typically this pattern is encountered in a function taking a
+ // closure as a parameter, where the return type of that closure
+ // (checked by `relationship.output`) is expected to implement
+ // some trait (checked by `relationship.self_in_trait`). This
+ // can come up in non-closure cases too, so we do not limit this
+ // rule to specifically `FnOnce`.
+ //
+ // When the closure's body is something like `panic!()`, the
+ // return type would normally be inferred to `!`. However, it
+ // needs to fall back to `()` in order to still compile, as the
+ // trait is specifically implemented for `()` but not `!`.
+ //
+ // For details on the requirements for these relationships to be
+ // set, see the relationship finding module in
+ // compiler/rustc_trait_selection/src/traits/relationships.rs.
+ debug!("fallback to () - found trait and projection: {:?}", diverging_vid);
+ diverging_fallback.insert(diverging_ty, self.tcx.types.unit);
+ } else if can_reach_non_diverging {
+ debug!("fallback to () - reached non-diverging: {:?}", diverging_vid);
+ diverging_fallback.insert(diverging_ty, self.tcx.types.unit);
+ } else {
+ debug!("fallback to ! - all diverging: {:?}", diverging_vid);
+ diverging_fallback.insert(diverging_ty, self.tcx.mk_diverging_default());
+ }
+ }
+
+ diverging_fallback
+ }
+
+ /// Returns a graph whose nodes are (unresolved) inference variables and where
+ /// an edge `?A -> ?B` indicates that the variable `?A` is coerced to `?B`.
+ fn create_coercion_graph(&self) -> VecGraph<ty::TyVid> {
+ let pending_obligations = self.fulfillment_cx.borrow_mut().pending_obligations();
+ debug!("create_coercion_graph: pending_obligations={:?}", pending_obligations);
+ let coercion_edges: Vec<(ty::TyVid, ty::TyVid)> = pending_obligations
+ .into_iter()
+ .filter_map(|obligation| {
+ // The predicates we are looking for look like `Coerce(?A -> ?B)`.
+ // They will have no bound variables.
+ obligation.predicate.kind().no_bound_vars()
+ })
+ .filter_map(|atom| {
+ // We consider both subtyping and coercion to imply 'flow' from
+ // some position in the code `a` to a different position `b`.
+ // This is then used to determine which variables interact with
+ // live code, and as such must fall back to `()` to preserve
+ // soundness.
+ //
+ // In practice currently the two ways that this happens are
+ // coercion and subtyping.
+ let (a, b) = if let ty::PredicateKind::Coerce(ty::CoercePredicate { a, b }) = atom {
+ (a, b)
+ } else if let ty::PredicateKind::Subtype(ty::SubtypePredicate {
+ a_is_expected: _,
+ a,
+ b,
+ }) = atom
+ {
+ (a, b)
+ } else {
+ return None;
+ };
+
+ let a_vid = self.root_vid(a)?;
+ let b_vid = self.root_vid(b)?;
+ Some((a_vid, b_vid))
+ })
+ .collect();
+ debug!("create_coercion_graph: coercion_edges={:?}", coercion_edges);
+ let num_ty_vars = self.num_ty_vars();
+ VecGraph::new(num_ty_vars, coercion_edges)
+ }
+
+ /// If `ty` is an unresolved type variable, returns its root vid.
+ fn root_vid(&self, ty: Ty<'tcx>) -> Option<ty::TyVid> {
+ Some(self.root_var(self.shallow_resolve(ty).ty_vid()?))
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
new file mode 100644
index 000000000..6a1cffe3e
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
@@ -0,0 +1,1540 @@
+use crate::callee::{self, DeferredCallResolution};
+use crate::method::{self, MethodCallee, SelfSource};
+use crate::rvalue_scopes;
+use crate::{BreakableCtxt, Diverges, Expectation, FnCtxt, LocalTy};
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{Applicability, Diagnostic, ErrorGuaranteed, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{ExprKind, GenericArg, Node, QPath};
+use rustc_hir_analysis::astconv::{
+ AstConv, CreateSubstsForGenericArgsCtxt, ExplicitLateBound, GenericArgCountMismatch,
+ GenericArgCountResult, IsMethodCall, PathSeg,
+};
+use rustc_infer::infer::canonical::{Canonical, OriginalQueryValues, QueryResponse};
+use rustc_infer::infer::error_reporting::TypeAnnotationNeeded::E0282;
+use rustc_infer::infer::{InferOk, InferResult};
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, AutoBorrow, AutoBorrowMutability};
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{
+ self, AdtKind, CanonicalUserType, DefIdTree, EarlyBinder, GenericParamDefKind, ToPolyTraitRef,
+ ToPredicate, Ty, UserType,
+};
+use rustc_middle::ty::{GenericArgKind, InternalSubsts, SubstsRef, UserSelfTy, UserSubsts};
+use rustc_session::lint;
+use rustc_span::def_id::LocalDefId;
+use rustc_span::hygiene::DesugaringKind;
+use rustc_span::symbol::{kw, sym, Ident};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
+use rustc_trait_selection::traits::{
+ self, ObligationCause, ObligationCauseCode, TraitEngine, TraitEngineExt,
+};
+
+use std::collections::hash_map::Entry;
+use std::slice;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Produces a warning on the given node, if the current point in the
+ /// function is unreachable, and there hasn't been another warning.
+ pub(in super::super) fn warn_if_unreachable(&self, id: hir::HirId, span: Span, kind: &str) {
+ // FIXME: Combine these two 'if' expressions into one once
+ // let chains are implemented
+ if let Diverges::Always { span: orig_span, custom_note } = self.diverges.get() {
+ // If span arose from a desugaring of `if` or `while`, then it is the condition itself,
+ // which diverges, that we are about to lint on. This gives suboptimal diagnostics.
+ // Instead, stop here so that the `if`- or `while`-expression's block is linted instead.
+ if !span.is_desugaring(DesugaringKind::CondTemporary)
+ && !span.is_desugaring(DesugaringKind::Async)
+ && !orig_span.is_desugaring(DesugaringKind::Await)
+ {
+ self.diverges.set(Diverges::WarnedAlways);
+
+ debug!("warn_if_unreachable: id={:?} span={:?} kind={}", id, span, kind);
+
+ let msg = format!("unreachable {}", kind);
+ self.tcx().struct_span_lint_hir(
+ lint::builtin::UNREACHABLE_CODE,
+ id,
+ span,
+ &msg,
+ |lint| {
+ lint.span_label(span, &msg).span_label(
+ orig_span,
+ custom_note
+ .unwrap_or("any code following this expression is unreachable"),
+ )
+ },
+ )
+ }
+ }
+ }
+
+ /// Resolves type and const variables in `ty` if possible. Unlike the infcx
+ /// version (resolve_vars_if_possible), this version will
+ /// also select obligations if it seems useful, in an effort
+ /// to get more type information.
+ pub(in super::super) fn resolve_vars_with_obligations(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.resolve_vars_with_obligations_and_mutate_fulfillment(ty, |_| {})
+ }
+
+ #[instrument(skip(self, mutate_fulfillment_errors), level = "debug", ret)]
+ pub(in super::super) fn resolve_vars_with_obligations_and_mutate_fulfillment(
+ &self,
+ mut ty: Ty<'tcx>,
+ mutate_fulfillment_errors: impl Fn(&mut Vec<traits::FulfillmentError<'tcx>>),
+ ) -> Ty<'tcx> {
+ // No Infer()? Nothing needs doing.
+ if !ty.has_non_region_infer() {
+ debug!("no inference var, nothing needs doing");
+ return ty;
+ }
+
+ // If `ty` is a type variable, see whether we already know what it is.
+ ty = self.resolve_vars_if_possible(ty);
+ if !ty.has_non_region_infer() {
+ debug!(?ty);
+ return ty;
+ }
+
+ // If not, try resolving pending obligations as much as
+ // possible. This can help substantially when there are
+ // indirect dependencies that don't seem worth tracking
+ // precisely.
+ self.select_obligations_where_possible(false, mutate_fulfillment_errors);
+ self.resolve_vars_if_possible(ty)
+ }
+
+ pub(in super::super) fn record_deferred_call_resolution(
+ &self,
+ closure_def_id: LocalDefId,
+ r: DeferredCallResolution<'tcx>,
+ ) {
+ let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
+ deferred_call_resolutions.entry(closure_def_id).or_default().push(r);
+ }
+
+ pub(in super::super) fn remove_deferred_call_resolutions(
+ &self,
+ closure_def_id: LocalDefId,
+ ) -> Vec<DeferredCallResolution<'tcx>> {
+ let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
+ deferred_call_resolutions.remove(&closure_def_id).unwrap_or_default()
+ }
+
+ pub fn tag(&self) -> String {
+ format!("{:p}", self)
+ }
+
+ pub fn local_ty(&self, span: Span, nid: hir::HirId) -> LocalTy<'tcx> {
+ self.locals.borrow().get(&nid).cloned().unwrap_or_else(|| {
+ span_bug!(span, "no type for local variable {}", self.tcx.hir().node_to_string(nid))
+ })
+ }
+
+ #[inline]
+ pub fn write_ty(&self, id: hir::HirId, ty: Ty<'tcx>) {
+ debug!("write_ty({:?}, {:?}) in fcx {}", id, self.resolve_vars_if_possible(ty), self.tag());
+ self.typeck_results.borrow_mut().node_types_mut().insert(id, ty);
+
+ if ty.references_error() {
+ self.has_errors.set(true);
+ self.set_tainted_by_errors();
+ }
+ }
+
+ pub fn write_field_index(&self, hir_id: hir::HirId, index: usize) {
+ self.typeck_results.borrow_mut().field_indices_mut().insert(hir_id, index);
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ pub(in super::super) fn write_resolution(
+ &self,
+ hir_id: hir::HirId,
+ r: Result<(DefKind, DefId), ErrorGuaranteed>,
+ ) {
+ self.typeck_results.borrow_mut().type_dependent_defs_mut().insert(hir_id, r);
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ pub fn write_method_call(&self, hir_id: hir::HirId, method: MethodCallee<'tcx>) {
+ self.write_resolution(hir_id, Ok((DefKind::AssocFn, method.def_id)));
+ self.write_substs(hir_id, method.substs);
+
+ // When the method is confirmed, the `method.substs` includes
+ // parameters from not just the method, but also the impl of
+ // the method -- in particular, the `Self` type will be fully
+ // resolved. However, those are not something that the "user
+ // specified" -- i.e., those types come from the inferred type
+ // of the receiver, not something the user wrote. So when we
+ // create the user-substs, we want to replace those earlier
+ // types with just the types that the user actually wrote --
+ // that is, those that appear on the *method itself*.
+ //
+ // As an example, if the user wrote something like
+ // `foo.bar::<u32>(...)` -- the `Self` type here will be the
+ // type of `foo` (possibly adjusted), but we don't want to
+ // include that. We want just the `[_, u32]` part.
+ if !method.substs.is_empty() {
+ let method_generics = self.tcx.generics_of(method.def_id);
+ if !method_generics.params.is_empty() {
+ let user_type_annotation = self.probe(|_| {
+ let user_substs = UserSubsts {
+ substs: InternalSubsts::for_item(self.tcx, method.def_id, |param, _| {
+ let i = param.index as usize;
+ if i < method_generics.parent_count {
+ self.var_for_def(DUMMY_SP, param)
+ } else {
+ method.substs[i]
+ }
+ }),
+ user_self_ty: None, // not relevant here
+ };
+
+ self.canonicalize_user_type_annotation(UserType::TypeOf(
+ method.def_id,
+ user_substs,
+ ))
+ });
+
+ debug!("write_method_call: user_type_annotation={:?}", user_type_annotation);
+ self.write_user_type_annotation(hir_id, user_type_annotation);
+ }
+ }
+ }
+
+ pub fn write_substs(&self, node_id: hir::HirId, substs: SubstsRef<'tcx>) {
+ if !substs.is_empty() {
+ debug!("write_substs({:?}, {:?}) in fcx {}", node_id, substs, self.tag());
+
+ self.typeck_results.borrow_mut().node_substs_mut().insert(node_id, substs);
+ }
+ }
+
+ /// Given the substs that we just converted from the HIR, try to
+ /// canonicalize them and store them as user-given substitutions
+ /// (i.e., substitutions that must be respected by the NLL check).
+ ///
+ /// This should be invoked **before any unifications have
+ /// occurred**, so that annotations like `Vec<_>` are preserved
+ /// properly.
+ #[instrument(skip(self), level = "debug")]
+ pub fn write_user_type_annotation_from_substs(
+ &self,
+ hir_id: hir::HirId,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ user_self_ty: Option<UserSelfTy<'tcx>>,
+ ) {
+ debug!("fcx {}", self.tag());
+
+ if Self::can_contain_user_lifetime_bounds((substs, user_self_ty)) {
+ let canonicalized = self.canonicalize_user_type_annotation(UserType::TypeOf(
+ def_id,
+ UserSubsts { substs, user_self_ty },
+ ));
+ debug!(?canonicalized);
+ self.write_user_type_annotation(hir_id, canonicalized);
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub fn write_user_type_annotation(
+ &self,
+ hir_id: hir::HirId,
+ canonical_user_type_annotation: CanonicalUserType<'tcx>,
+ ) {
+ debug!("fcx {}", self.tag());
+
+ if !canonical_user_type_annotation.is_identity() {
+ self.typeck_results
+ .borrow_mut()
+ .user_provided_types_mut()
+ .insert(hir_id, canonical_user_type_annotation);
+ } else {
+ debug!("skipping identity substs");
+ }
+ }
+
+ #[instrument(skip(self, expr), level = "debug")]
+ pub fn apply_adjustments(&self, expr: &hir::Expr<'_>, adj: Vec<Adjustment<'tcx>>) {
+ debug!("expr = {:#?}", expr);
+
+ if adj.is_empty() {
+ return;
+ }
+
+ for a in &adj {
+ if let Adjust::NeverToAny = a.kind {
+ if a.target.is_ty_var() {
+ self.diverging_type_vars.borrow_mut().insert(a.target);
+ debug!("apply_adjustments: adding `{:?}` as diverging type var", a.target);
+ }
+ }
+ }
+
+ let autoborrow_mut = adj.iter().any(|adj| {
+ matches!(
+ adj,
+ &Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(_, AutoBorrowMutability::Mut { .. })),
+ ..
+ }
+ )
+ });
+
+ match self.typeck_results.borrow_mut().adjustments_mut().entry(expr.hir_id) {
+ Entry::Vacant(entry) => {
+ entry.insert(adj);
+ }
+ Entry::Occupied(mut entry) => {
+ debug!(" - composing on top of {:?}", entry.get());
+ match (&entry.get()[..], &adj[..]) {
+ // Applying any adjustment on top of a NeverToAny
+ // is a valid NeverToAny adjustment, because it can't
+ // be reached.
+ (&[Adjustment { kind: Adjust::NeverToAny, .. }], _) => return,
+ (
+ &[
+ Adjustment { kind: Adjust::Deref(_), .. },
+ Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(..)), .. },
+ ],
+ &[
+ Adjustment { kind: Adjust::Deref(_), .. },
+ .., // Any following adjustments are allowed.
+ ],
+ ) => {
+ // A reborrow has no effect before a dereference.
+ }
+ // FIXME: currently we never try to compose autoderefs
+ // and ReifyFnPointer/UnsafeFnPointer, but we could.
+ _ => {
+ self.tcx.sess.delay_span_bug(
+ expr.span,
+ &format!(
+ "while adjusting {:?}, can't compose {:?} and {:?}",
+ expr,
+ entry.get(),
+ adj
+ ),
+ );
+ }
+ }
+ *entry.get_mut() = adj;
+ }
+ }
+
+ // If there is a mutable auto-borrow, it is equivalent to `&mut <expr>`.
+ // In this case implicit use of `Deref` and `Index` within `<expr>` should
+ // instead be `DerefMut` and `IndexMut`, so fix those up.
+ if autoborrow_mut {
+ self.convert_place_derefs_to_mutable(expr);
+ }
+ }
+
+ /// Basically whenever we are converting from a type scheme into
+ /// the fn body space, we always want to normalize associated
+ /// types as well. This function combines the two.
+ fn instantiate_type_scheme<T>(&self, span: Span, substs: SubstsRef<'tcx>, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ debug!("instantiate_type_scheme(value={:?}, substs={:?})", value, substs);
+ let value = EarlyBinder(value).subst(self.tcx, substs);
+ let result = self.normalize_associated_types_in(span, value);
+ debug!("instantiate_type_scheme = {:?}", result);
+ result
+ }
+
+ /// As `instantiate_type_scheme`, but for the bounds found in a
+ /// generic type scheme.
+ pub(in super::super) fn instantiate_bounds(
+ &self,
+ span: Span,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> (ty::InstantiatedPredicates<'tcx>, Vec<Span>) {
+ let bounds = self.tcx.predicates_of(def_id);
+ let spans: Vec<Span> = bounds.predicates.iter().map(|(_, span)| *span).collect();
+ let result = bounds.instantiate(self.tcx, substs);
+ let result = self.normalize_associated_types_in(span, result);
+ debug!(
+ "instantiate_bounds(bounds={:?}, substs={:?}) = {:?}, {:?}",
+ bounds, substs, result, spans,
+ );
+ (result, spans)
+ }
+
+ pub(in super::super) fn normalize_associated_types_in<T>(&self, span: Span, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.inh.normalize_associated_types_in(span, self.body_id, self.param_env, value)
+ }
+
+ pub(in super::super) fn normalize_associated_types_in_as_infer_ok<T>(
+ &self,
+ span: Span,
+ value: T,
+ ) -> InferOk<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.inh.partially_normalize_associated_types_in(
+ ObligationCause::misc(span, self.body_id),
+ self.param_env,
+ value,
+ )
+ }
+
+ pub(in super::super) fn normalize_op_associated_types_in_as_infer_ok<T>(
+ &self,
+ span: Span,
+ value: T,
+ opt_input_expr: Option<&hir::Expr<'_>>,
+ ) -> InferOk<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.inh.partially_normalize_associated_types_in(
+ ObligationCause::new(
+ span,
+ self.body_id,
+ traits::BinOp {
+ rhs_span: opt_input_expr.map(|expr| expr.span),
+ is_lit: opt_input_expr
+ .map_or(false, |expr| matches!(expr.kind, ExprKind::Lit(_))),
+ output_ty: None,
+ },
+ ),
+ self.param_env,
+ value,
+ )
+ }
+
+ pub fn require_type_meets(
+ &self,
+ ty: Ty<'tcx>,
+ span: Span,
+ code: traits::ObligationCauseCode<'tcx>,
+ def_id: DefId,
+ ) {
+ self.register_bound(ty, def_id, traits::ObligationCause::new(span, self.body_id, code));
+ }
+
+ pub fn require_type_is_sized(
+ &self,
+ ty: Ty<'tcx>,
+ span: Span,
+ code: traits::ObligationCauseCode<'tcx>,
+ ) {
+ if !ty.references_error() {
+ let lang_item = self.tcx.require_lang_item(LangItem::Sized, None);
+ self.require_type_meets(ty, span, code, lang_item);
+ }
+ }
+
+ pub fn require_type_is_sized_deferred(
+ &self,
+ ty: Ty<'tcx>,
+ span: Span,
+ code: traits::ObligationCauseCode<'tcx>,
+ ) {
+ if !ty.references_error() {
+ self.deferred_sized_obligations.borrow_mut().push((ty, span, code));
+ }
+ }
+
+ pub fn register_bound(
+ &self,
+ ty: Ty<'tcx>,
+ def_id: DefId,
+ cause: traits::ObligationCause<'tcx>,
+ ) {
+ if !ty.references_error() {
+ self.fulfillment_cx.borrow_mut().register_bound(
+ self,
+ self.param_env,
+ ty,
+ def_id,
+ cause,
+ );
+ }
+ }
+
+ pub fn to_ty(&self, ast_t: &hir::Ty<'_>) -> Ty<'tcx> {
+ let t = <dyn AstConv<'_>>::ast_ty_to_ty(self, ast_t);
+ self.register_wf_obligation(t.into(), ast_t.span, traits::WellFormed(None));
+ t
+ }
+
+ pub fn to_ty_saving_user_provided_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
+ let ty = self.to_ty(ast_ty);
+ debug!("to_ty_saving_user_provided_ty: ty={:?}", ty);
+
+ if Self::can_contain_user_lifetime_bounds(ty) {
+ let c_ty = self.canonicalize_response(UserType::Ty(ty));
+ debug!("to_ty_saving_user_provided_ty: c_ty={:?}", c_ty);
+ self.typeck_results.borrow_mut().user_provided_types_mut().insert(ast_ty.hir_id, c_ty);
+ }
+
+ ty
+ }
+
+ pub fn array_length_to_const(&self, length: &hir::ArrayLen) -> ty::Const<'tcx> {
+ match length {
+ &hir::ArrayLen::Infer(_, span) => self.ct_infer(self.tcx.types.usize, None, span),
+ hir::ArrayLen::Body(anon_const) => {
+ let const_def_id = self.tcx.hir().local_def_id(anon_const.hir_id);
+ let span = self.tcx.hir().span(anon_const.hir_id);
+ let c = ty::Const::from_anon_const(self.tcx, const_def_id);
+ self.register_wf_obligation(c.into(), span, ObligationCauseCode::WellFormed(None));
+ self.normalize_associated_types_in(span, c)
+ }
+ }
+ }
+
+ pub fn const_arg_to_const(
+ &self,
+ ast_c: &hir::AnonConst,
+ param_def_id: DefId,
+ ) -> ty::Const<'tcx> {
+ let const_def = ty::WithOptConstParam {
+ did: self.tcx.hir().local_def_id(ast_c.hir_id),
+ const_param_did: Some(param_def_id),
+ };
+ let c = ty::Const::from_opt_const_arg_anon_const(self.tcx, const_def);
+ self.register_wf_obligation(
+ c.into(),
+ self.tcx.hir().span(ast_c.hir_id),
+ ObligationCauseCode::WellFormed(None),
+ );
+ c
+ }
+
+ // If the type given by the user has free regions, save it for later, since
+ // NLL would like to enforce those. Also pass in types that involve
+ // projections, since those can resolve to `'static` bounds (modulo #54940,
+ // which hopefully will be fixed by the time you see this comment, dear
+ // reader, although I have my doubts). Also pass in types with inference
+ // types, because they may be repeated. Other sorts of things are already
+ // sufficiently enforced with erased regions. =)
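+ // For example, `&'a T`, `<T as Trait>::Assoc`, and `Vec<_>` are saved for NLL,
+ // while a fully concrete type like `Vec<u8>` is not.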
+ fn can_contain_user_lifetime_bounds<T>(t: T) -> bool
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ t.has_free_regions() || t.has_projections() || t.has_infer_types()
+ }
+
+ pub fn node_ty(&self, id: hir::HirId) -> Ty<'tcx> {
+ match self.typeck_results.borrow().node_types().get(id) {
+ Some(&t) => t,
+ None if self.is_tainted_by_errors() => self.tcx.ty_error(),
+ None => {
+ bug!(
+ "no type for node {}: {} in fcx {}",
+ id,
+ self.tcx.hir().node_to_string(id),
+ self.tag()
+ );
+ }
+ }
+ }
+
+ pub fn node_ty_opt(&self, id: hir::HirId) -> Option<Ty<'tcx>> {
+ match self.typeck_results.borrow().node_types().get(id) {
+ Some(&t) => Some(t),
+ None if self.is_tainted_by_errors() => Some(self.tcx.ty_error()),
+ None => None,
+ }
+ }
+
+ /// Registers an obligation for checking later, during regionck, that `arg` is well-formed.
+ pub fn register_wf_obligation(
+ &self,
+ arg: ty::GenericArg<'tcx>,
+ span: Span,
+ code: traits::ObligationCauseCode<'tcx>,
+ ) {
+ // WF obligations never themselves fail, so no real need to give a detailed cause:
+ let cause = traits::ObligationCause::new(span, self.body_id, code);
+ self.register_predicate(traits::Obligation::new(
+ cause,
+ self.param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(arg)).to_predicate(self.tcx),
+ ));
+ }
+
+ /// Registers obligations that all `substs` are well-formed.
+ pub fn add_wf_bounds(&self, substs: SubstsRef<'tcx>, expr: &hir::Expr<'_>) {
+ for arg in substs.iter().filter(|arg| {
+ matches!(arg.unpack(), GenericArgKind::Type(..) | GenericArgKind::Const(..))
+ }) {
+ self.register_wf_obligation(arg, expr.span, traits::WellFormed(None));
+ }
+ }
+
+ // FIXME(arielb1): use this instead of field.ty everywhere
+ // Only for fields! Returns `<none>` for methods.
+ // Indifferent to privacy flags
+ pub fn field_ty(
+ &self,
+ span: Span,
+ field: &'tcx ty::FieldDef,
+ substs: SubstsRef<'tcx>,
+ ) -> Ty<'tcx> {
+ self.normalize_associated_types_in(span, field.ty(self.tcx, substs))
+ }
+
+ pub(in super::super) fn resolve_rvalue_scopes(&self, def_id: DefId) {
+ let scope_tree = self.tcx.region_scope_tree(def_id);
+ let rvalue_scopes = { rvalue_scopes::resolve_rvalue_scopes(self, &scope_tree, def_id) };
+ let mut typeck_results = self.inh.typeck_results.borrow_mut();
+ typeck_results.rvalue_scopes = rvalue_scopes;
+ }
+
+ pub(in super::super) fn resolve_generator_interiors(&self, def_id: DefId) {
+ let mut generators = self.deferred_generator_interiors.borrow_mut();
+ for (body_id, interior, kind) in generators.drain(..) {
+ self.select_obligations_where_possible(false, |_| {});
+ crate::generator_interior::resolve_interior(self, def_id, body_id, interior, kind);
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub(in super::super) fn select_all_obligations_or_error(&self) {
+ let mut errors = self.fulfillment_cx.borrow_mut().select_all_or_error(&self);
+
+ if !errors.is_empty() {
+ self.adjust_fulfillment_errors_for_expr_obligation(&mut errors);
+ self.err_ctxt().report_fulfillment_errors(&errors, self.inh.body_id, false);
+ }
+ }
+
+ /// Select as many obligations as we can at present.
+ pub(in super::super) fn select_obligations_where_possible(
+ &self,
+ fallback_has_occurred: bool,
+ mutate_fulfillment_errors: impl Fn(&mut Vec<traits::FulfillmentError<'tcx>>),
+ ) {
+ let mut result = self.fulfillment_cx.borrow_mut().select_where_possible(self);
+ if !result.is_empty() {
+ mutate_fulfillment_errors(&mut result);
+ self.adjust_fulfillment_errors_for_expr_obligation(&mut result);
+ self.err_ctxt().report_fulfillment_errors(
+ &result,
+ self.inh.body_id,
+ fallback_has_occurred,
+ );
+ }
+ }
+
+ /// For the overloaded place expressions (`*x`, `x[3]`), the trait
+ /// returns a type of `&T`, but the actual type we assign to the
+ /// *expression* is `T`. So this function just peels off the return
+ /// type by one layer to yield `T`.
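+ ///
+ /// For example, `Index::index` returns `&T`, but the expression `x[3]` has type `T`.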
+ pub(in super::super) fn make_overloaded_place_return_type(
+ &self,
+ method: MethodCallee<'tcx>,
+ ) -> ty::TypeAndMut<'tcx> {
+ // extract method return type, which will be &T;
+ let ret_ty = method.sig.output();
+
+ // method returns &T, but the type as visible to user is T, so deref
+ ret_ty.builtin_deref(true).unwrap()
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn self_type_matches_expected_vid(
+ &self,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ expected_vid: ty::TyVid,
+ ) -> bool {
+ let self_ty = self.shallow_resolve(trait_ref.skip_binder().self_ty());
+ debug!(?self_ty);
+
+ match *self_ty.kind() {
+ ty::Infer(ty::TyVar(found_vid)) => {
+ // FIXME: consider using `sub_root_var` here so we
+ // can see through subtyping.
+ let found_vid = self.root_var(found_vid);
+ debug!("self_type_matches_expected_vid - found_vid={:?}", found_vid);
+ expected_vid == found_vid
+ }
+ _ => false,
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub(in super::super) fn obligations_for_self_ty<'b>(
+ &'b self,
+ self_ty: ty::TyVid,
+ ) -> impl Iterator<Item = (ty::PolyTraitRef<'tcx>, traits::PredicateObligation<'tcx>)>
+ + Captures<'tcx>
+ + 'b {
+ // FIXME: consider using `sub_root_var` here so we
+ // can see through subtyping.
+ let ty_var_root = self.root_var(self_ty);
+ trace!("pending_obligations = {:#?}", self.fulfillment_cx.borrow().pending_obligations());
+
+ self.fulfillment_cx
+ .borrow()
+ .pending_obligations()
+ .into_iter()
+ .filter_map(move |obligation| {
+ let bound_predicate = obligation.predicate.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Projection(data) => Some((
+ bound_predicate.rebind(data).required_poly_trait_ref(self.tcx),
+ obligation,
+ )),
+ ty::PredicateKind::Trait(data) => {
+ Some((bound_predicate.rebind(data).to_poly_trait_ref(), obligation))
+ }
+ ty::PredicateKind::Subtype(..) => None,
+ ty::PredicateKind::Coerce(..) => None,
+ ty::PredicateKind::RegionOutlives(..) => None,
+ ty::PredicateKind::TypeOutlives(..) => None,
+ ty::PredicateKind::WellFormed(..) => None,
+ ty::PredicateKind::ObjectSafe(..) => None,
+ ty::PredicateKind::ConstEvaluatable(..) => None,
+ ty::PredicateKind::ConstEquate(..) => None,
+ // N.B., this predicate is created by breaking down a
+ // `ClosureType: FnFoo()` predicate, where
+ // `ClosureType` represents some `Closure`. It can't
+ // possibly be referring to the current closure,
+ // because we haven't produced the `Closure` for
+ // this closure yet; this is exactly why the other
+ // code is looking for a self type of an unresolved
+ // inference variable.
+ ty::PredicateKind::ClosureKind(..) => None,
+ ty::PredicateKind::TypeWellFormedFromEnv(..) => None,
+ }
+ })
+ .filter(move |(tr, _)| self.self_type_matches_expected_vid(*tr, ty_var_root))
+ }
+
+ pub(in super::super) fn type_var_is_sized(&self, self_ty: ty::TyVid) -> bool {
+ self.obligations_for_self_ty(self_ty)
+ .any(|(tr, _)| Some(tr.def_id()) == self.tcx.lang_items().sized_trait())
+ }
+
+ pub(in super::super) fn err_args(&self, len: usize) -> Vec<Ty<'tcx>> {
+ vec![self.tcx.ty_error(); len]
+ }
+
+ /// Unifies the output type with the expected type early, for more coercions
+ /// and forward type information on the input expressions.
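+ ///
+ /// For instance (roughly), given `fn id<T>(x: T) -> T` and `let n: u32 = id(0);`,
+ /// unifying the formal return type `T` with the expected `u32` lets the argument `0`
+ /// be checked with an expectation of `u32`.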
+ #[instrument(skip(self, call_span), level = "debug")]
+ pub(in super::super) fn expected_inputs_for_expected_output(
+ &self,
+ call_span: Span,
+ expected_ret: Expectation<'tcx>,
+ formal_ret: Ty<'tcx>,
+ formal_args: &[Ty<'tcx>],
+ ) -> Option<Vec<Ty<'tcx>>> {
+ let formal_ret = self.resolve_vars_with_obligations(formal_ret);
+ let ret_ty = expected_ret.only_has_type(self)?;
+
+ // HACK(oli-obk): This is a hack to keep RPIT and TAIT in sync wrt their behaviour.
+ // Without it, the inference
+ // variable will get instantiated with the opaque type. The inference variable often
+ // has various helpful obligations registered for it that help closures figure out their
+ // signature. If we infer the inference var to the opaque type, the closure won't be able
+ // to find those obligations anymore, and it can't necessarily find them from the opaque
+ // type itself. We could be more powerful with inference if we *combined* the obligations
+ // so that we got both the obligations from the opaque type and the ones from the inference
+ // variable. That will accept more code than we do right now, so we need to carefully consider
+ // the implications.
+ // Note: this check is pessimistic, as the inference type could be matched with something other
+ // than the opaque type, but then we need a new `TypeRelation` just for this specific case and
+ // can't re-use `sup` below.
+ // See src/test/ui/impl-trait/hidden-type-is-opaque.rs and
+ // src/test/ui/impl-trait/hidden-type-is-opaque-2.rs for examples that hit this path.
+ if formal_ret.has_infer_types() {
+ for ty in ret_ty.walk() {
+ if let ty::subst::GenericArgKind::Type(ty) = ty.unpack()
+ && let ty::Opaque(def_id, _) = *ty.kind()
+ && let Some(def_id) = def_id.as_local()
+ && self.opaque_type_origin(def_id, DUMMY_SP).is_some() {
+ return None;
+ }
+ }
+ }
+
+ let expect_args = self
+ .fudge_inference_if_ok(|| {
+ // Attempt to apply a subtyping relationship between the formal
+ // return type (likely containing type variables if the function
+ // is polymorphic) and the expected return type.
+ // No argument expectations are produced if unification fails.
+ let origin = self.misc(call_span);
+ let ures = self.at(&origin, self.param_env).sup(ret_ty, formal_ret);
+
+ // FIXME(#27336) can't use ? here, Try::from_error doesn't default
+ // to identity so the resulting type is not constrained.
+ match ures {
+ Ok(ok) => {
+ // Process any obligations locally as much as
+ // we can. We don't care if some things turn
+ // out unconstrained or ambiguous, as we're
+ // just trying to get hints here.
+ let errors = self.save_and_restore_in_snapshot_flag(|_| {
+ let mut fulfill = <dyn TraitEngine<'_>>::new(self.tcx);
+ for obligation in ok.obligations {
+ fulfill.register_predicate_obligation(self, obligation);
+ }
+ fulfill.select_where_possible(self)
+ });
+
+ if !errors.is_empty() {
+ return Err(());
+ }
+ }
+ Err(_) => return Err(()),
+ }
+
+ // Record all the argument types, with the substitutions
+ // produced from the above subtyping unification.
+ Ok(Some(formal_args.iter().map(|&ty| self.resolve_vars_if_possible(ty)).collect()))
+ })
+ .unwrap_or_default();
+ debug!(?formal_args, ?formal_ret, ?expect_args, ?expected_ret);
+ expect_args
+ }
+
+ pub(in super::super) fn resolve_lang_item_path(
+ &self,
+ lang_item: hir::LangItem,
+ span: Span,
+ hir_id: hir::HirId,
+ expr_hir_id: Option<hir::HirId>,
+ ) -> (Res, Ty<'tcx>) {
+ let def_id = self.tcx.require_lang_item(lang_item, Some(span));
+ let def_kind = self.tcx.def_kind(def_id);
+
+ let item_ty = if let DefKind::Variant = def_kind {
+ self.tcx.bound_type_of(self.tcx.parent(def_id))
+ } else {
+ self.tcx.bound_type_of(def_id)
+ };
+ let substs = self.fresh_substs_for_item(span, def_id);
+ let ty = item_ty.subst(self.tcx, substs);
+
+ self.write_resolution(hir_id, Ok((def_kind, def_id)));
+
+ let code = match lang_item {
+ hir::LangItem::IntoFutureIntoFuture => {
+ Some(ObligationCauseCode::AwaitableExpr(expr_hir_id))
+ }
+ hir::LangItem::IteratorNext | hir::LangItem::IntoIterIntoIter => {
+ Some(ObligationCauseCode::ForLoopIterator)
+ }
+ hir::LangItem::TryTraitFromOutput
+ | hir::LangItem::TryTraitFromResidual
+ | hir::LangItem::TryTraitBranch => Some(ObligationCauseCode::QuestionMark),
+ _ => None,
+ };
+ if let Some(code) = code {
+ self.add_required_obligations_with_code(span, def_id, substs, move |_, _| code.clone());
+ } else {
+ self.add_required_obligations_for_hir(span, def_id, substs, hir_id);
+ }
+
+ (Res::Def(def_kind, def_id), ty)
+ }
+
+ /// Resolves an associated value path into a base type and associated constant, or method
+ /// resolution. The newly resolved definition is written into `type_dependent_defs`.
+ pub fn resolve_ty_and_res_fully_qualified_call(
+ &self,
+ qpath: &'tcx QPath<'tcx>,
+ hir_id: hir::HirId,
+ span: Span,
+ ) -> (Res, Option<Ty<'tcx>>, &'tcx [hir::PathSegment<'tcx>]) {
+ debug!(
+ "resolve_ty_and_res_fully_qualified_call: qpath={:?} hir_id={:?} span={:?}",
+ qpath, hir_id, span
+ );
+ let (ty, qself, item_segment) = match *qpath {
+ QPath::Resolved(ref opt_qself, ref path) => {
+ return (
+ path.res,
+ opt_qself.as_ref().map(|qself| self.to_ty(qself)),
+ path.segments,
+ );
+ }
+ QPath::TypeRelative(ref qself, ref segment) => {
+ // Don't use `self.to_ty`, since this will register a WF obligation.
+ // If we're trying to call a non-existent method on a trait
+ // (e.g. `MyTrait::missing_method`), then resolution will
+ // give us a `QPath::TypeRelative` with a trait object as
+ // `qself`. In that case, we want to avoid registering a WF obligation
+ // for `dyn MyTrait`, since we don't actually need the trait
+ // to be object-safe.
+ // We manually call `register_wf_obligation` in the success path
+ // below.
+ (<dyn AstConv<'_>>::ast_ty_to_ty_in_path(self, qself), qself, segment)
+ }
+ QPath::LangItem(..) => {
+ bug!("`resolve_ty_and_res_fully_qualified_call` called on `LangItem`")
+ }
+ };
+ if let Some(&cached_result) = self.typeck_results.borrow().type_dependent_defs().get(hir_id)
+ {
+ self.register_wf_obligation(ty.into(), qself.span, traits::WellFormed(None));
+ // Return directly on cache hit. This is useful to avoid doubly reporting
+ // errors with default match binding modes. See #44614.
+ let def = cached_result.map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id));
+ return (def, Some(ty), slice::from_ref(&**item_segment));
+ }
+ let item_name = item_segment.ident;
+ let result = self
+ .resolve_fully_qualified_call(span, item_name, ty, qself.span, hir_id)
+ .or_else(|error| {
+ let result = match error {
+ method::MethodError::PrivateMatch(kind, def_id, _) => Ok((kind, def_id)),
+ _ => Err(ErrorGuaranteed::unchecked_claim_error_was_emitted()),
+ };
+
+ // If we have a path like `MyTrait::missing_method`, then don't register
+ // a WF obligation for `dyn MyTrait` when method lookup fails. Otherwise,
+ // register a WF obligation so that we can detect any additional
+ // errors in the self type.
+ if !(matches!(error, method::MethodError::NoMatch(_)) && ty.is_trait()) {
+ self.register_wf_obligation(ty.into(), qself.span, traits::WellFormed(None));
+ }
+ if item_name.name != kw::Empty {
+ if let Some(mut e) = self.report_method_error(
+ span,
+ ty,
+ item_name,
+ SelfSource::QPath(qself),
+ error,
+ None,
+ ) {
+ e.emit();
+ }
+ }
+ result
+ });
+
+ if result.is_ok() {
+ self.register_wf_obligation(ty.into(), qself.span, traits::WellFormed(None));
+ }
+
+ // Write back the new resolution.
+ self.write_resolution(hir_id, result);
+ (
+ result.map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)),
+ Some(ty),
+ slice::from_ref(&**item_segment),
+ )
+ }
+
+ /// Given a function `Node`, return its `FnDecl` if it exists, or `None` otherwise.
+ pub(in super::super) fn get_node_fn_decl(
+ &self,
+ node: Node<'tcx>,
+ ) -> Option<(&'tcx hir::FnDecl<'tcx>, Ident, bool)> {
+ match node {
+ Node::Item(&hir::Item { ident, kind: hir::ItemKind::Fn(ref sig, ..), .. }) => {
+ // This is less than ideal, it will not suggest a return type span on any
+ // method called `main`, regardless of whether it is actually the entry point,
+ // but it will still present it as the reason for the expected type.
+ Some((&sig.decl, ident, ident.name != sym::main))
+ }
+ Node::TraitItem(&hir::TraitItem {
+ ident,
+ kind: hir::TraitItemKind::Fn(ref sig, ..),
+ ..
+ }) => Some((&sig.decl, ident, true)),
+ Node::ImplItem(&hir::ImplItem {
+ ident,
+ kind: hir::ImplItemKind::Fn(ref sig, ..),
+ ..
+ }) => Some((&sig.decl, ident, false)),
+ _ => None,
+ }
+ }
+
+ /// Given a `HirId`, return the `FnDecl` of the method it is enclosed by and whether a
+ /// suggestion can be made, `None` otherwise.
+ pub fn get_fn_decl(&self, blk_id: hir::HirId) -> Option<(&'tcx hir::FnDecl<'tcx>, bool)> {
+ // Get enclosing Fn, if it is a function or a trait method, unless there's a `loop` or
+ // `while` before reaching it, as block tail returns are not available in them.
+ self.tcx.hir().get_return_block(blk_id).and_then(|blk_id| {
+ let parent = self.tcx.hir().get(blk_id);
+ self.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main))
+ })
+ }
+
+ pub(in super::super) fn note_internal_mutation_in_method(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) {
+ if found != self.tcx.types.unit {
+ return;
+ }
+ if let ExprKind::MethodCall(path_segment, rcvr, ..) = expr.kind {
+ if self
+ .typeck_results
+ .borrow()
+ .expr_ty_adjusted_opt(rcvr)
+ .map_or(true, |ty| expected.peel_refs() != ty.peel_refs())
+ {
+ return;
+ }
+ let mut sp = MultiSpan::from_span(path_segment.ident.span);
+ sp.push_span_label(
+ path_segment.ident.span,
+ format!(
+ "this call modifies {} in-place",
+ match rcvr.kind {
+ ExprKind::Path(QPath::Resolved(
+ None,
+ hir::Path { segments: [segment], .. },
+ )) => format!("`{}`", segment.ident),
+ _ => "its receiver".to_string(),
+ }
+ ),
+ );
+ sp.push_span_label(
+ rcvr.span,
+ "you probably want to use this value after calling the method...",
+ );
+ err.span_note(
+ sp,
+ &format!("method `{}` modifies its receiver in-place", path_segment.ident),
+ );
+ err.note(&format!("...instead of the `()` output of method `{}`", path_segment.ident));
+ }
+ }
+
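+ // For context (illustrative): `fn a() {}` and `fn b() {}` have distinct zero-sized
+ // `FnDef` types even though both coerce to the same `fn()` pointer type, so
+ // `let mut f = a; f = b;` is rejected unless `f` is given the function-pointer type
+ // `fn()` (e.g. via `a as fn()` or a type annotation). The notes below explain that.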
+ pub(in super::super) fn note_need_for_fn_pointer(
+ &self,
+ err: &mut Diagnostic,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) {
+ let (sig, did, substs) = match (&expected.kind(), &found.kind()) {
+ (ty::FnDef(did1, substs1), ty::FnDef(did2, substs2)) => {
+ let sig1 = self.tcx.bound_fn_sig(*did1).subst(self.tcx, substs1);
+ let sig2 = self.tcx.bound_fn_sig(*did2).subst(self.tcx, substs2);
+ if sig1 != sig2 {
+ return;
+ }
+ err.note(
+ "different `fn` items always have unique types, even if their signatures are \
+ the same",
+ );
+ (sig1, *did1, substs1)
+ }
+ (ty::FnDef(did, substs), ty::FnPtr(sig2)) => {
+ let sig1 = self.tcx.bound_fn_sig(*did).subst(self.tcx, substs);
+ if sig1 != *sig2 {
+ return;
+ }
+ (sig1, *did, substs)
+ }
+ _ => return,
+ };
+ err.help(&format!("change the expected type to be function pointer `{}`", sig));
+ err.help(&format!(
+ "if the expected type is due to type inference, cast the expected `fn` to a function \
+ pointer: `{} as {}`",
+ self.tcx.def_path_str_with_substs(did, substs),
+ sig
+ ));
+ }
+
+ // Instantiates the given path, which must refer to an item with the given
+ // number of type parameters and type.
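+ // ex (illustrative): `Vec::<u8>::new` or `Option::Some` in expression position;
+ // explicit generic arguments are checked against the item's generics and any
+ // missing ones are filled in with inference variables or defaults below.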
+ #[instrument(skip(self, span), level = "debug")]
+ pub fn instantiate_value_path(
+ &self,
+ segments: &[hir::PathSegment<'_>],
+ self_ty: Option<Ty<'tcx>>,
+ res: Res,
+ span: Span,
+ hir_id: hir::HirId,
+ ) -> (Ty<'tcx>, Res) {
+ let tcx = self.tcx;
+
+ let path_segs = match res {
+ Res::Local(_) | Res::SelfCtor(_) => vec![],
+ Res::Def(kind, def_id) => <dyn AstConv<'_>>::def_ids_for_value_path_segments(
+ self, segments, self_ty, kind, def_id,
+ ),
+ _ => bug!("instantiate_value_path on {:?}", res),
+ };
+
+ let mut user_self_ty = None;
+ let mut is_alias_variant_ctor = false;
+ match res {
+ Res::Def(DefKind::Ctor(CtorOf::Variant, _), _)
+ if let Some(self_ty) = self_ty =>
+ {
+ let adt_def = self_ty.ty_adt_def().unwrap();
+ user_self_ty = Some(UserSelfTy { impl_def_id: adt_def.did(), self_ty });
+ is_alias_variant_ctor = true;
+ }
+ Res::Def(DefKind::AssocFn | DefKind::AssocConst, def_id) => {
+ let assoc_item = tcx.associated_item(def_id);
+ let container = assoc_item.container;
+ let container_id = assoc_item.container_id(tcx);
+ debug!(?def_id, ?container, ?container_id);
+ match container {
+ ty::TraitContainer => {
+ callee::check_legal_trait_for_method_call(tcx, span, None, span, container_id)
+ }
+ ty::ImplContainer => {
+ if segments.len() == 1 {
+ // `<T>::assoc` will end up here, and so
+ // can `T::assoc`. If this came from an
+ // inherent impl, we need to record the
+ // `T` for posterity (see `UserSelfTy` for
+ // details).
+ let self_ty = self_ty.expect("UFCS sugared assoc missing Self");
+ user_self_ty = Some(UserSelfTy { impl_def_id: container_id, self_ty });
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+
+ // Now that we have categorized what space the parameters for each
+ // segment belong to, let's sort out the parameters that the user
+ // provided (if any) into their appropriate spaces. We'll also report
+ // errors if type parameters are provided in an inappropriate place.
+
+ let generic_segs: FxHashSet<_> = path_segs.iter().map(|PathSeg(_, index)| index).collect();
+ let generics_has_err = <dyn AstConv<'_>>::prohibit_generics(
+ self,
+ segments.iter().enumerate().filter_map(|(index, seg)| {
+ if !generic_segs.contains(&index) || is_alias_variant_ctor {
+ Some(seg)
+ } else {
+ None
+ }
+ }),
+ |_| {},
+ );
+
+ if let Res::Local(hid) = res {
+ let ty = self.local_ty(span, hid).decl_ty;
+ let ty = self.normalize_associated_types_in(span, ty);
+ self.write_ty(hir_id, ty);
+ return (ty, res);
+ }
+
+ if generics_has_err {
+ // Don't try to infer type parameters when prohibited generic arguments were given.
+ user_self_ty = None;
+ }
+
+ // Now we have to compare the types that the user *actually*
+ // provided against the types that were *expected*. If the user
+ // did not provide any types, then we want to substitute inference
+ // variables. If the user provided some types, we may still need
+ // to add defaults. If the user provided *too many* types, that's
+ // a problem.
+
+ let mut infer_args_for_err = FxHashSet::default();
+
+ let mut explicit_late_bound = ExplicitLateBound::No;
+ for &PathSeg(def_id, index) in &path_segs {
+ let seg = &segments[index];
+ let generics = tcx.generics_of(def_id);
+
+ // Argument-position `impl Trait` is treated as a normal generic
+ // parameter internally, but we don't allow users to specify the
+ // parameter's value explicitly, so we have to do some error-
+ // checking here.
+ let arg_count = <dyn AstConv<'_>>::check_generic_arg_count_for_call(
+ tcx,
+ span,
+ def_id,
+ &generics,
+ seg,
+ IsMethodCall::No,
+ );
+
+ if let ExplicitLateBound::Yes = arg_count.explicit_late_bound {
+ explicit_late_bound = ExplicitLateBound::Yes;
+ }
+
+ if let Err(GenericArgCountMismatch { reported: Some(_), .. }) = arg_count.correct {
+ infer_args_for_err.insert(index);
+ self.set_tainted_by_errors(); // See issue #53251.
+ }
+ }
+
+ let has_self = path_segs
+ .last()
+ .map(|PathSeg(def_id, _)| tcx.generics_of(*def_id).has_self)
+ .unwrap_or(false);
+
+ let (res, self_ctor_substs) = if let Res::SelfCtor(impl_def_id) = res {
+ let ty = self.normalize_ty(span, tcx.at(span).type_of(impl_def_id));
+ match *ty.kind() {
+ ty::Adt(adt_def, substs) if adt_def.has_ctor() => {
+ let variant = adt_def.non_enum_variant();
+ let ctor_def_id = variant.ctor_def_id.unwrap();
+ (
+ Res::Def(DefKind::Ctor(CtorOf::Struct, variant.ctor_kind), ctor_def_id),
+ Some(substs),
+ )
+ }
+ _ => {
+ let mut err = tcx.sess.struct_span_err(
+ span,
+ "the `Self` constructor can only be used with tuple or unit structs",
+ );
+ if let Some(adt_def) = ty.ty_adt_def() {
+ match adt_def.adt_kind() {
+ AdtKind::Enum => {
+ err.help("did you mean to use one of the enum's variants?");
+ }
+ AdtKind::Struct | AdtKind::Union => {
+ err.span_suggestion(
+ span,
+ "use curly brackets",
+ "Self { /* fields */ }",
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+ }
+ err.emit();
+
+ return (tcx.ty_error(), res);
+ }
+ }
+ } else {
+ (res, None)
+ };
+ let def_id = res.def_id();
+
+ // The things we are substituting into the type should not contain
+ // escaping late-bound regions, nor should the base type scheme.
+ let ty = tcx.type_of(def_id);
+
+ let arg_count = GenericArgCountResult {
+ explicit_late_bound,
+ correct: if infer_args_for_err.is_empty() {
+ Ok(())
+ } else {
+ Err(GenericArgCountMismatch::default())
+ },
+ };
+
+ struct CreateCtorSubstsContext<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ span: Span,
+ path_segs: &'a [PathSeg],
+ infer_args_for_err: &'a FxHashSet<usize>,
+ segments: &'a [hir::PathSegment<'a>],
+ }
+ impl<'tcx, 'a> CreateSubstsForGenericArgsCtxt<'a, 'tcx> for CreateCtorSubstsContext<'a, 'tcx> {
+ fn args_for_def_id(
+ &mut self,
+ def_id: DefId,
+ ) -> (Option<&'a hir::GenericArgs<'a>>, bool) {
+ if let Some(&PathSeg(_, index)) =
+ self.path_segs.iter().find(|&PathSeg(did, _)| *did == def_id)
+ {
+ // If we've encountered an `impl Trait`-related error, we're just
+ // going to infer the arguments for better error messages.
+ if !self.infer_args_for_err.contains(&index) {
+ // Check whether the user has provided generic arguments.
+ if let Some(ref data) = self.segments[index].args {
+ return (Some(data), self.segments[index].infer_args);
+ }
+ }
+ return (None, self.segments[index].infer_args);
+ }
+
+ (None, true)
+ }
+
+ fn provided_kind(
+ &mut self,
+ param: &ty::GenericParamDef,
+ arg: &GenericArg<'_>,
+ ) -> ty::GenericArg<'tcx> {
+ match (&param.kind, arg) {
+ (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
+ <dyn AstConv<'_>>::ast_region_to_region(self.fcx, lt, Some(param)).into()
+ }
+ (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
+ self.fcx.to_ty(ty).into()
+ }
+ (GenericParamDefKind::Const { .. }, GenericArg::Const(ct)) => {
+ self.fcx.const_arg_to_const(&ct.value, param.def_id).into()
+ }
+ (GenericParamDefKind::Type { .. }, GenericArg::Infer(inf)) => {
+ self.fcx.ty_infer(Some(param), inf.span).into()
+ }
+ (GenericParamDefKind::Const { .. }, GenericArg::Infer(inf)) => {
+ let tcx = self.fcx.tcx();
+ self.fcx.ct_infer(tcx.type_of(param.def_id), Some(param), inf.span).into()
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ fn inferred_kind(
+ &mut self,
+ substs: Option<&[ty::GenericArg<'tcx>]>,
+ param: &ty::GenericParamDef,
+ infer_args: bool,
+ ) -> ty::GenericArg<'tcx> {
+ let tcx = self.fcx.tcx();
+ match param.kind {
+ GenericParamDefKind::Lifetime => {
+ self.fcx.re_infer(Some(param), self.span).unwrap().into()
+ }
+ GenericParamDefKind::Type { has_default, .. } => {
+ if !infer_args && has_default {
+ // If we have a default, then it doesn't matter that we're not
+ // inferring the type arguments: we provide the default where any
+ // is missing.
+ let default = tcx.bound_type_of(param.def_id);
+ self.fcx
+ .normalize_ty(self.span, default.subst(tcx, substs.unwrap()))
+ .into()
+ } else {
+ // If no type arguments were provided, we have to infer them.
+ // This case also occurs as a result of some malformed input, e.g.
+ // a lifetime argument being given instead of a type parameter.
+ // Using inference instead of `Error` gives better error messages.
+ self.fcx.var_for_def(self.span, param)
+ }
+ }
+ GenericParamDefKind::Const { has_default } => {
+ if !infer_args && has_default {
+ tcx.bound_const_param_default(param.def_id)
+ .subst(tcx, substs.unwrap())
+ .into()
+ } else {
+ self.fcx.var_for_def(self.span, param)
+ }
+ }
+ }
+ }
+ }
+
+ let substs = self_ctor_substs.unwrap_or_else(|| {
+ <dyn AstConv<'_>>::create_substs_for_generic_args(
+ tcx,
+ def_id,
+ &[],
+ has_self,
+ self_ty,
+ &arg_count,
+ &mut CreateCtorSubstsContext {
+ fcx: self,
+ span,
+ path_segs: &path_segs,
+ infer_args_for_err: &infer_args_for_err,
+ segments,
+ },
+ )
+ });
+ assert!(!substs.has_escaping_bound_vars());
+ assert!(!ty.has_escaping_bound_vars());
+
+ // First, store the "user substs" for later.
+ self.write_user_type_annotation_from_substs(hir_id, def_id, substs, user_self_ty);
+
+ self.add_required_obligations_for_hir(span, def_id, &substs, hir_id);
+
+ // Substitute the values for the type parameters into the type of
+ // the referenced item.
+ let ty_substituted = self.instantiate_type_scheme(span, &substs, ty);
+
+ if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty {
+ // In the case of `Foo<T>::method` and `<Foo<T>>::method`, if `method`
+ // is inherent, there is no `Self` parameter; instead, the impl needs
+ // type parameters, which we can infer by unifying the provided `Self`
+ // with the substituted impl type.
+ // This also occurs for an enum variant on a type alias.
+ let ty = tcx.type_of(impl_def_id);
+
+ let impl_ty = self.instantiate_type_scheme(span, &substs, ty);
+ match self.at(&self.misc(span), self.param_env).eq(impl_ty, self_ty) {
+ Ok(ok) => self.register_infer_ok_obligations(ok),
+ Err(_) => {
+ self.tcx.sess.delay_span_bug(
+ span,
+ &format!(
+ "instantiate_value_path: (UFCS) {:?} was a subtype of {:?} but now is not?",
+ self_ty,
+ impl_ty,
+ ),
+ );
+ }
+ }
+ }
+
+ debug!("instantiate_value_path: type of {:?} is {:?}", hir_id, ty_substituted);
+ self.write_substs(hir_id, substs);
+
+ (ty_substituted, res)
+ }
+
+ /// Add all the obligations that are required, substituted and normalized appropriately.
+ pub(crate) fn add_required_obligations_for_hir(
+ &self,
+ span: Span,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ hir_id: hir::HirId,
+ ) {
+ self.add_required_obligations_with_code(span, def_id, substs, |idx, span| {
+ if span.is_dummy() {
+ ObligationCauseCode::ExprItemObligation(def_id, hir_id, idx)
+ } else {
+ ObligationCauseCode::ExprBindingObligation(def_id, span, hir_id, idx)
+ }
+ })
+ }
+
+ #[instrument(level = "debug", skip(self, code, span, substs))]
+ fn add_required_obligations_with_code(
+ &self,
+ span: Span,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ code: impl Fn(usize, Span) -> ObligationCauseCode<'tcx>,
+ ) {
+ let param_env = self.param_env;
+
+ let remap = match self.tcx.def_kind(def_id) {
+ // Associated consts have `Self: ~const Trait` bounds that should be satisfiable when
+ // `Self: Trait` is satisfied because it does not matter whether the impl is `const`.
+ // Therefore we have to remap the param env here to be non-const.
+ hir::def::DefKind::AssocConst => true,
+ hir::def::DefKind::AssocFn
+ if self.tcx.def_kind(self.tcx.parent(def_id)) == hir::def::DefKind::Trait =>
+ {
+ // N.B.: All callsites to this function involve checking a path expression.
+ //
+ // When instantiating a trait method as a function item, it does not actually matter whether
+ // the trait is `const` or not, or whether `where T: ~const Tr` needs to be satisfied as
+ // `const`. If we were to introduce instantiating trait methods as `const fn`s, we would
+ // check that after this, either via a bound `where F: ~const FnOnce` or when coercing to a
+ // `const fn` pointer.
+ //
+ // FIXME(fee1-dead) FIXME(const_trait_impl): update this doc when trait methods can satisfy
+ // `~const FnOnce` or can be coerced to `const fn` pointer.
+ true
+ }
+ _ => false,
+ };
+ let (bounds, _) = self.instantiate_bounds(span, def_id, &substs);
+
+ for mut obligation in traits::predicates_for_generics(
+ |idx, predicate_span| {
+ traits::ObligationCause::new(span, self.body_id, code(idx, predicate_span))
+ },
+ param_env,
+ bounds,
+ ) {
+ if remap {
+ obligation = obligation.without_const(self.tcx);
+ }
+ self.register_predicate(obligation);
+ }
+ }
+
+ /// Resolves `typ` by a single level if `typ` is a type variable.
+ /// If no resolution is possible, then an error is reported.
+ /// Numeric inference variables may be left unresolved.
+ pub fn structurally_resolved_type(&self, sp: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let ty = self.resolve_vars_with_obligations(ty);
+ if !ty.is_ty_var() {
+ ty
+ } else {
+ if !self.is_tainted_by_errors() {
+ self.err_ctxt()
+ .emit_inference_failure_err((**self).body_id, sp, ty.into(), E0282, true)
+ .emit();
+ }
+ let err = self.tcx.ty_error();
+ self.demand_suptype(sp, err, ty);
+ err
+ }
+ }
+
+ pub(in super::super) fn with_breakable_ctxt<F: FnOnce() -> R, R>(
+ &self,
+ id: hir::HirId,
+ ctxt: BreakableCtxt<'tcx>,
+ f: F,
+ ) -> (BreakableCtxt<'tcx>, R) {
+ let index;
+ {
+ let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+ index = enclosing_breakables.stack.len();
+ enclosing_breakables.by_id.insert(id, index);
+ enclosing_breakables.stack.push(ctxt);
+ }
+ let result = f();
+ let ctxt = {
+ let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+ debug_assert!(enclosing_breakables.stack.len() == index + 1);
+ enclosing_breakables.by_id.remove(&id).expect("missing breakable context");
+ enclosing_breakables.stack.pop().expect("missing breakable context")
+ };
+ (ctxt, result)
+ }
+
+ /// Instantiate a QueryResponse in a probe context, without a
+ /// good ObligationCause.
+ pub(in super::super) fn probe_instantiate_query_response(
+ &self,
+ span: Span,
+ original_values: &OriginalQueryValues<'tcx>,
+ query_result: &Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>,
+ ) -> InferResult<'tcx, Ty<'tcx>> {
+ self.instantiate_query_response_and_region_obligations(
+ &traits::ObligationCause::misc(span, self.body_id),
+ self.param_env,
+ original_values,
+ query_result,
+ )
+ }
+
+ /// Returns `true` if an expression is contained inside the LHS of an assignment expression.
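+ /// (e.g. both `x` and `x.f` in `x.f = 1`, but not the `1` on the right-hand side).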
+ pub(in super::super) fn expr_in_place(&self, mut expr_id: hir::HirId) -> bool {
+ let mut contained_in_place = false;
+
+ while let hir::Node::Expr(parent_expr) =
+ self.tcx.hir().get(self.tcx.hir().get_parent_node(expr_id))
+ {
+ match &parent_expr.kind {
+ hir::ExprKind::Assign(lhs, ..) | hir::ExprKind::AssignOp(_, lhs, ..) => {
+ if lhs.hir_id == expr_id {
+ contained_in_place = true;
+ break;
+ }
+ }
+ _ => (),
+ }
+ expr_id = parent_expr.hir_id;
+ }
+
+ contained_in_place
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/arg_matrix.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/arg_matrix.rs
new file mode 100644
index 000000000..fc83994ca
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/arg_matrix.rs
@@ -0,0 +1,383 @@
+use std::cmp;
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::error::TypeError;
+
+rustc_index::newtype_index! {
+ pub(crate) struct ExpectedIdx {
+ DEBUG_FORMAT = "ExpectedIdx({})",
+ }
+}
+
+rustc_index::newtype_index! {
+ pub(crate) struct ProvidedIdx {
+ DEBUG_FORMAT = "ProvidedIdx({})",
+ }
+}
+
+impl ExpectedIdx {
+ pub fn to_provided_idx(self) -> ProvidedIdx {
+ ProvidedIdx::from_usize(self.as_usize())
+ }
+}
+
+// An issue that might be found in the compatibility matrix
+#[derive(Debug)]
+enum Issue {
+ /// The given argument has an invalid type for the input
+ Invalid(usize),
+ /// There is a missing input
+ Missing(usize),
+ /// There's a superfluous argument
+ Extra(usize),
+ /// Two arguments should be swapped
+ Swap(usize, usize),
+ /// Several arguments should be reordered
+ Permutation(Vec<Option<usize>>),
+}
+
+#[derive(Clone, Debug)]
+pub(crate) enum Compatibility<'tcx> {
+ Compatible,
+ Incompatible(Option<TypeError<'tcx>>),
+}
+
+/// Similar to `Issue`, but contains some extra information
+#[derive(Debug)]
+pub(crate) enum Error<'tcx> {
+ /// The provided argument has an invalid type for the expected input
+ Invalid(ProvidedIdx, ExpectedIdx, Compatibility<'tcx>),
+ /// There is a missing input
+ Missing(ExpectedIdx),
+ /// There's a superfluous argument
+ Extra(ProvidedIdx),
+ /// Two arguments should be swapped
+ Swap(ProvidedIdx, ProvidedIdx, ExpectedIdx, ExpectedIdx),
+ /// Several arguments should be reordered
+ Permutation(Vec<(ExpectedIdx, ProvidedIdx)>),
+}
+
+pub(crate) struct ArgMatrix<'tcx> {
+ /// Maps the indices in the `compatibility_matrix` rows to the indices of
+ /// the *user provided* inputs
+ provided_indices: Vec<ProvidedIdx>,
+ /// Maps the indices in the `compatibility_matrix` columns to the indices
+ /// of the *expected* args
+ expected_indices: Vec<ExpectedIdx>,
+ /// The first dimension (rows) are the remaining user provided inputs to
+ /// match and the second dimension (cols) are the remaining expected args
+ /// to match
+ compatibility_matrix: Vec<Vec<Compatibility<'tcx>>>,
+}
+
+impl<'tcx> ArgMatrix<'tcx> {
+ pub(crate) fn new<F: FnMut(ProvidedIdx, ExpectedIdx) -> Compatibility<'tcx>>(
+ provided_count: usize,
+ expected_input_count: usize,
+ mut is_compatible: F,
+ ) -> Self {
+ let compatibility_matrix = (0..provided_count)
+ .map(|i| {
+ (0..expected_input_count)
+ .map(|j| is_compatible(ProvidedIdx::from_usize(i), ExpectedIdx::from_usize(j)))
+ .collect()
+ })
+ .collect();
+ ArgMatrix {
+ provided_indices: (0..provided_count).map(ProvidedIdx::from_usize).collect(),
+ expected_indices: (0..expected_input_count).map(ExpectedIdx::from_usize).collect(),
+ compatibility_matrix,
+ }
+ }
+
+ /// Remove a given input from consideration
+ fn eliminate_provided(&mut self, idx: usize) {
+ self.provided_indices.remove(idx);
+ self.compatibility_matrix.remove(idx);
+ }
+
+ /// Remove a given argument from consideration
+ fn eliminate_expected(&mut self, idx: usize) {
+ self.expected_indices.remove(idx);
+ for row in &mut self.compatibility_matrix {
+ row.remove(idx);
+ }
+ }
+
+ /// "satisfy" an input with a given arg, removing both from consideration
+ fn satisfy_input(&mut self, provided_idx: usize, expected_idx: usize) {
+ self.eliminate_provided(provided_idx);
+ self.eliminate_expected(expected_idx);
+ }
+
+ // Returns a `Vec` of (user input, expected arg) of matched arguments. These
+ // are inputs on the remaining diagonal that match.
+ fn eliminate_satisfied(&mut self) -> Vec<(ProvidedIdx, ExpectedIdx)> {
+ let num_args = cmp::min(self.provided_indices.len(), self.expected_indices.len());
+ let mut eliminated = vec![];
+ for i in (0..num_args).rev() {
+ if matches!(self.compatibility_matrix[i][i], Compatibility::Compatible) {
+ eliminated.push((self.provided_indices[i], self.expected_indices[i]));
+ self.satisfy_input(i, i);
+ }
+ }
+ eliminated
+ }
+
+ // Find some issue in the compatibility matrix
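+ // ex (illustrative): for `fn f(a: u32, b: String)` called as `f(String::new(), 1)`,
+ // the diagonal cells are incompatible but both off-diagonal cells are compatible,
+ // which is reported as `Issue::Swap(0, 1)`.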
+ fn find_issue(&self) -> Option<Issue> {
+ let mat = &self.compatibility_matrix;
+ let ai = &self.expected_indices;
+ let ii = &self.provided_indices;
+
+ // Issue 100478: when we end the iteration below,
+ // `next_unmatched_idx` will point to the index of the first unmatched argument or input.
+ let mut next_unmatched_idx = 0;
+ for i in 0..cmp::max(ai.len(), ii.len()) {
+ // If we eliminate the last row, any left-over arguments are considered missing
+ if i >= mat.len() {
+ return Some(Issue::Missing(next_unmatched_idx));
+ }
+ // If we eliminate the last column, any left-over inputs are extra
+ if mat[i].len() == 0 {
+ return Some(Issue::Extra(next_unmatched_idx));
+ }
+
+ // Make sure we don't pass the bounds of our matrix
+ let is_arg = i < ai.len();
+ let is_input = i < ii.len();
+ if is_arg && is_input && matches!(mat[i][i], Compatibility::Compatible) {
+ // This is a satisfied input, so move along
+ next_unmatched_idx += 1;
+ continue;
+ }
+
+ let mut useless = true;
+ let mut unsatisfiable = true;
+ if is_arg {
+ for j in 0..ii.len() {
+ // If we find at least one input this argument could satisfy
+ // this argument isn't unsatisfiable
+ if matches!(mat[j][i], Compatibility::Compatible) {
+ unsatisfiable = false;
+ break;
+ }
+ }
+ }
+ if is_input {
+ for j in 0..ai.len() {
+ // If we find at least one argument that could satisfy this input
+ // this input isn't useless
+ if matches!(mat[i][j], Compatibility::Compatible) {
+ useless = false;
+ break;
+ }
+ }
+ }
+
+ match (is_input, is_arg, useless, unsatisfiable) {
+ // If an argument is unsatisfied, and the input in its position is useless
+ // then the most likely explanation is that we just got the types wrong
+ (true, true, true, true) => return Some(Issue::Invalid(i)),
+ // Otherwise, if an input is useless, then indicate that this is an extra argument
+ (true, _, true, _) => return Some(Issue::Extra(i)),
+ // Otherwise, if an argument is unsatisfiable, indicate that it's missing
+ (_, true, _, true) => return Some(Issue::Missing(i)),
+ (true, true, _, _) => {
+ // The argument isn't useless, and the input isn't unsatisfied,
+ // so look for a parameter we might swap it with
+ // We look for swaps explicitly, instead of just falling back on permutations
+ // so that cases like (A,B,C,D) given (B,A,D,C) show up as two swaps,
+ // instead of a large permutation of 4 elements.
+ for j in 0..cmp::min(ai.len(), ii.len()) {
+ if i == j || matches!(mat[j][j], Compatibility::Compatible) {
+ continue;
+ }
+ if matches!(mat[i][j], Compatibility::Compatible)
+ && matches!(mat[j][i], Compatibility::Compatible)
+ {
+ return Some(Issue::Swap(i, j));
+ }
+ }
+ }
+ _ => {
+ continue;
+ }
+ }
+ }
+
+ // We didn't find any of the individual issues above, but
+ // there might be a larger permutation of parameters, so we now check for that
+ // by checking for cycles
+ // We use a double option at position i in this vec to represent:
+ // - None: We haven't computed anything about this argument yet
+ // - Some(None): This argument definitely doesn't participate in a cycle
+ // - Some(Some(x)): the i-th argument could permute to the x-th position
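+ // ex (illustrative): `fn f(a: u32, b: String, c: bool)` called as
+ // `f(true, 1, String::new())` forms a 3-cycle (each provided argument fits exactly
+ // one other slot), which is reported as a single `Issue::Permutation` rather than
+ // three separate errors.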
+ let mut permutation: Vec<Option<Option<usize>>> = vec![None; mat.len()];
+ let mut permutation_found = false;
+ for i in 0..mat.len() {
+ if permutation[i].is_some() {
+ // We've already decided whether this argument is or is not in a loop
+ continue;
+ }
+
+ let mut stack = vec![];
+ let mut j = i;
+ let mut last = i;
+ let mut is_cycle = true;
+ loop {
+ stack.push(j);
+ // Look for params this one could slot into
+ let compat: Vec<_> =
+ mat[j]
+ .iter()
+ .enumerate()
+ .filter_map(|(i, c)| {
+ if matches!(c, Compatibility::Compatible) { Some(i) } else { None }
+ })
+ .collect();
+ if compat.len() < 1 {
+ // try to find a cycle even when this could go into multiple slots, see #101097
+ is_cycle = false;
+ break;
+ }
+ j = compat[0];
+ if stack.contains(&j) {
+ last = j;
+ break;
+ }
+ }
+ if stack.len() <= 2 {
+ // If we encounter a cycle of 1 or 2 elements, we'll let the
+ // "satisfy" and "swap" code above handle those
+ is_cycle = false;
+ }
+ // We've built up some chain, some of which might be a cycle
+ // ex: [1,2,3,4]; last = 2; j = 2;
+ // So, we want to mark 4, 3, and 2 as part of a permutation
+ permutation_found = is_cycle;
+ while let Some(x) = stack.pop() {
+ if is_cycle {
+ permutation[x] = Some(Some(j));
+ j = x;
+ if j == last {
+ // From here on out, we're a tail leading into a cycle,
+ // not the cycle itself
+ is_cycle = false;
+ }
+ } else {
+ // Some(None) ensures we save time by skipping this argument again
+ permutation[x] = Some(None);
+ }
+ }
+ }
+
+ if permutation_found {
+ // Map unwrap to remove the first layer of Some
+ let final_permutation: Vec<Option<usize>> =
+ permutation.into_iter().map(|x| x.unwrap()).collect();
+ return Some(Issue::Permutation(final_permutation));
+ }
+ return None;
+ }
+
+ // Obviously, detecting exact user intention is impossible, so the goal here is to
+ // come up with as plausible a story as we can to be helpful.
+ //
+ // We'll iteratively remove "satisfied" input/argument pairs,
+ // then check for the cases above, until we've eliminated the entire grid
+ //
+ // We'll want to know which arguments and inputs these rows and columns correspond to
+ // even after we delete them.
+ pub(crate) fn find_errors(
+ mut self,
+ ) -> (Vec<Error<'tcx>>, IndexVec<ExpectedIdx, Option<ProvidedIdx>>) {
+ let provided_arg_count = self.provided_indices.len();
+
+ let mut errors: Vec<Error<'tcx>> = vec![];
+ // For each expected argument, the matched *actual* input
+ let mut matched_inputs: IndexVec<ExpectedIdx, Option<ProvidedIdx>> =
+ IndexVec::from_elem_n(None, self.expected_indices.len());
+
+ // Before we start looking for issues, eliminate any arguments that are already satisfied,
+ // so that an argument which is already spoken for by the input it's in doesn't
+ // spill over into another similarly typed input
+ // ex:
+ // fn some_func(_a: i32, _b: i32) {}
+ // some_func(1, "");
+ // Without this elimination, the first argument causes the second argument
+ // to show up as both a missing input and extra argument, rather than
+ // just an invalid type.
+ for (provided, expected) in self.eliminate_satisfied() {
+ matched_inputs[expected] = Some(provided);
+ }
+
+ while !self.provided_indices.is_empty() || !self.expected_indices.is_empty() {
+ let res = self.find_issue();
+ match res {
+ Some(Issue::Invalid(idx)) => {
+ let compatibility = self.compatibility_matrix[idx][idx].clone();
+ let input_idx = self.provided_indices[idx];
+ let arg_idx = self.expected_indices[idx];
+ self.satisfy_input(idx, idx);
+ errors.push(Error::Invalid(input_idx, arg_idx, compatibility));
+ }
+ Some(Issue::Extra(idx)) => {
+ let input_idx = self.provided_indices[idx];
+ self.eliminate_provided(idx);
+ errors.push(Error::Extra(input_idx));
+ }
+ Some(Issue::Missing(idx)) => {
+ let arg_idx = self.expected_indices[idx];
+ self.eliminate_expected(idx);
+ errors.push(Error::Missing(arg_idx));
+ }
+ Some(Issue::Swap(idx, other)) => {
+ let input_idx = self.provided_indices[idx];
+ let other_input_idx = self.provided_indices[other];
+ let arg_idx = self.expected_indices[idx];
+ let other_arg_idx = self.expected_indices[other];
+ let (min, max) = (cmp::min(idx, other), cmp::max(idx, other));
+ self.satisfy_input(min, max);
+ // Subtract 1 because we already removed the "min" row
+ self.satisfy_input(max - 1, min);
+ errors.push(Error::Swap(input_idx, other_input_idx, arg_idx, other_arg_idx));
+ matched_inputs[other_arg_idx] = Some(input_idx);
+ matched_inputs[arg_idx] = Some(other_input_idx);
+ }
+ Some(Issue::Permutation(args)) => {
+ let mut idxs: Vec<usize> = args.iter().filter_map(|&a| a).collect();
+
+ let mut real_idxs: IndexVec<ProvidedIdx, Option<(ExpectedIdx, ProvidedIdx)>> =
+ IndexVec::from_elem_n(None, provided_arg_count);
+ for (src, dst) in
+ args.iter().enumerate().filter_map(|(src, dst)| dst.map(|dst| (src, dst)))
+ {
+ let src_input_idx = self.provided_indices[src];
+ let dst_input_idx = self.provided_indices[dst];
+ let dest_arg_idx = self.expected_indices[dst];
+ real_idxs[src_input_idx] = Some((dest_arg_idx, dst_input_idx));
+ matched_inputs[dest_arg_idx] = Some(src_input_idx);
+ }
+ idxs.sort();
+ idxs.reverse();
+ for i in idxs {
+ self.satisfy_input(i, i);
+ }
+ errors.push(Error::Permutation(real_idxs.into_iter().flatten().collect()));
+ }
+ None => {
+ // We didn't find any issues, so we need to push the algorithm forward
+ // First, eliminate any arguments that currently satisfy their inputs
+ let eliminated = self.eliminate_satisfied();
+ assert!(!eliminated.is_empty(), "didn't eliminate any index in this round");
+ for (inp, arg) in eliminated {
+ matched_inputs[arg] = Some(inp);
+ }
+ }
+ };
+ }
+
+ return (errors, matched_inputs);
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
new file mode 100644
index 000000000..8e0fcb56c
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
@@ -0,0 +1,2236 @@
+use crate::coercion::CoerceMany;
+use crate::fn_ctxt::arg_matrix::{ArgMatrix, Compatibility, Error, ExpectedIdx, ProvidedIdx};
+use crate::gather_locals::Declaration;
+use crate::method::MethodCallee;
+use crate::Expectation::*;
+use crate::TupleArgumentsFlag::*;
+use crate::{
+ struct_span_err, BreakableCtxt, Diverges, Expectation, FnCtxt, LocalTy, Needs,
+ TupleArgumentsFlag,
+};
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{pluralize, Applicability, Diagnostic, DiagnosticId, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::{ExprKind, Node, QPath};
+use rustc_hir_analysis::astconv::AstConv;
+use rustc_hir_analysis::check::intrinsicck::InlineAsmCtxt;
+use rustc_hir_analysis::check::potentially_plural_count;
+use rustc_hir_analysis::structured_errors::StructuredDiagnostic;
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::error_reporting::{FailureCode, ObligationCauseExt};
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::InferOk;
+use rustc_infer::infer::TypeTrace;
+use rustc_middle::ty::adjustment::AllowTwoPhase;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, DefIdTree, IsSuggestable, Ty, TypeSuperVisitable, TypeVisitor};
+use rustc_session::Session;
+use rustc_span::symbol::Ident;
+use rustc_span::{self, sym, Span};
+use rustc_trait_selection::traits::{self, ObligationCauseCode, SelectionContext};
+
+use std::iter;
+use std::mem;
+use std::ops::ControlFlow;
+use std::slice;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub(in super::super) fn check_casts(&mut self) {
+ // don't hold the borrow to deferred_cast_checks while checking to avoid borrow checker errors
+ // when writing to `self.param_env`.
+ let mut deferred_cast_checks = mem::take(&mut *self.deferred_cast_checks.borrow_mut());
+
+ debug!("FnCtxt::check_casts: {} deferred checks", deferred_cast_checks.len());
+ for cast in deferred_cast_checks.drain(..) {
+ let prev_env = self.param_env;
+ self.param_env = self.param_env.with_constness(cast.constness);
+
+ cast.check(self);
+
+ self.param_env = prev_env;
+ }
+
+ *self.deferred_cast_checks.borrow_mut() = deferred_cast_checks;
+ }
+
+ pub(in super::super) fn check_transmutes(&self) {
+ let mut deferred_transmute_checks = self.deferred_transmute_checks.borrow_mut();
+ debug!("FnCtxt::check_transmutes: {} deferred checks", deferred_transmute_checks.len());
+ for (from, to, hir_id) in deferred_transmute_checks.drain(..) {
+ self.check_transmute(from, to, hir_id);
+ }
+ }
+
+ pub(in super::super) fn check_asms(&self) {
+ let mut deferred_asm_checks = self.deferred_asm_checks.borrow_mut();
+ debug!("FnCtxt::check_asm: {} deferred checks", deferred_asm_checks.len());
+ for (asm, hir_id) in deferred_asm_checks.drain(..) {
+ let enclosing_id = self.tcx.hir().enclosing_body_owner(hir_id);
+ let get_operand_ty = |expr| {
+ let ty = self.typeck_results.borrow().expr_ty_adjusted(expr);
+ let ty = self.resolve_vars_if_possible(ty);
+ if ty.has_non_region_infer() {
+ assert!(self.is_tainted_by_errors());
+ self.tcx.ty_error()
+ } else {
+ self.tcx.erase_regions(ty)
+ }
+ };
+ InlineAsmCtxt::new_in_fn(self.tcx, self.param_env, get_operand_ty)
+ .check_asm(asm, self.tcx.hir().local_def_id_to_hir_id(enclosing_id));
+ }
+ }
+
+ pub(in super::super) fn check_method_argument_types(
+ &self,
+ sp: Span,
+ expr: &'tcx hir::Expr<'tcx>,
+ method: Result<MethodCallee<'tcx>, ()>,
+ args_no_rcvr: &'tcx [hir::Expr<'tcx>],
+ tuple_arguments: TupleArgumentsFlag,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let has_error = match method {
+ Ok(method) => method.substs.references_error() || method.sig.references_error(),
+ Err(_) => true,
+ };
+ if has_error {
+ let err_inputs = self.err_args(args_no_rcvr.len());
+
+ let err_inputs = match tuple_arguments {
+ DontTupleArguments => err_inputs,
+ TupleArguments => vec![self.tcx.intern_tup(&err_inputs)],
+ };
+
+ self.check_argument_types(
+ sp,
+ expr,
+ &err_inputs,
+ None,
+ args_no_rcvr,
+ false,
+ tuple_arguments,
+ method.ok().map(|method| method.def_id),
+ );
+ return self.tcx.ty_error();
+ }
+
+ let method = method.unwrap();
+ // HACK(eddyb) ignore self in the definition (see above).
+ let expected_input_tys = self.expected_inputs_for_expected_output(
+ sp,
+ expected,
+ method.sig.output(),
+ &method.sig.inputs()[1..],
+ );
+ self.check_argument_types(
+ sp,
+ expr,
+ &method.sig.inputs()[1..],
+ expected_input_tys,
+ args_no_rcvr,
+ method.sig.c_variadic,
+ tuple_arguments,
+ Some(method.def_id),
+ );
+ method.sig.output()
+ }
+
+ /// Generic function that factors out common logic from function calls,
+ /// method calls and overloaded operators.
+ pub(in super::super) fn check_argument_types(
+ &self,
+ // Span enclosing the call site
+ call_span: Span,
+ // Expression of the call site
+ call_expr: &'tcx hir::Expr<'tcx>,
+ // Types (as defined in the *signature* of the target function)
+ formal_input_tys: &[Ty<'tcx>],
+ // More specific expected types, after unifying with caller output types
+ expected_input_tys: Option<Vec<Ty<'tcx>>>,
+ // The expressions for each provided argument
+ provided_args: &'tcx [hir::Expr<'tcx>],
+ // Whether the function is variadic, for example when imported from C
+ c_variadic: bool,
+ // Whether the arguments have been bundled in a tuple (ex: closures)
+ tuple_arguments: TupleArgumentsFlag,
+ // The DefId for the function being called, for better error messages
+ fn_def_id: Option<DefId>,
+ ) {
+ let tcx = self.tcx;
+
+ // Conceptually, we've got some number of expected inputs, and some number of provided arguments
+ // and we can form a grid of whether each argument could satisfy a given input:
+ // in1 | in2 | in3 | ...
+ // arg1 ? | | |
+ // arg2 | ? | |
+ // arg3 | | ? |
+ // ...
+ // Initially, we just check the diagonal, because in the case of correct code
+ // these are the only checks that matter
+ // However, in the unhappy path, we'll fill in this whole grid to attempt to provide
+ // better error messages about invalid method calls.
+
+ // All the input types from the fn signature must outlive the call
+ // so as to validate implied bounds.
+ for (&fn_input_ty, arg_expr) in iter::zip(formal_input_tys, provided_args) {
+ self.register_wf_obligation(fn_input_ty.into(), arg_expr.span, traits::MiscObligation);
+ }
+
+ let mut err_code = "E0061";
+
+ // If the arguments should be wrapped in a tuple (ex: closures), unwrap them here
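+ // (Overloaded calls such as `closure(a, b)` go through `Fn::call(&closure, (a, b))`,
+ // so the signature carries a single tuple parameter that is unpacked into
+ // per-argument expectations here.)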
+ let (formal_input_tys, expected_input_tys) = if tuple_arguments == TupleArguments {
+ let tuple_type = self.structurally_resolved_type(call_span, formal_input_tys[0]);
+ match tuple_type.kind() {
+ // We expected a tuple and got a tuple
+ ty::Tuple(arg_types) => {
+ // Argument length differs
+ if arg_types.len() != provided_args.len() {
+ err_code = "E0057";
+ }
+ let expected_input_tys = match expected_input_tys {
+ Some(expected_input_tys) => match expected_input_tys.get(0) {
+ Some(ty) => match ty.kind() {
+ ty::Tuple(tys) => Some(tys.iter().collect()),
+ _ => None,
+ },
+ None => None,
+ },
+ None => None,
+ };
+ (arg_types.iter().collect(), expected_input_tys)
+ }
+ _ => {
+ // Otherwise, there's a mismatch, so clear out what we're expecting, and set
+ // our input types to err_args so we don't blow up the error messages
+ struct_span_err!(
+ tcx.sess,
+ call_span,
+ E0059,
+ "cannot use call notation; the first type parameter \
+ for the function trait is neither a tuple nor unit"
+ )
+ .emit();
+ (self.err_args(provided_args.len()), None)
+ }
+ }
+ } else {
+ (formal_input_tys.to_vec(), expected_input_tys)
+ };
+
+ // If there are no external expectations at the call site, just use the types from the function defn
+ let expected_input_tys = if let Some(expected_input_tys) = expected_input_tys {
+ assert_eq!(expected_input_tys.len(), formal_input_tys.len());
+ expected_input_tys
+ } else {
+ formal_input_tys.clone()
+ };
+
+ let minimum_input_count = expected_input_tys.len();
+ let provided_arg_count = provided_args.len();
+
+ let is_const_eval_select = matches!(fn_def_id, Some(def_id) if
+ self.tcx.def_kind(def_id) == hir::def::DefKind::Fn
+ && self.tcx.is_intrinsic(def_id)
+ && self.tcx.item_name(def_id) == sym::const_eval_select);
+
+ // We introduce a helper function to demand that a given argument satisfy a given input
+ // This is more complicated than just checking type equality, as arguments could be coerced
+ // This version writes those types back so further type checking uses the narrowed types
+ let demand_compatible = |idx| {
+ let formal_input_ty: Ty<'tcx> = formal_input_tys[idx];
+ let expected_input_ty: Ty<'tcx> = expected_input_tys[idx];
+ let provided_arg = &provided_args[idx];
+
+ debug!("checking argument {}: {:?} = {:?}", idx, provided_arg, formal_input_ty);
+
+ // We're on the happy path here, so we'll do a more involved check and write back types
+ // To check compatibility, we'll do 3 things:
+ // 1. Unify the provided argument with the expected type
+ let expectation = Expectation::rvalue_hint(self, expected_input_ty);
+
+ let checked_ty = self.check_expr_with_expectation(provided_arg, expectation);
+
+ // 2. Coerce to the most detailed type that could be coerced
+ // to, which is `expected_ty` if `rvalue_hint` returns an
+ // `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise.
+ let coerced_ty = expectation.only_has_type(self).unwrap_or(formal_input_ty);
+
+ // Cause selection errors caused by resolving a single argument to point at the
+ // argument and not the call. This lets us customize the span pointed to in the
+ // fulfillment error to be more accurate.
+ let coerced_ty = self.resolve_vars_with_obligations(coerced_ty);
+
+ let coerce_error = self
+ .try_coerce(provided_arg, checked_ty, coerced_ty, AllowTwoPhase::Yes, None)
+ .err();
+
+ if coerce_error.is_some() {
+ return Compatibility::Incompatible(coerce_error);
+ }
+
+ // Check that second and third argument of `const_eval_select` must be `FnDef`, and additionally that
+ // the second argument must be `const fn`. The first argument must be a tuple, but this is already expressed
+ // in the function signature (`F: FnOnce<ARG>`), so I did not bother to add another check here.
+ //
+ // This check is here because there is currently no way to express a trait bound for `FnDef` types only.
+ if is_const_eval_select && (1..=2).contains(&idx) {
+ if let ty::FnDef(def_id, _) = checked_ty.kind() {
+ if idx == 1 && !self.tcx.is_const_fn_raw(*def_id) {
+ self.tcx
+ .sess
+ .struct_span_err(provided_arg.span, "this argument must be a `const fn`")
+ .help("consult the documentation on `const_eval_select` for more information")
+ .emit();
+ }
+ } else {
+ self.tcx
+ .sess
+ .struct_span_err(provided_arg.span, "this argument must be a function item")
+ .note(format!("expected a function item, found {checked_ty}"))
+ .help(
+ "consult the documentation on `const_eval_select` for more information",
+ )
+ .emit();
+ }
+ }
+
+ // 3. Check if the formal type is a supertype of the checked one
+ // and register any such obligations for future type checks
+ let supertype_error = self
+ .at(&self.misc(provided_arg.span), self.param_env)
+ .sup(formal_input_ty, coerced_ty);
+ let subtyping_error = match supertype_error {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations);
+ None
+ }
+ Err(err) => Some(err),
+ };
+
+ // If neither check failed, the types are compatible
+ match subtyping_error {
+ None => Compatibility::Compatible,
+ Some(_) => Compatibility::Incompatible(subtyping_error),
+ }
+ };
+
+ // To start, we only care "along the diagonal", where we expect every
+ // provided arg to be in the right spot
+ let mut compatibility_diagonal =
+ vec![Compatibility::Incompatible(None); provided_args.len()];
+
+ // Keep track of whether we *could possibly* be satisfied, i.e. whether we're on the happy path
+ // if the wrong number of arguments were supplied, we CAN'T be satisfied,
+ // and if we're c_variadic, the supplied arguments must be >= the minimum count from the function
+ // otherwise, they need to be identical, because rust doesn't currently support variadic functions
+ let mut call_appears_satisfied = if c_variadic {
+ provided_arg_count >= minimum_input_count
+ } else {
+ provided_arg_count == minimum_input_count
+ };
+
+ // Check the arguments.
+ // We do this in a pretty awful way: first we type-check any arguments
+ // that are not closures, then we type-check the closures. This is so
+ // that we have more information about the types of arguments when we
+ // type-check the functions. This isn't really the right way to do this.
+ for check_closures in [false, true] {
+ // More awful hacks: before we check argument types, try to do
+ // an "opportunistic" trait resolution of any trait bounds on
+ // the call. This helps coercions.
+ if check_closures {
+ self.select_obligations_where_possible(false, |_| {})
+ }
+
+ // Check each argument, to satisfy the input it was provided for
+ // Visually, we're traveling down the diagonal of the compatibility matrix
+ for (idx, arg) in provided_args.iter().enumerate() {
+ // Warn only for the first loop (the "no closures" one).
+ // Closure arguments themselves can't be diverging, but
+ // a previous argument can, e.g., `foo(panic!(), || {})`.
+ if !check_closures {
+ self.warn_if_unreachable(arg.hir_id, arg.span, "expression");
+ }
+
+ // For C-variadic functions, we don't have a declared type for all of
+ // the arguments, hence we only do our usual type checking with
+ // the arguments whose types we do know. However, we *can* check
+ // for unreachable expressions (see above).
+ // FIXME: unreachable warning currently isn't emitted
+ if idx >= minimum_input_count {
+ continue;
+ }
+
+ let is_closure = matches!(arg.kind, ExprKind::Closure { .. });
+ if is_closure != check_closures {
+ continue;
+ }
+
+ let compatible = demand_compatible(idx);
+ let is_compatible = matches!(compatible, Compatibility::Compatible);
+ compatibility_diagonal[idx] = compatible;
+
+ if !is_compatible {
+ call_appears_satisfied = false;
+ }
+ }
+ }
+
+ if c_variadic && provided_arg_count < minimum_input_count {
+ err_code = "E0060";
+ }
+
+ for arg in provided_args.iter().skip(minimum_input_count) {
+ // Make sure we've checked this expr at least once.
+ let arg_ty = self.check_expr(&arg);
+
+ // If the function is c-style variadic, we skipped a bunch of arguments
+ // so we need to check those, and write out the types
+ // Ideally this would be folded into the above, for uniform style
+ // but c-variadic is already a corner case
+ if c_variadic {
+ fn variadic_error<'tcx>(
+ sess: &'tcx Session,
+ span: Span,
+ ty: Ty<'tcx>,
+ cast_ty: &str,
+ ) {
+ use rustc_hir_analysis::structured_errors::MissingCastForVariadicArg;
+
+ MissingCastForVariadicArg { sess, span, ty, cast_ty }.diagnostic().emit();
+ }
+
+ // There are a few types which get autopromoted when passed via varargs
+ // in C but we just error out instead and require explicit casts.
+ let arg_ty = self.structurally_resolved_type(arg.span, arg_ty);
+ match arg_ty.kind() {
+ ty::Float(ty::FloatTy::F32) => {
+ variadic_error(tcx.sess, arg.span, arg_ty, "c_double");
+ }
+ ty::Int(ty::IntTy::I8 | ty::IntTy::I16) | ty::Bool => {
+ variadic_error(tcx.sess, arg.span, arg_ty, "c_int");
+ }
+ ty::Uint(ty::UintTy::U8 | ty::UintTy::U16) => {
+ variadic_error(tcx.sess, arg.span, arg_ty, "c_uint");
+ }
+ ty::FnDef(..) => {
+ let ptr_ty = self.tcx.mk_fn_ptr(arg_ty.fn_sig(self.tcx));
+ let ptr_ty = self.resolve_vars_if_possible(ptr_ty);
+ variadic_error(tcx.sess, arg.span, arg_ty, &ptr_ty.to_string());
+ }
+ _ => {}
+ }
+ }
+ }
+
+ if !call_appears_satisfied {
+ let compatibility_diagonal = IndexVec::from_raw(compatibility_diagonal);
+ let provided_args = IndexVec::from_iter(provided_args.iter().take(if c_variadic {
+ minimum_input_count
+ } else {
+ provided_arg_count
+ }));
+ debug_assert_eq!(
+ formal_input_tys.len(),
+ expected_input_tys.len(),
+ "expected formal_input_tys to be the same size as expected_input_tys"
+ );
+ let formal_and_expected_inputs = IndexVec::from_iter(
+ formal_input_tys
+ .iter()
+ .copied()
+ .zip(expected_input_tys.iter().copied())
+ .map(|vars| self.resolve_vars_if_possible(vars)),
+ );
+
+ self.report_arg_errors(
+ compatibility_diagonal,
+ formal_and_expected_inputs,
+ provided_args,
+ c_variadic,
+ err_code,
+ fn_def_id,
+ call_span,
+ call_expr,
+ );
+ }
+ }
+
+ fn report_arg_errors(
+ &self,
+ compatibility_diagonal: IndexVec<ProvidedIdx, Compatibility<'tcx>>,
+ formal_and_expected_inputs: IndexVec<ExpectedIdx, (Ty<'tcx>, Ty<'tcx>)>,
+ provided_args: IndexVec<ProvidedIdx, &'tcx hir::Expr<'tcx>>,
+ c_variadic: bool,
+ err_code: &str,
+ fn_def_id: Option<DefId>,
+ call_span: Span,
+ call_expr: &hir::Expr<'tcx>,
+ ) {
+ // Next, let's construct the error
+ let (error_span, full_call_span, ctor_of, is_method) = match &call_expr.kind {
+ hir::ExprKind::Call(
+ hir::Expr { hir_id, span, kind: hir::ExprKind::Path(qpath), .. },
+ _,
+ ) => {
+ if let Res::Def(DefKind::Ctor(of, _), _) =
+ self.typeck_results.borrow().qpath_res(qpath, *hir_id)
+ {
+ (call_span, *span, Some(of), false)
+ } else {
+ (call_span, *span, None, false)
+ }
+ }
+ hir::ExprKind::Call(hir::Expr { span, .. }, _) => (call_span, *span, None, false),
+ hir::ExprKind::MethodCall(path_segment, _, _, span) => {
+ let ident_span = path_segment.ident.span;
+ let ident_span = if let Some(args) = path_segment.args {
+ ident_span.with_hi(args.span_ext.hi())
+ } else {
+ ident_span
+ };
+ // methods are never ctors
+ (*span, ident_span, None, true)
+ }
+ k => span_bug!(call_span, "checking argument types on a non-call: `{:?}`", k),
+ };
+ let args_span = error_span.trim_start(full_call_span).unwrap_or(error_span);
+ let call_name = match ctor_of {
+ Some(CtorOf::Struct) => "struct",
+ Some(CtorOf::Variant) => "enum variant",
+ None => "function",
+ };
+
+ // Don't print if it has error types or is just plain `_`
+ fn has_error_or_infer<'tcx>(tys: impl IntoIterator<Item = Ty<'tcx>>) -> bool {
+ tys.into_iter().any(|ty| ty.references_error() || ty.is_ty_var())
+ }
+
+ self.set_tainted_by_errors();
+ let tcx = self.tcx;
+
+ // Get the argument span in the context of the call span so that
+ // suggestions and labels are (more) correct when an arg is a
+ // macro invocation.
+ let normalize_span = |span: Span| -> Span {
+ let normalized_span = span.find_ancestor_inside(error_span).unwrap_or(span);
+ // Sometimes macros mess up the spans, so do not normalize the
+ // arg span to equal the error span, because that's less useful
+ // than pointing out the arg expr in the wrong context.
+ if normalized_span.source_equal(error_span) { span } else { normalized_span }
+ };
+
+ // Precompute the provided types and spans, since that's all we typically need for below
+ let provided_arg_tys: IndexVec<ProvidedIdx, (Ty<'tcx>, Span)> = provided_args
+ .iter()
+ .map(|expr| {
+ let ty = self
+ .typeck_results
+ .borrow()
+ .expr_ty_adjusted_opt(*expr)
+ .unwrap_or_else(|| tcx.ty_error());
+ (self.resolve_vars_if_possible(ty), normalize_span(expr.span))
+ })
+ .collect();
+ let callee_expr = match &call_expr.peel_blocks().kind {
+ hir::ExprKind::Call(callee, _) => Some(*callee),
+ hir::ExprKind::MethodCall(_, receiver, ..) => {
+ if let Some((DefKind::AssocFn, def_id)) =
+ self.typeck_results.borrow().type_dependent_def(call_expr.hir_id)
+ && let Some(assoc) = tcx.opt_associated_item(def_id)
+ && assoc.fn_has_self_parameter
+ {
+ Some(*receiver)
+ } else {
+ None
+ }
+ }
+ _ => None,
+ };
+ let callee_ty = callee_expr
+ .and_then(|callee_expr| self.typeck_results.borrow().expr_ty_adjusted_opt(callee_expr));
+
+ // A "softer" version of the `demand_compatible`, which checks types without persisting them,
+ // and treats error types differently
+ // This will allow us to "probe" for other argument orders that would likely have been correct
+ let check_compatible = |provided_idx: ProvidedIdx, expected_idx: ExpectedIdx| {
+ if provided_idx.as_usize() == expected_idx.as_usize() {
+ return compatibility_diagonal[provided_idx].clone();
+ }
+
+ let (formal_input_ty, expected_input_ty) = formal_and_expected_inputs[expected_idx];
+ // If either is an error type, we defy the usual convention and consider them to *not* be
+ // coercible. This prevents our error message heuristic from trying to pass errors into
+ // every argument.
+ if (formal_input_ty, expected_input_ty).references_error() {
+ return Compatibility::Incompatible(None);
+ }
+
+ let (arg_ty, arg_span) = provided_arg_tys[provided_idx];
+
+ let expectation = Expectation::rvalue_hint(self, expected_input_ty);
+ let coerced_ty = expectation.only_has_type(self).unwrap_or(formal_input_ty);
+ let can_coerce = self.can_coerce(arg_ty, coerced_ty);
+ if !can_coerce {
+ return Compatibility::Incompatible(Some(ty::error::TypeError::Sorts(
+ ty::error::ExpectedFound::new(true, coerced_ty, arg_ty),
+ )));
+ }
+
+ // Using probe here, since we don't want this subtyping to affect inference.
+ let subtyping_error = self.probe(|_| {
+ self.at(&self.misc(arg_span), self.param_env).sup(formal_input_ty, coerced_ty).err()
+ });
+
+ // Same as above: if either the coerce type or the checked type is an error type,
+ // consider them *not* compatible.
+ let references_error = (coerced_ty, arg_ty).references_error();
+ match (references_error, subtyping_error) {
+ (false, None) => Compatibility::Compatible,
+ (_, subtyping_error) => Compatibility::Incompatible(subtyping_error),
+ }
+ };
+
+ // The algorithm here is inspired by levenshtein distance and longest common subsequence.
+ // We'll try to detect 4 different types of mistakes:
+ // - An extra parameter has been provided that doesn't satisfy *any* of the other inputs
+ // - An input is missing, which isn't satisfied by *any* of the other arguments
+ // - Some number of arguments have been provided in the wrong order
+ // - A type is straight up invalid
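+        //
+        // As a rough, hypothetical illustration, given `fn f(a: u32, b: f32, c: &str)`:
+        //   f(1, 2.0, "x", true)    (an extra argument)
+        //   f(1, 2.0)               (a missing argument)
+        //   f(1, "x", 2.0)          (two arguments provided in the wrong order)
+        //   f(1, true, "x")         (a plainly invalid type for `b`)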
+
+ // First, let's find the errors
+ let (mut errors, matched_inputs) =
+ ArgMatrix::new(provided_args.len(), formal_and_expected_inputs.len(), check_compatible)
+ .find_errors();
+
+ // First, check if we just need to wrap some arguments in a tuple.
+ if let Some((mismatch_idx, terr)) =
+ compatibility_diagonal.iter().enumerate().find_map(|(i, c)| {
+ if let Compatibility::Incompatible(Some(terr)) = c {
+ Some((i, *terr))
+ } else {
+ None
+ }
+ })
+ {
+ // Is the first bad expected argument a tuple?
+ // Do we have as many extra provided arguments as the tuple's length?
+ // If so, we might have just forgotten to wrap some args in a tuple.
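+            // (Hypothetical example: `fn pair(t: (i32, i32))` called as `pair(1, 2)`
+            // gets the suggestion to write `pair((1, 2))` instead.)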
+ if let Some(ty::Tuple(tys)) =
+ formal_and_expected_inputs.get(mismatch_idx.into()).map(|tys| tys.1.kind())
+ // If the tuple is unit, we're not actually wrapping any arguments.
+ && !tys.is_empty()
+ && provided_arg_tys.len() == formal_and_expected_inputs.len() - 1 + tys.len()
+ {
+ // Wrap up the N provided arguments starting at this position in a tuple.
+ let provided_as_tuple = tcx.mk_tup(
+ provided_arg_tys.iter().map(|(ty, _)| *ty).skip(mismatch_idx).take(tys.len()),
+ );
+
+ let mut satisfied = true;
+ // Check if the newly wrapped tuple + rest of the arguments are compatible.
+ for ((_, expected_ty), provided_ty) in std::iter::zip(
+ formal_and_expected_inputs.iter().skip(mismatch_idx),
+ [provided_as_tuple].into_iter().chain(
+ provided_arg_tys.iter().map(|(ty, _)| *ty).skip(mismatch_idx + tys.len()),
+ ),
+ ) {
+ if !self.can_coerce(provided_ty, *expected_ty) {
+ satisfied = false;
+ break;
+ }
+ }
+
+ // If they're compatible, suggest wrapping in an arg, and we're done!
+ // Take some care with spans, so we don't suggest wrapping a macro's
+                // innards in parentheses, for example.
+ if satisfied
+ && let Some((_, lo)) =
+ provided_arg_tys.get(ProvidedIdx::from_usize(mismatch_idx))
+ && let Some((_, hi)) =
+ provided_arg_tys.get(ProvidedIdx::from_usize(mismatch_idx + tys.len() - 1))
+ {
+ let mut err;
+ if tys.len() == 1 {
+ // A tuple wrap suggestion actually occurs within,
+ // so don't do anything special here.
+ err = self.err_ctxt().report_and_explain_type_error(
+ TypeTrace::types(
+ &self.misc(*lo),
+ true,
+ formal_and_expected_inputs[mismatch_idx.into()].1,
+ provided_arg_tys[mismatch_idx.into()].0,
+ ),
+ terr,
+ );
+ err.span_label(
+ full_call_span,
+ format!("arguments to this {} are incorrect", call_name),
+ );
+ } else {
+ err = tcx.sess.struct_span_err_with_code(
+ full_call_span,
+ &format!(
+ "this {} takes {}{} but {} {} supplied",
+ call_name,
+ if c_variadic { "at least " } else { "" },
+ potentially_plural_count(
+ formal_and_expected_inputs.len(),
+ "argument"
+ ),
+ potentially_plural_count(provided_args.len(), "argument"),
+ pluralize!("was", provided_args.len())
+ ),
+ DiagnosticId::Error(err_code.to_owned()),
+ );
+ err.multipart_suggestion_verbose(
+ "wrap these arguments in parentheses to construct a tuple",
+ vec![
+ (lo.shrink_to_lo(), "(".to_string()),
+ (hi.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ };
+ self.label_fn_like(
+ &mut err,
+ fn_def_id,
+ callee_ty,
+ Some(mismatch_idx),
+ is_method,
+ );
+ err.emit();
+ return;
+ }
+ }
+ }
+
+ // Okay, so here's where it gets complicated in regards to what errors
+ // we emit and how.
+ // There are 3 different "types" of errors we might encounter.
+ // 1) Missing/extra/swapped arguments
+ // 2) Valid but incorrect arguments
+ // 3) Invalid arguments
+ // - Currently I think this only comes up with `CyclicTy`
+ //
+        // We first need to go through and remove those in (3), emitting them
+        // as their own errors, particularly since their error code and
+        // message are special. From what I can tell, we *must* emit these
+ // here (vs somewhere prior to this function) since the arguments
+ // become invalid *because* of how they get used in the function.
+ // It is what it is.
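+        //
+        // Roughly, the flow below is: errors of kind (3) are drained out and
+        // emitted first, a lone error of kind (2) gets its own dedicated
+        // diagnostic, and whatever remains is folded into a single combined
+        // error at the end.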
+
+ if errors.is_empty() {
+ if cfg!(debug_assertions) {
+ span_bug!(error_span, "expected errors from argument matrix");
+ } else {
+ tcx.sess
+ .struct_span_err(
+ error_span,
+ "argument type mismatch was detected, \
+ but rustc had trouble determining where",
+ )
+ .note(
+ "we would appreciate a bug report: \
+ https://github.com/rust-lang/rust/issues/new",
+ )
+ .emit();
+ }
+ return;
+ }
+
+ errors.drain_filter(|error| {
+ let Error::Invalid(provided_idx, expected_idx, Compatibility::Incompatible(Some(e))) = error else { return false };
+ let (provided_ty, provided_span) = provided_arg_tys[*provided_idx];
+ let (expected_ty, _) = formal_and_expected_inputs[*expected_idx];
+ let cause = &self.misc(provided_span);
+ let trace = TypeTrace::types(cause, true, expected_ty, provided_ty);
+ if !matches!(trace.cause.as_failure_code(*e), FailureCode::Error0308(_)) {
+ self.err_ctxt().report_and_explain_type_error(trace, *e).emit();
+ return true;
+ }
+ false
+ });
+
+ // We're done if we found errors, but we already emitted them.
+ if errors.is_empty() {
+ return;
+ }
+
+        // Okay, now that we've emitted the special errors separately, we
+        // are only left with missing/extra/swapped and mismatched arguments,
+        // both of which can be collated pretty easily if needed.
+
+ // Next special case: if there is only one "Incompatible" error, just emit that
+ if let [
+ Error::Invalid(provided_idx, expected_idx, Compatibility::Incompatible(Some(err))),
+ ] = &errors[..]
+ {
+ let (formal_ty, expected_ty) = formal_and_expected_inputs[*expected_idx];
+ let (provided_ty, provided_arg_span) = provided_arg_tys[*provided_idx];
+ let cause = &self.misc(provided_arg_span);
+ let trace = TypeTrace::types(cause, true, expected_ty, provided_ty);
+ let mut err = self.err_ctxt().report_and_explain_type_error(trace, *err);
+ self.emit_coerce_suggestions(
+ &mut err,
+ &provided_args[*provided_idx],
+ provided_ty,
+ Expectation::rvalue_hint(self, expected_ty)
+ .only_has_type(self)
+ .unwrap_or(formal_ty),
+ None,
+ None,
+ );
+ err.span_label(
+ full_call_span,
+ format!("arguments to this {} are incorrect", call_name),
+ );
+ // Call out where the function is defined
+ self.label_fn_like(
+ &mut err,
+ fn_def_id,
+ callee_ty,
+ Some(expected_idx.as_usize()),
+ is_method,
+ );
+ err.emit();
+ return;
+ }
+
+ let mut err = if formal_and_expected_inputs.len() == provided_args.len() {
+ struct_span_err!(
+ tcx.sess,
+ full_call_span,
+ E0308,
+ "arguments to this {} are incorrect",
+ call_name,
+ )
+ } else {
+ tcx.sess.struct_span_err_with_code(
+ full_call_span,
+ &format!(
+ "this {} takes {}{} but {} {} supplied",
+ call_name,
+ if c_variadic { "at least " } else { "" },
+ potentially_plural_count(formal_and_expected_inputs.len(), "argument"),
+ potentially_plural_count(provided_args.len(), "argument"),
+ pluralize!("was", provided_args.len())
+ ),
+ DiagnosticId::Error(err_code.to_owned()),
+ )
+ };
+
+ // As we encounter issues, keep track of what we want to provide for the suggestion
+ let mut labels = vec![];
+ // If there is a single error, we give a specific suggestion; otherwise, we change to
+ // "did you mean" with the suggested function call
+ enum SuggestionText {
+ None,
+ Provide(bool),
+ Remove(bool),
+ Swap,
+ Reorder,
+ DidYouMean,
+ }
+ let mut suggestion_text = SuggestionText::None;
+
+ let mut errors = errors.into_iter().peekable();
+ while let Some(error) = errors.next() {
+ match error {
+ Error::Invalid(provided_idx, expected_idx, compatibility) => {
+ let (formal_ty, expected_ty) = formal_and_expected_inputs[expected_idx];
+ let (provided_ty, provided_span) = provided_arg_tys[provided_idx];
+ if let Compatibility::Incompatible(error) = compatibility {
+ let cause = &self.misc(provided_span);
+ let trace = TypeTrace::types(cause, true, expected_ty, provided_ty);
+ if let Some(e) = error {
+ self.err_ctxt().note_type_err(
+ &mut err,
+ &trace.cause,
+ None,
+ Some(trace.values),
+ e,
+ false,
+ true,
+ );
+ }
+ }
+
+ self.emit_coerce_suggestions(
+ &mut err,
+ &provided_args[provided_idx],
+ provided_ty,
+ Expectation::rvalue_hint(self, expected_ty)
+ .only_has_type(self)
+ .unwrap_or(formal_ty),
+ None,
+ None,
+ );
+ }
+ Error::Extra(arg_idx) => {
+ let (provided_ty, provided_span) = provided_arg_tys[arg_idx];
+ let provided_ty_name = if !has_error_or_infer([provided_ty]) {
+ // FIXME: not suggestable, use something else
+ format!(" of type `{}`", provided_ty)
+ } else {
+ "".to_string()
+ };
+ labels
+ .push((provided_span, format!("argument{} unexpected", provided_ty_name)));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None => SuggestionText::Remove(false),
+ SuggestionText::Remove(_) => SuggestionText::Remove(true),
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ Error::Missing(expected_idx) => {
+ // If there are multiple missing arguments adjacent to each other,
+ // then we can provide a single error.
+
+ let mut missing_idxs = vec![expected_idx];
+ while let Some(e) = errors.next_if(|e| {
+ matches!(e, Error::Missing(next_expected_idx)
+ if *next_expected_idx == *missing_idxs.last().unwrap() + 1)
+ }) {
+ match e {
+ Error::Missing(expected_idx) => missing_idxs.push(expected_idx),
+ _ => unreachable!(),
+ }
+ }
+
+                    // NOTE: Because we might be re-arranging arguments, might have extra
+                    // arguments, etc., it's hard to *really* know where we should provide
+                    // this error label, so as a heuristic, we point to the provided arg, or
+                    // to the call if the missing inputs are past the end of the provided args.
+ match &missing_idxs[..] {
+ &[expected_idx] => {
+ let (_, input_ty) = formal_and_expected_inputs[expected_idx];
+ let span = if let Some((_, arg_span)) =
+ provided_arg_tys.get(expected_idx.to_provided_idx())
+ {
+ *arg_span
+ } else {
+ args_span
+ };
+ let rendered = if !has_error_or_infer([input_ty]) {
+ format!(" of type `{}`", input_ty)
+ } else {
+ "".to_string()
+ };
+ labels.push((span, format!("an argument{} is missing", rendered)));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None => SuggestionText::Provide(false),
+ SuggestionText::Provide(_) => SuggestionText::Provide(true),
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ &[first_idx, second_idx] => {
+ let (_, first_expected_ty) = formal_and_expected_inputs[first_idx];
+ let (_, second_expected_ty) = formal_and_expected_inputs[second_idx];
+ let span = if let (Some((_, first_span)), Some((_, second_span))) = (
+ provided_arg_tys.get(first_idx.to_provided_idx()),
+ provided_arg_tys.get(second_idx.to_provided_idx()),
+ ) {
+ first_span.to(*second_span)
+ } else {
+ args_span
+ };
+ let rendered =
+ if !has_error_or_infer([first_expected_ty, second_expected_ty]) {
+ format!(
+ " of type `{}` and `{}`",
+ first_expected_ty, second_expected_ty
+ )
+ } else {
+ "".to_string()
+ };
+ labels.push((span, format!("two arguments{} are missing", rendered)));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None | SuggestionText::Provide(_) => {
+ SuggestionText::Provide(true)
+ }
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ &[first_idx, second_idx, third_idx] => {
+ let (_, first_expected_ty) = formal_and_expected_inputs[first_idx];
+ let (_, second_expected_ty) = formal_and_expected_inputs[second_idx];
+ let (_, third_expected_ty) = formal_and_expected_inputs[third_idx];
+ let span = if let (Some((_, first_span)), Some((_, third_span))) = (
+ provided_arg_tys.get(first_idx.to_provided_idx()),
+ provided_arg_tys.get(third_idx.to_provided_idx()),
+ ) {
+ first_span.to(*third_span)
+ } else {
+ args_span
+ };
+ let rendered = if !has_error_or_infer([
+ first_expected_ty,
+ second_expected_ty,
+ third_expected_ty,
+ ]) {
+ format!(
+ " of type `{}`, `{}`, and `{}`",
+ first_expected_ty, second_expected_ty, third_expected_ty
+ )
+ } else {
+ "".to_string()
+ };
+ labels.push((span, format!("three arguments{} are missing", rendered)));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None | SuggestionText::Provide(_) => {
+ SuggestionText::Provide(true)
+ }
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ missing_idxs => {
+ let first_idx = *missing_idxs.first().unwrap();
+ let last_idx = *missing_idxs.last().unwrap();
+ // NOTE: Because we might be re-arranging arguments, might have extra arguments, etc.
+ // It's hard to *really* know where we should provide this error label, so this is a
+ // decent heuristic
+ let span = if let (Some((_, first_span)), Some((_, last_span))) = (
+ provided_arg_tys.get(first_idx.to_provided_idx()),
+ provided_arg_tys.get(last_idx.to_provided_idx()),
+ ) {
+ first_span.to(*last_span)
+ } else {
+ args_span
+ };
+ labels.push((span, format!("multiple arguments are missing")));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None | SuggestionText::Provide(_) => {
+ SuggestionText::Provide(true)
+ }
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ }
+ }
+ Error::Swap(
+ first_provided_idx,
+ second_provided_idx,
+ first_expected_idx,
+ second_expected_idx,
+ ) => {
+ let (first_provided_ty, first_span) = provided_arg_tys[first_provided_idx];
+ let (_, first_expected_ty) = formal_and_expected_inputs[first_expected_idx];
+ let first_provided_ty_name = if !has_error_or_infer([first_provided_ty]) {
+ format!(", found `{}`", first_provided_ty)
+ } else {
+ String::new()
+ };
+ labels.push((
+ first_span,
+ format!("expected `{}`{}", first_expected_ty, first_provided_ty_name),
+ ));
+
+ let (second_provided_ty, second_span) = provided_arg_tys[second_provided_idx];
+ let (_, second_expected_ty) = formal_and_expected_inputs[second_expected_idx];
+ let second_provided_ty_name = if !has_error_or_infer([second_provided_ty]) {
+ format!(", found `{}`", second_provided_ty)
+ } else {
+ String::new()
+ };
+ labels.push((
+ second_span,
+ format!("expected `{}`{}", second_expected_ty, second_provided_ty_name),
+ ));
+
+ suggestion_text = match suggestion_text {
+ SuggestionText::None => SuggestionText::Swap,
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ Error::Permutation(args) => {
+ for (dst_arg, dest_input) in args {
+ let (_, expected_ty) = formal_and_expected_inputs[dst_arg];
+ let (provided_ty, provided_span) = provided_arg_tys[dest_input];
+ let provided_ty_name = if !has_error_or_infer([provided_ty]) {
+ format!(", found `{}`", provided_ty)
+ } else {
+ String::new()
+ };
+ labels.push((
+ provided_span,
+ format!("expected `{}`{}", expected_ty, provided_ty_name),
+ ));
+ }
+
+ suggestion_text = match suggestion_text {
+ SuggestionText::None => SuggestionText::Reorder,
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ }
+ }
+
+        // If we have fewer than 5 things to say, it would be useful to call out exactly what's wrong
+ if labels.len() <= 5 {
+ for (span, label) in labels {
+ err.span_label(span, label);
+ }
+ }
+
+ // Call out where the function is defined
+ self.label_fn_like(&mut err, fn_def_id, callee_ty, None, is_method);
+
+ // And add a suggestion block for all of the parameters
+ let suggestion_text = match suggestion_text {
+ SuggestionText::None => None,
+ SuggestionText::Provide(plural) => {
+ Some(format!("provide the argument{}", if plural { "s" } else { "" }))
+ }
+ SuggestionText::Remove(plural) => {
+ Some(format!("remove the extra argument{}", if plural { "s" } else { "" }))
+ }
+ SuggestionText::Swap => Some("swap these arguments".to_string()),
+ SuggestionText::Reorder => Some("reorder these arguments".to_string()),
+ SuggestionText::DidYouMean => Some("did you mean".to_string()),
+ };
+ if let Some(suggestion_text) = suggestion_text {
+ let source_map = self.sess().source_map();
+ let (mut suggestion, suggestion_span) =
+ if let Some(call_span) = full_call_span.find_ancestor_inside(error_span) {
+ ("(".to_string(), call_span.shrink_to_hi().to(error_span.shrink_to_hi()))
+ } else {
+ (
+ format!(
+ "{}(",
+ source_map.span_to_snippet(full_call_span).unwrap_or_else(|_| {
+ fn_def_id.map_or("".to_string(), |fn_def_id| {
+ tcx.item_name(fn_def_id).to_string()
+ })
+ })
+ ),
+ error_span,
+ )
+ };
+ let mut needs_comma = false;
+ for (expected_idx, provided_idx) in matched_inputs.iter_enumerated() {
+ if needs_comma {
+ suggestion += ", ";
+ } else {
+ needs_comma = true;
+ }
+ let suggestion_text = if let Some(provided_idx) = provided_idx
+ && let (_, provided_span) = provided_arg_tys[*provided_idx]
+ && let Ok(arg_text) = source_map.span_to_snippet(provided_span)
+ {
+ arg_text
+ } else {
+ // Propose a placeholder of the correct type
+ let (_, expected_ty) = formal_and_expected_inputs[expected_idx];
+ if expected_ty.is_unit() {
+ "()".to_string()
+ } else if expected_ty.is_suggestable(tcx, false) {
+ format!("/* {} */", expected_ty)
+ } else {
+ "/* value */".to_string()
+ }
+ };
+ suggestion += &suggestion_text;
+ }
+ suggestion += ")";
+ err.span_suggestion_verbose(
+ suggestion_span,
+ &suggestion_text,
+ suggestion,
+ Applicability::HasPlaceholders,
+ );
+ }
+
+ err.emit();
+ }
+
+ // AST fragment checking
+ pub(in super::super) fn check_lit(
+ &self,
+ lit: &hir::Lit,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+
+ match lit.node {
+ ast::LitKind::Str(..) => tcx.mk_static_str(),
+ ast::LitKind::ByteStr(ref v) => {
+ tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.u8, v.len() as u64))
+ }
+ ast::LitKind::Byte(_) => tcx.types.u8,
+ ast::LitKind::Char(_) => tcx.types.char,
+ ast::LitKind::Int(_, ast::LitIntType::Signed(t)) => tcx.mk_mach_int(ty::int_ty(t)),
+ ast::LitKind::Int(_, ast::LitIntType::Unsigned(t)) => tcx.mk_mach_uint(ty::uint_ty(t)),
+ ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => {
+ let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() {
+ ty::Int(_) | ty::Uint(_) => Some(ty),
+ ty::Char => Some(tcx.types.u8),
+ ty::RawPtr(..) => Some(tcx.types.usize),
+ ty::FnDef(..) | ty::FnPtr(_) => Some(tcx.types.usize),
+ _ => None,
+ });
+ opt_ty.unwrap_or_else(|| self.next_int_var())
+ }
+ ast::LitKind::Float(_, ast::LitFloatType::Suffixed(t)) => {
+ tcx.mk_mach_float(ty::float_ty(t))
+ }
+ ast::LitKind::Float(_, ast::LitFloatType::Unsuffixed) => {
+ let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() {
+ ty::Float(_) => Some(ty),
+ _ => None,
+ });
+ opt_ty.unwrap_or_else(|| self.next_float_var())
+ }
+ ast::LitKind::Bool(_) => tcx.types.bool,
+ ast::LitKind::Err => tcx.ty_error(),
+ }
+ }
+
+ pub fn check_struct_path(
+ &self,
+ qpath: &QPath<'_>,
+ hir_id: hir::HirId,
+ ) -> Option<(&'tcx ty::VariantDef, Ty<'tcx>)> {
+ let path_span = qpath.span();
+ let (def, ty) = self.finish_resolving_struct_path(qpath, path_span, hir_id);
+ let variant = match def {
+ Res::Err => {
+ self.set_tainted_by_errors();
+ return None;
+ }
+ Res::Def(DefKind::Variant, _) => match ty.kind() {
+ ty::Adt(adt, substs) => Some((adt.variant_of_res(def), adt.did(), substs)),
+ _ => bug!("unexpected type: {:?}", ty),
+ },
+ Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
+ | Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. } => match ty.kind() {
+ ty::Adt(adt, substs) if !adt.is_enum() => {
+ Some((adt.non_enum_variant(), adt.did(), substs))
+ }
+ _ => None,
+ },
+ _ => bug!("unexpected definition: {:?}", def),
+ };
+
+ if let Some((variant, did, substs)) = variant {
+ debug!("check_struct_path: did={:?} substs={:?}", did, substs);
+ self.write_user_type_annotation_from_substs(hir_id, did, substs, None);
+
+ // Check bounds on type arguments used in the path.
+ self.add_required_obligations_for_hir(path_span, did, substs, hir_id);
+
+ Some((variant, ty))
+ } else {
+ match ty.kind() {
+ ty::Error(_) => {
+ // E0071 might be caused by a spelling error, which will have
+ // already caused an error message and probably a suggestion
+ // elsewhere. Refrain from emitting more unhelpful errors here
+ // (issue #88844).
+ }
+ _ => {
+ struct_span_err!(
+ self.tcx.sess,
+ path_span,
+ E0071,
+ "expected struct, variant or union type, found {}",
+ ty.sort_string(self.tcx)
+ )
+ .span_label(path_span, "not a struct")
+ .emit();
+ }
+ }
+ None
+ }
+ }
+
+ pub fn check_decl_initializer(
+ &self,
+ hir_id: hir::HirId,
+ pat: &'tcx hir::Pat<'tcx>,
+ init: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ // FIXME(tschottdorf): `contains_explicit_ref_binding()` must be removed
+ // for #42640 (default match binding modes).
+ //
+ // See #44848.
+ let ref_bindings = pat.contains_explicit_ref_binding();
+
+ let local_ty = self.local_ty(init.span, hir_id).revealed_ty;
+ if let Some(m) = ref_bindings {
+ // Somewhat subtle: if we have a `ref` binding in the pattern,
+ // we want to avoid introducing coercions for the RHS. This is
+ // both because it helps preserve sanity and, in the case of
+ // ref mut, for soundness (issue #23116). In particular, in
+ // the latter case, we need to be clear that the type of the
+ // referent for the reference that results is *equal to* the
+ // type of the place it is referencing, and not some
+ // supertype thereof.
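+            //
+            // A small sketch: in `let ref mut x = init;`, the type behind `x`
+            // must be exactly the type of `init`'s place; if the two could
+            // differ by a coercion or subtyping step, writing through `x`
+            // could store a value whose type doesn't match the place.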
+ let init_ty = self.check_expr_with_needs(init, Needs::maybe_mut_place(m));
+ self.demand_eqtype(init.span, local_ty, init_ty);
+ init_ty
+ } else {
+ self.check_expr_coercable_to_type(init, local_ty, None)
+ }
+ }
+
+ pub(in super::super) fn check_decl(&self, decl: Declaration<'tcx>) {
+ // Determine and write the type which we'll check the pattern against.
+ let decl_ty = self.local_ty(decl.span, decl.hir_id).decl_ty;
+ self.write_ty(decl.hir_id, decl_ty);
+
+ // Type check the initializer.
+ if let Some(ref init) = decl.init {
+ let init_ty = self.check_decl_initializer(decl.hir_id, decl.pat, &init);
+ self.overwrite_local_ty_if_err(decl.hir_id, decl.pat, decl_ty, init_ty);
+ }
+
+ // Does the expected pattern type originate from an expression and what is the span?
+ let (origin_expr, ty_span) = match (decl.ty, decl.init) {
+ (Some(ty), _) => (false, Some(ty.span)), // Bias towards the explicit user type.
+ (_, Some(init)) => {
+ (true, Some(init.span.find_ancestor_inside(decl.span).unwrap_or(init.span)))
+ } // No explicit type; so use the scrutinee.
+ _ => (false, None), // We have `let $pat;`, so the expected type is unconstrained.
+ };
+
+ // Type check the pattern. Override if necessary to avoid knock-on errors.
+ self.check_pat_top(&decl.pat, decl_ty, ty_span, origin_expr);
+ let pat_ty = self.node_ty(decl.pat.hir_id);
+ self.overwrite_local_ty_if_err(decl.hir_id, decl.pat, decl_ty, pat_ty);
+
+ if let Some(blk) = decl.els {
+ let previous_diverges = self.diverges.get();
+ let else_ty = self.check_block_with_expected(blk, NoExpectation);
+ let cause = self.cause(blk.span, ObligationCauseCode::LetElse);
+ if let Some(mut err) =
+ self.demand_eqtype_with_origin(&cause, self.tcx.types.never, else_ty)
+ {
+ err.emit();
+ }
+ self.diverges.set(previous_diverges);
+ }
+ }
+
+ /// Type check a `let` statement.
+ pub fn check_decl_local(&self, local: &'tcx hir::Local<'tcx>) {
+ self.check_decl(local.into());
+ }
+
+ pub fn check_stmt(&self, stmt: &'tcx hir::Stmt<'tcx>, is_last: bool) {
+ // Don't do all the complex logic below for `DeclItem`.
+ match stmt.kind {
+ hir::StmtKind::Item(..) => return,
+ hir::StmtKind::Local(..) | hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {}
+ }
+
+ self.warn_if_unreachable(stmt.hir_id, stmt.span, "statement");
+
+ // Hide the outer diverging and `has_errors` flags.
+ let old_diverges = self.diverges.replace(Diverges::Maybe);
+ let old_has_errors = self.has_errors.replace(false);
+
+ match stmt.kind {
+ hir::StmtKind::Local(l) => {
+ self.check_decl_local(l);
+ }
+ // Ignore for now.
+ hir::StmtKind::Item(_) => {}
+ hir::StmtKind::Expr(ref expr) => {
+ // Check with expected type of `()`.
+ self.check_expr_has_type_or_error(&expr, self.tcx.mk_unit(), |err| {
+ if expr.can_have_side_effects() {
+ self.suggest_semicolon_at_end(expr.span, err);
+ }
+ });
+ }
+ hir::StmtKind::Semi(ref expr) => {
+ // All of this is equivalent to calling `check_expr`, but it is inlined out here
+ // in order to capture the fact that this `match` is the last statement in its
+ // function. This is done for better suggestions to remove the `;`.
+ let expectation = match expr.kind {
+ hir::ExprKind::Match(..) if is_last => IsLast(stmt.span),
+ _ => NoExpectation,
+ };
+ self.check_expr_with_expectation(expr, expectation);
+ }
+ }
+
+ // Combine the diverging and `has_error` flags.
+ self.diverges.set(self.diverges.get() | old_diverges);
+ self.has_errors.set(self.has_errors.get() | old_has_errors);
+ }
+
+ pub fn check_block_no_value(&self, blk: &'tcx hir::Block<'tcx>) {
+ let unit = self.tcx.mk_unit();
+ let ty = self.check_block_with_expected(blk, ExpectHasType(unit));
+
+ // if the block produces a `!` value, that can always be
+ // (effectively) coerced to unit.
+ if !ty.is_never() {
+ self.demand_suptype(blk.span, unit, ty);
+ }
+ }
+
+ pub(in super::super) fn check_block_with_expected(
+ &self,
+ blk: &'tcx hir::Block<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let prev = self.ps.replace(self.ps.get().recurse(blk));
+
+ // In some cases, blocks have just one exit, but other blocks
+ // can be targeted by multiple breaks. This can happen both
+ // with labeled blocks as well as when we desugar
+ // a `try { ... }` expression.
+ //
+ // Example 1:
+ //
+ // 'a: { if true { break 'a Err(()); } Ok(()) }
+ //
+ // Here we would wind up with two coercions, one from
+ // `Err(())` and the other from the tail expression
+ // `Ok(())`. If the tail expression is omitted, that's a
+ // "forced unit" -- unless the block diverges, in which
+ // case we can ignore the tail expression (e.g., `'a: {
+ // break 'a 22; }` would not force the type of the block
+ // to be `()`).
+ let tail_expr = blk.expr.as_ref();
+ let coerce_to_ty = expected.coercion_target_type(self, blk.span);
+ let coerce = if blk.targeted_by_break {
+ CoerceMany::new(coerce_to_ty)
+ } else {
+ let tail_expr: &[&hir::Expr<'_>] = match tail_expr {
+ Some(e) => slice::from_ref(e),
+ None => &[],
+ };
+ CoerceMany::with_coercion_sites(coerce_to_ty, tail_expr)
+ };
+
+ let prev_diverges = self.diverges.get();
+ let ctxt = BreakableCtxt { coerce: Some(coerce), may_break: false };
+
+ let (ctxt, ()) = self.with_breakable_ctxt(blk.hir_id, ctxt, || {
+ for (pos, s) in blk.stmts.iter().enumerate() {
+ self.check_stmt(s, blk.stmts.len() - 1 == pos);
+ }
+
+ // check the tail expression **without** holding the
+ // `enclosing_breakables` lock below.
+ let tail_expr_ty = tail_expr.map(|t| self.check_expr_with_expectation(t, expected));
+
+ let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+ let ctxt = enclosing_breakables.find_breakable(blk.hir_id);
+ let coerce = ctxt.coerce.as_mut().unwrap();
+ if let Some(tail_expr_ty) = tail_expr_ty {
+ let tail_expr = tail_expr.unwrap();
+ let span = self.get_expr_coercion_span(tail_expr);
+ let cause = self.cause(span, ObligationCauseCode::BlockTailExpression(blk.hir_id));
+ let ty_for_diagnostic = coerce.merged_ty();
+                // We use coerce_inner here because we want to augment the error
+                // with a suggestion to wrap the block in square brackets if it
+                // might have been mistaken for array syntax.
+ coerce.coerce_inner(
+ self,
+ &cause,
+ Some(tail_expr),
+ tail_expr_ty,
+ Some(&mut |diag: &mut Diagnostic| {
+ self.suggest_block_to_brackets(diag, blk, tail_expr_ty, ty_for_diagnostic);
+ }),
+ false,
+ );
+ } else {
+ // Subtle: if there is no explicit tail expression,
+ // that is typically equivalent to a tail expression
+ // of `()` -- except if the block diverges. In that
+ // case, there is no value supplied from the tail
+ // expression (assuming there are no other breaks,
+ // this implies that the type of the block will be
+ // `!`).
+ //
+ // #41425 -- label the implicit `()` as being the
+ // "found type" here, rather than the "expected type".
+ if !self.diverges.get().is_always() {
+ // #50009 -- Do not point at the entire fn block span, point at the return type
+ // span, as it is the cause of the requirement, and
+ // `consider_hint_about_removing_semicolon` will point at the last expression
+ // if it were a relevant part of the error. This improves usability in editors
+ // that highlight errors inline.
+ let mut sp = blk.span;
+ let mut fn_span = None;
+ if let Some((decl, ident)) = self.get_parent_fn_decl(blk.hir_id) {
+ let ret_sp = decl.output.span();
+ if let Some(block_sp) = self.parent_item_span(blk.hir_id) {
+                            // HACK: in some cases (`ui/liveness/liveness-issue-2163.rs`) the
+                            // output would otherwise be incorrect and even misleading. Make sure
+                            // the span we're aiming at corresponds to a `fn` body.
+ if block_sp == blk.span {
+ sp = ret_sp;
+ fn_span = Some(ident.span);
+ }
+ }
+ }
+ coerce.coerce_forced_unit(
+ self,
+ &self.misc(sp),
+ &mut |err| {
+ if let Some(expected_ty) = expected.only_has_type(self) {
+ if !self.consider_removing_semicolon(blk, expected_ty, err) {
+ self.err_ctxt().consider_returning_binding(
+ blk,
+ expected_ty,
+ err,
+ );
+ }
+ if expected_ty == self.tcx.types.bool {
+ // If this is caused by a missing `let` in a `while let`,
+ // silence this redundant error, as we already emit E0070.
+
+                            // Our block must be an `assign desugar local; assignment`
+ if let Some(hir::Node::Block(hir::Block {
+ stmts:
+ [
+ hir::Stmt {
+ kind:
+ hir::StmtKind::Local(hir::Local {
+ source:
+ hir::LocalSource::AssignDesugar(_),
+ ..
+ }),
+ ..
+ },
+ hir::Stmt {
+ kind:
+ hir::StmtKind::Expr(hir::Expr {
+ kind: hir::ExprKind::Assign(..),
+ ..
+ }),
+ ..
+ },
+ ],
+ ..
+ })) = self.tcx.hir().find(blk.hir_id)
+ {
+ self.comes_from_while_condition(blk.hir_id, |_| {
+ err.downgrade_to_delayed_bug();
+ })
+ }
+ }
+ }
+ if let Some(fn_span) = fn_span {
+ err.span_label(
+ fn_span,
+ "implicitly returns `()` as its body has no tail or `return` \
+ expression",
+ );
+ }
+ },
+ false,
+ );
+ }
+ }
+ });
+
+ if ctxt.may_break {
+ // If we can break from the block, then the block's exit is always reachable
+ // (... as long as the entry is reachable) - regardless of the tail of the block.
+ self.diverges.set(prev_diverges);
+ }
+
+ let mut ty = ctxt.coerce.unwrap().complete(self);
+
+ if self.has_errors.get() || ty.references_error() {
+ ty = self.tcx.ty_error()
+ }
+
+ self.write_ty(blk.hir_id, ty);
+
+ self.ps.set(prev);
+ ty
+ }
+
+ fn parent_item_span(&self, id: hir::HirId) -> Option<Span> {
+ let node = self.tcx.hir().get_by_def_id(self.tcx.hir().get_parent_item(id).def_id);
+ match node {
+ Node::Item(&hir::Item { kind: hir::ItemKind::Fn(_, _, body_id), .. })
+ | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(_, body_id), .. }) => {
+ let body = self.tcx.hir().body(body_id);
+ if let ExprKind::Block(block, _) = &body.value.kind {
+ return Some(block.span);
+ }
+ }
+ _ => {}
+ }
+ None
+ }
+
+ /// Given a function block's `HirId`, returns its `FnDecl` if it exists, or `None` otherwise.
+ fn get_parent_fn_decl(&self, blk_id: hir::HirId) -> Option<(&'tcx hir::FnDecl<'tcx>, Ident)> {
+ let parent = self.tcx.hir().get_by_def_id(self.tcx.hir().get_parent_item(blk_id).def_id);
+ self.get_node_fn_decl(parent).map(|(fn_decl, ident, _)| (fn_decl, ident))
+ }
+
+ /// If `expr` is a `match` expression that has only one non-`!` arm, use that arm's tail
+ /// expression's `Span`, otherwise return `expr.span`. This is done to give better errors
+ /// when given code like the following:
+ /// ```text
+ /// if false { return 0i32; } else { 1u32 }
+ /// // ^^^^ point at this instead of the whole `if` expression
+ /// ```
+ fn get_expr_coercion_span(&self, expr: &hir::Expr<'_>) -> rustc_span::Span {
+ let check_in_progress = |elem: &hir::Expr<'_>| {
+ self.typeck_results.borrow().node_type_opt(elem.hir_id).filter(|ty| !ty.is_never()).map(
+ |_| match elem.kind {
+ // Point at the tail expression when possible.
+ hir::ExprKind::Block(block, _) => block.expr.map_or(block.span, |e| e.span),
+ _ => elem.span,
+ },
+ )
+ };
+
+ if let hir::ExprKind::If(_, _, Some(el)) = expr.kind {
+ if let Some(rslt) = check_in_progress(el) {
+ return rslt;
+ }
+ }
+
+ if let hir::ExprKind::Match(_, arms, _) = expr.kind {
+ let mut iter = arms.iter().filter_map(|arm| check_in_progress(arm.body));
+ if let Some(span) = iter.next() {
+ if iter.next().is_none() {
+ return span;
+ }
+ }
+ }
+
+ expr.span
+ }
+
+ fn overwrite_local_ty_if_err(
+ &self,
+ hir_id: hir::HirId,
+ pat: &'tcx hir::Pat<'tcx>,
+ decl_ty: Ty<'tcx>,
+ ty: Ty<'tcx>,
+ ) {
+ if ty.references_error() {
+            // Override the types everywhere with `err()` to avoid knock-on errors.
+ self.write_ty(hir_id, ty);
+ self.write_ty(pat.hir_id, ty);
+ let local_ty = LocalTy { decl_ty, revealed_ty: ty };
+ self.locals.borrow_mut().insert(hir_id, local_ty);
+ self.locals.borrow_mut().insert(pat.hir_id, local_ty);
+ }
+ }
+
+ // Finish resolving a path in a struct expression or pattern `S::A { .. }` if necessary.
+ // The newly resolved definition is written into `type_dependent_defs`.
+ fn finish_resolving_struct_path(
+ &self,
+ qpath: &QPath<'_>,
+ path_span: Span,
+ hir_id: hir::HirId,
+ ) -> (Res, Ty<'tcx>) {
+ match *qpath {
+ QPath::Resolved(ref maybe_qself, ref path) => {
+ let self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself));
+ let ty = <dyn AstConv<'_>>::res_to_ty(self, self_ty, path, true);
+ (path.res, ty)
+ }
+ QPath::TypeRelative(ref qself, ref segment) => {
+ let ty = self.to_ty(qself);
+
+ let result = <dyn AstConv<'_>>::associated_path_to_ty(
+ self, hir_id, path_span, ty, qself, segment, true,
+ );
+ let ty = result.map(|(ty, _, _)| ty).unwrap_or_else(|_| self.tcx().ty_error());
+ let result = result.map(|(_, kind, def_id)| (kind, def_id));
+
+ // Write back the new resolution.
+ self.write_resolution(hir_id, result);
+
+ (result.map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)), ty)
+ }
+ QPath::LangItem(lang_item, span, id) => {
+ self.resolve_lang_item_path(lang_item, span, hir_id, id)
+ }
+ }
+ }
+
+ /// Given a vector of fulfillment errors, try to adjust the spans of the
+ /// errors to more accurately point at the cause of the failure.
+ ///
+ /// This applies to calls, methods, and struct expressions. This will also
+ /// try to deduplicate errors that are due to the same cause but might
+ /// have been created with different [`ObligationCause`][traits::ObligationCause]s.
+ pub(super) fn adjust_fulfillment_errors_for_expr_obligation(
+ &self,
+ errors: &mut Vec<traits::FulfillmentError<'tcx>>,
+ ) {
+ // Store a mapping from `(Span, Predicate) -> ObligationCause`, so that
+ // other errors that have the same span and predicate can also get fixed,
+ // even if their `ObligationCauseCode` isn't an `Expr*Obligation` kind.
+ // This is important since if we adjust one span but not the other, then
+ // we will have "duplicated" the error on the UI side.
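+        //
+        // As a sketch: if two errors share the span of `foo(arg)` and the same
+        // predicate, and one of them is adjusted to point at `arg`, the other
+        // is remapped to the same cause so the output doesn't show the "same"
+        // error in two different places.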
+ let mut remap_cause = FxHashSet::default();
+ let mut not_adjusted = vec![];
+
+ for error in errors {
+ let before_span = error.obligation.cause.span;
+ if self.adjust_fulfillment_error_for_expr_obligation(error)
+ || before_span != error.obligation.cause.span
+ {
+ // Store both the predicate and the predicate *without constness*
+ // since sometimes we instantiate and check both of these in a
+ // method call, for example.
+ remap_cause.insert((
+ before_span,
+ error.obligation.predicate,
+ error.obligation.cause.clone(),
+ ));
+ remap_cause.insert((
+ before_span,
+ error.obligation.predicate.without_const(self.tcx),
+ error.obligation.cause.clone(),
+ ));
+ } else {
+                // If it failed to be adjusted the first time around, it may be
+                // adjusted via the "remap cause" mapping the second time...
+ not_adjusted.push(error);
+ }
+ }
+
+ for error in not_adjusted {
+ for (span, predicate, cause) in &remap_cause {
+ if *predicate == error.obligation.predicate
+ && span.contains(error.obligation.cause.span)
+ {
+ error.obligation.cause = cause.clone();
+ continue;
+ }
+ }
+ }
+ }
+
+ fn adjust_fulfillment_error_for_expr_obligation(
+ &self,
+ error: &mut traits::FulfillmentError<'tcx>,
+ ) -> bool {
+ let (traits::ExprItemObligation(def_id, hir_id, idx) | traits::ExprBindingObligation(def_id, _, hir_id, idx))
+ = *error.obligation.cause.code().peel_derives() else { return false; };
+ let hir = self.tcx.hir();
+ let hir::Node::Expr(expr) = hir.get(hir_id) else { return false; };
+
+ // Skip over mentioning async lang item
+ if Some(def_id) == self.tcx.lang_items().from_generator_fn()
+ && error.obligation.cause.span.desugaring_kind()
+ == Some(rustc_span::DesugaringKind::Async)
+ {
+ return false;
+ }
+
+ let Some(unsubstituted_pred) =
+ self.tcx.predicates_of(def_id).instantiate_identity(self.tcx).predicates.into_iter().nth(idx)
+ else { return false; };
+
+ let generics = self.tcx.generics_of(def_id);
+ let predicate_substs = match unsubstituted_pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(pred) => pred.trait_ref.substs,
+ ty::PredicateKind::Projection(pred) => pred.projection_ty.substs,
+ _ => ty::List::empty(),
+ };
+
+ let find_param_matching = |matches: &dyn Fn(&ty::ParamTy) -> bool| {
+ predicate_substs.types().find_map(|ty| {
+ ty.walk().find_map(|arg| {
+ if let ty::GenericArgKind::Type(ty) = arg.unpack()
+ && let ty::Param(param_ty) = ty.kind()
+ && matches(param_ty)
+ {
+ Some(arg)
+ } else {
+ None
+ }
+ })
+ })
+ };
+
+ // Prefer generics that are local to the fn item, since these are likely
+ // to be the cause of the unsatisfied predicate.
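+        // (Hypothetical example: in `impl<T> Foo<T> { fn bar<U: Debug>(&self, x: U) {} }`,
+        // an unsatisfied `U: Debug` is more likely explained by the argument
+        // supplied for `x` than by the impl's `T`, so `U` is preferred here.)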
+ let mut param_to_point_at = find_param_matching(&|param_ty| {
+ self.tcx.parent(generics.type_param(param_ty, self.tcx).def_id) == def_id
+ });
+ // Fall back to generic that isn't local to the fn item. This will come
+ // from a trait or impl, for example.
+ let mut fallback_param_to_point_at = find_param_matching(&|param_ty| {
+ self.tcx.parent(generics.type_param(param_ty, self.tcx).def_id) != def_id
+ && param_ty.name != rustc_span::symbol::kw::SelfUpper
+ });
+ // Finally, the `Self` parameter is possibly the reason that the predicate
+        // is unsatisfied. This is less likely to be true for methods, because
+        // the method probe means that we have already more or less checked
+        // that the predicates due to the `Self` type are true.
+ let mut self_param_to_point_at =
+ find_param_matching(&|param_ty| param_ty.name == rustc_span::symbol::kw::SelfUpper);
+
+ // Finally, for ambiguity-related errors, we actually want to look
+ // for a parameter that is the source of the inference type left
+ // over in this predicate.
+ if let traits::FulfillmentErrorCode::CodeAmbiguity = error.code {
+ fallback_param_to_point_at = None;
+ self_param_to_point_at = None;
+ param_to_point_at =
+ self.find_ambiguous_parameter_in(def_id, error.root_obligation.predicate);
+ }
+
+ if self.closure_span_overlaps_error(error, expr.span) {
+ return false;
+ }
+
+ match &expr.kind {
+ hir::ExprKind::Path(qpath) => {
+ if let hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Call(callee, args),
+ hir_id: call_hir_id,
+ span: call_span,
+ ..
+ }) = hir.get(hir.get_parent_node(expr.hir_id))
+ && callee.hir_id == expr.hir_id
+ {
+ if self.closure_span_overlaps_error(error, *call_span) {
+ return false;
+ }
+
+ for param in
+ [param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
+ .into_iter()
+ .flatten()
+ {
+ if self.point_at_arg_if_possible(
+ error,
+ def_id,
+ param,
+ *call_hir_id,
+ callee.span,
+ None,
+ args,
+ )
+ {
+ return true;
+ }
+ }
+ }
+                // Notably, we only point to params that are local to the
+                // item we're checking, since those are the ones we are able
+                // to look for in the final `hir::PathSegment`. Everything else
+ // would require a deeper search into the `qpath` than I think
+ // is worthwhile.
+ if let Some(param_to_point_at) = param_to_point_at
+ && self.point_at_path_if_possible(error, def_id, param_to_point_at, qpath)
+ {
+ return true;
+ }
+ }
+ hir::ExprKind::MethodCall(segment, receiver, args, ..) => {
+ for param in [param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
+ .into_iter()
+ .flatten()
+ {
+ if self.point_at_arg_if_possible(
+ error,
+ def_id,
+ param,
+ hir_id,
+ segment.ident.span,
+ Some(receiver),
+ args,
+ ) {
+ return true;
+ }
+ }
+ if let Some(param_to_point_at) = param_to_point_at
+ && self.point_at_generic_if_possible(error, def_id, param_to_point_at, segment)
+ {
+ return true;
+ }
+ }
+ hir::ExprKind::Struct(qpath, fields, ..) => {
+ if let Res::Def(DefKind::Struct | DefKind::Variant, variant_def_id) =
+ self.typeck_results.borrow().qpath_res(qpath, hir_id)
+ {
+ for param in
+ [param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
+ {
+ if let Some(param) = param
+ && self.point_at_field_if_possible(
+ error,
+ def_id,
+ param,
+ variant_def_id,
+ fields,
+ )
+ {
+ return true;
+ }
+ }
+ }
+ if let Some(param_to_point_at) = param_to_point_at
+ && self.point_at_path_if_possible(error, def_id, param_to_point_at, qpath)
+ {
+ return true;
+ }
+ }
+ _ => {}
+ }
+
+ false
+ }
+
+ fn closure_span_overlaps_error(
+ &self,
+ error: &traits::FulfillmentError<'tcx>,
+ span: Span,
+ ) -> bool {
+ if let traits::FulfillmentErrorCode::CodeSelectionError(
+ traits::SelectionError::OutputTypeParameterMismatch(_, expected, _),
+ ) = error.code
+ && let ty::Closure(def_id, _) | ty::Generator(def_id, ..) = expected.skip_binder().self_ty().kind()
+ && span.overlaps(self.tcx.def_span(*def_id))
+ {
+ true
+ } else {
+ false
+ }
+ }
+
+ fn point_at_arg_if_possible(
+ &self,
+ error: &mut traits::FulfillmentError<'tcx>,
+ def_id: DefId,
+ param_to_point_at: ty::GenericArg<'tcx>,
+ call_hir_id: hir::HirId,
+ callee_span: Span,
+ receiver: Option<&'tcx hir::Expr<'tcx>>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) -> bool {
+ let sig = self.tcx.fn_sig(def_id).skip_binder();
+ let args_referencing_param: Vec<_> = sig
+ .inputs()
+ .iter()
+ .enumerate()
+ .filter(|(_, ty)| find_param_in_ty(**ty, param_to_point_at))
+ .collect();
+        // If there's exactly one argument that references the given generic, great!
+ if let [(idx, _)] = args_referencing_param.as_slice()
+ && let Some(arg) = receiver
+ .map_or(args.get(*idx), |rcvr| if *idx == 0 { Some(rcvr) } else { args.get(*idx - 1) }) {
+ error.obligation.cause.span = arg.span.find_ancestor_in_same_ctxt(error.obligation.cause.span).unwrap_or(arg.span);
+ error.obligation.cause.map_code(|parent_code| {
+ ObligationCauseCode::FunctionArgumentObligation {
+ arg_hir_id: arg.hir_id,
+ call_hir_id,
+ parent_code,
+ }
+ });
+ return true;
+ } else if args_referencing_param.len() > 0 {
+ // If more than one argument applies, then point to the callee span at least...
+            // We have a chance to fix this up further in `point_at_generic_if_possible`
+ error.obligation.cause.span = callee_span;
+ }
+
+ false
+ }
+
+ fn point_at_field_if_possible(
+ &self,
+ error: &mut traits::FulfillmentError<'tcx>,
+ def_id: DefId,
+ param_to_point_at: ty::GenericArg<'tcx>,
+ variant_def_id: DefId,
+ expr_fields: &[hir::ExprField<'tcx>],
+ ) -> bool {
+ let def = self.tcx.adt_def(def_id);
+
+ let identity_substs = ty::InternalSubsts::identity_for_item(self.tcx, def_id);
+ let fields_referencing_param: Vec<_> = def
+ .variant_with_id(variant_def_id)
+ .fields
+ .iter()
+ .filter(|field| {
+ let field_ty = field.ty(self.tcx, identity_substs);
+ find_param_in_ty(field_ty, param_to_point_at)
+ })
+ .collect();
+
+ if let [field] = fields_referencing_param.as_slice() {
+ for expr_field in expr_fields {
+ // Look for the ExprField that matches the field, using the
+ // same rules that check_expr_struct uses for macro hygiene.
+ if self.tcx.adjust_ident(expr_field.ident, variant_def_id) == field.ident(self.tcx)
+ {
+ error.obligation.cause.span = expr_field
+ .expr
+ .span
+ .find_ancestor_in_same_ctxt(error.obligation.cause.span)
+ .unwrap_or(expr_field.span);
+ return true;
+ }
+ }
+ }
+
+ false
+ }
+
+ fn point_at_path_if_possible(
+ &self,
+ error: &mut traits::FulfillmentError<'tcx>,
+ def_id: DefId,
+ param: ty::GenericArg<'tcx>,
+ qpath: &QPath<'tcx>,
+ ) -> bool {
+ match qpath {
+ hir::QPath::Resolved(_, path) => {
+ if let Some(segment) = path.segments.last()
+ && self.point_at_generic_if_possible(error, def_id, param, segment)
+ {
+ return true;
+ }
+ }
+ hir::QPath::TypeRelative(_, segment) => {
+ if self.point_at_generic_if_possible(error, def_id, param, segment) {
+ return true;
+ }
+ }
+ _ => {}
+ }
+
+ false
+ }
+
+ fn point_at_generic_if_possible(
+ &self,
+ error: &mut traits::FulfillmentError<'tcx>,
+ def_id: DefId,
+ param_to_point_at: ty::GenericArg<'tcx>,
+ segment: &hir::PathSegment<'tcx>,
+ ) -> bool {
+ let own_substs = self
+ .tcx
+ .generics_of(def_id)
+ .own_substs(ty::InternalSubsts::identity_for_item(self.tcx, def_id));
+ let Some((index, _)) = own_substs
+ .iter()
+ .filter(|arg| matches!(arg.unpack(), ty::GenericArgKind::Type(_)))
+ .enumerate()
+ .find(|(_, arg)| **arg == param_to_point_at) else { return false };
+ let Some(arg) = segment
+ .args()
+ .args
+ .iter()
+ .filter(|arg| matches!(arg, hir::GenericArg::Type(_)))
+ .nth(index) else { return false; };
+ error.obligation.cause.span = arg
+ .span()
+ .find_ancestor_in_same_ctxt(error.obligation.cause.span)
+ .unwrap_or(arg.span());
+ true
+ }
+
+ fn find_ambiguous_parameter_in<T: TypeVisitable<'tcx>>(
+ &self,
+ item_def_id: DefId,
+ t: T,
+ ) -> Option<ty::GenericArg<'tcx>> {
+ struct FindAmbiguousParameter<'a, 'tcx>(&'a FnCtxt<'a, 'tcx>, DefId);
+ impl<'tcx> TypeVisitor<'tcx> for FindAmbiguousParameter<'_, 'tcx> {
+ type BreakTy = ty::GenericArg<'tcx>;
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> std::ops::ControlFlow<Self::BreakTy> {
+ if let Some(origin) = self.0.type_var_origin(ty)
+ && let TypeVariableOriginKind::TypeParameterDefinition(_, Some(def_id)) =
+ origin.kind
+ && let generics = self.0.tcx.generics_of(self.1)
+ && let Some(index) = generics.param_def_id_to_index(self.0.tcx, def_id)
+ && let Some(subst) = ty::InternalSubsts::identity_for_item(self.0.tcx, self.1)
+ .get(index as usize)
+ {
+ ControlFlow::Break(*subst)
+ } else {
+ ty.super_visit_with(self)
+ }
+ }
+ }
+ t.visit_with(&mut FindAmbiguousParameter(self, item_def_id)).break_value()
+ }
+
+ fn label_fn_like(
+ &self,
+ err: &mut Diagnostic,
+ callable_def_id: Option<DefId>,
+ callee_ty: Option<Ty<'tcx>>,
+ // A specific argument should be labeled, instead of all of them
+ expected_idx: Option<usize>,
+ is_method: bool,
+ ) {
+ let Some(mut def_id) = callable_def_id else {
+ return;
+ };
+
+ if let Some(assoc_item) = self.tcx.opt_associated_item(def_id)
+ // Possibly points at either impl or trait item, so try to get it
+ // to point to trait item, then get the parent.
+ // This parent might be an impl in the case of an inherent function,
+ // but the next check will fail.
+ && let maybe_trait_item_def_id = assoc_item.trait_item_def_id.unwrap_or(def_id)
+ && let maybe_trait_def_id = self.tcx.parent(maybe_trait_item_def_id)
+ // Just an easy way to check "trait_def_id == Fn/FnMut/FnOnce"
+ && let Some(call_kind) = ty::ClosureKind::from_def_id(self.tcx, maybe_trait_def_id)
+ && let Some(callee_ty) = callee_ty
+ {
+ let callee_ty = callee_ty.peel_refs();
+ match *callee_ty.kind() {
+ ty::Param(param) => {
+ let param =
+ self.tcx.generics_of(self.body_id.owner).type_param(&param, self.tcx);
+ if param.kind.is_synthetic() {
+ // if it's `impl Fn() -> ..` then just fall down to the def-id based logic
+ def_id = param.def_id;
+ } else {
+ // Otherwise, find the predicate that makes this generic callable,
+ // and point at that.
+ let instantiated = self
+ .tcx
+ .explicit_predicates_of(self.body_id.owner)
+ .instantiate_identity(self.tcx);
+ // FIXME(compiler-errors): This could be problematic if something has two
+ // fn-like predicates with different args, but callable types really never
+ // do that, so it's OK.
+ for (predicate, span) in
+ std::iter::zip(instantiated.predicates, instantiated.spans)
+ {
+ if let ty::PredicateKind::Trait(pred) = predicate.kind().skip_binder()
+ && pred.self_ty().peel_refs() == callee_ty
+ && ty::ClosureKind::from_def_id(self.tcx, pred.def_id()).is_some()
+ {
+ err.span_note(span, "callable defined here");
+ return;
+ }
+ }
+ }
+ }
+ ty::Opaque(new_def_id, _)
+ | ty::Closure(new_def_id, _)
+ | ty::FnDef(new_def_id, _) => {
+ def_id = new_def_id;
+ }
+ _ => {
+ // Look for a user-provided impl of a `Fn` trait, and point to it.
+ let new_def_id = self.probe(|_| {
+ let trait_ref = ty::TraitRef::new(
+ call_kind.to_def_id(self.tcx),
+ self.tcx.mk_substs(
+ [
+ ty::GenericArg::from(callee_ty),
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: rustc_span::DUMMY_SP,
+ })
+ .into(),
+ ]
+ .into_iter(),
+ ),
+ );
+ let obligation = traits::Obligation::new(
+ traits::ObligationCause::dummy(),
+ self.param_env,
+ ty::Binder::dummy(ty::TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::NotConst,
+ polarity: ty::ImplPolarity::Positive,
+ }),
+ );
+ match SelectionContext::new(&self).select(&obligation) {
+ Ok(Some(traits::ImplSource::UserDefined(impl_source))) => {
+ Some(impl_source.impl_def_id)
+ }
+ _ => None,
+ }
+ });
+ if let Some(new_def_id) = new_def_id {
+ def_id = new_def_id;
+ } else {
+ return;
+ }
+ }
+ }
+ }
+
+ if let Some(def_span) = self.tcx.def_ident_span(def_id) && !def_span.is_dummy() {
+ let mut spans: MultiSpan = def_span.into();
+
+ let params = self
+ .tcx
+ .hir()
+ .get_if_local(def_id)
+ .and_then(|node| node.body_id())
+ .into_iter()
+ .flat_map(|id| self.tcx.hir().body(id).params)
+ .skip(if is_method { 1 } else { 0 });
+
+ for (_, param) in params
+ .into_iter()
+ .enumerate()
+ .filter(|(idx, _)| expected_idx.map_or(true, |expected_idx| expected_idx == *idx))
+ {
+ spans.push_span_label(param.span, "");
+ }
+
+ let def_kind = self.tcx.def_kind(def_id);
+ err.span_note(spans, &format!("{} defined here", def_kind.descr(def_id)));
+ } else if let Some(hir::Node::Expr(e)) = self.tcx.hir().get_if_local(def_id)
+ && let hir::ExprKind::Closure(hir::Closure { body, .. }) = &e.kind
+ {
+ let param = expected_idx
+ .and_then(|expected_idx| self.tcx.hir().body(*body).params.get(expected_idx));
+ let (kind, span) = if let Some(param) = param {
+ ("closure parameter", param.span)
+ } else {
+ ("closure", self.tcx.def_span(def_id))
+ };
+ err.span_note(span, &format!("{} defined here", kind));
+ } else {
+ let def_kind = self.tcx.def_kind(def_id);
+ err.span_note(
+ self.tcx.def_span(def_id),
+ &format!("{} defined here", def_kind.descr(def_id)),
+ );
+ }
+ }
+}
+
+fn find_param_in_ty<'tcx>(ty: Ty<'tcx>, param_to_point_at: ty::GenericArg<'tcx>) -> bool {
+ let mut walk = ty.walk();
+ while let Some(arg) = walk.next() {
+ if arg == param_to_point_at {
+ return true;
+ } else if let ty::GenericArgKind::Type(ty) = arg.unpack()
+ && let ty::Projection(..) = ty.kind()
+ {
+ // This logic may seem a bit strange, but typically when
+ // we have a projection type in a function signature, the
+ // argument that's being passed into that signature is
+ // not actually constraining that projection's substs in
+ // a meaningful way. So we skip it, and see improvements
+ // in some UI tests.
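+            //
+            // A hypothetical example: for `fn f<T: Iterator>(x: T::Item)`, the
+            // argument supplied for `x` pins down `T::Item` but says little
+            // about `T` itself, so it's not a useful place to point at for `T`.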
+ walk.skip_current_subtree();
+ }
+ }
+ false
+}
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs
new file mode 100644
index 000000000..0c600daf4
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs
@@ -0,0 +1,312 @@
+mod _impl;
+mod arg_matrix;
+mod checks;
+mod suggestions;
+
+pub use _impl::*;
+pub use suggestions::*;
+
+use crate::coercion::DynamicCoerceMany;
+use crate::{Diverges, EnclosingBreakables, Inherited, UnsafetyState};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir_analysis::astconv::AstConv;
+use rustc_infer::infer;
+use rustc_infer::infer::error_reporting::TypeErrCtxt;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, Const, Ty, TyCtxt};
+use rustc_session::Session;
+use rustc_span::symbol::Ident;
+use rustc_span::{self, Span};
+use rustc_trait_selection::traits::{ObligationCause, ObligationCauseCode};
+
+use std::cell::{Cell, RefCell};
+use std::ops::Deref;
+
+/// The `FnCtxt` stores type-checking context needed to type-check bodies of
+/// functions, closures, and `const`s, including performing type inference
+/// with [`InferCtxt`].
+///
+/// This is in contrast to [`ItemCtxt`], which is used to type-check item *signatures*
+/// and thus does not perform type inference.
+///
+/// See [`ItemCtxt`]'s docs for more.
+///
+/// [`ItemCtxt`]: rustc_hir_analysis::collect::ItemCtxt
+/// [`InferCtxt`]: infer::InferCtxt
+pub struct FnCtxt<'a, 'tcx> {
+ pub(super) body_id: hir::HirId,
+
+ /// The parameter environment used for proving trait obligations
+ /// in this function. This can change when we descend into
+ /// closures (as they bring new things into scope), hence it is
+ /// not part of `Inherited` (as of the time of this writing,
+ /// closures do not yet change the environment, but they will
+ /// eventually).
+ pub(super) param_env: ty::ParamEnv<'tcx>,
+
+ /// Number of errors that had been reported when we started
+ /// checking this function. On exit, if we find that *more* errors
+ /// have been reported, we will skip regionck and other work that
+ /// expects the types within the function to be consistent.
+ // FIXME(matthewjasper) This should not exist, and it's not correct
+ // if type checking is run in parallel.
+ err_count_on_creation: usize,
+
+ /// If `Some`, this stores coercion information for returned
+ /// expressions. If `None`, this is in a context where return is
+ /// inappropriate, such as a const expression.
+ ///
+ /// This is a `RefCell<DynamicCoerceMany>`, which means that we
+ /// can track all the return expressions and then use them to
+ /// compute a useful coercion from the set, similar to a match
+ /// expression or other branching context. You can use methods
+ /// like `expected_ty` to access the declared return type (if
+ /// any).
+ pub(super) ret_coercion: Option<RefCell<DynamicCoerceMany<'tcx>>>,
+
+    /// Used exclusively to reduce the cost of the advanced evaluation
+    /// used for more helpful diagnostics.
+ pub(super) in_tail_expr: bool,
+
+ /// First span of a return site that we find. Used in error messages.
+ pub(super) ret_coercion_span: Cell<Option<Span>>,
+
+ pub(super) resume_yield_tys: Option<(Ty<'tcx>, Ty<'tcx>)>,
+
+ pub(super) ps: Cell<UnsafetyState>,
+
+ /// Whether the last checked node generates a divergence (e.g.,
+ /// `return` will set this to `Always`). In general, when entering
+ /// an expression or other node in the tree, the initial value
+ /// indicates whether prior parts of the containing expression may
+ /// have diverged. It is then typically set to `Maybe` (and the
+ /// old value remembered) for processing the subparts of the
+ /// current expression. As each subpart is processed, they may set
+ /// the flag to `Always`, etc. Finally, at the end, we take the
+ /// result and "union" it with the original value, so that when we
+ /// return the flag indicates if any subpart of the parent
+ /// expression (up to and including this part) has diverged. So,
+    /// if you read it after evaluating a subexpression `X`, the value
+    /// you get indicates whether any subexpression evaluated
+    /// up to and including `X` diverged.
+ ///
+ /// We currently use this flag only for diagnostic purposes:
+ ///
+ /// - To warn about unreachable code: if, after processing a
+ /// sub-expression but before we have applied the effects of the
+ /// current node, we see that the flag is set to `Always`, we
+ /// can issue a warning. This corresponds to something like
+ /// `foo(return)`; we warn on the `foo()` expression. (We then
+ /// update the flag to `WarnedAlways` to suppress duplicate
+ /// reports.) Similarly, if we traverse to a fresh statement (or
+ /// tail expression) from an `Always` setting, we will issue a
+ /// warning. This corresponds to something like `{return;
+ /// foo();}` or `{return; 22}`, where we would warn on the
+ /// `foo()` or `22`.
+ ///
+ /// An expression represents dead code if, after checking it,
+ /// the diverges flag is set to something other than `Maybe`.
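+    ///
+    /// A minimal sketch of the second warning case above (illustrative only,
+    /// not taken from the compiler's test suite):
+    /// ```
+    /// fn f() -> i32 {
+    ///     return 1;
+    ///     22 // warning: unreachable expression
+    /// }
+    /// ```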
+ pub(super) diverges: Cell<Diverges>,
+
+ /// Whether any child nodes have any type errors.
+ pub(super) has_errors: Cell<bool>,
+
+ pub(super) enclosing_breakables: RefCell<EnclosingBreakables<'tcx>>,
+
+ pub(super) inh: &'a Inherited<'tcx>,
+
+ /// True if the function or closure's return type is known before
+ /// entering the function/closure, i.e. if the return type is
+ /// either given explicitly or inferred from, say, an `Fn*` trait
+ /// bound. Used for diagnostic purposes only.
+ pub(super) return_type_pre_known: bool,
+
+    /// True if the return type contains an opaque type.
+ pub(super) return_type_has_opaque: bool,
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn new(
+ inh: &'a Inherited<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ ) -> FnCtxt<'a, 'tcx> {
+ FnCtxt {
+ body_id,
+ param_env,
+ err_count_on_creation: inh.tcx.sess.err_count(),
+ ret_coercion: None,
+ in_tail_expr: false,
+ ret_coercion_span: Cell::new(None),
+ resume_yield_tys: None,
+ ps: Cell::new(UnsafetyState::function(hir::Unsafety::Normal, hir::CRATE_HIR_ID)),
+ diverges: Cell::new(Diverges::Maybe),
+ has_errors: Cell::new(false),
+ enclosing_breakables: RefCell::new(EnclosingBreakables {
+ stack: Vec::new(),
+ by_id: Default::default(),
+ }),
+ inh,
+ return_type_pre_known: true,
+ return_type_has_opaque: false,
+ }
+ }
+
+ pub fn cause(&self, span: Span, code: ObligationCauseCode<'tcx>) -> ObligationCause<'tcx> {
+ ObligationCause::new(span, self.body_id, code)
+ }
+
+ pub fn misc(&self, span: Span) -> ObligationCause<'tcx> {
+ self.cause(span, ObligationCauseCode::MiscObligation)
+ }
+
+ pub fn sess(&self) -> &Session {
+ &self.tcx.sess
+ }
+
+    /// Creates a `TypeErrCtxt` with a reference to the in-progress
+ /// `TypeckResults` which is used for diagnostics.
+ /// Use [`InferCtxt::err_ctxt`] to start one without a `TypeckResults`.
+ ///
+ /// [`InferCtxt::err_ctxt`]: infer::InferCtxt::err_ctxt
+ pub fn err_ctxt(&'a self) -> TypeErrCtxt<'a, 'tcx> {
+ TypeErrCtxt { infcx: &self.infcx, typeck_results: Some(self.typeck_results.borrow()) }
+ }
+
+ pub fn errors_reported_since_creation(&self) -> bool {
+ self.tcx.sess.err_count() > self.err_count_on_creation
+ }
+}
+
+impl<'a, 'tcx> Deref for FnCtxt<'a, 'tcx> {
+ type Target = Inherited<'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.inh
+ }
+}
+
+impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn item_def_id(&self) -> Option<DefId> {
+ None
+ }
+
+ fn get_type_parameter_bounds(
+ &self,
+ _: Span,
+ def_id: DefId,
+ _: Ident,
+ ) -> ty::GenericPredicates<'tcx> {
+ let tcx = self.tcx;
+ let item_def_id = tcx.hir().ty_param_owner(def_id.expect_local());
+ let generics = tcx.generics_of(item_def_id);
+ let index = generics.param_def_id_to_index[&def_id];
+ ty::GenericPredicates {
+ parent: None,
+ predicates: tcx.arena.alloc_from_iter(
+ self.param_env.caller_bounds().iter().filter_map(|predicate| {
+ match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(data) if data.self_ty().is_param(index) => {
+ // HACK(eddyb) should get the original `Span`.
+ let span = tcx.def_span(def_id);
+ Some((predicate, span))
+ }
+ _ => None,
+ }
+ }),
+ ),
+ }
+ }
+
+ fn re_infer(&self, def: Option<&ty::GenericParamDef>, span: Span) -> Option<ty::Region<'tcx>> {
+ let v = match def {
+ Some(def) => infer::EarlyBoundRegion(span, def.name),
+ None => infer::MiscVariable(span),
+ };
+ Some(self.next_region_var(v))
+ }
+
+ fn allow_ty_infer(&self) -> bool {
+ true
+ }
+
+ fn ty_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx> {
+ if let Some(param) = param {
+ if let GenericArgKind::Type(ty) = self.var_for_def(span, param).unpack() {
+ return ty;
+ }
+ unreachable!()
+ } else {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span,
+ })
+ }
+ }
+
+ fn ct_infer(
+ &self,
+ ty: Ty<'tcx>,
+ param: Option<&ty::GenericParamDef>,
+ span: Span,
+ ) -> Const<'tcx> {
+ if let Some(param) = param {
+ if let GenericArgKind::Const(ct) = self.var_for_def(span, param).unpack() {
+ return ct;
+ }
+ unreachable!()
+ } else {
+ self.next_const_var(
+ ty,
+ ConstVariableOrigin { kind: ConstVariableOriginKind::ConstInference, span },
+ )
+ }
+ }
+
+ fn projected_ty_from_poly_trait_ref(
+ &self,
+ span: Span,
+ item_def_id: DefId,
+ item_segment: &hir::PathSegment<'_>,
+ poly_trait_ref: ty::PolyTraitRef<'tcx>,
+ ) -> Ty<'tcx> {
+ let trait_ref = self.replace_bound_vars_with_fresh_vars(
+ span,
+ infer::LateBoundRegionConversionTime::AssocTypeProjection(item_def_id),
+ poly_trait_ref,
+ );
+
+ let item_substs = <dyn AstConv<'tcx>>::create_substs_for_associated_item(
+ self,
+ span,
+ item_def_id,
+ item_segment,
+ trait_ref.substs,
+ );
+
+ self.tcx().mk_projection(item_def_id, item_substs)
+ }
+
+ fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if ty.has_escaping_bound_vars() {
+ ty // FIXME: normalization and escaping regions
+ } else {
+ self.normalize_associated_types_in(span, ty)
+ }
+ }
+
+ fn set_tainted_by_errors(&self) {
+ self.infcx.set_tainted_by_errors()
+ }
+
+ fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, _span: Span) {
+ self.write_ty(hir_id, ty)
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
new file mode 100644
index 000000000..4db9c56f9
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
@@ -0,0 +1,1250 @@
+use super::FnCtxt;
+
+use crate::errors::{AddReturnTypeSuggestion, ExpectedReturnTypeLabel};
+use rustc_ast::util::parser::{ExprPrecedence, PREC_POSTFIX};
+use rustc_errors::{Applicability, Diagnostic, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{
+ Expr, ExprKind, GenericBound, Node, Path, QPath, Stmt, StmtKind, TyKind, WherePredicate,
+};
+use rustc_hir_analysis::astconv::AstConv;
+use rustc_infer::infer::{self, TyCtxtInferExt};
+use rustc_infer::traits::{self, StatementAsExpression};
+use rustc_middle::lint::in_external_macro;
+use rustc_middle::ty::{self, Binder, IsSuggestable, ToPredicate, Ty};
+use rustc_session::errors::ExprParenthesesNeeded;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits::error_reporting::DefIdOrName;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub(in super::super) fn suggest_semicolon_at_end(&self, span: Span, err: &mut Diagnostic) {
+ err.span_suggestion_short(
+ span.shrink_to_hi(),
+ "consider using a semicolon here",
+ ";",
+ Applicability::MachineApplicable,
+ );
+ }
+
+ /// On implicit return expressions with mismatched types, provides the following suggestions:
+ ///
+ /// - Points out the method's return type as the reason for the expected type.
+ /// - Possible missing semicolon.
+ /// - Possible missing return type if the return type is the default, and not `fn main()`.
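+    ///
+    /// A rough illustration of the kind of code this targets (hypothetical example,
+    /// not taken from the compiler's test suite):
+    /// ```compile_fail,E0308
+    /// fn foo() -> usize {
+    ///     true // expected `usize`, found `bool`; the declared return type is pointed at
+    /// }
+    /// ```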
+ pub fn suggest_mismatched_types_on_tail(
+ &self,
+ err: &mut Diagnostic,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ blk_id: hir::HirId,
+ ) -> bool {
+ let expr = expr.peel_drop_temps();
+ self.suggest_missing_semicolon(err, expr, expected, false);
+ let mut pointing_at_return_type = false;
+ if let Some((fn_decl, can_suggest)) = self.get_fn_decl(blk_id) {
+ let fn_id = self.tcx.hir().get_return_block(blk_id).unwrap();
+ pointing_at_return_type = self.suggest_missing_return_type(
+ err,
+ &fn_decl,
+ expected,
+ found,
+ can_suggest,
+ fn_id,
+ );
+ self.suggest_missing_break_or_return_expr(
+ err, expr, &fn_decl, expected, found, blk_id, fn_id,
+ );
+ }
+ pointing_at_return_type
+ }
+
+    /// When encountering an fn-like type, try accessing the output of the type
+    /// and suggest calling it if the output satisfies a predicate (i.e. if the
+    /// output has a method or a field):
+ /// ```compile_fail,E0308
+ /// fn foo(x: usize) -> usize { x }
+ /// let x: usize = foo; // suggest calling the `foo` function: `foo(42)`
+ /// ```
+ pub(crate) fn suggest_fn_call(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ found: Ty<'tcx>,
+ can_satisfy: impl FnOnce(Ty<'tcx>) -> bool,
+ ) -> bool {
+ let Some((def_id_or_name, output, inputs)) = self.extract_callable_info(expr, found)
+ else { return false; };
+ if can_satisfy(output) {
+ let (sugg_call, mut applicability) = match inputs.len() {
+ 0 => ("".to_string(), Applicability::MachineApplicable),
+ 1..=4 => (
+ inputs
+ .iter()
+ .map(|ty| {
+ if ty.is_suggestable(self.tcx, false) {
+ format!("/* {ty} */")
+ } else {
+ "/* value */".to_string()
+ }
+ })
+ .collect::<Vec<_>>()
+ .join(", "),
+ Applicability::HasPlaceholders,
+ ),
+ _ => ("/* ... */".to_string(), Applicability::HasPlaceholders),
+ };
+
+ let msg = match def_id_or_name {
+ DefIdOrName::DefId(def_id) => match self.tcx.def_kind(def_id) {
+ DefKind::Ctor(CtorOf::Struct, _) => "construct this tuple struct".to_string(),
+ DefKind::Ctor(CtorOf::Variant, _) => "construct this tuple variant".to_string(),
+ kind => format!("call this {}", kind.descr(def_id)),
+ },
+ DefIdOrName::Name(name) => format!("call this {name}"),
+ };
+
+ let sugg = match expr.kind {
+ hir::ExprKind::Call(..)
+ | hir::ExprKind::Path(..)
+ | hir::ExprKind::Index(..)
+ | hir::ExprKind::Lit(..) => {
+ vec![(expr.span.shrink_to_hi(), format!("({sugg_call})"))]
+ }
+ hir::ExprKind::Closure { .. } => {
+ // Might be `{ expr } || { bool }`
+ applicability = Applicability::MaybeIncorrect;
+ vec![
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), format!(")({sugg_call})")),
+ ]
+ }
+ _ => {
+ vec![
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), format!(")({sugg_call})")),
+ ]
+ }
+ };
+
+ err.multipart_suggestion_verbose(
+ format!("use parentheses to {msg}"),
+ sugg,
+ applicability,
+ );
+ return true;
+ }
+ false
+ }
+
+ /// Extracts information about a callable type for diagnostics. This is a
+ /// heuristic -- it doesn't necessarily mean that a type is always callable,
+ /// because the callable type must also be well-formed to be called.
+ pub(in super::super) fn extract_callable_info(
+ &self,
+ expr: &Expr<'_>,
+ found: Ty<'tcx>,
+ ) -> Option<(DefIdOrName, Ty<'tcx>, Vec<Ty<'tcx>>)> {
+ // Autoderef is useful here because sometimes we box callables, etc.
+ let Some((def_id_or_name, output, inputs)) = self.autoderef(expr.span, found).silence_errors().find_map(|(found, _)| {
+ match *found.kind() {
+ ty::FnPtr(fn_sig) =>
+ Some((DefIdOrName::Name("function pointer"), fn_sig.output(), fn_sig.inputs())),
+ ty::FnDef(def_id, _) => {
+ let fn_sig = found.fn_sig(self.tcx);
+ Some((DefIdOrName::DefId(def_id), fn_sig.output(), fn_sig.inputs()))
+ }
+ ty::Closure(def_id, substs) => {
+ let fn_sig = substs.as_closure().sig();
+ Some((DefIdOrName::DefId(def_id), fn_sig.output(), fn_sig.inputs().map_bound(|inputs| &inputs[1..])))
+ }
+ ty::Opaque(def_id, substs) => {
+ self.tcx.bound_item_bounds(def_id).subst(self.tcx, substs).iter().find_map(|pred| {
+ if let ty::PredicateKind::Projection(proj) = pred.kind().skip_binder()
+ && Some(proj.projection_ty.item_def_id) == self.tcx.lang_items().fn_once_output()
+ // args tuple will always be substs[1]
+ && let ty::Tuple(args) = proj.projection_ty.substs.type_at(1).kind()
+ {
+ Some((
+ DefIdOrName::DefId(def_id),
+ pred.kind().rebind(proj.term.ty().unwrap()),
+ pred.kind().rebind(args.as_slice()),
+ ))
+ } else {
+ None
+ }
+ })
+ }
+ ty::Dynamic(data, _, ty::Dyn) => {
+ data.iter().find_map(|pred| {
+ if let ty::ExistentialPredicate::Projection(proj) = pred.skip_binder()
+ && Some(proj.item_def_id) == self.tcx.lang_items().fn_once_output()
+ // for existential projection, substs are shifted over by 1
+ && let ty::Tuple(args) = proj.substs.type_at(0).kind()
+ {
+ Some((
+ DefIdOrName::Name("trait object"),
+ pred.rebind(proj.term.ty().unwrap()),
+ pred.rebind(args.as_slice()),
+ ))
+ } else {
+ None
+ }
+ })
+ }
+ ty::Param(param) => {
+ let def_id = self.tcx.generics_of(self.body_id.owner).type_param(&param, self.tcx).def_id;
+ self.tcx.predicates_of(self.body_id.owner).predicates.iter().find_map(|(pred, _)| {
+ if let ty::PredicateKind::Projection(proj) = pred.kind().skip_binder()
+ && Some(proj.projection_ty.item_def_id) == self.tcx.lang_items().fn_once_output()
+ && proj.projection_ty.self_ty() == found
+ // args tuple will always be substs[1]
+ && let ty::Tuple(args) = proj.projection_ty.substs.type_at(1).kind()
+ {
+ Some((
+ DefIdOrName::DefId(def_id),
+ pred.kind().rebind(proj.term.ty().unwrap()),
+ pred.kind().rebind(args.as_slice()),
+ ))
+ } else {
+ None
+ }
+ })
+ }
+ _ => None,
+ }
+ }) else { return None; };
+
+ let output = self.replace_bound_vars_with_fresh_vars(expr.span, infer::FnCall, output);
+ let inputs = inputs
+ .skip_binder()
+ .iter()
+ .map(|ty| {
+ self.replace_bound_vars_with_fresh_vars(
+ expr.span,
+ infer::FnCall,
+ inputs.rebind(*ty),
+ )
+ })
+ .collect();
+
+        // We don't want to register any extra obligations here: they should
+        // already be implied by well-formedness, and registering them could also
+        // result in erroneous errors later on.
+ let infer::InferOk { value: output, obligations: _ } =
+ self.normalize_associated_types_in_as_infer_ok(expr.span, output);
+
+ if output.is_ty_var() { None } else { Some((def_id_or_name, output, inputs)) }
+ }
+
+ pub fn suggest_two_fn_call(
+ &self,
+ err: &mut Diagnostic,
+ lhs_expr: &'tcx hir::Expr<'tcx>,
+ lhs_ty: Ty<'tcx>,
+ rhs_expr: &'tcx hir::Expr<'tcx>,
+ rhs_ty: Ty<'tcx>,
+ can_satisfy: impl FnOnce(Ty<'tcx>, Ty<'tcx>) -> bool,
+ ) -> bool {
+ let Some((_, lhs_output_ty, lhs_inputs)) = self.extract_callable_info(lhs_expr, lhs_ty)
+ else { return false; };
+ let Some((_, rhs_output_ty, rhs_inputs)) = self.extract_callable_info(rhs_expr, rhs_ty)
+ else { return false; };
+
+ if can_satisfy(lhs_output_ty, rhs_output_ty) {
+ let mut sugg = vec![];
+ let mut applicability = Applicability::MachineApplicable;
+
+ for (expr, inputs) in [(lhs_expr, lhs_inputs), (rhs_expr, rhs_inputs)] {
+ let (sugg_call, this_applicability) = match inputs.len() {
+ 0 => ("".to_string(), Applicability::MachineApplicable),
+ 1..=4 => (
+ inputs
+ .iter()
+ .map(|ty| {
+ if ty.is_suggestable(self.tcx, false) {
+ format!("/* {ty} */")
+ } else {
+ "/* value */".to_string()
+ }
+ })
+ .collect::<Vec<_>>()
+ .join(", "),
+ Applicability::HasPlaceholders,
+ ),
+ _ => ("/* ... */".to_string(), Applicability::HasPlaceholders),
+ };
+
+ applicability = applicability.max(this_applicability);
+
+ match expr.kind {
+ hir::ExprKind::Call(..)
+ | hir::ExprKind::Path(..)
+ | hir::ExprKind::Index(..)
+ | hir::ExprKind::Lit(..) => {
+ sugg.extend([(expr.span.shrink_to_hi(), format!("({sugg_call})"))]);
+ }
+ hir::ExprKind::Closure { .. } => {
+ // Might be `{ expr } || { bool }`
+ applicability = Applicability::MaybeIncorrect;
+ sugg.extend([
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), format!(")({sugg_call})")),
+ ]);
+ }
+ _ => {
+ sugg.extend([
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), format!(")({sugg_call})")),
+ ]);
+ }
+ }
+ }
+
+ err.multipart_suggestion_verbose(
+ format!("use parentheses to call these"),
+ sugg,
+ applicability,
+ );
+
+ true
+ } else {
+ false
+ }
+ }
+
+ pub fn suggest_deref_ref_or_into(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ ) -> bool {
+ let expr = expr.peel_blocks();
+ if let Some((sp, msg, suggestion, applicability, verbose, annotation)) =
+ self.check_ref(expr, found, expected)
+ {
+ if verbose {
+ err.span_suggestion_verbose(sp, &msg, suggestion, applicability);
+ } else {
+ err.span_suggestion(sp, &msg, suggestion, applicability);
+ }
+ if annotation {
+ let suggest_annotation = match expr.peel_drop_temps().kind {
+ hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Not, _) => "&",
+ hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Mut, _) => "&mut ",
+ _ => return true,
+ };
+ let mut tuple_indexes = Vec::new();
+ let mut expr_id = expr.hir_id;
+ for (parent_id, node) in self.tcx.hir().parent_iter(expr.hir_id) {
+ match node {
+ Node::Expr(&Expr { kind: ExprKind::Tup(subs), .. }) => {
+ tuple_indexes.push(
+ subs.iter()
+ .enumerate()
+ .find(|(_, sub_expr)| sub_expr.hir_id == expr_id)
+ .unwrap()
+ .0,
+ );
+ expr_id = parent_id;
+ }
+ Node::Local(local) => {
+ if let Some(mut ty) = local.ty {
+ while let Some(index) = tuple_indexes.pop() {
+ match ty.kind {
+ TyKind::Tup(tys) => ty = &tys[index],
+ _ => return true,
+ }
+ }
+ let annotation_span = ty.span;
+ err.span_suggestion(
+ annotation_span.with_hi(annotation_span.lo()),
+ format!("alternatively, consider changing the type annotation"),
+ suggest_annotation,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ break;
+ }
+ _ => break,
+ }
+ }
+ }
+ return true;
+ } else if self.suggest_else_fn_with_closure(err, expr, found, expected) {
+ return true;
+ } else if self.suggest_fn_call(err, expr, found, |output| self.can_coerce(output, expected))
+ && let ty::FnDef(def_id, ..) = &found.kind()
+ && let Some(sp) = self.tcx.hir().span_if_local(*def_id)
+ {
+ err.span_label(sp, format!("{found} defined here"));
+ return true;
+ } else if self.check_for_cast(err, expr, found, expected, expected_ty_expr) {
+ return true;
+ } else {
+ let methods = self.get_conversion_methods(expr.span, expected, found, expr.hir_id);
+ if !methods.is_empty() {
+ let mut suggestions = methods.iter()
+ .filter_map(|conversion_method| {
+ let receiver_method_ident = expr.method_ident();
+ if let Some(method_ident) = receiver_method_ident
+ && method_ident.name == conversion_method.name
+ {
+ return None // do not suggest code that is already there (#53348)
+ }
+
+ let method_call_list = [sym::to_vec, sym::to_string];
+ let mut sugg = if let ExprKind::MethodCall(receiver_method, ..) = expr.kind
+ && receiver_method.ident.name == sym::clone
+ && method_call_list.contains(&conversion_method.name)
+ // If receiver is `.clone()` and found type has one of those methods,
+ // we guess that the user wants to convert from a slice type (`&[]` or `&str`)
+ // to an owned type (`Vec` or `String`). These conversions clone internally,
+ // so we remove the user's `clone` call.
+ {
+ vec![(
+ receiver_method.ident.span,
+ conversion_method.name.to_string()
+ )]
+ } else if expr.precedence().order()
+ < ExprPrecedence::MethodCall.order()
+ {
+ vec![
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), format!(").{}()", conversion_method.name)),
+ ]
+ } else {
+ vec![(expr.span.shrink_to_hi(), format!(".{}()", conversion_method.name))]
+ };
+ let struct_pat_shorthand_field = self.maybe_get_struct_pattern_shorthand_field(expr);
+ if let Some(name) = struct_pat_shorthand_field {
+ sugg.insert(
+ 0,
+ (expr.span.shrink_to_lo(), format!("{}: ", name)),
+ );
+ }
+ Some(sugg)
+ })
+ .peekable();
+ if suggestions.peek().is_some() {
+ err.multipart_suggestions(
+ "try using a conversion method",
+ suggestions,
+ Applicability::MaybeIncorrect,
+ );
+ return true;
+ }
+ } else if let ty::Adt(found_adt, found_substs) = found.kind()
+ && self.tcx.is_diagnostic_item(sym::Option, found_adt.did())
+ && let ty::Adt(expected_adt, expected_substs) = expected.kind()
+ && self.tcx.is_diagnostic_item(sym::Option, expected_adt.did())
+ && let ty::Ref(_, inner_ty, _) = expected_substs.type_at(0).kind()
+ && inner_ty.is_str()
+ {
+ let ty = found_substs.type_at(0);
+ let mut peeled = ty;
+ let mut ref_cnt = 0;
+ while let ty::Ref(_, inner, _) = peeled.kind() {
+ peeled = *inner;
+ ref_cnt += 1;
+ }
+ if let ty::Adt(adt, _) = peeled.kind()
+ && self.tcx.is_diagnostic_item(sym::String, adt.did())
+ {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ "try converting the passed type into a `&str`",
+ format!(".map(|x| &*{}x)", "*".repeat(ref_cnt)),
+ Applicability::MaybeIncorrect,
+ );
+ return true;
+ }
+ }
+ }
+
+ false
+ }
+
+    /// When a boxed value is expected but the found value is allocated on the stack, suggest
+    /// allocating it on the heap by calling `Box::new()`.
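+    ///
+    /// A rough illustration (hypothetical example, not taken from the test suite):
+    /// ```compile_fail,E0308
+    /// fn foo() -> Box<u32> {
+    ///     42 // `Box::new(42)` is suggested
+    /// }
+    /// ```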
+ pub(in super::super) fn suggest_boxing_when_appropriate(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) -> bool {
+ if self.tcx.hir().is_inside_const_context(expr.hir_id) {
+ // Do not suggest `Box::new` in const context.
+ return false;
+ }
+ if !expected.is_box() || found.is_box() {
+ return false;
+ }
+ let boxed_found = self.tcx.mk_box(found);
+ if self.can_coerce(boxed_found, expected) {
+ err.multipart_suggestion(
+ "store this in the heap by calling `Box::new`",
+ vec![
+ (expr.span.shrink_to_lo(), "Box::new(".to_string()),
+ (expr.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ err.note(
+ "for more on the distinction between the stack and the heap, read \
+ https://doc.rust-lang.org/book/ch15-01-box.html, \
+ https://doc.rust-lang.org/rust-by-example/std/box.html, and \
+ https://doc.rust-lang.org/std/boxed/index.html",
+ );
+ true
+ } else {
+ false
+ }
+ }
+
+    /// When encountering a closure that captures variables where a `fn` pointer is expected,
+    /// suggest using a non-capturing closure.
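+    ///
+    /// A minimal sketch of the situation (hypothetical example, not taken from the test suite):
+    /// ```compile_fail,E0308
+    /// let x = 1;
+    /// let f: fn() -> i32 = || x; // `x` captured here, so the closure cannot coerce to `fn`
+    /// ```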
+ pub(in super::super) fn suggest_no_capture_closure(
+ &self,
+ err: &mut Diagnostic,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) -> bool {
+ if let (ty::FnPtr(_), ty::Closure(def_id, _)) = (expected.kind(), found.kind()) {
+ if let Some(upvars) = self.tcx.upvars_mentioned(*def_id) {
+                // Report up to four captured upvars to reduce the number of error
+                // messages reported back to the user.
+ let spans_and_labels = upvars
+ .iter()
+ .take(4)
+ .map(|(var_hir_id, upvar)| {
+ let var_name = self.tcx.hir().name(*var_hir_id).to_string();
+ let msg = format!("`{}` captured here", var_name);
+ (upvar.span, msg)
+ })
+ .collect::<Vec<_>>();
+
+ let mut multi_span: MultiSpan =
+ spans_and_labels.iter().map(|(sp, _)| *sp).collect::<Vec<_>>().into();
+ for (sp, label) in spans_and_labels {
+ multi_span.push_span_label(sp, label);
+ }
+ err.span_note(
+ multi_span,
+ "closures can only be coerced to `fn` types if they do not capture any variables"
+ );
+ return true;
+ }
+ }
+ false
+ }
+
+ /// When encountering an `impl Future` where `BoxFuture` is expected, suggest `Box::pin`.
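+    ///
+    /// A rough sketch of the shape of code involved (hypothetical; `BoxFuture<'_, T>` is
+    /// essentially `Pin<Box<dyn Future<Output = T> + Send + '_>>`):
+    /// ```compile_fail,E0308
+    /// use std::future::Future;
+    /// use std::pin::Pin;
+    /// fn f() -> Pin<Box<dyn Future<Output = i32>>> {
+    ///     async { 1 } // `Box::pin(async { 1 })` is suggested
+    /// }
+    /// ```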
+ #[instrument(skip(self, err))]
+ pub(in super::super) fn suggest_calling_boxed_future_when_appropriate(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) -> bool {
+ // Handle #68197.
+
+ if self.tcx.hir().is_inside_const_context(expr.hir_id) {
+ // Do not suggest `Box::new` in const context.
+ return false;
+ }
+ let pin_did = self.tcx.lang_items().pin_type();
+ // This guards the `unwrap` and `mk_box` below.
+ if pin_did.is_none() || self.tcx.lang_items().owned_box().is_none() {
+ return false;
+ }
+ let box_found = self.tcx.mk_box(found);
+ let pin_box_found = self.tcx.mk_lang_item(box_found, LangItem::Pin).unwrap();
+ let pin_found = self.tcx.mk_lang_item(found, LangItem::Pin).unwrap();
+ match expected.kind() {
+ ty::Adt(def, _) if Some(def.did()) == pin_did => {
+ if self.can_coerce(pin_box_found, expected) {
+ debug!("can coerce {:?} to {:?}, suggesting Box::pin", pin_box_found, expected);
+ match found.kind() {
+ ty::Adt(def, _) if def.is_box() => {
+ err.help("use `Box::pin`");
+ }
+ _ => {
+ err.multipart_suggestion(
+ "you need to pin and box this expression",
+ vec![
+ (expr.span.shrink_to_lo(), "Box::pin(".to_string()),
+ (expr.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ true
+ } else if self.can_coerce(pin_found, expected) {
+ match found.kind() {
+ ty::Adt(def, _) if def.is_box() => {
+ err.help("use `Box::pin`");
+ true
+ }
+ _ => false,
+ }
+ } else {
+ false
+ }
+ }
+ ty::Adt(def, _) if def.is_box() && self.can_coerce(box_found, expected) => {
+ // Check if the parent expression is a call to Pin::new. If it
+ // is and we were expecting a Box, ergo Pin<Box<expected>>, we
+ // can suggest Box::pin.
+ let parent = self.tcx.hir().get_parent_node(expr.hir_id);
+ let Some(Node::Expr(Expr { kind: ExprKind::Call(fn_name, _), .. })) = self.tcx.hir().find(parent) else {
+ return false;
+ };
+ match fn_name.kind {
+ ExprKind::Path(QPath::TypeRelative(
+ hir::Ty {
+ kind: TyKind::Path(QPath::Resolved(_, Path { res: recv_ty, .. })),
+ ..
+ },
+ method,
+ )) if recv_ty.opt_def_id() == pin_did && method.ident.name == sym::new => {
+ err.span_suggestion(
+ fn_name.span,
+ "use `Box::pin` to pin and box this expression",
+ "Box::pin",
+ Applicability::MachineApplicable,
+ );
+ true
+ }
+ _ => false,
+ }
+ }
+ _ => false,
+ }
+ }
+
+ /// A common error is to forget to add a semicolon at the end of a block, e.g.,
+ ///
+ /// ```compile_fail,E0308
+ /// # fn bar_that_returns_u32() -> u32 { 4 }
+ /// fn foo() {
+ /// bar_that_returns_u32()
+ /// }
+ /// ```
+ ///
+ /// This routine checks if the return expression in a block would make sense on its own as a
+ /// statement and the return type has been left as default or has been specified as `()`. If so,
+ /// it suggests adding a semicolon.
+ ///
+ /// If the expression is the expression of a closure without block (`|| expr`), a
+ /// block is needed to be added too (`|| { expr; }`). This is denoted by `needs_block`.
+ pub fn suggest_missing_semicolon(
+ &self,
+ err: &mut Diagnostic,
+ expression: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ needs_block: bool,
+ ) {
+ if expected.is_unit() {
+ // `BlockTailExpression` only relevant if the tail expr would be
+ // useful on its own.
+ match expression.kind {
+ ExprKind::Call(..)
+ | ExprKind::MethodCall(..)
+ | ExprKind::Loop(..)
+ | ExprKind::If(..)
+ | ExprKind::Match(..)
+ | ExprKind::Block(..)
+ if expression.can_have_side_effects()
+ // If the expression is from an external macro, then do not suggest
+ // adding a semicolon, because there's nowhere to put it.
+ // See issue #81943.
+ && !in_external_macro(self.tcx.sess, expression.span) =>
+ {
+ if needs_block {
+ err.multipart_suggestion(
+ "consider using a semicolon here",
+ vec![
+ (expression.span.shrink_to_lo(), "{ ".to_owned()),
+ (expression.span.shrink_to_hi(), "; }".to_owned()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_suggestion(
+ expression.span.shrink_to_hi(),
+ "consider using a semicolon here",
+ ";",
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ _ => (),
+ }
+ }
+ }
+
+ /// A possible error is to forget to add a return type that is needed:
+ ///
+ /// ```compile_fail,E0308
+ /// # fn bar_that_returns_u32() -> u32 { 4 }
+ /// fn foo() {
+ /// bar_that_returns_u32()
+ /// }
+ /// ```
+ ///
+    /// This routine checks whether the return type is left as default, the method is not part of
+    /// an `impl` block, and it isn't the `main` method. If so, it suggests setting the return
+    /// type.
+ pub(in super::super) fn suggest_missing_return_type(
+ &self,
+ err: &mut Diagnostic,
+ fn_decl: &hir::FnDecl<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ can_suggest: bool,
+ fn_id: hir::HirId,
+ ) -> bool {
+ let found =
+ self.resolve_numeric_literals_with_default(self.resolve_vars_if_possible(found));
+ // Only suggest changing the return type for methods that
+ // haven't set a return type at all (and aren't `fn main()` or an impl).
+ match &fn_decl.output {
+ &hir::FnRetTy::DefaultReturn(span) if expected.is_unit() && !can_suggest => {
+ // `fn main()` must return `()`, do not suggest changing return type
+ err.subdiagnostic(ExpectedReturnTypeLabel::Unit { span });
+ return true;
+ }
+ &hir::FnRetTy::DefaultReturn(span) if expected.is_unit() => {
+ if found.is_suggestable(self.tcx, false) {
+ err.subdiagnostic(AddReturnTypeSuggestion::Add { span, found: found.to_string() });
+ return true;
+ } else if let ty::Closure(_, substs) = found.kind()
+ // FIXME(compiler-errors): Get better at printing binders...
+ && let closure = substs.as_closure()
+ && closure.sig().is_suggestable(self.tcx, false)
+ {
+ err.subdiagnostic(AddReturnTypeSuggestion::Add { span, found: closure.print_as_impl_trait().to_string() });
+ return true;
+ } else {
+ // FIXME: if `found` could be `impl Iterator` we should suggest that.
+ err.subdiagnostic(AddReturnTypeSuggestion::MissingHere { span });
+ return true
+ }
+ }
+ &hir::FnRetTy::Return(ref ty) => {
+ // Only point to return type if the expected type is the return type, as if they
+ // are not, the expectation must have been caused by something else.
+ debug!("suggest_missing_return_type: return type {:?} node {:?}", ty, ty.kind);
+ let span = ty.span;
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, ty);
+ debug!("suggest_missing_return_type: return type {:?}", ty);
+ debug!("suggest_missing_return_type: expected type {:?}", ty);
+ let bound_vars = self.tcx.late_bound_vars(fn_id);
+ let ty = Binder::bind_with_vars(ty, bound_vars);
+ let ty = self.normalize_associated_types_in(span, ty);
+ let ty = self.tcx.erase_late_bound_regions(ty);
+ if self.can_coerce(expected, ty) {
+ err.subdiagnostic(ExpectedReturnTypeLabel::Other { span, expected });
+ self.try_suggest_return_impl_trait(err, expected, ty, fn_id);
+ return true;
+ }
+ }
+ _ => {}
+ }
+ false
+ }
+
+    /// Check whether the return type is a generic type with a trait bound.
+    /// Only suggest this if the generic param is not present in the arguments;
+    /// if this is true, hint the user towards changing the return type to `impl Trait`.
+ /// ```compile_fail,E0308
+ /// fn cant_name_it<T: Fn() -> u32>() -> T {
+ /// || 3
+ /// }
+ /// ```
+ fn try_suggest_return_impl_trait(
+ &self,
+ err: &mut Diagnostic,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ fn_id: hir::HirId,
+ ) {
+ // Only apply the suggestion if:
+ // - the return type is a generic parameter
+ // - the generic param is not used as a fn param
+ // - the generic param has at least one bound
+ // - the generic param doesn't appear in any other bounds where it's not the Self type
+ // Suggest:
+ // - Changing the return type to be `impl <all bounds>`
+
+ debug!("try_suggest_return_impl_trait, expected = {:?}, found = {:?}", expected, found);
+
+ let ty::Param(expected_ty_as_param) = expected.kind() else { return };
+
+ let fn_node = self.tcx.hir().find(fn_id);
+
+ let Some(hir::Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Fn(
+ hir::FnSig { decl: hir::FnDecl { inputs: fn_parameters, output: fn_return, .. }, .. },
+ hir::Generics { params, predicates, .. },
+ _body_id,
+ ),
+ ..
+ })) = fn_node else { return };
+
+ if params.get(expected_ty_as_param.index as usize).is_none() {
+ return;
+ };
+
+        // Get all where-clause `BoundPredicate`s here, because they are used in two cases below.
+ let where_predicates = predicates
+ .iter()
+ .filter_map(|p| match p {
+ WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+ bounds,
+ bounded_ty,
+ ..
+ }) => {
+ // FIXME: Maybe these calls to `ast_ty_to_ty` can be removed (and the ones below)
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, bounded_ty);
+ Some((ty, bounds))
+ }
+ _ => None,
+ })
+ .map(|(ty, bounds)| match ty.kind() {
+ ty::Param(param_ty) if param_ty == expected_ty_as_param => Ok(Some(bounds)),
+ // check whether there is any predicate that contains our `T`, like `Option<T>: Send`
+ _ => match ty.contains(expected) {
+ true => Err(()),
+ false => Ok(None),
+ },
+ })
+ .collect::<Result<Vec<_>, _>>();
+
+ let Ok(where_predicates) = where_predicates else { return };
+
+ // now get all predicates in the same types as the where bounds, so we can chain them
+ let predicates_from_where =
+ where_predicates.iter().flatten().flat_map(|bounds| bounds.iter());
+
+ // extract all bounds from the source code using their spans
+ let all_matching_bounds_strs = predicates_from_where
+ .filter_map(|bound| match bound {
+ GenericBound::Trait(_, _) => {
+ self.tcx.sess.source_map().span_to_snippet(bound.span()).ok()
+ }
+ _ => None,
+ })
+ .collect::<Vec<String>>();
+
+ if all_matching_bounds_strs.len() == 0 {
+ return;
+ }
+
+ let all_bounds_str = all_matching_bounds_strs.join(" + ");
+
+ let ty_param_used_in_fn_params = fn_parameters.iter().any(|param| {
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, param);
+ matches!(ty.kind(), ty::Param(fn_param_ty_param) if expected_ty_as_param == fn_param_ty_param)
+ });
+
+ if ty_param_used_in_fn_params {
+ return;
+ }
+
+ err.span_suggestion(
+ fn_return.span(),
+ "consider using an impl return type",
+ format!("impl {}", all_bounds_str),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ pub(in super::super) fn suggest_missing_break_or_return_expr(
+ &self,
+ err: &mut Diagnostic,
+ expr: &'tcx hir::Expr<'tcx>,
+ fn_decl: &hir::FnDecl<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ id: hir::HirId,
+ fn_id: hir::HirId,
+ ) {
+ if !expected.is_unit() {
+ return;
+ }
+ let found = self.resolve_vars_with_obligations(found);
+
+ let in_loop = self.is_loop(id)
+ || self.tcx.hir().parent_iter(id).any(|(parent_id, _)| self.is_loop(parent_id));
+
+ let in_local_statement = self.is_local_statement(id)
+ || self
+ .tcx
+ .hir()
+ .parent_iter(id)
+ .any(|(parent_id, _)| self.is_local_statement(parent_id));
+
+ if in_loop && in_local_statement {
+ err.multipart_suggestion(
+ "you might have meant to break the loop with this value",
+ vec![
+ (expr.span.shrink_to_lo(), "break ".to_string()),
+ (expr.span.shrink_to_hi(), ";".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ return;
+ }
+
+ if let hir::FnRetTy::Return(ty) = fn_decl.output {
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, ty);
+ let bound_vars = self.tcx.late_bound_vars(fn_id);
+ let ty = self.tcx.erase_late_bound_regions(Binder::bind_with_vars(ty, bound_vars));
+ let ty = self.normalize_associated_types_in(expr.span, ty);
+ let ty = match self.tcx.asyncness(fn_id.owner) {
+ hir::IsAsync::Async => {
+ let infcx = self.tcx.infer_ctxt().build();
+ infcx
+ .get_impl_future_output_ty(ty)
+ .unwrap_or_else(|| {
+ span_bug!(
+ fn_decl.output.span(),
+ "failed to get output type of async function"
+ )
+ })
+ .skip_binder()
+ }
+ hir::IsAsync::NotAsync => ty,
+ };
+ if self.can_coerce(found, ty) {
+ err.multipart_suggestion(
+ "you might have meant to return this value",
+ vec![
+ (expr.span.shrink_to_lo(), "return ".to_string()),
+ (expr.span.shrink_to_hi(), ";".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
+ pub(in super::super) fn suggest_missing_parentheses(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ ) -> bool {
+ let sp = self.tcx.sess.source_map().start_point(expr.span);
+ if let Some(sp) = self.tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp) {
+ // `{ 42 } &&x` (#61475) or `{ 42 } && if x { 1 } else { 0 }`
+ err.subdiagnostic(ExprParenthesesNeeded::surrounding(*sp));
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Given an expression type mismatch, peel any `&` expressions until we get to
+ /// a block expression, and then suggest replacing the braces with square braces
+ /// if it was possibly mistaken array syntax.
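+    ///
+    /// A rough illustration (hypothetical example, not taken from the test suite):
+    /// ```compile_fail,E0308
+    /// let x: &[u8] = &{ 10 }; // `&[ 10 ]` is likely what was meant
+    /// ```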
+ pub(crate) fn suggest_block_to_brackets_peeling_refs(
+ &self,
+ diag: &mut Diagnostic,
+ mut expr: &hir::Expr<'_>,
+ mut expr_ty: Ty<'tcx>,
+ mut expected_ty: Ty<'tcx>,
+ ) -> bool {
+ loop {
+ match (&expr.kind, expr_ty.kind(), expected_ty.kind()) {
+ (
+ hir::ExprKind::AddrOf(_, _, inner_expr),
+ ty::Ref(_, inner_expr_ty, _),
+ ty::Ref(_, inner_expected_ty, _),
+ ) => {
+ expr = *inner_expr;
+ expr_ty = *inner_expr_ty;
+ expected_ty = *inner_expected_ty;
+ }
+ (hir::ExprKind::Block(blk, _), _, _) => {
+ self.suggest_block_to_brackets(diag, *blk, expr_ty, expected_ty);
+ break true;
+ }
+ _ => break false,
+ }
+ }
+ }
+
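+    /// Suggests `.copied()` (or `.cloned()`) when an `Option<&T>`/`Result<&T, E>` is found
+    /// where an `Option<T>`/`Result<T, E>` is expected. A rough illustration (hypothetical
+    /// example, not taken from the test suite):
+    /// ```compile_fail,E0308
+    /// fn f(x: Option<&u32>) -> Option<u32> {
+    ///     x // `x.copied()` is suggested
+    /// }
+    /// ```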
+ pub(crate) fn suggest_copied_or_cloned(
+ &self,
+ diag: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expr_ty: Ty<'tcx>,
+ expected_ty: Ty<'tcx>,
+ ) -> bool {
+ let ty::Adt(adt_def, substs) = expr_ty.kind() else { return false; };
+ let ty::Adt(expected_adt_def, expected_substs) = expected_ty.kind() else { return false; };
+ if adt_def != expected_adt_def {
+ return false;
+ }
+
+ let mut suggest_copied_or_cloned = || {
+ let expr_inner_ty = substs.type_at(0);
+ let expected_inner_ty = expected_substs.type_at(0);
+ if let ty::Ref(_, ty, hir::Mutability::Not) = expr_inner_ty.kind()
+ && self.can_eq(self.param_env, *ty, expected_inner_ty).is_ok()
+ {
+ let def_path = self.tcx.def_path_str(adt_def.did());
+ if self.type_is_copy_modulo_regions(self.param_env, *ty, expr.span) {
+ diag.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "use `{def_path}::copied` to copy the value inside the `{def_path}`"
+ ),
+ ".copied()",
+ Applicability::MachineApplicable,
+ );
+ return true;
+ } else if let Some(clone_did) = self.tcx.lang_items().clone_trait()
+ && rustc_trait_selection::traits::type_known_to_meet_bound_modulo_regions(
+ self,
+ self.param_env,
+ *ty,
+ clone_did,
+ expr.span
+ )
+ {
+ diag.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "use `{def_path}::cloned` to clone the value inside the `{def_path}`"
+ ),
+ ".cloned()",
+ Applicability::MachineApplicable,
+ );
+ return true;
+ }
+ }
+ false
+ };
+
+ if let Some(result_did) = self.tcx.get_diagnostic_item(sym::Result)
+ && adt_def.did() == result_did
+ // Check that the error types are equal
+ && self.can_eq(self.param_env, substs.type_at(1), expected_substs.type_at(1)).is_ok()
+ {
+ return suggest_copied_or_cloned();
+ } else if let Some(option_did) = self.tcx.get_diagnostic_item(sym::Option)
+ && adt_def.did() == option_did
+ {
+ return suggest_copied_or_cloned();
+ }
+
+ false
+ }
+
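+    /// Suggests calling `.into()` when the found type can be converted into the expected type
+    /// via the `Into` trait (scalar-to-scalar conversions are skipped, since they have better
+    /// suggestions elsewhere). A rough illustration (hypothetical example, not taken from the
+    /// test suite):
+    /// ```compile_fail,E0308
+    /// fn f(s: &str) -> String {
+    ///     s // `s.into()` is suggested
+    /// }
+    /// ```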
+ pub(crate) fn suggest_into(
+ &self,
+ diag: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expr_ty: Ty<'tcx>,
+ expected_ty: Ty<'tcx>,
+ ) -> bool {
+ let expr = expr.peel_blocks();
+
+ // We have better suggestions for scalar interconversions...
+ if expr_ty.is_scalar() && expected_ty.is_scalar() {
+ return false;
+ }
+
+ // Don't suggest turning a block into another type (e.g. `{}.into()`)
+ if matches!(expr.kind, hir::ExprKind::Block(..)) {
+ return false;
+ }
+
+ // We'll later suggest `.as_ref` when noting the type error,
+ // so skip if we will suggest that instead.
+ if self.err_ctxt().should_suggest_as_ref(expected_ty, expr_ty).is_some() {
+ return false;
+ }
+
+ if let Some(into_def_id) = self.tcx.get_diagnostic_item(sym::Into)
+ && self.predicate_must_hold_modulo_regions(&traits::Obligation::new(
+ self.misc(expr.span),
+ self.param_env,
+ ty::Binder::dummy(ty::TraitRef {
+ def_id: into_def_id,
+ substs: self.tcx.mk_substs_trait(expr_ty, &[expected_ty.into()]),
+ })
+ .to_poly_trait_predicate()
+ .to_predicate(self.tcx),
+ ))
+ {
+ let sugg = if expr.precedence().order() >= PREC_POSTFIX {
+ vec![(expr.span.shrink_to_hi(), ".into()".to_owned())]
+ } else {
+ vec![(expr.span.shrink_to_lo(), "(".to_owned()), (expr.span.shrink_to_hi(), ").into()".to_owned())]
+ };
+ diag.multipart_suggestion(
+ format!("call `Into::into` on this expression to convert `{expr_ty}` into `{expected_ty}`"),
+ sugg,
+ Applicability::MaybeIncorrect
+ );
+ return true;
+ }
+
+ false
+ }
+
+ /// Suggest wrapping the block in square brackets instead of curly braces
+ /// in case the block was mistaken array syntax, e.g. `{ 1 }` -> `[ 1 ]`.
+ pub(crate) fn suggest_block_to_brackets(
+ &self,
+ diag: &mut Diagnostic,
+ blk: &hir::Block<'_>,
+ blk_ty: Ty<'tcx>,
+ expected_ty: Ty<'tcx>,
+ ) {
+ if let ty::Slice(elem_ty) | ty::Array(elem_ty, _) = expected_ty.kind() {
+ if self.can_coerce(blk_ty, *elem_ty)
+ && blk.stmts.is_empty()
+ && blk.rules == hir::BlockCheckMode::DefaultBlock
+ {
+ let source_map = self.tcx.sess.source_map();
+ if let Ok(snippet) = source_map.span_to_snippet(blk.span) {
+ if snippet.starts_with('{') && snippet.ends_with('}') {
+ diag.multipart_suggestion_verbose(
+ "to create an array, use square brackets instead of curly braces",
+ vec![
+ (
+ blk.span
+ .shrink_to_lo()
+ .with_hi(rustc_span::BytePos(blk.span.lo().0 + 1)),
+ "[".to_string(),
+ ),
+ (
+ blk.span
+ .shrink_to_hi()
+ .with_lo(rustc_span::BytePos(blk.span.hi().0 - 1)),
+ "]".to_string(),
+ ),
+ ],
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ }
+ }
+ }
+
+ fn is_loop(&self, id: hir::HirId) -> bool {
+ let node = self.tcx.hir().get(id);
+ matches!(node, Node::Expr(Expr { kind: ExprKind::Loop(..), .. }))
+ }
+
+ fn is_local_statement(&self, id: hir::HirId) -> bool {
+ let node = self.tcx.hir().get(id);
+ matches!(node, Node::Stmt(Stmt { kind: StmtKind::Local(..), .. }))
+ }
+
+ /// Suggest that `&T` was cloned instead of `T` because `T` does not implement `Clone`,
+ /// which is a side-effect of autoref.
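+    ///
+    /// A minimal sketch (hypothetical example, not taken from the test suite):
+    /// ```compile_fail,E0308
+    /// struct Foo;
+    /// fn f(r: &Foo) -> Foo {
+    ///     r.clone() // clones the `&Foo` itself because `Foo` is not `Clone`, yielding `&Foo`
+    /// }
+    /// ```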
+ pub(crate) fn note_type_is_not_clone(
+ &self,
+ diag: &mut Diagnostic,
+ expected_ty: Ty<'tcx>,
+ found_ty: Ty<'tcx>,
+ expr: &hir::Expr<'_>,
+ ) {
+ let hir::ExprKind::MethodCall(segment, callee_expr, &[], _) = expr.kind else { return; };
+ let Some(clone_trait_did) = self.tcx.lang_items().clone_trait() else { return; };
+ let ty::Ref(_, pointee_ty, _) = found_ty.kind() else { return };
+ let results = self.typeck_results.borrow();
+ // First, look for a `Clone::clone` call
+ if segment.ident.name == sym::clone
+ && results.type_dependent_def_id(expr.hir_id).map_or(
+ false,
+ |did| {
+ let assoc_item = self.tcx.associated_item(did);
+ assoc_item.container == ty::AssocItemContainer::TraitContainer
+ && assoc_item.container_id(self.tcx) == clone_trait_did
+ },
+ )
+ // If that clone call hasn't already dereferenced the self type (i.e. don't give this
+ // diagnostic in cases where we have `(&&T).clone()` and we expect `T`).
+ && !results.expr_adjustments(callee_expr).iter().any(|adj| matches!(adj.kind, ty::adjustment::Adjust::Deref(..)))
+ // Check that we're in fact trying to clone into the expected type
+ && self.can_coerce(*pointee_ty, expected_ty)
+ // And the expected type doesn't implement `Clone`
+ && !self.predicate_must_hold_considering_regions(&traits::Obligation {
+ cause: traits::ObligationCause::dummy(),
+ param_env: self.param_env,
+ recursion_depth: 0,
+ predicate: ty::Binder::dummy(ty::TraitRef {
+ def_id: clone_trait_did,
+ substs: self.tcx.mk_substs([expected_ty.into()].iter()),
+ })
+ .without_const()
+ .to_predicate(self.tcx),
+ })
+ {
+ diag.span_note(
+ callee_expr.span,
+ &format!(
+ "`{expected_ty}` does not implement `Clone`, so `{found_ty}` was cloned instead"
+ ),
+ );
+ }
+ }
+
+ /// A common error is to add an extra semicolon:
+ ///
+ /// ```compile_fail,E0308
+ /// fn foo() -> usize {
+ /// 22;
+ /// }
+ /// ```
+ ///
+ /// This routine checks if the final statement in a block is an
+ /// expression with an explicit semicolon whose type is compatible
+ /// with `expected_ty`. If so, it suggests removing the semicolon.
+ pub(crate) fn consider_removing_semicolon(
+ &self,
+ blk: &'tcx hir::Block<'tcx>,
+ expected_ty: Ty<'tcx>,
+ err: &mut Diagnostic,
+ ) -> bool {
+ if let Some((span_semi, boxed)) = self.err_ctxt().could_remove_semicolon(blk, expected_ty) {
+ if let StatementAsExpression::NeedsBoxing = boxed {
+ err.span_suggestion_verbose(
+ span_semi,
+ "consider removing this semicolon and boxing the expression",
+ "",
+ Applicability::HasPlaceholders,
+ );
+ } else {
+ err.span_suggestion_short(
+ span_semi,
+ "remove this semicolon to return this value",
+ "",
+ Applicability::MachineApplicable,
+ );
+ }
+ true
+ } else {
+ false
+ }
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/gather_locals.rs b/compiler/rustc_hir_typeck/src/gather_locals.rs
new file mode 100644
index 000000000..9a096f24f
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/gather_locals.rs
@@ -0,0 +1,161 @@
+use crate::{FnCtxt, LocalTy};
+use rustc_hir as hir;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::PatKind;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::ty::Ty;
+use rustc_middle::ty::UserType;
+use rustc_span::Span;
+use rustc_trait_selection::traits;
+
+/// A declaration is an abstraction of [hir::Local] and [hir::Let].
+///
+/// It must have a hir_id, as this is how we connect gather_locals to the check functions.
+pub(super) struct Declaration<'a> {
+ pub hir_id: hir::HirId,
+ pub pat: &'a hir::Pat<'a>,
+ pub ty: Option<&'a hir::Ty<'a>>,
+ pub span: Span,
+ pub init: Option<&'a hir::Expr<'a>>,
+ pub els: Option<&'a hir::Block<'a>>,
+}
+
+impl<'a> From<&'a hir::Local<'a>> for Declaration<'a> {
+ fn from(local: &'a hir::Local<'a>) -> Self {
+ let hir::Local { hir_id, pat, ty, span, init, els, source: _ } = *local;
+ Declaration { hir_id, pat, ty, span, init, els }
+ }
+}
+
+impl<'a> From<&'a hir::Let<'a>> for Declaration<'a> {
+ fn from(let_expr: &'a hir::Let<'a>) -> Self {
+ let hir::Let { hir_id, pat, ty, span, init } = *let_expr;
+ Declaration { hir_id, pat, ty, span, init: Some(init), els: None }
+ }
+}
+
+pub(super) struct GatherLocalsVisitor<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+    // Parameters are special cases of patterns, but we want to handle them as
+    // *distinct* cases. So track when we are hitting a pattern *within* an fn
+    // parameter.
+ outermost_fn_param_pat: Option<Span>,
+}
+
+impl<'a, 'tcx> GatherLocalsVisitor<'a, 'tcx> {
+ pub(super) fn new(fcx: &'a FnCtxt<'a, 'tcx>) -> Self {
+ Self { fcx, outermost_fn_param_pat: None }
+ }
+
+ fn assign(&mut self, span: Span, nid: hir::HirId, ty_opt: Option<LocalTy<'tcx>>) -> Ty<'tcx> {
+ match ty_opt {
+ None => {
+ // Infer the variable's type.
+ let var_ty = self.fcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span,
+ });
+ self.fcx
+ .locals
+ .borrow_mut()
+ .insert(nid, LocalTy { decl_ty: var_ty, revealed_ty: var_ty });
+ var_ty
+ }
+ Some(typ) => {
+ // Take type that the user specified.
+ self.fcx.locals.borrow_mut().insert(nid, typ);
+ typ.revealed_ty
+ }
+ }
+ }
+
+ /// Allocates a [LocalTy] for a declaration, which may have a type annotation. If it does have
+ /// a type annotation, then the LocalTy stored will be the resolved type. This may be found
+ /// again during type checking by querying [FnCtxt::local_ty] for the same hir_id.
+ fn declare(&mut self, decl: Declaration<'tcx>) {
+ let local_ty = match decl.ty {
+ Some(ref ty) => {
+ let o_ty = self.fcx.to_ty(&ty);
+
+ let c_ty = self.fcx.inh.infcx.canonicalize_user_type_annotation(UserType::Ty(o_ty));
+ debug!("visit_local: ty.hir_id={:?} o_ty={:?} c_ty={:?}", ty.hir_id, o_ty, c_ty);
+ self.fcx
+ .typeck_results
+ .borrow_mut()
+ .user_provided_types_mut()
+ .insert(ty.hir_id, c_ty);
+
+ Some(LocalTy { decl_ty: o_ty, revealed_ty: o_ty })
+ }
+ None => None,
+ };
+ self.assign(decl.span, decl.hir_id, local_ty);
+
+ debug!(
+ "local variable {:?} is assigned type {}",
+ decl.pat,
+ self.fcx.ty_to_string(self.fcx.locals.borrow().get(&decl.hir_id).unwrap().decl_ty)
+ );
+ }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for GatherLocalsVisitor<'a, 'tcx> {
+ // Add explicitly-declared locals.
+ fn visit_local(&mut self, local: &'tcx hir::Local<'tcx>) {
+ self.declare(local.into());
+ intravisit::walk_local(self, local)
+ }
+
+ fn visit_let_expr(&mut self, let_expr: &'tcx hir::Let<'tcx>) {
+ self.declare(let_expr.into());
+ intravisit::walk_let_expr(self, let_expr);
+ }
+
+ fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
+ let old_outermost_fn_param_pat = self.outermost_fn_param_pat.replace(param.ty_span);
+ intravisit::walk_param(self, param);
+ self.outermost_fn_param_pat = old_outermost_fn_param_pat;
+ }
+
+ // Add pattern bindings.
+ fn visit_pat(&mut self, p: &'tcx hir::Pat<'tcx>) {
+ if let PatKind::Binding(_, _, ident, _) = p.kind {
+ let var_ty = self.assign(p.span, p.hir_id, None);
+
+ if let Some(ty_span) = self.outermost_fn_param_pat {
+ if !self.fcx.tcx.features().unsized_fn_params {
+ self.fcx.require_type_is_sized(
+ var_ty,
+ p.span,
+ traits::SizedArgumentType(Some(ty_span)),
+ );
+ }
+ } else {
+ if !self.fcx.tcx.features().unsized_locals {
+ self.fcx.require_type_is_sized(var_ty, p.span, traits::VariableType(p.hir_id));
+ }
+ }
+
+ debug!(
+ "pattern binding {} is assigned to {} with type {:?}",
+ ident,
+ self.fcx.ty_to_string(self.fcx.locals.borrow().get(&p.hir_id).unwrap().decl_ty),
+ var_ty
+ );
+ }
+ let old_outermost_fn_param_pat = self.outermost_fn_param_pat.take();
+ intravisit::walk_pat(self, p);
+ self.outermost_fn_param_pat = old_outermost_fn_param_pat;
+ }
+
+ // Don't descend into the bodies of nested closures.
+ fn visit_fn(
+ &mut self,
+ _: intravisit::FnKind<'tcx>,
+ _: &'tcx hir::FnDecl<'tcx>,
+ _: hir::BodyId,
+ _: Span,
+ _: hir::HirId,
+ ) {
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs
new file mode 100644
index 000000000..122ad7009
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs
@@ -0,0 +1,563 @@
+use super::{
+ for_each_consumable, record_consumed_borrow::ConsumedAndBorrowedPlaces, DropRangesBuilder,
+ NodeInfo, PostOrderId, TrackedValue, TrackedValueIndex,
+};
+use hir::{
+ intravisit::{self, Visitor},
+ Body, Expr, ExprKind, Guard, HirId, LoopIdError,
+};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir as hir;
+use rustc_index::vec::IndexVec;
+use rustc_middle::{
+ hir::map::Map,
+ ty::{TyCtxt, TypeckResults},
+};
+use std::mem::swap;
+
+/// Traverses the body to find the control flow graph and locations for the
+/// relevant places are dropped or reinitialized.
+///
+/// The resulting structure still needs to be iterated to a fixed point, which
+/// can be done with propagate_to_fixpoint in cfg_propagate.
+pub(super) fn build_control_flow_graph<'tcx>(
+ hir: Map<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &TypeckResults<'tcx>,
+ consumed_borrowed_places: ConsumedAndBorrowedPlaces,
+ body: &'tcx Body<'tcx>,
+ num_exprs: usize,
+) -> (DropRangesBuilder, FxHashSet<HirId>) {
+ let mut drop_range_visitor =
+ DropRangeVisitor::new(hir, tcx, typeck_results, consumed_borrowed_places, num_exprs);
+ intravisit::walk_body(&mut drop_range_visitor, body);
+
+ drop_range_visitor.drop_ranges.process_deferred_edges();
+ if let Some(filename) = &tcx.sess.opts.unstable_opts.dump_drop_tracking_cfg {
+ super::cfg_visualize::write_graph_to_file(&drop_range_visitor.drop_ranges, filename, tcx);
+ }
+
+ (drop_range_visitor.drop_ranges, drop_range_visitor.places.borrowed_temporaries)
+}
+
+/// This struct is used to gather the information for `DropRanges` to determine the regions of the
+/// HIR tree for which a value is dropped.
+///
+/// We are interested in points where a variable is dropped or initialized, and the control flow
+/// of the code. We identify locations in code by their post-order traversal index, so it is
+/// important for this traversal to match that in `RegionResolutionVisitor` and `InteriorVisitor`.
+///
+/// We make several simplifying assumptions, with the goal of being more conservative than
+/// necessary rather than less conservative (since being less conservative is unsound, but more
+/// conservative is still safe). These assumptions are:
+///
+/// 1. Moving a variable `a` counts as a move of the whole variable.
+/// 2. Moving a partial path like `a.b.c` is ignored.
+/// 3. Reinitializing through a field (e.g. `a.b.c = 5`) counts as a reinitialization of all of
+/// `a`.
+///
+/// Some examples:
+///
+/// Rule 1:
+/// ```rust
+/// let mut a = (vec![0], vec![0]);
+/// drop(a);
+/// // `a` is not considered initialized.
+/// ```
+///
+/// Rule 2:
+/// ```rust
+/// let mut a = (vec![0], vec![0]);
+/// drop(a.0);
+/// drop(a.1);
+/// // `a` is still considered initialized.
+/// ```
+///
+/// Rule 3:
+/// ```compile_fail,E0382
+/// let mut a = (vec![0], vec![0]);
+/// drop(a);
+/// a.1 = vec![1];
+/// // all of `a` is considered initialized
+/// ```
+
+struct DropRangeVisitor<'a, 'tcx> {
+ hir: Map<'tcx>,
+ places: ConsumedAndBorrowedPlaces,
+ drop_ranges: DropRangesBuilder,
+ expr_index: PostOrderId,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a TypeckResults<'tcx>,
+ label_stack: Vec<(Option<rustc_ast::Label>, PostOrderId)>,
+}
+
+impl<'a, 'tcx> DropRangeVisitor<'a, 'tcx> {
+ fn new(
+ hir: Map<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a TypeckResults<'tcx>,
+ places: ConsumedAndBorrowedPlaces,
+ num_exprs: usize,
+ ) -> Self {
+ debug!("consumed_places: {:?}", places.consumed);
+ let drop_ranges = DropRangesBuilder::new(
+ places.consumed.iter().flat_map(|(_, places)| places.iter().cloned()),
+ hir,
+ num_exprs,
+ );
+ Self {
+ hir,
+ places,
+ drop_ranges,
+ expr_index: PostOrderId::from_u32(0),
+ typeck_results,
+ tcx,
+ label_stack: vec![],
+ }
+ }
+
+ fn record_drop(&mut self, value: TrackedValue) {
+ if self.places.borrowed.contains(&value) {
+ debug!("not marking {:?} as dropped because it is borrowed at some point", value);
+ } else {
+ debug!("marking {:?} as dropped at {:?}", value, self.expr_index);
+ let count = self.expr_index;
+ self.drop_ranges.drop_at(value, count);
+ }
+ }
+
+ /// ExprUseVisitor's consume callback doesn't go deep enough for our purposes in all
+ /// expressions. This method consumes a little deeper into the expression when needed.
+ fn consume_expr(&mut self, expr: &hir::Expr<'_>) {
+ debug!("consuming expr {:?}, count={:?}", expr.kind, self.expr_index);
+ let places = self
+ .places
+ .consumed
+ .get(&expr.hir_id)
+ .map_or(vec![], |places| places.iter().cloned().collect());
+ for place in places {
+ trace!(?place, "consuming place");
+ for_each_consumable(self.hir, place, |value| self.record_drop(value));
+ }
+ }
+
+ /// Marks an expression as being reinitialized.
+ ///
+    /// Note that we always approximate on the side of things being more
+ /// initialized than they actually are, as opposed to less. In cases such
+ /// as `x.y = ...`, we would consider all of `x` as being initialized
+ /// instead of just the `y` field.
+ ///
+ /// This is because it is always safe to consider something initialized
+ /// even when it is not, but the other way around will cause problems.
+ ///
+ /// In the future, we will hopefully tighten up these rules to be more
+ /// precise.
+ fn reinit_expr(&mut self, expr: &hir::Expr<'_>) {
+ // Walk the expression to find the base. For example, in an expression
+ // like `*a[i].x`, we want to find the `a` and mark that as
+ // reinitialized.
+ match expr.kind {
+ ExprKind::Path(hir::QPath::Resolved(
+ _,
+ hir::Path { res: hir::def::Res::Local(hir_id), .. },
+ )) => {
+ // This is the base case, where we have found an actual named variable.
+
+ let location = self.expr_index;
+ debug!("reinitializing {:?} at {:?}", hir_id, location);
+ self.drop_ranges.reinit_at(TrackedValue::Variable(*hir_id), location);
+ }
+
+ ExprKind::Field(base, _) => self.reinit_expr(base),
+
+ // Most expressions do not refer to something where we need to track
+ // reinitializations.
+ //
+ // Some of these may be interesting in the future
+ ExprKind::Path(..)
+ | ExprKind::Box(..)
+ | ExprKind::ConstBlock(..)
+ | ExprKind::Array(..)
+ | ExprKind::Call(..)
+ | ExprKind::MethodCall(..)
+ | ExprKind::Tup(..)
+ | ExprKind::Binary(..)
+ | ExprKind::Unary(..)
+ | ExprKind::Lit(..)
+ | ExprKind::Cast(..)
+ | ExprKind::Type(..)
+ | ExprKind::DropTemps(..)
+ | ExprKind::Let(..)
+ | ExprKind::If(..)
+ | ExprKind::Loop(..)
+ | ExprKind::Match(..)
+ | ExprKind::Closure { .. }
+ | ExprKind::Block(..)
+ | ExprKind::Assign(..)
+ | ExprKind::AssignOp(..)
+ | ExprKind::Index(..)
+ | ExprKind::AddrOf(..)
+ | ExprKind::Break(..)
+ | ExprKind::Continue(..)
+ | ExprKind::Ret(..)
+ | ExprKind::InlineAsm(..)
+ | ExprKind::Struct(..)
+ | ExprKind::Repeat(..)
+ | ExprKind::Yield(..)
+ | ExprKind::Err => (),
+ }
+ }
+
+ /// For an expression with an uninhabited return type (e.g. a function that returns !),
+ /// this adds a self edge to the CFG to model the fact that the function does not
+ /// return.
+ fn handle_uninhabited_return(&mut self, expr: &Expr<'tcx>) {
+ let ty = self.typeck_results.expr_ty(expr);
+ let ty = self.tcx.erase_regions(ty);
+ let m = self.tcx.parent_module(expr.hir_id).to_def_id();
+ let param_env = self.tcx.param_env(m.expect_local());
+ if self.tcx.is_ty_uninhabited_from(m, ty, param_env) {
+ // This function will not return. We model this fact as an infinite loop.
+ self.drop_ranges.add_control_edge(self.expr_index + 1, self.expr_index + 1);
+ }
+ }
+
+ /// Map a Destination to an equivalent expression node
+ ///
+ /// The destination field of a Break or Continue expression can target either an
+ /// expression or a block. The drop range analysis, however, only deals in
+ /// expression nodes, so blocks that might be the destination of a Break or Continue
+ /// will not have a PostOrderId.
+ ///
+ /// If the destination is an expression, this function will simply return that expression's
+    /// hir_id. If the destination is a block, this function will return the hir_id of the
+    /// last expression in the block.
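+    ///
+    /// For example (illustrative only):
+    ///
+    /// ```ignore (illustrative)
+    /// 'outer: loop {
+    ///     break 'outer;               // the destination is the loop expression itself
+    /// }
+    /// let x = 'blk: { break 'blk 1 }; // the destination is the labelled block
+    /// ```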
+ fn find_target_expression_from_destination(
+ &self,
+ destination: hir::Destination,
+ ) -> Result<HirId, LoopIdError> {
+ destination.target_id.map(|target| {
+ let node = self.hir.get(target);
+ match node {
+ hir::Node::Expr(_) => target,
+ hir::Node::Block(b) => find_last_block_expression(b),
+ hir::Node::Param(..)
+ | hir::Node::Item(..)
+ | hir::Node::ForeignItem(..)
+ | hir::Node::TraitItem(..)
+ | hir::Node::ImplItem(..)
+ | hir::Node::Variant(..)
+ | hir::Node::Field(..)
+ | hir::Node::AnonConst(..)
+ | hir::Node::Stmt(..)
+ | hir::Node::PathSegment(..)
+ | hir::Node::Ty(..)
+ | hir::Node::TypeBinding(..)
+ | hir::Node::TraitRef(..)
+ | hir::Node::Pat(..)
+ | hir::Node::PatField(..)
+ | hir::Node::ExprField(..)
+ | hir::Node::Arm(..)
+ | hir::Node::Local(..)
+ | hir::Node::Ctor(..)
+ | hir::Node::Lifetime(..)
+ | hir::Node::GenericParam(..)
+ | hir::Node::Crate(..)
+ | hir::Node::Infer(..) => bug!("Unsupported branch target: {:?}", node),
+ }
+ })
+ }
+}
+
+fn find_last_block_expression(block: &hir::Block<'_>) -> HirId {
+ block.expr.map_or_else(
+ // If there is no tail expression, there will be at least one statement in the
+ // block because the block contains a break or continue statement.
+ || block.stmts.last().unwrap().hir_id,
+ |expr| expr.hir_id,
+ )
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for DropRangeVisitor<'a, 'tcx> {
+ fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
+ let mut reinit = None;
+ match expr.kind {
+ ExprKind::Assign(lhs, rhs, _) => {
+ self.visit_expr(lhs);
+ self.visit_expr(rhs);
+
+ reinit = Some(lhs);
+ }
+
+ ExprKind::If(test, if_true, if_false) => {
+ self.visit_expr(test);
+
+ let fork = self.expr_index;
+
+ self.drop_ranges.add_control_edge(fork, self.expr_index + 1);
+ self.visit_expr(if_true);
+ let true_end = self.expr_index;
+
+ self.drop_ranges.add_control_edge(fork, self.expr_index + 1);
+ if let Some(if_false) = if_false {
+ self.visit_expr(if_false);
+ }
+
+ self.drop_ranges.add_control_edge(true_end, self.expr_index + 1);
+ }
+ ExprKind::Match(scrutinee, arms, ..) => {
+ // We walk through the match expression almost like a chain of if expressions.
+ // Here's a diagram to follow along with:
+ //
+ // ┌─┐
+ // match │A│ {
+ // ┌───┴─┘
+ // │
+ // ┌▼┌───►┌─┐ ┌─┐
+ // │B│ if │C│ =>│D│,
+ // └─┘ ├─┴──►└─┴──────┐
+ // ┌──┘ │
+ // ┌──┘ │
+ // │ │
+ // ┌▼┌───►┌─┐ ┌─┐ │
+ // │E│ if │F│ =>│G│, │
+ // └─┘ ├─┴──►└─┴┐ │
+ // │ │ │
+ // } ▼ ▼ │
+ // ┌─┐◄───────────────────┘
+ // │H│
+ // └─┘
+ //
+ // The order we want is that the scrutinee (A) flows into the first pattern (B),
+ // which flows into the guard (C). Then the guard either flows into the arm body
+ // (D) or into the start of the next arm (E). Finally, the body flows to the end
+ // of the match block (H).
+ //
+ // The subsequent arms follow the same ordering. First we go to the pattern, then
+ // the guard (if present, otherwise it flows straight into the body), then into
+ // the body and then to the end of the match expression.
+ //
+ // The comments below show which edge is being added.
+ self.visit_expr(scrutinee);
+
+ let (guard_exit, arm_end_ids) = arms.iter().fold(
+ (self.expr_index, vec![]),
+ |(incoming_edge, mut arm_end_ids), hir::Arm { pat, body, guard, .. }| {
+ // A -> B, or C -> E
+ self.drop_ranges.add_control_edge(incoming_edge, self.expr_index + 1);
+ self.visit_pat(pat);
+ // B -> C and E -> F are added implicitly due to the traversal order.
+ match guard {
+ Some(Guard::If(expr)) => self.visit_expr(expr),
+ Some(Guard::IfLet(let_expr)) => {
+ self.visit_let_expr(let_expr);
+ }
+ None => (),
+ }
+ // Likewise, C -> D and F -> G are added implicitly.
+
+ // Save C, F, so we can add the other outgoing edge.
+ let to_next_arm = self.expr_index;
+
+                    // The default edge does not get added since we also have an explicit edge,
+                    // so we need to add the edge to the next node ourselves.
+ //
+ // This adds C -> D, F -> G
+ self.drop_ranges.add_control_edge(self.expr_index, self.expr_index + 1);
+ self.visit_expr(body);
+
+ // Save the end of the body so we can add the exit edge once we know where
+ // the exit is.
+ arm_end_ids.push(self.expr_index);
+
+ // Pass C to the next iteration, as well as vec![D]
+ //
+ // On the last round through, we pass F and vec![D, G] so that we can
+ // add all the exit edges.
+ (to_next_arm, arm_end_ids)
+ },
+ );
+ // F -> H
+ self.drop_ranges.add_control_edge(guard_exit, self.expr_index + 1);
+
+ arm_end_ids.into_iter().for_each(|arm_end| {
+ // D -> H, G -> H
+ self.drop_ranges.add_control_edge(arm_end, self.expr_index + 1)
+ });
+ }
+
+ ExprKind::Loop(body, label, ..) => {
+ let loop_begin = self.expr_index + 1;
+ self.label_stack.push((label, loop_begin));
+ if body.stmts.is_empty() && body.expr.is_none() {
+ // For empty loops we won't have updated self.expr_index after visiting the
+ // body, meaning we'd get an edge from expr_index to expr_index + 1, but
+ // instead we want an edge from expr_index + 1 to expr_index + 1.
+ self.drop_ranges.add_control_edge(loop_begin, loop_begin);
+ } else {
+ self.visit_block(body);
+ self.drop_ranges.add_control_edge(self.expr_index, loop_begin);
+ }
+ self.label_stack.pop();
+ }
+ // Find the loop entry by searching through the label stack for either the last entry
+ // (if label is none), or the first entry where the label matches this one. The Loop
+ // case maintains this stack mapping labels to the PostOrderId for the loop entry.
+ ExprKind::Continue(hir::Destination { label, .. }, ..) => self
+ .label_stack
+ .iter()
+ .rev()
+ .find(|(loop_label, _)| label.is_none() || *loop_label == label)
+ .map_or((), |(_, target)| {
+ self.drop_ranges.add_control_edge(self.expr_index, *target)
+ }),
+
+ ExprKind::Break(destination, ..) => {
+ // destination either points to an expression or to a block. We use
+ // find_target_expression_from_destination to use the last expression of the block
+ // if destination points to a block.
+ //
+ // We add an edge to the hir_id of the expression/block we are breaking out of, and
+ // then in process_deferred_edges we will map this hir_id to its PostOrderId, which
+ // will refer to the end of the block due to the post order traversal.
+ self.find_target_expression_from_destination(destination).map_or((), |target| {
+ self.drop_ranges.add_control_edge_hir_id(self.expr_index, target)
+ })
+ }
+
+ ExprKind::Call(f, args) => {
+ self.visit_expr(f);
+ for arg in args {
+ self.visit_expr(arg);
+ }
+
+ self.handle_uninhabited_return(expr);
+ }
+ ExprKind::MethodCall(_, receiver, exprs, _) => {
+ self.visit_expr(receiver);
+ for expr in exprs {
+ self.visit_expr(expr);
+ }
+
+ self.handle_uninhabited_return(expr);
+ }
+
+ ExprKind::AddrOf(..)
+ | ExprKind::Array(..)
+ | ExprKind::AssignOp(..)
+ | ExprKind::Binary(..)
+ | ExprKind::Block(..)
+ | ExprKind::Box(..)
+ | ExprKind::Cast(..)
+ | ExprKind::Closure { .. }
+ | ExprKind::ConstBlock(..)
+ | ExprKind::DropTemps(..)
+ | ExprKind::Err
+ | ExprKind::Field(..)
+ | ExprKind::Index(..)
+ | ExprKind::InlineAsm(..)
+ | ExprKind::Let(..)
+ | ExprKind::Lit(..)
+ | ExprKind::Path(..)
+ | ExprKind::Repeat(..)
+ | ExprKind::Ret(..)
+ | ExprKind::Struct(..)
+ | ExprKind::Tup(..)
+ | ExprKind::Type(..)
+ | ExprKind::Unary(..)
+ | ExprKind::Yield(..) => intravisit::walk_expr(self, expr),
+ }
+
+ self.expr_index = self.expr_index + 1;
+ self.drop_ranges.add_node_mapping(expr.hir_id, self.expr_index);
+ self.consume_expr(expr);
+ if let Some(expr) = reinit {
+ self.reinit_expr(expr);
+ }
+ }
+
+ fn visit_pat(&mut self, pat: &'tcx hir::Pat<'tcx>) {
+ intravisit::walk_pat(self, pat);
+
+ // Increment expr_count here to match what InteriorVisitor expects.
+ self.expr_index = self.expr_index + 1;
+ }
+}
+
+impl DropRangesBuilder {
+ fn new(
+ tracked_values: impl Iterator<Item = TrackedValue>,
+ hir: Map<'_>,
+ num_exprs: usize,
+ ) -> Self {
+ let mut tracked_value_map = FxHashMap::<_, TrackedValueIndex>::default();
+ let mut next = <_>::from(0u32);
+ for value in tracked_values {
+ for_each_consumable(hir, value, |value| {
+ if !tracked_value_map.contains_key(&value) {
+ tracked_value_map.insert(value, next);
+ next = next + 1;
+ }
+ });
+ }
+ debug!("hir_id_map: {:?}", tracked_value_map);
+ let num_values = tracked_value_map.len();
+ Self {
+ tracked_value_map,
+ nodes: IndexVec::from_fn_n(|_| NodeInfo::new(num_values), num_exprs + 1),
+ deferred_edges: <_>::default(),
+ post_order_map: <_>::default(),
+ }
+ }
+
+ fn tracked_value_index(&self, tracked_value: TrackedValue) -> TrackedValueIndex {
+ *self.tracked_value_map.get(&tracked_value).unwrap()
+ }
+
+ /// Adds an entry in the mapping from HirIds to PostOrderIds
+ ///
+ /// Needed so that `add_control_edge_hir_id` can work.
+ fn add_node_mapping(&mut self, node_hir_id: HirId, post_order_id: PostOrderId) {
+ self.post_order_map.insert(node_hir_id, post_order_id);
+ }
+
+ /// Like add_control_edge, but uses a hir_id as the target.
+ ///
+ /// This can be used for branches where we do not know the PostOrderId of the target yet,
+ /// such as when handling `break` or `continue`.
+ fn add_control_edge_hir_id(&mut self, from: PostOrderId, to: HirId) {
+ self.deferred_edges.push((from, to));
+ }
+
+ fn drop_at(&mut self, value: TrackedValue, location: PostOrderId) {
+ let value = self.tracked_value_index(value);
+ self.node_mut(location).drops.push(value);
+ }
+
+ fn reinit_at(&mut self, value: TrackedValue, location: PostOrderId) {
+ let value = match self.tracked_value_map.get(&value) {
+ Some(value) => *value,
+ // If there's no value, this is never consumed and therefore is never dropped. We can
+ // ignore this.
+ None => return,
+ };
+ self.node_mut(location).reinits.push(value);
+ }
+
+ /// Looks up PostOrderId for any control edges added by HirId and adds a proper edge for them.
+ ///
+ /// Should be called after visiting the HIR but before solving the control flow, otherwise some
+ /// edges will be missed.
+ fn process_deferred_edges(&mut self) {
+ trace!("processing deferred edges. post_order_map={:#?}", self.post_order_map);
+ let mut edges = vec![];
+ swap(&mut edges, &mut self.deferred_edges);
+ edges.into_iter().for_each(|(from, to)| {
+ trace!("Adding deferred edge from {:?} to {:?}", from, to);
+ let to = *self.post_order_map.get(&to).expect("Expression ID not found");
+ trace!("target edge PostOrderId={:?}", to);
+ self.add_control_edge(from, to)
+ });
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_propagate.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_propagate.rs
new file mode 100644
index 000000000..139d17d2e
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_propagate.rs
@@ -0,0 +1,92 @@
+use super::{DropRangesBuilder, PostOrderId};
+use rustc_index::{bit_set::BitSet, vec::IndexVec};
+use std::collections::BTreeMap;
+
+impl DropRangesBuilder {
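+    /// Propagates the per-node drop state through the control flow graph until a fixpoint is
+    /// reached.
+    ///
+    /// Informally (a sketch rather than a verbatim specification), each pass computes a
+    /// candidate state for every node `n`,
+    ///
+    /// ```text
+    /// candidate(n) = (intersection of drop_state(p) over predecessors p)
+    ///                  + drops(n) - reinits(n)
+    /// ```
+    ///
+    /// intersects the stored `drop_state(n)` with that candidate, and repeats while any
+    /// node's state still changes.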
+ pub fn propagate_to_fixpoint(&mut self) {
+ trace!("before fixpoint: {:#?}", self);
+ let preds = self.compute_predecessors();
+
+ trace!("predecessors: {:#?}", preds.iter_enumerated().collect::<BTreeMap<_, _>>());
+
+ let mut new_state = BitSet::new_empty(self.num_values());
+ let mut changed_nodes = BitSet::new_empty(self.nodes.len());
+ let mut unchanged_mask = BitSet::new_filled(self.nodes.len());
+ changed_nodes.insert(0u32.into());
+
+ let mut propagate = || {
+ let mut changed = false;
+ unchanged_mask.insert_all();
+ for id in self.nodes.indices() {
+ trace!("processing {:?}, changed_nodes: {:?}", id, changed_nodes);
+ // Check if any predecessor has changed, and if not then short-circuit.
+ //
+ // We handle the start node specially, since it doesn't have any predecessors,
+ // but we need to start somewhere.
+ if match id.index() {
+ 0 => !changed_nodes.contains(id),
+ _ => !preds[id].iter().any(|pred| changed_nodes.contains(*pred)),
+ } {
+ trace!("short-circuiting because none of {:?} have changed", preds[id]);
+ unchanged_mask.remove(id);
+ continue;
+ }
+
+ if id.index() == 0 {
+ new_state.clear();
+ } else {
+ // If we are not the start node and we have no predecessors, treat
+ // everything as dropped because there's no way to get here anyway.
+ new_state.insert_all();
+ };
+
+ for pred in &preds[id] {
+ new_state.intersect(&self.nodes[*pred].drop_state);
+ }
+
+ for drop in &self.nodes[id].drops {
+ new_state.insert(*drop);
+ }
+
+ for reinit in &self.nodes[id].reinits {
+ new_state.remove(*reinit);
+ }
+
+ if self.nodes[id].drop_state.intersect(&new_state) {
+ changed_nodes.insert(id);
+ changed = true;
+ } else {
+ unchanged_mask.remove(id);
+ }
+ }
+
+ changed_nodes.intersect(&unchanged_mask);
+ changed
+ };
+
+ while propagate() {
+ trace!("drop_state changed, re-running propagation");
+ }
+
+ trace!("after fixpoint: {:#?}", self);
+ }
+
+ fn compute_predecessors(&self) -> IndexVec<PostOrderId, Vec<PostOrderId>> {
+ let mut preds = IndexVec::from_fn_n(|_| vec![], self.nodes.len());
+ for (id, node) in self.nodes.iter_enumerated() {
+            // If the node has no explicit successors, we assume that control
+            // will flow from this node into the next one.
+ //
+ // If there are successors listed, then we assume that all
+ // possible successors are given and we do not include the default.
+ if node.successors.len() == 0 && id.index() != self.nodes.len() - 1 {
+ preds[id + 1].push(id);
+ } else {
+ for succ in &node.successors {
+ preds[*succ].push(id);
+ }
+ }
+ }
+ preds
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_visualize.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_visualize.rs
new file mode 100644
index 000000000..c0a0bfe8e
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_visualize.rs
@@ -0,0 +1,91 @@
+//! Implementation of GraphWalk for DropRanges so we can visualize the control
+//! flow graph when needed for debugging.
+
+use rustc_graphviz as dot;
+use rustc_middle::ty::TyCtxt;
+
+use super::{DropRangesBuilder, PostOrderId};
+
+/// Writes the CFG for DropRangesBuilder to a .dot file for visualization.
+///
+/// It is not normally called, but is kept around to easily add debugging
+/// code when needed.
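+///
+/// A hypothetical debugging call site (illustrative only) might look like:
+///
+/// ```ignore (illustrative)
+/// cfg_visualize::write_graph_to_file(&drop_ranges, "/tmp/drop_ranges.dot", tcx);
+/// ```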
+pub(super) fn write_graph_to_file(
+ drop_ranges: &DropRangesBuilder,
+ filename: &str,
+ tcx: TyCtxt<'_>,
+) {
+ dot::render(
+ &DropRangesGraph { drop_ranges, tcx },
+ &mut std::fs::File::create(filename).unwrap(),
+ )
+ .unwrap();
+}
+
+struct DropRangesGraph<'a, 'tcx> {
+ drop_ranges: &'a DropRangesBuilder,
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'a> dot::GraphWalk<'a> for DropRangesGraph<'_, '_> {
+ type Node = PostOrderId;
+
+ type Edge = (PostOrderId, PostOrderId);
+
+ fn nodes(&'a self) -> dot::Nodes<'a, Self::Node> {
+ self.drop_ranges.nodes.iter_enumerated().map(|(i, _)| i).collect()
+ }
+
+ fn edges(&'a self) -> dot::Edges<'a, Self::Edge> {
+ self.drop_ranges
+ .nodes
+ .iter_enumerated()
+ .flat_map(|(i, node)| {
+ if node.successors.len() == 0 {
+ vec![(i, i + 1)]
+ } else {
+ node.successors.iter().map(move |&s| (i, s)).collect()
+ }
+ })
+ .collect()
+ }
+
+ fn source(&'a self, edge: &Self::Edge) -> Self::Node {
+ edge.0
+ }
+
+ fn target(&'a self, edge: &Self::Edge) -> Self::Node {
+ edge.1
+ }
+}
+
+impl<'a> dot::Labeller<'a> for DropRangesGraph<'_, '_> {
+ type Node = PostOrderId;
+
+ type Edge = (PostOrderId, PostOrderId);
+
+ fn graph_id(&'a self) -> dot::Id<'a> {
+ dot::Id::new("drop_ranges").unwrap()
+ }
+
+ fn node_id(&'a self, n: &Self::Node) -> dot::Id<'a> {
+ dot::Id::new(format!("id{}", n.index())).unwrap()
+ }
+
+ fn node_label(&'a self, n: &Self::Node) -> dot::LabelText<'a> {
+ dot::LabelText::LabelStr(
+ format!(
+ "{n:?}: {}",
+ self.drop_ranges
+ .post_order_map
+ .iter()
+ .find(|(_hir_id, &post_order_id)| post_order_id == *n)
+ .map_or("<unknown>".into(), |(hir_id, _)| self
+ .tcx
+ .hir()
+ .node_to_string(*hir_id))
+ )
+ .into(),
+ )
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs
new file mode 100644
index 000000000..4f3bdfbe7
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs
@@ -0,0 +1,309 @@
+//! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped
+//! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the
+//! generator type. See `InteriorVisitor::record` for where the results of this analysis are used.
+//!
+//! There are three phases to this analysis:
+//! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed.
+//! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized,
+//! and also build a control flow graph.
+//! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through
+//! the CFG and find the exact points where we know a value is definitely dropped.
+//!
+//! The end result is a data structure that maps the post-order index of each node in the HIR tree
+//! to a set of values that are known to be dropped at that location.
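+//!
+//! As a rough, illustrative example of what this enables (hypothetical user code), drop
+//! tracking lets us conclude that `x` below is already dropped at the suspension point, so
+//! its type does not need to be part of the generator:
+//!
+//! ```ignore (illustrative)
+//! async fn f() {
+//!     let x = String::new();
+//!     drop(x);        // `x` is definitely dropped from here on...
+//!     async {}.await; // ...so it is not held in the generator across this point
+//! }
+//! ```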
+
+use self::cfg_build::build_control_flow_graph;
+use self::record_consumed_borrow::find_consumed_and_borrowed;
+use crate::FnCtxt;
+use hir::def_id::DefId;
+use hir::{Body, HirId, HirIdMap, Node};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir as hir;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::hir::map::Map;
+use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId};
+use rustc_middle::ty;
+use std::collections::BTreeMap;
+use std::fmt::Debug;
+
+mod cfg_build;
+mod cfg_propagate;
+mod cfg_visualize;
+mod record_consumed_borrow;
+
+pub fn compute_drop_ranges<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ def_id: DefId,
+ body: &'tcx Body<'tcx>,
+) -> DropRanges {
+ if fcx.sess().opts.unstable_opts.drop_tracking {
+ let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body);
+
+ let typeck_results = &fcx.typeck_results.borrow();
+ let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0);
+ let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph(
+ fcx.tcx.hir(),
+ fcx.tcx,
+ typeck_results,
+ consumed_borrowed_places,
+ body,
+ num_exprs,
+ );
+
+ drop_ranges.propagate_to_fixpoint();
+
+ debug!("borrowed_temporaries = {borrowed_temporaries:?}");
+ DropRanges {
+ tracked_value_map: drop_ranges.tracked_value_map,
+ nodes: drop_ranges.nodes,
+ borrowed_temporaries: Some(borrowed_temporaries),
+ }
+ } else {
+ // If drop range tracking is not enabled, skip all the analysis and produce an
+ // empty set of DropRanges.
+ DropRanges {
+ tracked_value_map: FxHashMap::default(),
+ nodes: IndexVec::new(),
+ borrowed_temporaries: None,
+ }
+ }
+}
+
+/// Applies `f` to each consumable node in the HIR subtree pointed to by `place`.
+///
+/// This includes the place itself, and if the place is a reference to a local
+/// variable then `f` is also called on the HIR node for that variable as well.
+///
+/// For example, if `place` points to `foo()`, then `f` is called once for the
+/// result of `foo`. On the other hand, if `place` points to `x` then `f` will
+/// be called both on the `ExprKind::Path` node that represents the expression
+/// as well as the HirId of the local `x` itself.
+fn for_each_consumable<'tcx>(hir: Map<'tcx>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) {
+ f(place);
+ let node = hir.find(place.hir_id());
+ if let Some(Node::Expr(expr)) = node {
+ match expr.kind {
+ hir::ExprKind::Path(hir::QPath::Resolved(
+ _,
+ hir::Path { res: hir::def::Res::Local(hir_id), .. },
+ )) => {
+ f(TrackedValue::Variable(*hir_id));
+ }
+ _ => (),
+ }
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct PostOrderId {
+ DEBUG_FORMAT = "id({})",
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct TrackedValueIndex {
+ DEBUG_FORMAT = "hidx({})",
+ }
+}
+
+/// Identifies a value whose drop state we need to track.
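+///
+/// For example (illustrative), given `let x = String::new(); f(&String::new());`, the binding
+/// `x` would be tracked as a `Variable`, while the result of the second `String::new()` call,
+/// which only ever lives in a temporary, would be tracked as a `Temporary`.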
+#[derive(PartialEq, Eq, Hash, Clone, Copy)]
+enum TrackedValue {
+ /// Represents a named variable, such as a let binding, parameter, or upvar.
+ ///
+ /// The HirId points to the variable's definition site.
+ Variable(HirId),
+ /// A value produced as a result of an expression.
+ ///
+ /// The HirId points to the expression that returns this value.
+ Temporary(HirId),
+}
+
+impl Debug for TrackedValue {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ ty::tls::with_opt(|opt_tcx| {
+ if let Some(tcx) = opt_tcx {
+ write!(f, "{}", tcx.hir().node_to_string(self.hir_id()))
+ } else {
+ match self {
+ Self::Variable(hir_id) => write!(f, "Variable({:?})", hir_id),
+ Self::Temporary(hir_id) => write!(f, "Temporary({:?})", hir_id),
+ }
+ }
+ })
+ }
+}
+
+impl TrackedValue {
+ fn hir_id(&self) -> HirId {
+ match self {
+ TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id,
+ }
+ }
+
+ fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self {
+ match place_with_id.place.base {
+ PlaceBase::Rvalue | PlaceBase::StaticItem => {
+ TrackedValue::Temporary(place_with_id.hir_id)
+ }
+ PlaceBase::Local(hir_id)
+ | PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => {
+ TrackedValue::Variable(hir_id)
+ }
+ }
+ }
+}
+
+/// Represents a reason why we might not be able to convert a HirId or Place
+/// into a tracked value.
+#[derive(Debug)]
+enum TrackedValueConversionError {
+    /// Place projections are not currently supported.
+ ///
+    /// The reasoning around these is somewhat subtle, so we choose to be more
+    /// conservative for now. In theory there is no reason we cannot support
+    /// them; we just have not implemented it yet.
+ PlaceProjectionsNotSupported,
+}
+
+impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue {
+ type Error = TrackedValueConversionError;
+
+ fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> {
+ if !place_with_id.place.projections.is_empty() {
+ debug!(
+ "TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.",
+ place_with_id
+ );
+ return Err(TrackedValueConversionError::PlaceProjectionsNotSupported);
+ }
+
+ Ok(TrackedValue::from_place_with_projections_allowed(place_with_id))
+ }
+}
+
+pub struct DropRanges {
+ tracked_value_map: FxHashMap<TrackedValue, TrackedValueIndex>,
+ nodes: IndexVec<PostOrderId, NodeInfo>,
+ borrowed_temporaries: Option<FxHashSet<HirId>>,
+}
+
+impl DropRanges {
+ pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool {
+ self.tracked_value_map
+ .get(&TrackedValue::Temporary(hir_id))
+ .or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id)))
+ .cloned()
+ .map_or(false, |tracked_value_id| {
+ self.expect_node(location.into()).drop_state.contains(tracked_value_id)
+ })
+ }
+
+ pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool {
+ if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true }
+ }
+
+ /// Returns a reference to the NodeInfo for a node, panicking if it does not exist
+ fn expect_node(&self, id: PostOrderId) -> &NodeInfo {
+ &self.nodes[id]
+ }
+}
+
+/// Tracks information needed to compute drop ranges.
+struct DropRangesBuilder {
+ /// The core of DropRangesBuilder is a set of nodes, which each represent
+ /// one expression. We primarily refer to them by their index in a
+ /// post-order traversal of the HIR tree, since this is what
+ /// generator_interior uses to talk about yield positions.
+ ///
+ /// This IndexVec keeps the relevant details for each node. See the
+ /// NodeInfo struct for more details, but this information includes things
+ /// such as the set of control-flow successors, which variables are dropped
+ /// or reinitialized, and whether each variable has been inferred to be
+ /// known-dropped or potentially reinitialized at each point.
+ nodes: IndexVec<PostOrderId, NodeInfo>,
+    /// We refer to values whose drop state we are tracking by the HirId of
+    /// where they are defined. Within a NodeInfo, however, we store the
+    /// drop state in a bit vector indexed by a TrackedValueIndex
+    /// (see NodeInfo::drop_state). The tracked_value_map field stores the
+    /// mapping from TrackedValues to the TrackedValueIndex that is used to
+    /// represent that value in the bitvector.
+ tracked_value_map: FxHashMap<TrackedValue, TrackedValueIndex>,
+
+ /// When building the control flow graph, we don't always know the
+ /// post-order index of the target node at the point we encounter it.
+ /// For example, this happens with break and continue. In those cases,
+ /// we store a pair of the PostOrderId of the source and the HirId
+ /// of the target. Once we have gathered all of these edges, we make a
+ /// pass over the set of deferred edges (see process_deferred_edges in
+ /// cfg_build.rs), look up the PostOrderId for the target (since now the
+ /// post-order index for all nodes is known), and add missing control flow
+ /// edges.
+ deferred_edges: Vec<(PostOrderId, HirId)>,
+ /// This maps HirIds of expressions to their post-order index. It is
+ /// used in process_deferred_edges to correctly add back-edges.
+ post_order_map: HirIdMap<PostOrderId>,
+}
+
+impl Debug for DropRangesBuilder {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("DropRanges")
+ .field("hir_id_map", &self.tracked_value_map)
+ .field("post_order_maps", &self.post_order_map)
+ .field("nodes", &self.nodes.iter_enumerated().collect::<BTreeMap<_, _>>())
+ .finish()
+ }
+}
+
+/// DropRanges keeps track of what values are definitely dropped at each point in the code.
+///
+/// Values of interest are defined by the hir_id of their place. Locations in code are identified
+/// by their index in the post-order traversal. At its core, DropRanges maps
+/// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely
+/// dropped at the point of the node identified by post_order_id.
+impl DropRangesBuilder {
+ /// Returns the number of values (hir_ids) that are tracked
+ fn num_values(&self) -> usize {
+ self.tracked_value_map.len()
+ }
+
+ fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo {
+ let size = self.num_values();
+ self.nodes.ensure_contains_elem(id, || NodeInfo::new(size));
+ &mut self.nodes[id]
+ }
+
+ fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) {
+ trace!("adding control edge from {:?} to {:?}", from, to);
+ self.node_mut(from).successors.push(to);
+ }
+}
+
+#[derive(Debug)]
+struct NodeInfo {
+ /// IDs of nodes that can follow this one in the control flow
+ ///
+ /// If the vec is empty, then control proceeds to the next node.
+ successors: Vec<PostOrderId>,
+
+ /// List of hir_ids that are dropped by this node.
+ drops: Vec<TrackedValueIndex>,
+
+ /// List of hir_ids that are reinitialized by this node.
+ reinits: Vec<TrackedValueIndex>,
+
+ /// Set of values that are definitely dropped at this point.
+ drop_state: BitSet<TrackedValueIndex>,
+}
+
+impl NodeInfo {
+ fn new(num_values: usize) -> Self {
+ Self {
+ successors: vec![],
+ drops: vec![],
+ reinits: vec![],
+ drop_state: BitSet::new_filled(num_values),
+ }
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs
new file mode 100644
index 000000000..bfe95852a
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs
@@ -0,0 +1,241 @@
+use super::TrackedValue;
+use crate::{
+ expr_use_visitor::{self, ExprUseVisitor},
+ FnCtxt,
+};
+use hir::{def_id::DefId, Body, HirId, HirIdMap};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_middle::ty::{ParamEnv, TyCtxt};
+use rustc_middle::{
+ hir::place::{PlaceBase, Projection, ProjectionKind},
+ ty::TypeVisitable,
+};
+
+pub(super) fn find_consumed_and_borrowed<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ def_id: DefId,
+ body: &'tcx Body<'tcx>,
+) -> ConsumedAndBorrowedPlaces {
+ let mut expr_use_visitor = ExprUseDelegate::new(fcx.tcx, fcx.param_env);
+ expr_use_visitor.consume_body(fcx, def_id, body);
+ expr_use_visitor.places
+}
+
+pub(super) struct ConsumedAndBorrowedPlaces {
+ /// Records the variables/expressions that are dropped by a given expression.
+ ///
+    /// The key is the hir-id of the expression, and the value is a set of hir-ids for variables
+ /// or values that are consumed by that expression.
+ ///
+ /// Note that this set excludes "partial drops" -- for example, a statement like `drop(x.y)` is
+ /// not considered a drop of `x`, although it would be a drop of `x.y`.
+ pub(super) consumed: HirIdMap<FxHashSet<TrackedValue>>,
+
+ /// A set of hir-ids of values or variables that are borrowed at some point within the body.
+ pub(super) borrowed: FxHashSet<TrackedValue>,
+
+    /// A set of hir-ids of expressions whose results are borrowed as temporaries at some
+    /// point within the body.
+ pub(super) borrowed_temporaries: FxHashSet<HirId>,
+}
+
+/// Works with ExprUseVisitor to find interesting values for the drop range analysis.
+///
+/// Interesting values are those that are either dropped or borrowed. For dropped values, we also
+/// record the parent expression, which is the point where the drop actually takes place.
+struct ExprUseDelegate<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ places: ConsumedAndBorrowedPlaces,
+}
+
+impl<'tcx> ExprUseDelegate<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Self {
+ Self {
+ tcx,
+ param_env,
+ places: ConsumedAndBorrowedPlaces {
+ consumed: <_>::default(),
+ borrowed: <_>::default(),
+ borrowed_temporaries: <_>::default(),
+ },
+ }
+ }
+
+ fn consume_body(&mut self, fcx: &'_ FnCtxt<'_, 'tcx>, def_id: DefId, body: &'tcx Body<'tcx>) {
+ // Run ExprUseVisitor to find where values are consumed.
+ ExprUseVisitor::new(
+ self,
+ &fcx.infcx,
+ def_id.expect_local(),
+ fcx.param_env,
+ &fcx.typeck_results.borrow(),
+ )
+ .consume_body(body);
+ }
+
+ fn mark_consumed(&mut self, consumer: HirId, target: TrackedValue) {
+ self.places.consumed.entry(consumer).or_insert_with(|| <_>::default());
+
+ debug!(?consumer, ?target, "mark_consumed");
+ self.places.consumed.get_mut(&consumer).map(|places| places.insert(target));
+ }
+
+ fn borrow_place(&mut self, place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>) {
+ self.places
+ .borrowed
+ .insert(TrackedValue::from_place_with_projections_allowed(place_with_id));
+
+        // Ordinarily a value is consumed by its parent, but in the special case of a
+ // borrowed RValue, we create a reference that lives as long as the temporary scope
+ // for that expression (typically, the innermost statement, but sometimes the enclosing
+ // block). We record this fact here so that later in generator_interior
+ // we can use the correct scope.
+ //
+ // We special case borrows through a dereference (`&*x`, `&mut *x` where `x` is
+ // some rvalue expression), since these are essentially a copy of a pointer.
+ // In other words, this borrow does not refer to the
+ // temporary (`*x`), but to the referent (whatever `x` is a borrow of).
+ //
+ // We were considering that we might encounter problems down the line if somehow,
+ // some part of the compiler were to look at this result and try to use it to
+ // drive a borrowck-like analysis (this does not currently happen, as of this writing).
+ // But even this should be fine, because the lifetime of the dereferenced reference
+ // found in the rvalue is only significant as an intermediate 'link' to the value we
+ // are producing, and we separately track whether that value is live over a yield.
+ // Example:
+ //
+ // ```notrust
+ // fn identity<T>(x: &mut T) -> &mut T { x }
+ // let a: A = ...;
+ // let y: &'y mut A = &mut *identity(&'a mut a);
+ // ^^^^^^^^^^^^^^^^^^^^^^^^^ the borrow we are talking about
+ // ```
+ //
+ // The expression `*identity(...)` is a deref of an rvalue,
+ // where the `identity(...)` (the rvalue) produces a return type
+ // of `&'rv mut A`, where `'a: 'rv`. We then assign this result to
+ // `'y`, resulting in (transitively) `'a: 'y` (i.e., while `y` is in use,
+ // `a` will be considered borrowed). Other parts of the code will ensure
+ // that if `y` is live over a yield, `&'y mut A` appears in the generator
+ // state. If `'y` is live, then any sound region analysis must conclude
+ // that `'a` is also live. So if this causes a bug, blame some other
+ // part of the code!
+ let is_deref = place_with_id
+ .place
+ .projections
+ .iter()
+ .any(|Projection { kind, .. }| *kind == ProjectionKind::Deref);
+
+ if let (false, PlaceBase::Rvalue) = (is_deref, place_with_id.place.base) {
+ self.places.borrowed_temporaries.insert(place_with_id.hir_id);
+ }
+ }
+}
+
+impl<'tcx> expr_use_visitor::Delegate<'tcx> for ExprUseDelegate<'tcx> {
+ fn consume(
+ &mut self,
+ place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
+ diag_expr_id: HirId,
+ ) {
+ let hir = self.tcx.hir();
+ let parent = match hir.find_parent_node(place_with_id.hir_id) {
+ Some(parent) => parent,
+ None => place_with_id.hir_id,
+ };
+ debug!(
+ "consume {:?}; diag_expr_id={}, using parent {}",
+ place_with_id,
+ hir.node_to_string(diag_expr_id),
+ hir.node_to_string(parent)
+ );
+ place_with_id
+ .try_into()
+ .map_or((), |tracked_value| self.mark_consumed(parent, tracked_value));
+ }
+
+ fn borrow(
+ &mut self,
+ place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
+ diag_expr_id: HirId,
+ bk: rustc_middle::ty::BorrowKind,
+ ) {
+ debug!(
+ "borrow: place_with_id = {place_with_id:#?}, diag_expr_id={diag_expr_id:#?}, \
+ borrow_kind={bk:#?}"
+ );
+
+ self.borrow_place(place_with_id);
+ }
+
+ fn copy(
+ &mut self,
+ place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
+ _diag_expr_id: HirId,
+ ) {
+ debug!("copy: place_with_id = {place_with_id:?}");
+
+ self.places
+ .borrowed
+ .insert(TrackedValue::from_place_with_projections_allowed(place_with_id));
+
+        // For copies we treat this mostly like a borrow, except that we don't add the place
+        // to borrowed_temporaries because the copy is consumed.
+ }
+
+ fn mutate(
+ &mut self,
+ assignee_place: &expr_use_visitor::PlaceWithHirId<'tcx>,
+ diag_expr_id: HirId,
+ ) {
+ debug!("mutate {assignee_place:?}; diag_expr_id={diag_expr_id:?}");
+
+ if assignee_place.place.base == PlaceBase::Rvalue
+ && assignee_place.place.projections.is_empty()
+ {
+ // Assigning to an Rvalue is illegal unless done through a dereference. We would have
+ // already gotten a type error, so we will just return here.
+ return;
+ }
+
+        // If the type being assigned needs to be dropped, then the mutation counts as a borrow
+ // since it is essentially doing `Drop::drop(&mut x); x = new_value;`.
+ let ty = self.tcx.erase_regions(assignee_place.place.base_ty);
+ if ty.needs_infer() {
+ self.tcx.sess.delay_span_bug(
+ self.tcx.hir().span(assignee_place.hir_id),
+ &format!("inference variables in {ty}"),
+ );
+ } else if ty.needs_drop(self.tcx, self.param_env) {
+ self.places
+ .borrowed
+ .insert(TrackedValue::from_place_with_projections_allowed(assignee_place));
+ }
+ }
+
+ fn bind(
+ &mut self,
+ binding_place: &expr_use_visitor::PlaceWithHirId<'tcx>,
+ diag_expr_id: HirId,
+ ) {
+ debug!("bind {binding_place:?}; diag_expr_id={diag_expr_id:?}");
+ }
+
+ fn fake_read(
+ &mut self,
+ place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
+ cause: rustc_middle::mir::FakeReadCause,
+ diag_expr_id: HirId,
+ ) {
+ debug!(
+ "fake_read place_with_id={place_with_id:?}; cause={cause:?}; diag_expr_id={diag_expr_id:?}"
+ );
+
+ // fake reads happen in places like the scrutinee of a match expression.
+ // we treat those as a borrow, much like a copy: the idea is that we are
+ // transiently creating a `&T` ref that we can read from to observe the current
+ // value (this `&T` is immediately dropped afterwards).
+ self.borrow_place(place_with_id);
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/mod.rs b/compiler/rustc_hir_typeck/src/generator_interior/mod.rs
new file mode 100644
index 000000000..b7dd599cd
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/generator_interior/mod.rs
@@ -0,0 +1,647 @@
+//! This calculates the types which have storage that lives across a suspension point in a
+//! generator from the perspective of typeck. The actual types used at runtime
+//! are calculated in `rustc_mir_transform::generator` and may be a subset of the
+//! types computed here.
+
+use self::drop_ranges::DropRanges;
+use super::FnCtxt;
+use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
+use rustc_errors::{pluralize, DelayDm};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::hir_id::HirIdSet;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{Arm, Expr, ExprKind, Guard, HirId, Pat, PatKind};
+use rustc_middle::middle::region::{self, Scope, ScopeData, YieldData};
+use rustc_middle::ty::{self, RvalueScopes, Ty, TyCtxt, TypeVisitable};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+mod drop_ranges;
+
+struct InteriorVisitor<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ region_scope_tree: &'a region::ScopeTree,
+ types: FxIndexSet<ty::GeneratorInteriorTypeCause<'tcx>>,
+ rvalue_scopes: &'a RvalueScopes,
+ expr_count: usize,
+ kind: hir::GeneratorKind,
+ prev_unresolved_span: Option<Span>,
+ linted_values: HirIdSet,
+ drop_ranges: DropRanges,
+}
+
+impl<'a, 'tcx> InteriorVisitor<'a, 'tcx> {
+ fn record(
+ &mut self,
+ ty: Ty<'tcx>,
+ hir_id: HirId,
+ scope: Option<region::Scope>,
+ expr: Option<&'tcx Expr<'tcx>>,
+ source_span: Span,
+ ) {
+ use rustc_span::DUMMY_SP;
+
+ let ty = self.fcx.resolve_vars_if_possible(ty);
+
+ debug!(
+ "attempting to record type ty={:?}; hir_id={:?}; scope={:?}; expr={:?}; source_span={:?}; expr_count={:?}",
+ ty, hir_id, scope, expr, source_span, self.expr_count,
+ );
+
+ let live_across_yield = scope
+ .map(|s| {
+ self.region_scope_tree.yield_in_scope(s).and_then(|yield_data| {
+ // If we are recording an expression that is the last yield
+ // in the scope, or that has a postorder CFG index larger
+ // than the one of all of the yields, then its value can't
+ // be storage-live (and therefore live) at any of the yields.
+ //
+ // See the mega-comment at `yield_in_scope` for a proof.
+
+ yield_data
+ .iter()
+ .find(|yield_data| {
+ debug!(
+ "comparing counts yield: {} self: {}, source_span = {:?}",
+ yield_data.expr_and_pat_count, self.expr_count, source_span
+ );
+
+ if self.fcx.sess().opts.unstable_opts.drop_tracking
+ && self
+ .drop_ranges
+ .is_dropped_at(hir_id, yield_data.expr_and_pat_count)
+ {
+ debug!("value is dropped at yield point; not recording");
+ return false;
+ }
+
+                            // If it is a borrow that happens in the guard,
+                            // it needs to be recorded regardless, because such
+                            // borrows do live across this yield point.
+ yield_data.expr_and_pat_count >= self.expr_count
+ })
+ .cloned()
+ })
+ })
+ .unwrap_or_else(|| {
+ Some(YieldData { span: DUMMY_SP, expr_and_pat_count: 0, source: self.kind.into() })
+ });
+
+ if let Some(yield_data) = live_across_yield {
+ debug!(
+ "type in expr = {:?}, scope = {:?}, type = {:?}, count = {}, yield_span = {:?}",
+ expr, scope, ty, self.expr_count, yield_data.span
+ );
+
+ if let Some((unresolved_type, unresolved_type_span)) =
+ self.fcx.unresolved_type_vars(&ty)
+ {
+                // If the unresolved type isn't a ty_var, then unresolved_type_span is None
+ let span = self
+ .prev_unresolved_span
+ .unwrap_or_else(|| unresolved_type_span.unwrap_or(source_span));
+
+ // If we encounter an int/float variable, then inference fallback didn't
+ // finish due to some other error. Don't emit spurious additional errors.
+ if let ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(_)) =
+ unresolved_type.kind()
+ {
+ self.fcx
+ .tcx
+ .sess
+ .delay_span_bug(span, &format!("Encountered var {:?}", unresolved_type));
+ } else {
+ let note = format!(
+ "the type is part of the {} because of this {}",
+ self.kind, yield_data.source
+ );
+
+ self.fcx
+ .need_type_info_err_in_generator(self.kind, span, unresolved_type)
+ .span_note(yield_data.span, &*note)
+ .emit();
+ }
+ } else {
+ // Insert the type into the ordered set.
+ let scope_span = scope.map(|s| s.span(self.fcx.tcx, self.region_scope_tree));
+
+ if !self.linted_values.contains(&hir_id) {
+ check_must_not_suspend_ty(
+ self.fcx,
+ ty,
+ hir_id,
+ SuspendCheckData {
+ expr,
+ source_span,
+ yield_span: yield_data.span,
+ plural_len: 1,
+ ..Default::default()
+ },
+ );
+ self.linted_values.insert(hir_id);
+ }
+
+ self.types.insert(ty::GeneratorInteriorTypeCause {
+ span: source_span,
+ ty,
+ scope_span,
+ yield_span: yield_data.span,
+ expr: expr.map(|e| e.hir_id),
+ });
+ }
+ } else {
+ debug!(
+ "no type in expr = {:?}, count = {:?}, span = {:?}",
+ expr,
+ self.expr_count,
+ expr.map(|e| e.span)
+ );
+ if let Some((unresolved_type, unresolved_type_span)) =
+ self.fcx.unresolved_type_vars(&ty)
+ {
+ debug!(
+ "remained unresolved_type = {:?}, unresolved_type_span: {:?}",
+ unresolved_type, unresolved_type_span
+ );
+ self.prev_unresolved_span = unresolved_type_span;
+ }
+ }
+ }
+}
+
+pub fn resolve_interior<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ def_id: DefId,
+ body_id: hir::BodyId,
+ interior: Ty<'tcx>,
+ kind: hir::GeneratorKind,
+) {
+ let body = fcx.tcx.hir().body(body_id);
+ let typeck_results = fcx.inh.typeck_results.borrow();
+ let mut visitor = InteriorVisitor {
+ fcx,
+ types: FxIndexSet::default(),
+ region_scope_tree: fcx.tcx.region_scope_tree(def_id),
+ rvalue_scopes: &typeck_results.rvalue_scopes,
+ expr_count: 0,
+ kind,
+ prev_unresolved_span: None,
+ linted_values: <_>::default(),
+ drop_ranges: drop_ranges::compute_drop_ranges(fcx, def_id, body),
+ };
+ intravisit::walk_body(&mut visitor, body);
+
+ // Check that we visited the same amount of expressions as the RegionResolutionVisitor
+ let region_expr_count = fcx.tcx.region_scope_tree(def_id).body_expr_count(body_id).unwrap();
+ assert_eq!(region_expr_count, visitor.expr_count);
+
+ // The types are already kept in insertion order.
+ let types = visitor.types;
+
+ // The types in the generator interior contain lifetimes local to the generator itself,
+ // which should not be exposed outside of the generator. Therefore, we replace these
+ // lifetimes with existentially-bound lifetimes, which reflect the exact value of the
+ // lifetimes not being known by users.
+ //
+ // These lifetimes are used in auto trait impl checking (for example,
+ // if a Sync generator contains an &'α T, we need to check whether &'α T: Sync),
+ // so knowledge of the exact relationships between them isn't particularly important.
+
+ debug!("types in generator {:?}, span = {:?}", types, body.value.span);
+
+ let mut counter = 0;
+ let mut captured_tys = FxHashSet::default();
+ let type_causes: Vec<_> = types
+ .into_iter()
+ .filter_map(|mut cause| {
+ // Erase regions and canonicalize late-bound regions to deduplicate as many types as we
+ // can.
+ let ty = fcx.normalize_associated_types_in(cause.span, cause.ty);
+ let erased = fcx.tcx.erase_regions(ty);
+ if captured_tys.insert(erased) {
+ // Replace all regions inside the generator interior with late bound regions.
+ // Note that each region slot in the types gets a new fresh late bound region,
+ // which means that none of the regions inside relate to any other, even if
+ // typeck had previously found constraints that would cause them to be related.
+ let folded = fcx.tcx.fold_regions(erased, |_, current_depth| {
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_u32(counter),
+ kind: ty::BrAnon(counter),
+ };
+ let r = fcx.tcx.mk_region(ty::ReLateBound(current_depth, br));
+ counter += 1;
+ r
+ });
+
+ cause.ty = folded;
+ Some(cause)
+ } else {
+ None
+ }
+ })
+ .collect();
+
+ // Extract type components to build the witness type.
+ let type_list = fcx.tcx.mk_type_list(type_causes.iter().map(|cause| cause.ty));
+ let bound_vars = fcx.tcx.mk_bound_variable_kinds(
+ (0..counter).map(|i| ty::BoundVariableKind::Region(ty::BrAnon(i))),
+ );
+ let witness =
+ fcx.tcx.mk_generator_witness(ty::Binder::bind_with_vars(type_list, bound_vars.clone()));
+
+ drop(typeck_results);
+ // Store the generator types and spans into the typeck results for this generator.
+ fcx.inh.typeck_results.borrow_mut().generator_interior_types =
+ ty::Binder::bind_with_vars(type_causes, bound_vars);
+
+ debug!(
+ "types in generator after region replacement {:?}, span = {:?}",
+ witness, body.value.span
+ );
+
+ // Unify the type variable inside the generator with the new witness
+ match fcx.at(&fcx.misc(body.value.span), fcx.param_env).eq(interior, witness) {
+ Ok(ok) => fcx.register_infer_ok_obligations(ok),
+ _ => bug!("failed to relate {interior} and {witness}"),
+ }
+}
+
+// This visitor has to have the same visit_expr calls as RegionResolutionVisitor in
+// rustc_middle/src/middle/region.rs since `expr_count` is compared against the results
+// there.
+impl<'a, 'tcx> Visitor<'tcx> for InteriorVisitor<'a, 'tcx> {
+ fn visit_arm(&mut self, arm: &'tcx Arm<'tcx>) {
+ let Arm { guard, pat, body, .. } = arm;
+ self.visit_pat(pat);
+ if let Some(ref g) = guard {
+ {
+ // If there is a guard, we need to count all variables bound in the pattern as
+ // borrowed for the entire guard body, regardless of whether they are accessed.
+ // We do this by walking the pattern bindings and recording `&T` for any `x: T`
+ // that is bound.
+
+ struct ArmPatCollector<'a, 'b, 'tcx> {
+ interior_visitor: &'a mut InteriorVisitor<'b, 'tcx>,
+ scope: Scope,
+ }
+
+ impl<'a, 'b, 'tcx> Visitor<'tcx> for ArmPatCollector<'a, 'b, 'tcx> {
+ fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) {
+ intravisit::walk_pat(self, pat);
+ if let PatKind::Binding(_, id, ident, ..) = pat.kind {
+ let ty =
+ self.interior_visitor.fcx.typeck_results.borrow().node_type(id);
+ let tcx = self.interior_visitor.fcx.tcx;
+ let ty = tcx.mk_ref(
+ // Use `ReErased` as `resolve_interior` is going to replace all the
+ // regions anyway.
+ tcx.mk_region(ty::ReErased),
+ ty::TypeAndMut { ty, mutbl: hir::Mutability::Not },
+ );
+ self.interior_visitor.record(
+ ty,
+ id,
+ Some(self.scope),
+ None,
+ ident.span,
+ );
+ }
+ }
+ }
+
+ ArmPatCollector {
+ interior_visitor: self,
+ scope: Scope { id: g.body().hir_id.local_id, data: ScopeData::Node },
+ }
+ .visit_pat(pat);
+ }
+
+ match g {
+ Guard::If(ref e) => {
+ self.visit_expr(e);
+ }
+ Guard::IfLet(ref l) => {
+ self.visit_let_expr(l);
+ }
+ }
+ }
+ self.visit_expr(body);
+ }
+
+ fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) {
+ intravisit::walk_pat(self, pat);
+
+ self.expr_count += 1;
+
+ if let PatKind::Binding(..) = pat.kind {
+ let scope = self.region_scope_tree.var_scope(pat.hir_id.local_id).unwrap();
+ let ty = self.fcx.typeck_results.borrow().pat_ty(pat);
+ self.record(ty, pat.hir_id, Some(scope), None, pat.span);
+ }
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
+ match &expr.kind {
+ ExprKind::Call(callee, args) => match &callee.kind {
+ ExprKind::Path(qpath) => {
+ let res = self.fcx.typeck_results.borrow().qpath_res(qpath, callee.hir_id);
+ match res {
+ // Direct calls never need to keep the callee `ty::FnDef`
+ // ZST in a temporary, so skip its type, just in case it
+ // can significantly complicate the generator type.
+ Res::Def(
+ DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(_, CtorKind::Fn),
+ _,
+ ) => {
+ // NOTE(eddyb) this assumes a path expression has
+ // no nested expressions to keep track of.
+ self.expr_count += 1;
+
+ // Record the rest of the call expression normally.
+ for arg in *args {
+ self.visit_expr(arg);
+ }
+ }
+ _ => intravisit::walk_expr(self, expr),
+ }
+ }
+ _ => intravisit::walk_expr(self, expr),
+ },
+ _ => intravisit::walk_expr(self, expr),
+ }
+
+ self.expr_count += 1;
+
+ debug!("is_borrowed_temporary: {:?}", self.drop_ranges.is_borrowed_temporary(expr));
+
+ let ty = self.fcx.typeck_results.borrow().expr_ty_adjusted_opt(expr);
+
+ // Typically, the value produced by an expression is consumed by its parent in some way,
+ // so we only have to check if the parent contains a yield (note that the parent may, for
+ // example, store the value into a local variable, but then we already consider local
+ // variables to be live across their scope).
+ //
+ // However, in the case of temporary values, we are going to store the value into a
+ // temporary on the stack that is live for the current temporary scope and then return a
+ // reference to it. That value may be live across the entire temporary scope.
+ //
+ // There's another subtlety: if the type has an observable drop, it must be dropped after
+ // the yield, even if it's not borrowed or referenced after the yield. Ideally this would
+ // *only* happen for types with observable drop, not all types which wrap them, but that
+ // doesn't match the behavior of MIR borrowck and causes ICEs. See the FIXME comment in
+ // src/test/ui/generator/drop-tracking-parent-expression.rs.
+ let scope = if self.drop_ranges.is_borrowed_temporary(expr)
+ || ty.map_or(true, |ty| {
+ // Avoid ICEs in needs_drop.
+ let ty = self.fcx.resolve_vars_if_possible(ty);
+ let ty = self.fcx.tcx.erase_regions(ty);
+ if ty.needs_infer() {
+ self.fcx
+ .tcx
+ .sess
+ .delay_span_bug(expr.span, &format!("inference variables in {ty}"));
+ true
+ } else {
+ ty.needs_drop(self.fcx.tcx, self.fcx.param_env)
+ }
+ }) {
+ self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id)
+ } else {
+ let parent_expr = self
+ .fcx
+ .tcx
+ .hir()
+ .parent_iter(expr.hir_id)
+ .find(|(_, node)| matches!(node, hir::Node::Expr(_)))
+ .map(|(id, _)| id);
+ debug!("parent_expr: {:?}", parent_expr);
+ match parent_expr {
+ Some(parent) => Some(Scope { id: parent.local_id, data: ScopeData::Node }),
+ None => {
+ self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id)
+ }
+ }
+ };
+
+ // If there are adjustments, then record the final type --
+ // this is the actual value that is being produced.
+ if let Some(adjusted_ty) = ty {
+ self.record(adjusted_ty, expr.hir_id, scope, Some(expr), expr.span);
+ }
+
+ // Also record the unadjusted type (which is the only type if
+ // there are no adjustments). The reason for this is that the
+ // unadjusted value is sometimes a "temporary" that would wind
+ // up in a MIR temporary.
+ //
+ // As an example, consider an expression like `vec![].push(x)`.
+        // Here, the `vec![]` would wind up stored into a MIR
+        // temporary variable `t` which we can borrow to invoke
+ // `<Vec<_>>::push(&mut t, x)`.
+ //
+ // Note that an expression can have many adjustments, and we
+ // are just ignoring those intermediate types. This is because
+ // those intermediate values are always linearly "consumed" by
+ // the other adjustments, and hence would never be directly
+ // captured in the MIR.
+ //
+ // (Note that this partly relies on the fact that the `Deref`
+ // traits always return references, which means their content
+ // can be reborrowed without needing to spill to a temporary.
+ // If this were not the case, then we could conceivably have
+ // to create intermediate temporaries.)
+ //
+ // The type table might not have information for this expression
+ // if it is in a malformed scope. (#66387)
+ if let Some(ty) = self.fcx.typeck_results.borrow().expr_ty_opt(expr) {
+ self.record(ty, expr.hir_id, scope, Some(expr), expr.span);
+ } else {
+ self.fcx.tcx.sess.delay_span_bug(expr.span, "no type for node");
+ }
+ }
+}
+
+#[derive(Default)]
+struct SuspendCheckData<'a, 'tcx> {
+ expr: Option<&'tcx Expr<'tcx>>,
+ source_span: Span,
+ yield_span: Span,
+ descr_pre: &'a str,
+ descr_post: &'a str,
+ plural_len: usize,
+}
+
+// Returns whether it emitted a diagnostic or not
+// Note that this fn and the following one are based on the code
+// for creating must_use diagnostics
+//
+// Note that this technique was chosen over things like a `Suspend` marker trait
+// as it is simpler and has precedent in the compiler
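+//
+// As an illustrative (hypothetical) example of what this catches, a type annotated with
+// `#[must_not_suspend]` that is held live across an `.await` triggers the lint:
+//
+// ```ignore (illustrative)
+// #[must_not_suspend = "holding a Guard across a suspend point can deadlock"]
+// struct Guard;
+//
+// async fn f() {
+//     let g = Guard;
+//     async {}.await; // lint: `Guard` held across a suspend point, but should not be
+// }
+// ```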
+fn check_must_not_suspend_ty<'tcx>(
+ fcx: &FnCtxt<'_, 'tcx>,
+ ty: Ty<'tcx>,
+ hir_id: HirId,
+ data: SuspendCheckData<'_, 'tcx>,
+) -> bool {
+ if ty.is_unit()
+    // FIXME: should this check `is_ty_uninhabited_from`? This query is not available in this stage
+ // of typeck (before ReVar and RePlaceholder are removed), but may remove noise, like in
+ // `must_use`
+ // || fcx.tcx.is_ty_uninhabited_from(fcx.tcx.parent_module(hir_id).to_def_id(), ty, fcx.param_env)
+ {
+ return false;
+ }
+
+ let plural_suffix = pluralize!(data.plural_len);
+
+ debug!("Checking must_not_suspend for {}", ty);
+
+ match *ty.kind() {
+ ty::Adt(..) if ty.is_box() => {
+ let boxed_ty = ty.boxed_ty();
+ let descr_pre = &format!("{}boxed ", data.descr_pre);
+ check_must_not_suspend_ty(fcx, boxed_ty, hir_id, SuspendCheckData { descr_pre, ..data })
+ }
+ ty::Adt(def, _) => check_must_not_suspend_def(fcx.tcx, def.did(), hir_id, data),
+ // FIXME: support adding the attribute to TAITs
+ ty::Opaque(def, _) => {
+ let mut has_emitted = false;
+ for &(predicate, _) in fcx.tcx.explicit_item_bounds(def) {
+ // We only look at the `DefId`, so it is safe to skip the binder here.
+ if let ty::PredicateKind::Trait(ref poly_trait_predicate) =
+ predicate.kind().skip_binder()
+ {
+ let def_id = poly_trait_predicate.trait_ref.def_id;
+ let descr_pre = &format!("{}implementer{} of ", data.descr_pre, plural_suffix);
+ if check_must_not_suspend_def(
+ fcx.tcx,
+ def_id,
+ hir_id,
+ SuspendCheckData { descr_pre, ..data },
+ ) {
+ has_emitted = true;
+ break;
+ }
+ }
+ }
+ has_emitted
+ }
+ ty::Dynamic(binder, _, _) => {
+ let mut has_emitted = false;
+ for predicate in binder.iter() {
+ if let ty::ExistentialPredicate::Trait(ref trait_ref) = predicate.skip_binder() {
+ let def_id = trait_ref.def_id;
+ let descr_post = &format!(" trait object{}{}", plural_suffix, data.descr_post);
+ if check_must_not_suspend_def(
+ fcx.tcx,
+ def_id,
+ hir_id,
+ SuspendCheckData { descr_post, ..data },
+ ) {
+ has_emitted = true;
+ break;
+ }
+ }
+ }
+ has_emitted
+ }
+ ty::Tuple(fields) => {
+ let mut has_emitted = false;
+ let comps = match data.expr.map(|e| &e.kind) {
+ Some(hir::ExprKind::Tup(comps)) => {
+ debug_assert_eq!(comps.len(), fields.len());
+ Some(comps)
+ }
+ _ => None,
+ };
+ for (i, ty) in fields.iter().enumerate() {
+ let descr_post = &format!(" in tuple element {i}");
+ let span = comps.and_then(|c| c.get(i)).map(|e| e.span).unwrap_or(data.source_span);
+ if check_must_not_suspend_ty(
+ fcx,
+ ty,
+ hir_id,
+ SuspendCheckData {
+ descr_post,
+ expr: comps.and_then(|comps| comps.get(i)),
+ source_span: span,
+ ..data
+ },
+ ) {
+ has_emitted = true;
+ }
+ }
+ has_emitted
+ }
+ ty::Array(ty, len) => {
+ let descr_pre = &format!("{}array{} of ", data.descr_pre, plural_suffix);
+ check_must_not_suspend_ty(
+ fcx,
+ ty,
+ hir_id,
+ SuspendCheckData {
+ descr_pre,
+ plural_len: len.try_eval_usize(fcx.tcx, fcx.param_env).unwrap_or(0) as usize
+ + 1,
+ ..data
+ },
+ )
+ }
+        // If drop tracking is enabled, we want to look through references, since the referent
+        // may not be considered live across the await point.
+ ty::Ref(_region, ty, _mutability) if fcx.sess().opts.unstable_opts.drop_tracking => {
+ let descr_pre = &format!("{}reference{} to ", data.descr_pre, plural_suffix);
+ check_must_not_suspend_ty(fcx, ty, hir_id, SuspendCheckData { descr_pre, ..data })
+ }
+ _ => false,
+ }
+}
+
+fn check_must_not_suspend_def(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+ hir_id: HirId,
+ data: SuspendCheckData<'_, '_>,
+) -> bool {
+ if let Some(attr) = tcx.get_attr(def_id, sym::must_not_suspend) {
+ tcx.struct_span_lint_hir(
+ rustc_session::lint::builtin::MUST_NOT_SUSPEND,
+ hir_id,
+ data.source_span,
+ DelayDm(|| {
+ format!(
+ "{}`{}`{} held across a suspend point, but should not be",
+ data.descr_pre,
+ tcx.def_path_str(def_id),
+ data.descr_post,
+ )
+ }),
+ |lint| {
+ // add span pointing to the offending yield/await
+ lint.span_label(data.yield_span, "the value is held across this suspend point");
+
+ // Add optional reason note
+ if let Some(note) = attr.value_str() {
+ // FIXME(guswynn): consider formatting this better
+ lint.span_note(data.source_span, note.as_str());
+ }
+
+ // Add some quick suggestions on what to do
+ // FIXME: can `drop` work as a suggestion here as well?
+ lint.span_help(
+ data.source_span,
+ "consider using a block (`{ ... }`) \
+ to shrink the value's scope, ending before the suspend point",
+ );
+
+ lint
+ },
+ );
+
+ true
+ } else {
+ false
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/inherited.rs b/compiler/rustc_hir_typeck/src/inherited.rs
new file mode 100644
index 000000000..0fb7651b3
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/inherited.rs
@@ -0,0 +1,213 @@
+use super::callee::DeferredCallResolution;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::sync::Lrc;
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::HirIdMap;
+use rustc_infer::infer;
+use rustc_infer::infer::{DefiningAnchor, InferCtxt, InferOk, TyCtxtInferExt};
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::def_id::LocalDefIdMap;
+use rustc_span::{self, Span};
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::traits::{
+ self, ObligationCause, ObligationCtxt, TraitEngine, TraitEngineExt as _,
+};
+
+use std::cell::RefCell;
+use std::ops::Deref;
+
+/// Closures defined within the function. For example:
+/// ```ignore (illustrative)
+/// fn foo() {
+/// bar(move|| { ... })
+/// }
+/// ```
+/// Here, the function `foo()` and the closure passed to
+/// `bar()` will each have their own `FnCtxt`, but they will
+/// share the inherited fields.
+pub struct Inherited<'tcx> {
+ pub(super) infcx: InferCtxt<'tcx>,
+
+ pub(super) typeck_results: RefCell<ty::TypeckResults<'tcx>>,
+
+ pub(super) locals: RefCell<HirIdMap<super::LocalTy<'tcx>>>,
+
+ pub(super) fulfillment_cx: RefCell<Box<dyn TraitEngine<'tcx>>>,
+
+ // Some additional `Sized` obligations badly affect type inference.
+ // These obligations are added in a later stage of typeck.
+ // Removing these may also cause additional complications, see #101066.
+ pub(super) deferred_sized_obligations:
+ RefCell<Vec<(Ty<'tcx>, Span, traits::ObligationCauseCode<'tcx>)>>,
+
+ // When we process a call like `c()` where `c` is a closure type,
+ // we may not have decided yet whether `c` is a `Fn`, `FnMut`, or
+ // `FnOnce` closure. In that case, we defer full resolution of the
+ // call until upvar inference can kick in and make the
+ // decision. We keep these deferred resolutions grouped by the
+ // def-id of the closure, so that once we decide, we can easily go
+ // back and process them.
+ pub(super) deferred_call_resolutions: RefCell<LocalDefIdMap<Vec<DeferredCallResolution<'tcx>>>>,
+
+ pub(super) deferred_cast_checks: RefCell<Vec<super::cast::CastCheck<'tcx>>>,
+
+ pub(super) deferred_transmute_checks: RefCell<Vec<(Ty<'tcx>, Ty<'tcx>, hir::HirId)>>,
+
+ pub(super) deferred_asm_checks: RefCell<Vec<(&'tcx hir::InlineAsm<'tcx>, hir::HirId)>>,
+
+ pub(super) deferred_generator_interiors:
+ RefCell<Vec<(hir::BodyId, Ty<'tcx>, hir::GeneratorKind)>>,
+
+ pub(super) body_id: Option<hir::BodyId>,
+
+ /// Whenever we introduce an adjustment from `!` into a type variable,
+ /// we record that type variable here. This is later used to inform
+ /// fallback. See the `fallback` module for details.
+ pub(super) diverging_type_vars: RefCell<FxHashSet<Ty<'tcx>>>,
+}
+
+impl<'tcx> Deref for Inherited<'tcx> {
+ type Target = InferCtxt<'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.infcx
+ }
+}
+
+/// A temporary returned by `Inherited::build(...)`. This is necessary
+/// for multiple `InferCtxt` to share the same `typeck_results`
+/// without using `Rc` or something similar.
+pub struct InheritedBuilder<'tcx> {
+ infcx: infer::InferCtxtBuilder<'tcx>,
+ def_id: LocalDefId,
+ typeck_results: RefCell<ty::TypeckResults<'tcx>>,
+}
+
+impl<'tcx> Inherited<'tcx> {
+ pub fn build(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> InheritedBuilder<'tcx> {
+ let hir_owner = tcx.hir().local_def_id_to_hir_id(def_id).owner;
+
+ InheritedBuilder {
+ infcx: tcx
+ .infer_ctxt()
+ .ignoring_regions()
+ .with_opaque_type_inference(DefiningAnchor::Bind(hir_owner.def_id))
+ .with_normalize_fn_sig_for_diagnostic(Lrc::new(move |infcx, fn_sig| {
+ if fn_sig.has_escaping_bound_vars() {
+ return fn_sig;
+ }
+ infcx.probe(|_| {
+ let ocx = ObligationCtxt::new_in_snapshot(infcx);
+ let normalized_fn_sig = ocx.normalize(
+ ObligationCause::dummy(),
+ // FIXME(compiler-errors): This is probably not the right param-env...
+ infcx.tcx.param_env(def_id),
+ fn_sig,
+ );
+ if ocx.select_all_or_error().is_empty() {
+ let normalized_fn_sig =
+ infcx.resolve_vars_if_possible(normalized_fn_sig);
+ if !normalized_fn_sig.needs_infer() {
+ return normalized_fn_sig;
+ }
+ }
+ fn_sig
+ })
+ })),
+ def_id,
+ typeck_results: RefCell::new(ty::TypeckResults::new(hir_owner)),
+ }
+ }
+}
+
+impl<'tcx> InheritedBuilder<'tcx> {
+ pub fn enter<F, R>(mut self, f: F) -> R
+ where
+ F: FnOnce(&Inherited<'tcx>) -> R,
+ {
+ let def_id = self.def_id;
+ f(&Inherited::new(self.infcx.build(), def_id, self.typeck_results))
+ }
+}
+
+impl<'tcx> Inherited<'tcx> {
+ fn new(
+ infcx: InferCtxt<'tcx>,
+ def_id: LocalDefId,
+ typeck_results: RefCell<ty::TypeckResults<'tcx>>,
+ ) -> Self {
+ let tcx = infcx.tcx;
+ let body_id = tcx.hir().maybe_body_owned_by(def_id);
+
+ Inherited {
+ typeck_results,
+ infcx,
+ fulfillment_cx: RefCell::new(<dyn TraitEngine<'_>>::new(tcx)),
+ locals: RefCell::new(Default::default()),
+ deferred_sized_obligations: RefCell::new(Vec::new()),
+ deferred_call_resolutions: RefCell::new(Default::default()),
+ deferred_cast_checks: RefCell::new(Vec::new()),
+ deferred_transmute_checks: RefCell::new(Vec::new()),
+ deferred_asm_checks: RefCell::new(Vec::new()),
+ deferred_generator_interiors: RefCell::new(Vec::new()),
+ diverging_type_vars: RefCell::new(Default::default()),
+ body_id,
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ pub(super) fn register_predicate(&self, obligation: traits::PredicateObligation<'tcx>) {
+ if obligation.has_escaping_bound_vars() {
+ span_bug!(obligation.cause.span, "escaping bound vars in predicate {:?}", obligation);
+ }
+ self.fulfillment_cx.borrow_mut().register_predicate_obligation(self, obligation);
+ }
+
+ pub(super) fn register_predicates<I>(&self, obligations: I)
+ where
+ I: IntoIterator<Item = traits::PredicateObligation<'tcx>>,
+ {
+ for obligation in obligations {
+ self.register_predicate(obligation);
+ }
+ }
+
+ pub(super) fn register_infer_ok_obligations<T>(&self, infer_ok: InferOk<'tcx, T>) -> T {
+ self.register_predicates(infer_ok.obligations);
+ infer_ok.value
+ }
+
+ pub(super) fn normalize_associated_types_in<T>(
+ &self,
+ span: Span,
+ body_id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.normalize_associated_types_in_with_cause(
+ ObligationCause::misc(span, body_id),
+ param_env,
+ value,
+ )
+ }
+
+ pub(super) fn normalize_associated_types_in_with_cause<T>(
+ &self,
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let ok = self.partially_normalize_associated_types_in(cause, param_env, value);
+ debug!(?ok);
+ self.register_infer_ok_obligations(ok)
+ }
+}
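As a plain-Rust illustration of the `deferred_call_resolutions` comment above: when a call like `c()` is first type-checked, the closure's `Fn`/`FnMut`/`FnOnce` kind may still be an unresolved inference variable, so resolution of the call is parked until upvar analysis has run. This is only a sketch of the user-visible situation, not of the compiler API.

```rust
fn main() {
    let mut n = 0;

    // When the calls below are type-checked, upvar inference has not yet
    // decided how `n` is captured, so the closure's kind (here `FnMut`) is
    // still open; full resolution of each call is deferred until it is.
    let mut c = || n += 1;
    c();
    c();

    assert_eq!(n, 2);
}
```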
diff --git a/compiler/rustc_hir_typeck/src/intrinsicck.rs b/compiler/rustc_hir_typeck/src/intrinsicck.rs
new file mode 100644
index 000000000..9812d96fc
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/intrinsicck.rs
@@ -0,0 +1,108 @@
+use hir::HirId;
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_index::vec::Idx;
+use rustc_middle::ty::layout::{LayoutError, SizeSkeleton};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_target::abi::{Pointer, VariantIdx};
+
+use super::FnCtxt;
+
+/// If the type is `Option<T>`, it will return `T`, otherwise
+/// the type itself. Works on most `Option`-like types.
+fn unpack_option_like<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let ty::Adt(def, substs) = *ty.kind() else { return ty };
+
+ if def.variants().len() == 2 && !def.repr().c() && def.repr().int.is_none() {
+ let data_idx;
+
+ let one = VariantIdx::new(1);
+ let zero = VariantIdx::new(0);
+
+ if def.variant(zero).fields.is_empty() {
+ data_idx = one;
+ } else if def.variant(one).fields.is_empty() {
+ data_idx = zero;
+ } else {
+ return ty;
+ }
+
+ if def.variant(data_idx).fields.len() == 1 {
+ return def.variant(data_idx).fields[0].ty(tcx, substs);
+ }
+ }
+
+ ty
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn check_transmute(&self, from: Ty<'tcx>, to: Ty<'tcx>, hir_id: HirId) {
+ let tcx = self.tcx;
+ let span = tcx.hir().span(hir_id);
+ let normalize = |ty| {
+ let ty = self.resolve_vars_if_possible(ty);
+ self.tcx.normalize_erasing_regions(self.param_env, ty)
+ };
+ let from = normalize(from);
+ let to = normalize(to);
+ trace!(?from, ?to);
+
+ // Transmutes that are only changing lifetimes are always ok.
+ if from == to {
+ return;
+ }
+
+ let skel = |ty| SizeSkeleton::compute(ty, tcx, self.param_env);
+ let sk_from = skel(from);
+ let sk_to = skel(to);
+ trace!(?sk_from, ?sk_to);
+
+ // Check for same size using the skeletons.
+ if let (Ok(sk_from), Ok(sk_to)) = (sk_from, sk_to) {
+ if sk_from.same_size(sk_to) {
+ return;
+ }
+
+ // Special-case transmuting from `typeof(function)` and
+ // `Option<typeof(function)>` to present a clearer error.
+ let from = unpack_option_like(tcx, from);
+ if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (from.kind(), sk_to) && size_to == Pointer.size(&tcx) {
+ struct_span_err!(tcx.sess, span, E0591, "can't transmute zero-sized type")
+ .note(&format!("source type: {from}"))
+ .note(&format!("target type: {to}"))
+ .help("cast with `as` to a pointer instead")
+ .emit();
+ return;
+ }
+ }
+
+ // Try to display a sensible error with as much information as possible.
+ let skeleton_string = |ty: Ty<'tcx>, sk| match sk {
+ Ok(SizeSkeleton::Known(size)) => format!("{} bits", size.bits()),
+ Ok(SizeSkeleton::Pointer { tail, .. }) => format!("pointer to `{tail}`"),
+ Err(LayoutError::Unknown(bad)) => {
+ if bad == ty {
+ "this type does not have a fixed size".to_owned()
+ } else {
+ format!("size can vary because of {bad}")
+ }
+ }
+ Err(err) => err.to_string(),
+ };
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0512,
+ "cannot transmute between types of different sizes, \
+ or dependently-sized types"
+ );
+ if from == to {
+ err.note(&format!("`{from}` does not have a fixed size"));
+ } else {
+ err.note(&format!("source type: `{}` ({})", from, skeleton_string(from, sk_from)))
+ .note(&format!("target type: `{}` ({})", to, skeleton_string(to, sk_to)));
+ }
+ err.emit();
+ }
+}
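A user-level sketch of the rule `check_transmute` enforces: both sides of `mem::transmute` must have the same statically known size, otherwise the call is rejected (E0512, or E0591 for the zero-sized fn-item special case handled above).

```rust
use std::mem;

fn main() {
    // Same size on both sides (4 bytes): passes the size-skeleton check.
    let bytes: [u8; 4] = unsafe { mem::transmute(0x0102_0304u32) };
    assert_eq!(u32::from_ne_bytes(bytes), 0x0102_0304);

    // Different sizes are rejected at compile time with E0512:
    // let bad: [u8; 8] = unsafe { mem::transmute(0u32) };
}
```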
diff --git a/compiler/rustc_hir_typeck/src/lib.rs b/compiler/rustc_hir_typeck/src/lib.rs
new file mode 100644
index 000000000..959c54866
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/lib.rs
@@ -0,0 +1,507 @@
+#![feature(if_let_guard)]
+#![feature(let_chains)]
+#![feature(try_blocks)]
+#![feature(never_type)]
+#![feature(min_specialization)]
+#![feature(control_flow_enum)]
+#![feature(drain_filter)]
+#![allow(rustc::potential_query_instability)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate tracing;
+
+#[macro_use]
+extern crate rustc_middle;
+
+mod _match;
+mod autoderef;
+mod callee;
+// Used by clippy;
+pub mod cast;
+mod check;
+mod closure;
+mod coercion;
+mod demand;
+mod diverges;
+mod errors;
+mod expectation;
+mod expr;
+// Used by clippy;
+pub mod expr_use_visitor;
+mod fallback;
+mod fn_ctxt;
+mod gather_locals;
+mod generator_interior;
+mod inherited;
+mod intrinsicck;
+mod mem_categorization;
+mod method;
+mod op;
+mod pat;
+mod place_op;
+mod rvalue_scopes;
+mod upvar;
+mod writeback;
+
+pub use diverges::Diverges;
+pub use expectation::Expectation;
+pub use fn_ctxt::*;
+pub use inherited::{Inherited, InheritedBuilder};
+
+use crate::check::check_fn;
+use crate::coercion::DynamicCoerceMany;
+use crate::gather_locals::GatherLocalsVisitor;
+use rustc_data_structures::unord::UnordSet;
+use rustc_errors::{struct_span_err, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::{HirIdMap, Node};
+use rustc_hir_analysis::astconv::AstConv;
+use rustc_hir_analysis::check::check_abi;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::traits;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_session::config;
+use rustc_session::Session;
+use rustc_span::def_id::{DefId, LocalDefId};
+use rustc_span::Span;
+
+#[macro_export]
+macro_rules! type_error_struct {
+ ($session:expr, $span:expr, $typ:expr, $code:ident, $($message:tt)*) => ({
+ let mut err = rustc_errors::struct_span_err!($session, $span, $code, $($message)*);
+
+ if $typ.references_error() {
+ err.downgrade_to_delayed_bug();
+ }
+
+ err
+ })
+}
+
+/// The type of a local binding, including the revealed type for anon types.
+#[derive(Copy, Clone, Debug)]
+pub struct LocalTy<'tcx> {
+ decl_ty: Ty<'tcx>,
+ revealed_ty: Ty<'tcx>,
+}
+
+#[derive(Copy, Clone)]
+pub struct UnsafetyState {
+ pub def: hir::HirId,
+ pub unsafety: hir::Unsafety,
+ from_fn: bool,
+}
+
+impl UnsafetyState {
+ pub fn function(unsafety: hir::Unsafety, def: hir::HirId) -> UnsafetyState {
+ UnsafetyState { def, unsafety, from_fn: true }
+ }
+
+ pub fn recurse(self, blk: &hir::Block<'_>) -> UnsafetyState {
+ use hir::BlockCheckMode;
+ match self.unsafety {
+ // If this is unsafe, then if the outer function was already marked as
+ // unsafe we shouldn't attribute the unsafety to the block. This
+ // way the block can be warned about instead of ignoring this
+ // extraneous block (functions are never warned about).
+ hir::Unsafety::Unsafe if self.from_fn => self,
+
+ unsafety => {
+ let (unsafety, def) = match blk.rules {
+ BlockCheckMode::UnsafeBlock(..) => (hir::Unsafety::Unsafe, blk.hir_id),
+ BlockCheckMode::DefaultBlock => (unsafety, self.def),
+ };
+ UnsafetyState { def, unsafety, from_fn: false }
+ }
+ }
+ }
+}
+
+/// If this `DefId` is a "primary tables entry", returns
+/// `Some((body_id, body_ty, fn_sig))`. Otherwise, returns `None`.
+///
+/// If this function returns `Some`, then `typeck_results(def_id)` will
+/// succeed; if it returns `None`, then `typeck_results(def_id)` may or
+/// may not succeed. In some cases where this function returns `None`
+/// (notably closures), `typeck_results(def_id)` would wind up
+/// redirecting to the owning function.
+fn primary_body_of(
+ tcx: TyCtxt<'_>,
+ id: hir::HirId,
+) -> Option<(hir::BodyId, Option<&hir::Ty<'_>>, Option<&hir::FnSig<'_>>)> {
+ match tcx.hir().get(id) {
+ Node::Item(item) => match item.kind {
+ hir::ItemKind::Const(ty, body) | hir::ItemKind::Static(ty, _, body) => {
+ Some((body, Some(ty), None))
+ }
+ hir::ItemKind::Fn(ref sig, .., body) => Some((body, None, Some(sig))),
+ _ => None,
+ },
+ Node::TraitItem(item) => match item.kind {
+ hir::TraitItemKind::Const(ty, Some(body)) => Some((body, Some(ty), None)),
+ hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Provided(body)) => {
+ Some((body, None, Some(sig)))
+ }
+ _ => None,
+ },
+ Node::ImplItem(item) => match item.kind {
+ hir::ImplItemKind::Const(ty, body) => Some((body, Some(ty), None)),
+ hir::ImplItemKind::Fn(ref sig, body) => Some((body, None, Some(sig))),
+ _ => None,
+ },
+ Node::AnonConst(constant) => Some((constant.body, None, None)),
+ _ => None,
+ }
+}
+
+fn has_typeck_results(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ // Closures' typeck results come from their outermost function,
+ // as they are part of the same "inference environment".
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id);
+ if typeck_root_def_id != def_id {
+ return tcx.has_typeck_results(typeck_root_def_id);
+ }
+
+ if let Some(def_id) = def_id.as_local() {
+ let id = tcx.hir().local_def_id_to_hir_id(def_id);
+ primary_body_of(tcx, id).is_some()
+ } else {
+ false
+ }
+}
+
+fn used_trait_imports(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &UnordSet<LocalDefId> {
+ &*tcx.typeck(def_id).used_trait_imports
+}
+
+fn typeck_item_bodies(tcx: TyCtxt<'_>, (): ()) {
+ tcx.hir().par_body_owners(|body_owner_def_id| tcx.ensure().typeck(body_owner_def_id));
+}
+
+fn typeck_const_arg<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (did, param_did): (LocalDefId, DefId),
+) -> &ty::TypeckResults<'tcx> {
+ let fallback = move || tcx.type_of(param_did);
+ typeck_with_fallback(tcx, did, fallback)
+}
+
+fn typeck<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &ty::TypeckResults<'tcx> {
+ if let Some(param_did) = tcx.opt_const_param_of(def_id) {
+ tcx.typeck_const_arg((def_id, param_did))
+ } else {
+ let fallback = move || tcx.type_of(def_id.to_def_id());
+ typeck_with_fallback(tcx, def_id, fallback)
+ }
+}
+
+/// Used only to get `TypeckResults` for type inference during error recovery.
+/// Currently only used for type inference of `static`s and `const`s to avoid type cycle errors.
+fn diagnostic_only_typeck<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &ty::TypeckResults<'tcx> {
+ let fallback = move || {
+ let span = tcx.hir().span(tcx.hir().local_def_id_to_hir_id(def_id));
+ tcx.ty_error_with_message(span, "diagnostic only typeck table used")
+ };
+ typeck_with_fallback(tcx, def_id, fallback)
+}
+
+fn typeck_with_fallback<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ fallback: impl Fn() -> Ty<'tcx> + 'tcx,
+) -> &'tcx ty::TypeckResults<'tcx> {
+ // Closures' typeck results come from their outermost function,
+ // as they are part of the same "inference environment".
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id()).expect_local();
+ if typeck_root_def_id != def_id {
+ return tcx.typeck(typeck_root_def_id);
+ }
+
+ let id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let span = tcx.hir().span(id);
+
+ // Figure out what primary body this item has.
+ let (body_id, body_ty, fn_sig) = primary_body_of(tcx, id).unwrap_or_else(|| {
+ span_bug!(span, "can't type-check body of {:?}", def_id);
+ });
+ let body = tcx.hir().body(body_id);
+
+ let typeck_results = Inherited::build(tcx, def_id).enter(|inh| {
+ let param_env = tcx.param_env(def_id);
+ let mut fcx = if let Some(hir::FnSig { header, decl, .. }) = fn_sig {
+ let fn_sig = if rustc_hir_analysis::collect::get_infer_ret_ty(&decl.output).is_some() {
+ let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id);
+ <dyn AstConv<'_>>::ty_of_fn(&fcx, id, header.unsafety, header.abi, decl, None, None)
+ } else {
+ tcx.fn_sig(def_id)
+ };
+
+ check_abi(tcx, id, span, fn_sig.abi());
+
+ // Compute the function signature from point of view of inside the fn.
+ let fn_sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), fn_sig);
+ let fn_sig = inh.normalize_associated_types_in(
+ body.value.span,
+ body_id.hir_id,
+ param_env,
+ fn_sig,
+ );
+ check_fn(&inh, param_env, fn_sig, decl, id, body, None, true).0
+ } else {
+ let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id);
+ let expected_type = body_ty
+ .and_then(|ty| match ty.kind {
+ hir::TyKind::Infer => Some(<dyn AstConv<'_>>::ast_ty_to_ty(&fcx, ty)),
+ _ => None,
+ })
+ .unwrap_or_else(|| match tcx.hir().get(id) {
+ Node::AnonConst(_) => match tcx.hir().get(tcx.hir().get_parent_node(id)) {
+ Node::Expr(&hir::Expr {
+ kind: hir::ExprKind::ConstBlock(ref anon_const),
+ ..
+ }) if anon_const.hir_id == id => fcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span,
+ }),
+ Node::Ty(&hir::Ty {
+ kind: hir::TyKind::Typeof(ref anon_const), ..
+ }) if anon_const.hir_id == id => fcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span,
+ }),
+ Node::Expr(&hir::Expr { kind: hir::ExprKind::InlineAsm(asm), .. })
+ | Node::Item(&hir::Item { kind: hir::ItemKind::GlobalAsm(asm), .. }) => {
+ let operand_ty = asm
+ .operands
+ .iter()
+ .filter_map(|(op, _op_sp)| match op {
+ hir::InlineAsmOperand::Const { anon_const }
+ if anon_const.hir_id == id =>
+ {
+ // Inline assembly constants must be integers.
+ Some(fcx.next_int_var())
+ }
+ hir::InlineAsmOperand::SymFn { anon_const }
+ if anon_const.hir_id == id =>
+ {
+ Some(fcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span,
+ }))
+ }
+ _ => None,
+ })
+ .next();
+ operand_ty.unwrap_or_else(fallback)
+ }
+ _ => fallback(),
+ },
+ _ => fallback(),
+ });
+
+ let expected_type = fcx.normalize_associated_types_in(body.value.span, expected_type);
+ fcx.require_type_is_sized(expected_type, body.value.span, traits::ConstSized);
+
+ // Gather locals in statics (because of block expressions).
+ GatherLocalsVisitor::new(&fcx).visit_body(body);
+
+ fcx.check_expr_coercable_to_type(&body.value, expected_type, None);
+
+ fcx.write_ty(id, expected_type);
+
+ fcx
+ };
+
+ let fallback_has_occurred = fcx.type_inference_fallback();
+
+ // Even though coercion casts provide type hints, we check casts after fallback for
+ // backwards compatibility. This makes fallback a stronger type hint than a cast coercion.
+ fcx.check_casts();
+ fcx.select_obligations_where_possible(fallback_has_occurred, |_| {});
+
+ // Closure and generator analysis may run after fallback
+ // because they don't constrain other type variables.
+ // Closure analysis only runs on closures. Therefore they only need to fulfill non-const predicates (as of now)
+ let prev_constness = fcx.param_env.constness();
+ fcx.param_env = fcx.param_env.without_const();
+ fcx.closure_analyze(body);
+ fcx.param_env = fcx.param_env.with_constness(prev_constness);
+ assert!(fcx.deferred_call_resolutions.borrow().is_empty());
+ // Before the generator analysis, temporary scopes shall be marked to provide more
+ // precise information on types to be captured.
+ fcx.resolve_rvalue_scopes(def_id.to_def_id());
+ fcx.resolve_generator_interiors(def_id.to_def_id());
+
+ for (ty, span, code) in fcx.deferred_sized_obligations.borrow_mut().drain(..) {
+ let ty = fcx.normalize_ty(span, ty);
+ fcx.require_type_is_sized(ty, span, code);
+ }
+
+ fcx.select_all_obligations_or_error();
+
+ if !fcx.infcx.is_tainted_by_errors() {
+ fcx.check_transmutes();
+ }
+
+ fcx.check_asms();
+
+ fcx.infcx.skip_region_resolution();
+
+ fcx.resolve_type_vars_in_body(body)
+ });
+
+ // Consistency check our TypeckResults instance can hold all ItemLocalIds
+ // it will need to hold.
+ assert_eq!(typeck_results.hir_owner, id.owner);
+
+ typeck_results
+}
+
+/// When `check_fn` is invoked on a generator (i.e., a body that
+/// includes yield), it returns some information about the yield
+/// points.
+struct GeneratorTypes<'tcx> {
+ /// Type of generator argument / values returned by `yield`.
+ resume_ty: Ty<'tcx>,
+
+ /// Type of value that is yielded.
+ yield_ty: Ty<'tcx>,
+
+ /// Types that are captured (see `GeneratorInterior` for more).
+ interior: Ty<'tcx>,
+
+ /// Indicates if the generator is movable or static (immovable).
+ movability: hir::Movability,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Needs {
+ MutPlace,
+ None,
+}
+
+impl Needs {
+ fn maybe_mut_place(m: hir::Mutability) -> Self {
+ match m {
+ hir::Mutability::Mut => Needs::MutPlace,
+ hir::Mutability::Not => Needs::None,
+ }
+ }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub enum PlaceOp {
+ Deref,
+ Index,
+}
+
+pub struct BreakableCtxt<'tcx> {
+ may_break: bool,
+
+ // this is `None` for loops where break with a value is illegal,
+ // such as `while`, `for`, and `while let`
+ coerce: Option<DynamicCoerceMany<'tcx>>,
+}
+
+pub struct EnclosingBreakables<'tcx> {
+ stack: Vec<BreakableCtxt<'tcx>>,
+ by_id: HirIdMap<usize>,
+}
+
+impl<'tcx> EnclosingBreakables<'tcx> {
+ fn find_breakable(&mut self, target_id: hir::HirId) -> &mut BreakableCtxt<'tcx> {
+ self.opt_find_breakable(target_id).unwrap_or_else(|| {
+ bug!("could not find enclosing breakable with id {}", target_id);
+ })
+ }
+
+ fn opt_find_breakable(&mut self, target_id: hir::HirId) -> Option<&mut BreakableCtxt<'tcx>> {
+ match self.by_id.get(&target_id) {
+ Some(ix) => Some(&mut self.stack[*ix]),
+ None => None,
+ }
+ }
+}
+
+fn report_unexpected_variant_res(tcx: TyCtxt<'_>, res: Res, qpath: &hir::QPath<'_>, span: Span) {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0533,
+ "expected unit struct, unit variant or constant, found {} `{}`",
+ res.descr(),
+ rustc_hir_pretty::qpath_to_string(qpath),
+ )
+ .emit();
+}
+
+/// Controls whether the arguments are tupled. This is used for the call
+/// operator.
+///
+/// Tupling means that all call-side arguments are packed into a tuple and
+/// passed as a single parameter. For example, if tupling is enabled, this
+/// function:
+/// ```
+/// fn f(x: (isize, isize)) {}
+/// ```
+/// Can be called as:
+/// ```ignore UNSOLVED (can this be done in user code?)
+/// # fn f(x: (isize, isize)) {}
+/// f(1, 2);
+/// ```
+/// Instead of:
+/// ```
+/// # fn f(x: (isize, isize)) {}
+/// f((1, 2));
+/// ```
+#[derive(Clone, Eq, PartialEq)]
+enum TupleArgumentsFlag {
+ DontTupleArguments,
+ TupleArguments,
+}
+
+fn fatally_break_rust(sess: &Session) {
+ let handler = sess.diagnostic();
+ handler.span_bug_no_panic(
+ MultiSpan::new(),
+ "It looks like you're trying to break rust; would you like some ICE?",
+ );
+ handler.note_without_error("the compiler expectedly panicked. this is a feature.");
+ handler.note_without_error(
+ "we would appreciate a joke overview: \
+ https://github.com/rust-lang/rust/issues/43162#issuecomment-320764675",
+ );
+ handler.note_without_error(&format!(
+ "rustc {} running on {}",
+ option_env!("CFG_VERSION").unwrap_or("unknown_version"),
+ config::host_triple(),
+ ));
+}
+
+fn has_expected_num_generic_args<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_did: Option<DefId>,
+ expected: usize,
+) -> bool {
+ trait_did.map_or(true, |trait_did| {
+ let generics = tcx.generics_of(trait_did);
+ generics.count() == expected + if generics.has_self { 1 } else { 0 }
+ })
+}
+
+pub fn provide(providers: &mut Providers) {
+ method::provide(providers);
+ *providers = Providers {
+ typeck_item_bodies,
+ typeck_const_arg,
+ typeck,
+ diagnostic_only_typeck,
+ has_typeck_results,
+ used_trait_imports,
+ ..*providers
+ };
+}
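A rough user-level illustration of the attribution rule in `UnsafetyState::recurse`: the body of an `unsafe fn` is already an unsafe context, so a redundant `unsafe` block inside it is what gets warned about, rather than the function itself.

```rust
// An `unsafe fn` body is already unsafe, so the inner block below is
// reported by the `unused_unsafe` lint as unnecessary.
unsafe fn read(p: *const i32) -> i32 {
    unsafe { *p } // warning: unnecessary `unsafe` block
}

fn main() {
    let x = 7;
    // Calling an `unsafe fn` still requires an unsafe context at the call site.
    let y = unsafe { read(&x) };
    assert_eq!(y, 7);
}
```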
diff --git a/compiler/rustc_hir_typeck/src/mem_categorization.rs b/compiler/rustc_hir_typeck/src/mem_categorization.rs
new file mode 100644
index 000000000..362f1c343
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/mem_categorization.rs
@@ -0,0 +1,786 @@
+//! # Categorization
+//!
+//! The job of the categorization module is to analyze an expression to
+//! determine what kind of memory is used in evaluating it (for example,
+//! where dereferences occur and what kind of pointer is dereferenced;
+//! whether the memory is mutable, etc.).
+//!
+//! Categorization effectively transforms all of our expressions into
+//! expressions of the following forms (the actual enum has many more
+//! possibilities, naturally, but they are all variants of these base
+//! forms):
+//! ```ignore (not-rust)
+//! E = rvalue // some computed rvalue
+//! | x // address of a local variable or argument
+//! | *E // deref of a ptr
+//! | E.comp // access to an interior component
+//! ```
+//! Imagine a routine ToAddr(Expr) that evaluates an expression and returns an
+//! address where the result is to be found. If Expr is a place, then this
+//! is the address of the place. If `Expr` is an rvalue, this is the address of
+//! some temporary spot in memory where the result is stored.
+//!
+//! Now, `cat_expr()` classifies the expression `Expr` and the address `A = ToAddr(Expr)`
+//! as follows:
+//!
+//! - `cat`: what kind of expression was this? This is a subset of the
+//! full expression forms which only includes those that we care about
+//! for the purpose of the analysis.
+//! - `mutbl`: mutability of the address `A`.
+//! - `ty`: the type of data found at the address `A`.
+//!
+//! The resulting categorization tree differs somewhat from the expressions
+//! themselves. For example, auto-derefs are explicit. Also, an index `a[b]` is
+//! decomposed into two operations: a dereference to reach the array data and
+//! then an index to jump forward to the relevant item.
+//!
+//! ## By-reference upvars
+//!
+//! One part of the codegen which may be non-obvious is that we translate
+//! closure upvars into the dereference of a borrowed pointer; this more closely
+//! resembles the runtime codegen. So, for example, if we had:
+//!
+//! let mut x = 3;
+//! let y = 5;
+//! let inc = || x += y;
+//!
+//! Then when we categorize `x` (*within* the closure) we would yield a
+//! result of `*x'`, effectively, where `x'` is a `Categorization::Upvar` reference
+//! tied to `x`. The type of `x'` will be a borrowed pointer.
+
+use rustc_middle::hir::place::*;
+use rustc_middle::ty::adjustment;
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Res};
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::pat_util::EnumerateAndAdjustIterator;
+use rustc_hir::PatKind;
+use rustc_index::vec::Idx;
+use rustc_infer::infer::InferCtxt;
+use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
+use rustc_trait_selection::infer::InferCtxtExt;
+
+pub(crate) trait HirNode {
+ fn hir_id(&self) -> hir::HirId;
+ fn span(&self) -> Span;
+}
+
+impl HirNode for hir::Expr<'_> {
+ fn hir_id(&self) -> hir::HirId {
+ self.hir_id
+ }
+ fn span(&self) -> Span {
+ self.span
+ }
+}
+
+impl HirNode for hir::Pat<'_> {
+ fn hir_id(&self) -> hir::HirId {
+ self.hir_id
+ }
+ fn span(&self) -> Span {
+ self.span
+ }
+}
+
+#[derive(Clone)]
+pub(crate) struct MemCategorizationContext<'a, 'tcx> {
+ pub(crate) typeck_results: &'a ty::TypeckResults<'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body_owner: LocalDefId,
+ upvars: Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>>,
+}
+
+pub(crate) type McResult<T> = Result<T, ()>;
+
+impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
+ /// Creates a `MemCategorizationContext`.
+ pub(crate) fn new(
+ infcx: &'a InferCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body_owner: LocalDefId,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> MemCategorizationContext<'a, 'tcx> {
+ MemCategorizationContext {
+ typeck_results,
+ infcx,
+ param_env,
+ body_owner,
+ upvars: infcx.tcx.upvars_mentioned(body_owner),
+ }
+ }
+
+ pub(crate) fn tcx(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ pub(crate) fn type_is_copy_modulo_regions(&self, ty: Ty<'tcx>, span: Span) -> bool {
+ self.infcx.type_is_copy_modulo_regions(self.param_env, ty, span)
+ }
+
+ fn resolve_vars_if_possible<T>(&self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.infcx.resolve_vars_if_possible(value)
+ }
+
+ fn is_tainted_by_errors(&self) -> bool {
+ self.infcx.is_tainted_by_errors()
+ }
+
+ fn resolve_type_vars_or_error(
+ &self,
+ id: hir::HirId,
+ ty: Option<Ty<'tcx>>,
+ ) -> McResult<Ty<'tcx>> {
+ match ty {
+ Some(ty) => {
+ let ty = self.resolve_vars_if_possible(ty);
+ if ty.references_error() || ty.is_ty_var() {
+ debug!("resolve_type_vars_or_error: error from {:?}", ty);
+ Err(())
+ } else {
+ Ok(ty)
+ }
+ }
+ // FIXME
+ None if self.is_tainted_by_errors() => Err(()),
+ None => {
+ bug!(
+ "no type for node {}: {} in mem_categorization",
+ id,
+ self.tcx().hir().node_to_string(id)
+ );
+ }
+ }
+ }
+
+ pub(crate) fn node_ty(&self, hir_id: hir::HirId) -> McResult<Ty<'tcx>> {
+ self.resolve_type_vars_or_error(hir_id, self.typeck_results.node_type_opt(hir_id))
+ }
+
+ fn expr_ty(&self, expr: &hir::Expr<'_>) -> McResult<Ty<'tcx>> {
+ self.resolve_type_vars_or_error(expr.hir_id, self.typeck_results.expr_ty_opt(expr))
+ }
+
+ pub(crate) fn expr_ty_adjusted(&self, expr: &hir::Expr<'_>) -> McResult<Ty<'tcx>> {
+ self.resolve_type_vars_or_error(expr.hir_id, self.typeck_results.expr_ty_adjusted_opt(expr))
+ }
+
+ /// Returns the type of value that this pattern matches against.
+ /// Some non-obvious cases:
+ ///
+ /// - a `ref x` binding matches against a value of type `T` and gives
+ /// `x` the type `&T`; we return `T`.
+ /// - a pattern with implicit derefs (thanks to default binding
+ /// modes #42640) may look like `Some(x)` but in fact have
+ /// implicit deref patterns attached (e.g., it is really
+ /// `&Some(x)`). In that case, we return the "outermost" type
+ /// (e.g., `&Option<T>`).
+ pub(crate) fn pat_ty_adjusted(&self, pat: &hir::Pat<'_>) -> McResult<Ty<'tcx>> {
+ // Check for implicit `&` types wrapping the pattern; note
+ // that these are never attached to binding patterns, so
+ // actually this is somewhat "disjoint" from the code below
+ // that aims to account for `ref x`.
+ if let Some(vec) = self.typeck_results.pat_adjustments().get(pat.hir_id) {
+ if let Some(first_ty) = vec.first() {
+ debug!("pat_ty(pat={:?}) found adjusted ty `{:?}`", pat, first_ty);
+ return Ok(*first_ty);
+ }
+ }
+
+ self.pat_ty_unadjusted(pat)
+ }
+
+ /// Like `pat_ty`, but ignores implicit `&` patterns.
+ fn pat_ty_unadjusted(&self, pat: &hir::Pat<'_>) -> McResult<Ty<'tcx>> {
+ let base_ty = self.node_ty(pat.hir_id)?;
+ debug!("pat_ty(pat={:?}) base_ty={:?}", pat, base_ty);
+
+ // This code detects whether we are looking at a `ref x`,
+ // and if so, figures out what the type *being borrowed* is.
+ let ret_ty = match pat.kind {
+ PatKind::Binding(..) => {
+ let bm = *self
+ .typeck_results
+ .pat_binding_modes()
+ .get(pat.hir_id)
+ .expect("missing binding mode");
+
+ if let ty::BindByReference(_) = bm {
+ // a bind-by-ref means that the base_ty will be the type of the ident itself,
+ // but what we want here is the type of the underlying value being borrowed.
+ // So peel off one-level, turning the &T into T.
+ match base_ty.builtin_deref(false) {
+ Some(t) => t.ty,
+ None => {
+ debug!("By-ref binding of non-derefable type {:?}", base_ty);
+ return Err(());
+ }
+ }
+ } else {
+ base_ty
+ }
+ }
+ _ => base_ty,
+ };
+ debug!("pat_ty(pat={:?}) ret_ty={:?}", pat, ret_ty);
+
+ Ok(ret_ty)
+ }
+
+ pub(crate) fn cat_expr(&self, expr: &hir::Expr<'_>) -> McResult<PlaceWithHirId<'tcx>> {
+ // This recursion helper avoids going through *too many*
+ // adjustments, since *only* non-overloaded deref recurses.
+ fn helper<'a, 'tcx>(
+ mc: &MemCategorizationContext<'a, 'tcx>,
+ expr: &hir::Expr<'_>,
+ adjustments: &[adjustment::Adjustment<'tcx>],
+ ) -> McResult<PlaceWithHirId<'tcx>> {
+ match adjustments.split_last() {
+ None => mc.cat_expr_unadjusted(expr),
+ Some((adjustment, previous)) => {
+ mc.cat_expr_adjusted_with(expr, || helper(mc, expr, previous), adjustment)
+ }
+ }
+ }
+
+ helper(self, expr, self.typeck_results.expr_adjustments(expr))
+ }
+
+ pub(crate) fn cat_expr_adjusted(
+ &self,
+ expr: &hir::Expr<'_>,
+ previous: PlaceWithHirId<'tcx>,
+ adjustment: &adjustment::Adjustment<'tcx>,
+ ) -> McResult<PlaceWithHirId<'tcx>> {
+ self.cat_expr_adjusted_with(expr, || Ok(previous), adjustment)
+ }
+
+ #[instrument(level = "debug", skip(self, previous))]
+ fn cat_expr_adjusted_with<F>(
+ &self,
+ expr: &hir::Expr<'_>,
+ previous: F,
+ adjustment: &adjustment::Adjustment<'tcx>,
+ ) -> McResult<PlaceWithHirId<'tcx>>
+ where
+ F: FnOnce() -> McResult<PlaceWithHirId<'tcx>>,
+ {
+ let target = self.resolve_vars_if_possible(adjustment.target);
+ match adjustment.kind {
+ adjustment::Adjust::Deref(overloaded) => {
+ // Equivalent to *expr or something similar.
+ let base = if let Some(deref) = overloaded {
+ let ref_ty = self
+ .tcx()
+ .mk_ref(deref.region, ty::TypeAndMut { ty: target, mutbl: deref.mutbl });
+ self.cat_rvalue(expr.hir_id, expr.span, ref_ty)
+ } else {
+ previous()?
+ };
+ self.cat_deref(expr, base)
+ }
+
+ adjustment::Adjust::NeverToAny
+ | adjustment::Adjust::Pointer(_)
+ | adjustment::Adjust::Borrow(_)
+ | adjustment::Adjust::DynStar => {
+ // Result is an rvalue.
+ Ok(self.cat_rvalue(expr.hir_id, expr.span, target))
+ }
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ pub(crate) fn cat_expr_unadjusted(
+ &self,
+ expr: &hir::Expr<'_>,
+ ) -> McResult<PlaceWithHirId<'tcx>> {
+ debug!("cat_expr: id={} expr={:?}", expr.hir_id, expr);
+
+ let expr_ty = self.expr_ty(expr)?;
+ match expr.kind {
+ hir::ExprKind::Unary(hir::UnOp::Deref, ref e_base) => {
+ if self.typeck_results.is_method_call(expr) {
+ self.cat_overloaded_place(expr, e_base)
+ } else {
+ let base = self.cat_expr(e_base)?;
+ self.cat_deref(expr, base)
+ }
+ }
+
+ hir::ExprKind::Field(ref base, _) => {
+ let base = self.cat_expr(base)?;
+ debug!("cat_expr(cat_field): id={} expr={:?} base={:?}", expr.hir_id, expr, base);
+
+ let field_idx = self
+ .typeck_results
+ .field_indices()
+ .get(expr.hir_id)
+ .cloned()
+ .expect("Field index not found");
+
+ Ok(self.cat_projection(
+ expr,
+ base,
+ expr_ty,
+ ProjectionKind::Field(field_idx as u32, VariantIdx::new(0)),
+ ))
+ }
+
+ hir::ExprKind::Index(ref base, _) => {
+ if self.typeck_results.is_method_call(expr) {
+ // If this is an index implemented by a method call, then it
+ // will include an implicit deref of the result.
+ // The call to index() returns a `&T` value, which
+ // is an rvalue. That is what we will be
+ // dereferencing.
+ self.cat_overloaded_place(expr, base)
+ } else {
+ let base = self.cat_expr(base)?;
+ Ok(self.cat_projection(expr, base, expr_ty, ProjectionKind::Index))
+ }
+ }
+
+ hir::ExprKind::Path(ref qpath) => {
+ let res = self.typeck_results.qpath_res(qpath, expr.hir_id);
+ self.cat_res(expr.hir_id, expr.span, expr_ty, res)
+ }
+
+ hir::ExprKind::Type(ref e, _) => self.cat_expr(e),
+
+ hir::ExprKind::AddrOf(..)
+ | hir::ExprKind::Call(..)
+ | hir::ExprKind::Assign(..)
+ | hir::ExprKind::AssignOp(..)
+ | hir::ExprKind::Closure { .. }
+ | hir::ExprKind::Ret(..)
+ | hir::ExprKind::Unary(..)
+ | hir::ExprKind::Yield(..)
+ | hir::ExprKind::MethodCall(..)
+ | hir::ExprKind::Cast(..)
+ | hir::ExprKind::DropTemps(..)
+ | hir::ExprKind::Array(..)
+ | hir::ExprKind::If(..)
+ | hir::ExprKind::Tup(..)
+ | hir::ExprKind::Binary(..)
+ | hir::ExprKind::Block(..)
+ | hir::ExprKind::Let(..)
+ | hir::ExprKind::Loop(..)
+ | hir::ExprKind::Match(..)
+ | hir::ExprKind::Lit(..)
+ | hir::ExprKind::ConstBlock(..)
+ | hir::ExprKind::Break(..)
+ | hir::ExprKind::Continue(..)
+ | hir::ExprKind::Struct(..)
+ | hir::ExprKind::Repeat(..)
+ | hir::ExprKind::InlineAsm(..)
+ | hir::ExprKind::Box(..)
+ | hir::ExprKind::Err => Ok(self.cat_rvalue(expr.hir_id, expr.span, expr_ty)),
+ }
+ }
+
+ #[instrument(level = "debug", skip(self, span))]
+ pub(crate) fn cat_res(
+ &self,
+ hir_id: hir::HirId,
+ span: Span,
+ expr_ty: Ty<'tcx>,
+ res: Res,
+ ) -> McResult<PlaceWithHirId<'tcx>> {
+ match res {
+ Res::Def(
+ DefKind::Ctor(..)
+ | DefKind::Const
+ | DefKind::ConstParam
+ | DefKind::AssocConst
+ | DefKind::Fn
+ | DefKind::AssocFn,
+ _,
+ )
+ | Res::SelfCtor(..) => Ok(self.cat_rvalue(hir_id, span, expr_ty)),
+
+ Res::Def(DefKind::Static(_), _) => {
+ Ok(PlaceWithHirId::new(hir_id, expr_ty, PlaceBase::StaticItem, Vec::new()))
+ }
+
+ Res::Local(var_id) => {
+ if self.upvars.map_or(false, |upvars| upvars.contains_key(&var_id)) {
+ self.cat_upvar(hir_id, var_id)
+ } else {
+ Ok(PlaceWithHirId::new(hir_id, expr_ty, PlaceBase::Local(var_id), Vec::new()))
+ }
+ }
+
+ def => span_bug!(span, "unexpected definition in memory categorization: {:?}", def),
+ }
+ }
+
+ /// Categorize an upvar.
+ ///
+ /// Note: the actual upvar access contains invisible derefs of closure
+ /// environment and upvar reference as appropriate. Only regionck cares
+ /// about these dereferences, so we let it compute them as needed.
+ fn cat_upvar(&self, hir_id: hir::HirId, var_id: hir::HirId) -> McResult<PlaceWithHirId<'tcx>> {
+ let closure_expr_def_id = self.body_owner;
+
+ let upvar_id = ty::UpvarId {
+ var_path: ty::UpvarPath { hir_id: var_id },
+ closure_expr_id: closure_expr_def_id,
+ };
+ let var_ty = self.node_ty(var_id)?;
+
+ let ret = PlaceWithHirId::new(hir_id, var_ty, PlaceBase::Upvar(upvar_id), Vec::new());
+
+ debug!("cat_upvar ret={:?}", ret);
+ Ok(ret)
+ }
+
+ pub(crate) fn cat_rvalue(
+ &self,
+ hir_id: hir::HirId,
+ span: Span,
+ expr_ty: Ty<'tcx>,
+ ) -> PlaceWithHirId<'tcx> {
+ debug!("cat_rvalue hir_id={:?}, expr_ty={:?}, span={:?}", hir_id, expr_ty, span);
+ let ret = PlaceWithHirId::new(hir_id, expr_ty, PlaceBase::Rvalue, Vec::new());
+ debug!("cat_rvalue ret={:?}", ret);
+ ret
+ }
+
+ pub(crate) fn cat_projection<N: HirNode>(
+ &self,
+ node: &N,
+ base_place: PlaceWithHirId<'tcx>,
+ ty: Ty<'tcx>,
+ kind: ProjectionKind,
+ ) -> PlaceWithHirId<'tcx> {
+ let mut projections = base_place.place.projections;
+ projections.push(Projection { kind, ty });
+ let ret = PlaceWithHirId::new(
+ node.hir_id(),
+ base_place.place.base_ty,
+ base_place.place.base,
+ projections,
+ );
+ debug!("cat_field ret {:?}", ret);
+ ret
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn cat_overloaded_place(
+ &self,
+ expr: &hir::Expr<'_>,
+ base: &hir::Expr<'_>,
+ ) -> McResult<PlaceWithHirId<'tcx>> {
+ // Reconstruct the output assuming it's a reference with the
+ // same region and mutability as the receiver. This holds for
+ // `Deref(Mut)::deref(_mut)` and `Index(Mut)::index(_mut)`.
+ let place_ty = self.expr_ty(expr)?;
+ let base_ty = self.expr_ty_adjusted(base)?;
+
+ let ty::Ref(region, _, mutbl) = *base_ty.kind() else {
+ span_bug!(expr.span, "cat_overloaded_place: base is not a reference");
+ };
+ let ref_ty = self.tcx().mk_ref(region, ty::TypeAndMut { ty: place_ty, mutbl });
+
+ let base = self.cat_rvalue(expr.hir_id, expr.span, ref_ty);
+ self.cat_deref(expr, base)
+ }
+
+ #[instrument(level = "debug", skip(self, node))]
+ fn cat_deref(
+ &self,
+ node: &impl HirNode,
+ base_place: PlaceWithHirId<'tcx>,
+ ) -> McResult<PlaceWithHirId<'tcx>> {
+ let base_curr_ty = base_place.place.ty();
+ let deref_ty = match base_curr_ty.builtin_deref(true) {
+ Some(mt) => mt.ty,
+ None => {
+ debug!("explicit deref of non-derefable type: {:?}", base_curr_ty);
+ return Err(());
+ }
+ };
+ let mut projections = base_place.place.projections;
+ projections.push(Projection { kind: ProjectionKind::Deref, ty: deref_ty });
+
+ let ret = PlaceWithHirId::new(
+ node.hir_id(),
+ base_place.place.base_ty,
+ base_place.place.base,
+ projections,
+ );
+ debug!("cat_deref ret {:?}", ret);
+ Ok(ret)
+ }
+
+ pub(crate) fn cat_pattern<F>(
+ &self,
+ place: PlaceWithHirId<'tcx>,
+ pat: &hir::Pat<'_>,
+ mut op: F,
+ ) -> McResult<()>
+ where
+ F: FnMut(&PlaceWithHirId<'tcx>, &hir::Pat<'_>),
+ {
+ self.cat_pattern_(place, pat, &mut op)
+ }
+
+ /// Returns the variant index for an ADT used within a Struct or TupleStruct pattern
+ /// Here `pat_hir_id` is the HirId of the pattern itself.
+ fn variant_index_for_adt(
+ &self,
+ qpath: &hir::QPath<'_>,
+ pat_hir_id: hir::HirId,
+ span: Span,
+ ) -> McResult<VariantIdx> {
+ let res = self.typeck_results.qpath_res(qpath, pat_hir_id);
+ let ty = self.typeck_results.node_type(pat_hir_id);
+ let ty::Adt(adt_def, _) = ty.kind() else {
+ self.tcx()
+ .sess
+ .delay_span_bug(span, "struct or tuple struct pattern not applied to an ADT");
+ return Err(());
+ };
+
+ match res {
+ Res::Def(DefKind::Variant, variant_id) => Ok(adt_def.variant_index_with_id(variant_id)),
+ Res::Def(DefKind::Ctor(CtorOf::Variant, ..), variant_ctor_id) => {
+ Ok(adt_def.variant_index_with_ctor_id(variant_ctor_id))
+ }
+ Res::Def(DefKind::Ctor(CtorOf::Struct, ..), _)
+ | Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
+ | Res::SelfCtor(..)
+ | Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. } => {
+ // Structs and Unions only have one variant.
+ Ok(VariantIdx::new(0))
+ }
+ _ => bug!("expected ADT path, found={:?}", res),
+ }
+ }
+
+ /// Returns the total number of fields in an ADT variant used within a pattern.
+ /// Here `pat_hir_id` is the HirId of the pattern itself.
+ fn total_fields_in_adt_variant(
+ &self,
+ pat_hir_id: hir::HirId,
+ variant_index: VariantIdx,
+ span: Span,
+ ) -> McResult<usize> {
+ let ty = self.typeck_results.node_type(pat_hir_id);
+ match ty.kind() {
+ ty::Adt(adt_def, _) => Ok(adt_def.variant(variant_index).fields.len()),
+ _ => {
+ self.tcx()
+ .sess
+ .delay_span_bug(span, "struct or tuple struct pattern not applied to an ADT");
+ Err(())
+ }
+ }
+ }
+
+ /// Returns the total number of fields in a tuple used within a Tuple pattern.
+ /// Here `pat_hir_id` is the HirId of the pattern itself.
+ fn total_fields_in_tuple(&self, pat_hir_id: hir::HirId, span: Span) -> McResult<usize> {
+ let ty = self.typeck_results.node_type(pat_hir_id);
+ match ty.kind() {
+ ty::Tuple(substs) => Ok(substs.len()),
+ _ => {
+ self.tcx().sess.delay_span_bug(span, "tuple pattern not applied to a tuple");
+ Err(())
+ }
+ }
+ }
+
+ // FIXME(#19596) This is a workaround, but there should be a better way to do this
+ fn cat_pattern_<F>(
+ &self,
+ mut place_with_id: PlaceWithHirId<'tcx>,
+ pat: &hir::Pat<'_>,
+ op: &mut F,
+ ) -> McResult<()>
+ where
+ F: FnMut(&PlaceWithHirId<'tcx>, &hir::Pat<'_>),
+ {
+ // Here, `place` is the `PlaceWithHirId` being matched and pat is the pattern it
+ // is being matched against.
+ //
+ // In general, the way that this works is that we walk down the pattern,
+ // constructing a `PlaceWithHirId` that represents the path that will be taken
+ // to reach the value being matched.
+
+ debug!("cat_pattern(pat={:?}, place_with_id={:?})", pat, place_with_id);
+
+ // If (pattern) adjustments are active for this pattern, adjust the `PlaceWithHirId` correspondingly.
+ // `PlaceWithHirId`s are constructed differently from patterns. For example, in
+ //
+ // ```
+ // match foo {
+ // &&Some(x, ) => { ... },
+ // _ => { ... },
+ // }
+ // ```
+ //
+ // the pattern `&&Some(x,)` is represented as `Ref { Ref { TupleStruct }}`. To build the
+ // corresponding `PlaceWithHirId` we start with the `PlaceWithHirId` for `foo`, and then, by traversing the
+ // pattern, try to answer the question: given the address of `foo`, how is `x` reached?
+ //
+ // `&&Some(x,)` `place_foo`
+ // `&Some(x,)` `deref { place_foo}`
+ // `Some(x,)` `deref { deref { place_foo }}`
+ // `(x,)` `field0 { deref { deref { place_foo }}}` <- resulting place
+ //
+ // The above example has no adjustments. If the code were instead the (after adjustments,
+ // equivalent) version
+ //
+ // ```
+ // match foo {
+ // Some(x, ) => { ... },
+ // _ => { ... },
+ // }
+ // ```
+ //
+ // Then we see that to get the same result, we must start with
+ // `deref { deref { place_foo }}` instead of `place_foo` since the pattern is now `Some(x,)`
+ // and not `&&Some(x,)`, even though its assigned type is that of `&&Some(x,)`.
+ for _ in 0..self.typeck_results.pat_adjustments().get(pat.hir_id).map_or(0, |v| v.len()) {
+ debug!("cat_pattern: applying adjustment to place_with_id={:?}", place_with_id);
+ place_with_id = self.cat_deref(pat, place_with_id)?;
+ }
+ let place_with_id = place_with_id; // lose mutability
+ debug!("cat_pattern: applied adjustment derefs to get place_with_id={:?}", place_with_id);
+
+ // Invoke the callback, but only now, after the `place_with_id` has adjusted.
+ //
+ // To see that this makes sense, consider `match &Some(3) { Some(x) => { ... }}`. In that
+ // case, the initial `place_with_id` will be that for `&Some(3)` and the pattern is `Some(x)`. We
+ // don't want to call `op` with these incompatible values. As written, what happens instead
+ // is that `op` is called with the adjusted place (that for `*&Some(3)`) and the pattern
+ // `Some(x)` (which matches). Recursing once more, `*&Some(3)` and the pattern `Some(x)`
+ // result in the place `Downcast<Some>(*&Some(3)).0` associated to `x` and invoke `op` with
+ // that (where the `ref` on `x` is implied).
+ op(&place_with_id, pat);
+
+ match pat.kind {
+ PatKind::Tuple(subpats, dots_pos) => {
+ // (p1, ..., pN)
+ let total_fields = self.total_fields_in_tuple(pat.hir_id, pat.span)?;
+
+ for (i, subpat) in subpats.iter().enumerate_and_adjust(total_fields, dots_pos) {
+ let subpat_ty = self.pat_ty_adjusted(subpat)?;
+ let projection_kind = ProjectionKind::Field(i as u32, VariantIdx::new(0));
+ let sub_place =
+ self.cat_projection(pat, place_with_id.clone(), subpat_ty, projection_kind);
+ self.cat_pattern_(sub_place, subpat, op)?;
+ }
+ }
+
+ PatKind::TupleStruct(ref qpath, subpats, dots_pos) => {
+ // S(p1, ..., pN)
+ let variant_index = self.variant_index_for_adt(qpath, pat.hir_id, pat.span)?;
+ let total_fields =
+ self.total_fields_in_adt_variant(pat.hir_id, variant_index, pat.span)?;
+
+ for (i, subpat) in subpats.iter().enumerate_and_adjust(total_fields, dots_pos) {
+ let subpat_ty = self.pat_ty_adjusted(subpat)?;
+ let projection_kind = ProjectionKind::Field(i as u32, variant_index);
+ let sub_place =
+ self.cat_projection(pat, place_with_id.clone(), subpat_ty, projection_kind);
+ self.cat_pattern_(sub_place, subpat, op)?;
+ }
+ }
+
+ PatKind::Struct(ref qpath, field_pats, _) => {
+ // S { f1: p1, ..., fN: pN }
+
+ let variant_index = self.variant_index_for_adt(qpath, pat.hir_id, pat.span)?;
+
+ for fp in field_pats {
+ let field_ty = self.pat_ty_adjusted(fp.pat)?;
+ let field_index = self
+ .typeck_results
+ .field_indices()
+ .get(fp.hir_id)
+ .cloned()
+ .expect("no index for a field");
+
+ let field_place = self.cat_projection(
+ pat,
+ place_with_id.clone(),
+ field_ty,
+ ProjectionKind::Field(field_index as u32, variant_index),
+ );
+ self.cat_pattern_(field_place, fp.pat, op)?;
+ }
+ }
+
+ PatKind::Or(pats) => {
+ for pat in pats {
+ self.cat_pattern_(place_with_id.clone(), pat, op)?;
+ }
+ }
+
+ PatKind::Binding(.., Some(ref subpat)) => {
+ self.cat_pattern_(place_with_id, subpat, op)?;
+ }
+
+ PatKind::Box(ref subpat) | PatKind::Ref(ref subpat, _) => {
+ // box p1, &p1, &mut p1. we can ignore the mutability of
+ // PatKind::Ref since that information is already contained
+ // in the type.
+ let subplace = self.cat_deref(pat, place_with_id)?;
+ self.cat_pattern_(subplace, subpat, op)?;
+ }
+
+ PatKind::Slice(before, ref slice, after) => {
+ let Some(element_ty) = place_with_id.place.ty().builtin_index() else {
+ debug!("explicit index of non-indexable type {:?}", place_with_id);
+ return Err(());
+ };
+ let elt_place = self.cat_projection(
+ pat,
+ place_with_id.clone(),
+ element_ty,
+ ProjectionKind::Index,
+ );
+ for before_pat in before {
+ self.cat_pattern_(elt_place.clone(), before_pat, op)?;
+ }
+ if let Some(ref slice_pat) = *slice {
+ let slice_pat_ty = self.pat_ty_adjusted(slice_pat)?;
+ let slice_place = self.cat_projection(
+ pat,
+ place_with_id,
+ slice_pat_ty,
+ ProjectionKind::Subslice,
+ );
+ self.cat_pattern_(slice_place, slice_pat, op)?;
+ }
+ for after_pat in after {
+ self.cat_pattern_(elt_place.clone(), after_pat, op)?;
+ }
+ }
+
+ PatKind::Path(_)
+ | PatKind::Binding(.., None)
+ | PatKind::Lit(..)
+ | PatKind::Range(..)
+ | PatKind::Wild => {
+ // always ok
+ }
+ }
+
+ Ok(())
+ }
+}
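To make the categorization forms from this module's opening comment concrete, here is a plain-Rust sketch (user code, not compiler API) of how one place expression decomposes into those base forms.

```rust
struct Point {
    x: i32,
}

// The place for `v[i].x` breaks down into the forms from the module docs:
// a local, derefs of pointers, an index projection, and finally an access
// to an interior component.
fn get_x(v: &Vec<Point>, i: usize) -> i32 {
    //  v           local variable `v`
    //  *v          deref of the `&Vec<Point>` reference
    //  (*v)[i]     deref to reach the vec's data, then an index projection
    //  (*v)[i].x   access to the interior component `x`
    v[i].x
}

fn main() {
    let v = vec![Point { x: 3 }];
    assert_eq!(get_x(&v, 0), 3);
}
```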
diff --git a/compiler/rustc_hir_typeck/src/method/confirm.rs b/compiler/rustc_hir_typeck/src/method/confirm.rs
new file mode 100644
index 000000000..be4ea9986
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/method/confirm.rs
@@ -0,0 +1,594 @@
+use super::{probe, MethodCallee};
+
+use crate::{callee, FnCtxt};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::GenericArg;
+use rustc_hir_analysis::astconv::{AstConv, CreateSubstsForGenericArgsCtxt, IsMethodCall};
+use rustc_infer::infer::{self, InferOk};
+use rustc_middle::traits::{ObligationCauseCode, UnifyReceiverContext};
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCast};
+use rustc_middle::ty::adjustment::{AllowTwoPhase, AutoBorrow, AutoBorrowMutability};
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::subst::{self, SubstsRef};
+use rustc_middle::ty::{self, GenericParamDefKind, Ty};
+use rustc_span::Span;
+use rustc_trait_selection::traits;
+
+use std::iter;
+use std::ops::Deref;
+
+struct ConfirmContext<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ span: Span,
+ self_expr: &'tcx hir::Expr<'tcx>,
+ call_expr: &'tcx hir::Expr<'tcx>,
+}
+
+impl<'a, 'tcx> Deref for ConfirmContext<'a, 'tcx> {
+ type Target = FnCtxt<'a, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ self.fcx
+ }
+}
+
+#[derive(Debug)]
+pub struct ConfirmResult<'tcx> {
+ pub callee: MethodCallee<'tcx>,
+ pub illegal_sized_bound: Option<Span>,
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn confirm_method(
+ &self,
+ span: Span,
+ self_expr: &'tcx hir::Expr<'tcx>,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ unadjusted_self_ty: Ty<'tcx>,
+ pick: probe::Pick<'tcx>,
+ segment: &hir::PathSegment<'_>,
+ ) -> ConfirmResult<'tcx> {
+ debug!(
+ "confirm(unadjusted_self_ty={:?}, pick={:?}, generic_args={:?})",
+ unadjusted_self_ty, pick, segment.args,
+ );
+
+ let mut confirm_cx = ConfirmContext::new(self, span, self_expr, call_expr);
+ confirm_cx.confirm(unadjusted_self_ty, pick, segment)
+ }
+}
+
+impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
+ fn new(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ span: Span,
+ self_expr: &'tcx hir::Expr<'tcx>,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ ) -> ConfirmContext<'a, 'tcx> {
+ ConfirmContext { fcx, span, self_expr, call_expr }
+ }
+
+ fn confirm(
+ &mut self,
+ unadjusted_self_ty: Ty<'tcx>,
+ pick: probe::Pick<'tcx>,
+ segment: &hir::PathSegment<'_>,
+ ) -> ConfirmResult<'tcx> {
+ // Adjust the self expression the user provided and obtain the adjusted type.
+ let self_ty = self.adjust_self_ty(unadjusted_self_ty, &pick);
+
+ // Create substitutions for the method's type parameters.
+ let rcvr_substs = self.fresh_receiver_substs(self_ty, &pick);
+ let all_substs = self.instantiate_method_substs(&pick, segment, rcvr_substs);
+
+ debug!("rcvr_substs={rcvr_substs:?}, all_substs={all_substs:?}");
+
+ // Create the final signature for the method, replacing late-bound regions.
+ let (method_sig, method_predicates) = self.instantiate_method_sig(&pick, all_substs);
+
+ // If there is a `Self: Sized` bound and `Self` is a trait object, it is possible that
+ // something which derefs to `Self` actually implements the trait and the caller
+ // wanted to make a static dispatch on it but forgot to import the trait.
+ // See test `src/test/ui/issue-35976.rs`.
+ //
+ // In that case, we'll error anyway, but we'll also re-run the search with all traits
+ // in scope, and if we find another method which can be used, we'll output an
+ // appropriate hint suggesting to import the trait.
+ let filler_substs = rcvr_substs
+ .extend_to(self.tcx, pick.item.def_id, |def, _| self.tcx.mk_param_from_def(def));
+ let illegal_sized_bound = self.predicates_require_illegal_sized_bound(
+ &self.tcx.predicates_of(pick.item.def_id).instantiate(self.tcx, filler_substs),
+ );
+
+ // Unify the (adjusted) self type with what the method expects.
+ //
+ // SUBTLE: if we want good error messages, because of "guessing" while matching
+ // traits, no trait system method can be called before this point because they
+ // could alter our Self-type, except for normalizing the receiver from the
+ // signature (which is also done during probing).
+ let method_sig_rcvr = self.normalize_associated_types_in(self.span, method_sig.inputs()[0]);
+ debug!(
+ "confirm: self_ty={:?} method_sig_rcvr={:?} method_sig={:?} method_predicates={:?}",
+ self_ty, method_sig_rcvr, method_sig, method_predicates
+ );
+ self.unify_receivers(self_ty, method_sig_rcvr, &pick, all_substs);
+
+ let (method_sig, method_predicates) =
+ self.normalize_associated_types_in(self.span, (method_sig, method_predicates));
+ let method_sig = ty::Binder::dummy(method_sig);
+
+ // Make sure nobody calls `drop()` explicitly.
+ self.enforce_illegal_method_limitations(&pick);
+
+ // Add any trait/regions obligations specified on the method's type parameters.
+ // We won't add these if we encountered an illegal sized bound, so that we can use
+ // a custom error in that case.
+ if illegal_sized_bound.is_none() {
+ self.add_obligations(
+ self.tcx.mk_fn_ptr(method_sig),
+ all_substs,
+ method_predicates,
+ pick.item.def_id,
+ );
+ }
+
+ // Create the final `MethodCallee`.
+ let callee = MethodCallee {
+ def_id: pick.item.def_id,
+ substs: all_substs,
+ sig: method_sig.skip_binder(),
+ };
+ ConfirmResult { callee, illegal_sized_bound }
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // ADJUSTMENTS
+
+ fn adjust_self_ty(
+ &mut self,
+ unadjusted_self_ty: Ty<'tcx>,
+ pick: &probe::Pick<'tcx>,
+ ) -> Ty<'tcx> {
+ // Commit the autoderefs by calling `autoderef` again, but this
+ // time writing the results into the various typeck results.
+ let mut autoderef =
+ self.autoderef_overloaded_span(self.span, unadjusted_self_ty, self.call_expr.span);
+ let Some((ty, n)) = autoderef.nth(pick.autoderefs) else {
+ return self.tcx.ty_error_with_message(
+ rustc_span::DUMMY_SP,
+ &format!("failed autoderef {}", pick.autoderefs),
+ );
+ };
+ assert_eq!(n, pick.autoderefs);
+
+ let mut adjustments = self.adjust_steps(&autoderef);
+ let mut target = self.structurally_resolved_type(autoderef.span(), ty);
+
+ match pick.autoref_or_ptr_adjustment {
+ Some(probe::AutorefOrPtrAdjustment::Autoref { mutbl, unsize }) => {
+ let region = self.next_region_var(infer::Autoref(self.span));
+ // Type we're wrapping in a reference, used later for unsizing
+ let base_ty = target;
+
+ target = self.tcx.mk_ref(region, ty::TypeAndMut { mutbl, ty: target });
+ let mutbl = match mutbl {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+ // Method call receivers are the primary use case
+ // for two-phase borrows.
+ allow_two_phase_borrow: AllowTwoPhase::Yes,
+ },
+ };
+ adjustments.push(Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(region, mutbl)),
+ target,
+ });
+
+ if unsize {
+ let unsized_ty = if let ty::Array(elem_ty, _) = base_ty.kind() {
+ self.tcx.mk_slice(*elem_ty)
+ } else {
+ bug!(
+ "AutorefOrPtrAdjustment's unsize flag should only be set for array ty, found {}",
+ base_ty
+ )
+ };
+ target = self
+ .tcx
+ .mk_ref(region, ty::TypeAndMut { mutbl: mutbl.into(), ty: unsized_ty });
+ adjustments
+ .push(Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), target });
+ }
+ }
+ Some(probe::AutorefOrPtrAdjustment::ToConstPtr) => {
+ target = match target.kind() {
+ &ty::RawPtr(ty::TypeAndMut { ty, mutbl }) => {
+ assert_eq!(mutbl, hir::Mutability::Mut);
+ self.tcx.mk_ptr(ty::TypeAndMut { mutbl: hir::Mutability::Not, ty })
+ }
+ other => panic!("Cannot adjust receiver type {:?} to const ptr", other),
+ };
+
+ adjustments.push(Adjustment {
+ kind: Adjust::Pointer(PointerCast::MutToConstPointer),
+ target,
+ });
+ }
+ None => {}
+ }
+
+ self.register_predicates(autoderef.into_obligations());
+
+ // Write out the final adjustments.
+ self.apply_adjustments(self.self_expr, adjustments);
+
+ target
+ }
+
+ /// Returns a set of substitutions for the method *receiver* where all type and region
+ /// parameters are instantiated with fresh variables. This substitution does not include any
+ /// parameters declared on the method itself.
+ ///
+ /// Note that this substitution may include late-bound regions from the impl level. If so,
+ /// these are instantiated later in the `instantiate_method_sig` routine.
+ fn fresh_receiver_substs(
+ &mut self,
+ self_ty: Ty<'tcx>,
+ pick: &probe::Pick<'tcx>,
+ ) -> SubstsRef<'tcx> {
+ match pick.kind {
+ probe::InherentImplPick => {
+ let impl_def_id = pick.item.container_id(self.tcx);
+ assert!(
+ self.tcx.impl_trait_ref(impl_def_id).is_none(),
+ "impl {:?} is not an inherent impl",
+ impl_def_id
+ );
+ self.fresh_substs_for_item(self.span, impl_def_id)
+ }
+
+ probe::ObjectPick => {
+ let trait_def_id = pick.item.container_id(self.tcx);
+ self.extract_existential_trait_ref(self_ty, |this, object_ty, principal| {
+ // The object data has no entry for the Self
+ // Type. For the purposes of this method call, we
+ // substitute the object type itself. This
+ // wouldn't be a sound substitution in all cases,
+ // since each instance of the object type is a
+ // different existential and hence could match
+ // distinct types (e.g., if `Self` appeared as an
+ // argument type), but those cases have already
+ // been ruled out when we deemed the trait to be
+ // "object safe".
+ let original_poly_trait_ref = principal.with_self_ty(this.tcx, object_ty);
+ let upcast_poly_trait_ref = this.upcast(original_poly_trait_ref, trait_def_id);
+ let upcast_trait_ref =
+ this.replace_bound_vars_with_fresh_vars(upcast_poly_trait_ref);
+ debug!(
+ "original_poly_trait_ref={:?} upcast_trait_ref={:?} target_trait={:?}",
+ original_poly_trait_ref, upcast_trait_ref, trait_def_id
+ );
+ upcast_trait_ref.substs
+ })
+ }
+
+ probe::TraitPick => {
+ let trait_def_id = pick.item.container_id(self.tcx);
+
+ // Make a trait reference `$0 : Trait<$1...$n>`
+ // consisting entirely of type variables. Later on in
+ // the process we will unify the transformed-self-type
+ // of the method with the actual type in order to
+ // unify some of these variables.
+ self.fresh_substs_for_item(self.span, trait_def_id)
+ }
+
+ probe::WhereClausePick(poly_trait_ref) => {
+ // Where clauses can have bound regions in them. We need to instantiate
+ // those to convert from a poly-trait-ref to a trait-ref.
+ self.replace_bound_vars_with_fresh_vars(poly_trait_ref).substs
+ }
+ }
+ }
+
+ fn extract_existential_trait_ref<R, F>(&mut self, self_ty: Ty<'tcx>, mut closure: F) -> R
+ where
+ F: FnMut(&mut ConfirmContext<'a, 'tcx>, Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>) -> R,
+ {
+ // If we specified that this is an object method, then the
+ // self-type ought to be something that can be dereferenced to
+ // yield an object-type (e.g., `&Object` or `Box<Object>`
+ // etc).
+
+ // FIXME: this feels, like, super dubious
+ self.fcx
+ .autoderef(self.span, self_ty)
+ .include_raw_pointers()
+ .find_map(|(ty, _)| match ty.kind() {
+ ty::Dynamic(data, ..) => Some(closure(
+ self,
+ ty,
+ data.principal().unwrap_or_else(|| {
+ span_bug!(self.span, "calling trait method on empty object?")
+ }),
+ )),
+ _ => None,
+ })
+ .unwrap_or_else(|| {
+ span_bug!(
+ self.span,
+ "self-type `{}` for ObjectPick never dereferenced to an object",
+ self_ty
+ )
+ })
+ }
+
+ fn instantiate_method_substs(
+ &mut self,
+ pick: &probe::Pick<'tcx>,
+ seg: &hir::PathSegment<'_>,
+ parent_substs: SubstsRef<'tcx>,
+ ) -> SubstsRef<'tcx> {
+ // Determine the values for the generic parameters of the method.
+ // If they were not explicitly supplied, just construct fresh
+ // variables.
+ let generics = self.tcx.generics_of(pick.item.def_id);
+
+ let arg_count_correct = <dyn AstConv<'_>>::check_generic_arg_count_for_call(
+ self.tcx,
+ self.span,
+ pick.item.def_id,
+ generics,
+ seg,
+ IsMethodCall::Yes,
+ );
+
+ // Create subst for early-bound lifetime parameters, combining
+ // parameters from the type and those from the method.
+ assert_eq!(generics.parent_count, parent_substs.len());
+
+ struct MethodSubstsCtxt<'a, 'tcx> {
+ cfcx: &'a ConfirmContext<'a, 'tcx>,
+ pick: &'a probe::Pick<'tcx>,
+ seg: &'a hir::PathSegment<'a>,
+ }
+ impl<'a, 'tcx> CreateSubstsForGenericArgsCtxt<'a, 'tcx> for MethodSubstsCtxt<'a, 'tcx> {
+ fn args_for_def_id(
+ &mut self,
+ def_id: DefId,
+ ) -> (Option<&'a hir::GenericArgs<'a>>, bool) {
+ if def_id == self.pick.item.def_id {
+ if let Some(data) = self.seg.args {
+ return (Some(data), false);
+ }
+ }
+ (None, false)
+ }
+
+ fn provided_kind(
+ &mut self,
+ param: &ty::GenericParamDef,
+ arg: &GenericArg<'_>,
+ ) -> subst::GenericArg<'tcx> {
+ match (&param.kind, arg) {
+ (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
+ <dyn AstConv<'_>>::ast_region_to_region(self.cfcx.fcx, lt, Some(param))
+ .into()
+ }
+ (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
+ self.cfcx.to_ty(ty).into()
+ }
+ (GenericParamDefKind::Const { .. }, GenericArg::Const(ct)) => {
+ self.cfcx.const_arg_to_const(&ct.value, param.def_id).into()
+ }
+ (GenericParamDefKind::Type { .. }, GenericArg::Infer(inf)) => {
+ self.cfcx.ty_infer(Some(param), inf.span).into()
+ }
+ (GenericParamDefKind::Const { .. }, GenericArg::Infer(inf)) => {
+ let tcx = self.cfcx.tcx();
+ self.cfcx.ct_infer(tcx.type_of(param.def_id), Some(param), inf.span).into()
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ fn inferred_kind(
+ &mut self,
+ _substs: Option<&[subst::GenericArg<'tcx>]>,
+ param: &ty::GenericParamDef,
+ _infer_args: bool,
+ ) -> subst::GenericArg<'tcx> {
+ self.cfcx.var_for_def(self.cfcx.span, param)
+ }
+ }
+ <dyn AstConv<'_>>::create_substs_for_generic_args(
+ self.tcx,
+ pick.item.def_id,
+ parent_substs,
+ false,
+ None,
+ &arg_count_correct,
+ &mut MethodSubstsCtxt { cfcx: self, pick, seg },
+ )
+ }
+
+ fn unify_receivers(
+ &mut self,
+ self_ty: Ty<'tcx>,
+ method_self_ty: Ty<'tcx>,
+ pick: &probe::Pick<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) {
+ debug!(
+ "unify_receivers: self_ty={:?} method_self_ty={:?} span={:?} pick={:?}",
+ self_ty, method_self_ty, self.span, pick
+ );
+ let cause = self.cause(
+ self.span,
+ ObligationCauseCode::UnifyReceiver(Box::new(UnifyReceiverContext {
+ assoc_item: pick.item,
+ param_env: self.param_env,
+ substs,
+ })),
+ );
+ match self.at(&cause, self.param_env).sup(method_self_ty, self_ty) {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations);
+ }
+ Err(_) => {
+ span_bug!(
+ self.span,
+ "{} was a subtype of {} but now is not?",
+ self_ty,
+ method_self_ty
+ );
+ }
+ }
+ }
+
+ // NOTE: this returns the *unnormalized* predicates and method sig. Because of
+ // inference guessing, the predicates and method signature can't be normalized
+ // until we unify the `Self` type.
+ fn instantiate_method_sig(
+ &mut self,
+ pick: &probe::Pick<'tcx>,
+ all_substs: SubstsRef<'tcx>,
+ ) -> (ty::FnSig<'tcx>, ty::InstantiatedPredicates<'tcx>) {
+ debug!("instantiate_method_sig(pick={:?}, all_substs={:?})", pick, all_substs);
+
+ // Instantiate the bounds on the method with the
+ // type/early-bound-regions substitutions performed. There can
+ // be no late-bound regions appearing here.
+ let def_id = pick.item.def_id;
+ let method_predicates = self.tcx.predicates_of(def_id).instantiate(self.tcx, all_substs);
+
+ debug!("method_predicates after subst = {:?}", method_predicates);
+
+ let sig = self.tcx.bound_fn_sig(def_id);
+
+ let sig = sig.subst(self.tcx, all_substs);
+ debug!("type scheme substituted, sig={:?}", sig);
+
+ let sig = self.replace_bound_vars_with_fresh_vars(sig);
+ debug!("late-bound lifetimes from method instantiated, sig={:?}", sig);
+
+ (sig, method_predicates)
+ }
+
+ fn add_obligations(
+ &mut self,
+ fty: Ty<'tcx>,
+ all_substs: SubstsRef<'tcx>,
+ method_predicates: ty::InstantiatedPredicates<'tcx>,
+ def_id: DefId,
+ ) {
+ debug!(
+ "add_obligations: fty={:?} all_substs={:?} method_predicates={:?} def_id={:?}",
+ fty, all_substs, method_predicates, def_id
+ );
+
+ // FIXME: could replace with the following, but we already calculated `method_predicates`,
+ // so we just call `predicates_for_generics` directly to avoid redoing work.
+ // `self.add_required_obligations(self.span, def_id, &all_substs);`
+ for obligation in traits::predicates_for_generics(
+ |idx, span| {
+ let code = if span.is_dummy() {
+ ObligationCauseCode::ExprItemObligation(def_id, self.call_expr.hir_id, idx)
+ } else {
+ ObligationCauseCode::ExprBindingObligation(
+ def_id,
+ span,
+ self.call_expr.hir_id,
+ idx,
+ )
+ };
+ traits::ObligationCause::new(self.span, self.body_id, code)
+ },
+ self.param_env,
+ method_predicates,
+ ) {
+ self.register_predicate(obligation);
+ }
+
+ // this is a projection from a trait reference, so we have to
+ // make sure that the trait reference inputs are well-formed.
+ self.add_wf_bounds(all_substs, self.call_expr);
+
+ // the function type must also be well-formed (this is not
+ // implied by the substs being well-formed because of inherent
+ // impls and late-bound regions - see issue #28609).
+ self.register_wf_obligation(fty.into(), self.span, traits::WellFormed(None));
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // MISCELLANY
+
+ fn predicates_require_illegal_sized_bound(
+ &self,
+ predicates: &ty::InstantiatedPredicates<'tcx>,
+ ) -> Option<Span> {
+ let sized_def_id = self.tcx.lang_items().sized_trait()?;
+
+ traits::elaborate_predicates(self.tcx, predicates.predicates.iter().copied())
+ // We don't care about regions here.
+ .filter_map(|obligation| match obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(trait_pred) if trait_pred.def_id() == sized_def_id => {
+ let span = iter::zip(&predicates.predicates, &predicates.spans)
+ .find_map(
+ |(p, span)| {
+ if *p == obligation.predicate { Some(*span) } else { None }
+ },
+ )
+ .unwrap_or(rustc_span::DUMMY_SP);
+ Some((trait_pred, span))
+ }
+ _ => None,
+ })
+ .find_map(|(trait_pred, span)| match trait_pred.self_ty().kind() {
+ ty::Dynamic(..) => Some(span),
+ _ => None,
+ })
+ }
+
+ fn enforce_illegal_method_limitations(&self, pick: &probe::Pick<'_>) {
+ // Disallow calls to the method `drop` defined in the `Drop` trait.
+ if let Some(trait_def_id) = pick.item.trait_container(self.tcx) {
+ callee::check_legal_trait_for_method_call(
+ self.tcx,
+ self.span,
+ Some(self.self_expr.span),
+ self.call_expr.span,
+ trait_def_id,
+ )
+ }
+ }
+
+ fn upcast(
+ &mut self,
+ source_trait_ref: ty::PolyTraitRef<'tcx>,
+ target_trait_def_id: DefId,
+ ) -> ty::PolyTraitRef<'tcx> {
+ let upcast_trait_refs =
+ traits::upcast_choices(self.tcx, source_trait_ref, target_trait_def_id);
+
+ // There must be exactly one trait ref or we'd get an ambiguity error, etc.
+ if upcast_trait_refs.len() != 1 {
+ span_bug!(
+ self.span,
+ "cannot uniquely upcast `{:?}` to `{:?}`: `{:?}`",
+ source_trait_ref,
+ target_trait_def_id,
+ upcast_trait_refs
+ );
+ }
+
+ upcast_trait_refs.into_iter().next().unwrap()
+ }
+
+ fn replace_bound_vars_with_fresh_vars<T>(&self, value: ty::Binder<'tcx, T>) -> T
+ where
+ T: TypeFoldable<'tcx> + Copy,
+ {
+ self.fcx.replace_bound_vars_with_fresh_vars(self.span, infer::FnCall, value)
+ }
+}
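For orientation, the receiver adjustments that `adjust_self_ty` commits above can be read off from ordinary Rust code; the sketch below is only illustrative, and the desugared forms in its comments are approximations rather than the compiler's exact adjustment lists.

```rust
fn main() {
    let v = vec![1u32, 2, 3];
    // One autoderef step (`Vec<u32>` -> `[u32]`) followed by an autoref:
    // roughly `<[u32]>::first(&*v)`.
    let _first = v.first();

    let a = [1u32, 2, 3];
    // An autoref with `unsize: true` (`[u32; 3]` -> `&[u32]`):
    // roughly `<[u32]>::iter(&a as &[u32])`.
    let _iter = a.iter();
}
```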
diff --git a/compiler/rustc_hir_typeck/src/method/mod.rs b/compiler/rustc_hir_typeck/src/method/mod.rs
new file mode 100644
index 000000000..a1278edef
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/method/mod.rs
@@ -0,0 +1,625 @@
+//! Method lookup: the secret sauce of Rust. See the [rustc dev guide] for more information.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/method-lookup.html
+
+mod confirm;
+mod prelude2021;
+pub mod probe;
+mod suggest;
+
+pub use self::suggest::SelfSource;
+pub use self::MethodError::*;
+
+use crate::{Expectation, FnCtxt};
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::{Applicability, Diagnostic};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Namespace};
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::{self, InferOk};
+use rustc_middle::traits::ObligationCause;
+use rustc_middle::ty::subst::{InternalSubsts, SubstsRef};
+use rustc_middle::ty::{self, DefIdTree, GenericParamDefKind, ToPredicate, Ty, TypeVisitable};
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+use rustc_trait_selection::traits;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
+
+use self::probe::{IsSuggestion, ProbeScope};
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ probe::provide(providers);
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct MethodCallee<'tcx> {
+ /// Impl method ID for inherent methods, or trait method ID otherwise.
+ pub def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+
+ /// Instantiated method signature, i.e., it has been
+ /// substituted, normalized, and has had late-bound
+ /// lifetimes replaced with inference variables.
+ pub sig: ty::FnSig<'tcx>,
+}
+
+#[derive(Debug)]
+pub enum MethodError<'tcx> {
+ // Did not find an applicable method, but we did find various near-misses that may work.
+ NoMatch(NoMatchData<'tcx>),
+
+ // Multiple methods might apply.
+ Ambiguity(Vec<CandidateSource>),
+
+ // Found an applicable method, but it is not visible. The third argument contains a list of
+ // not-in-scope traits which may work.
+ PrivateMatch(DefKind, DefId, Vec<DefId>),
+
+ // Found a `Self: Sized` bound where `Self` is a trait object; the caller may also have
+ // forgotten to import a trait.
+ IllegalSizedBound(Vec<DefId>, bool, Span),
+
+ // Found a match, but the return type is wrong
+ BadReturnType,
+}
+
+// Contains a list of static methods that may apply, a list of unsatisfied trait predicates which
+// could lead to matches if satisfied, and a list of not-in-scope traits which may work.
+#[derive(Debug)]
+pub struct NoMatchData<'tcx> {
+ pub static_candidates: Vec<CandidateSource>,
+ pub unsatisfied_predicates:
+ Vec<(ty::Predicate<'tcx>, Option<ty::Predicate<'tcx>>, Option<ObligationCause<'tcx>>)>,
+ pub out_of_scope_traits: Vec<DefId>,
+ pub lev_candidate: Option<ty::AssocItem>,
+ pub mode: probe::Mode,
+}
+
+// A pared down enum describing just the places from which a method
+// candidate can arise. Used for error reporting only.
+#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub enum CandidateSource {
+ Impl(DefId),
+ Trait(DefId /* trait id */),
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Determines whether the type `self_ty` supports a method name `method_name` or not.
+ #[instrument(level = "debug", skip(self))]
+ pub fn method_exists(
+ &self,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ call_expr_id: hir::HirId,
+ allow_private: bool,
+ ) -> bool {
+ let mode = probe::Mode::MethodCall;
+ match self.probe_for_name(
+ method_name.span,
+ mode,
+ method_name,
+ IsSuggestion(false),
+ self_ty,
+ call_expr_id,
+ ProbeScope::TraitsInScope,
+ ) {
+ Ok(..) => true,
+ Err(NoMatch(..)) => false,
+ Err(Ambiguity(..)) => true,
+ Err(PrivateMatch(..)) => allow_private,
+ Err(IllegalSizedBound(..)) => true,
+ Err(BadReturnType) => bug!("no return type expectations but got BadReturnType"),
+ }
+ }
+
+ /// Adds a suggestion to call the given method to the provided diagnostic.
+ #[instrument(level = "debug", skip(self, err, call_expr))]
+ pub(crate) fn suggest_method_call(
+ &self,
+ err: &mut Diagnostic,
+ msg: &str,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ call_expr: &hir::Expr<'_>,
+ span: Option<Span>,
+ ) {
+ let params = self
+ .probe_for_name(
+ method_name.span,
+ probe::Mode::MethodCall,
+ method_name,
+ IsSuggestion(false),
+ self_ty,
+ call_expr.hir_id,
+ ProbeScope::TraitsInScope,
+ )
+ .map(|pick| {
+ let sig = self.tcx.fn_sig(pick.item.def_id);
+ sig.inputs().skip_binder().len().saturating_sub(1)
+ })
+ .unwrap_or(0);
+
+ // Account for `foo.bar<T>`;
+ let sugg_span = span.unwrap_or(call_expr.span).shrink_to_hi();
+ let (suggestion, applicability) = (
+ format!("({})", (0..params).map(|_| "_").collect::<Vec<_>>().join(", ")),
+ if params > 0 { Applicability::HasPlaceholders } else { Applicability::MaybeIncorrect },
+ );
+
+ err.span_suggestion_verbose(sugg_span, msg, suggestion, applicability);
+ }
+
+ /// Performs method lookup. If lookup is successful, it will return the callee
+ /// and store an appropriate adjustment for the self-expr. In some cases it may
+ /// report an error (e.g., invoking the `drop` method).
+ ///
+ /// # Arguments
+ ///
+ /// Given a method call like `foo.bar::<T1,...Tn>(a, b + 1, ...)`:
+ ///
+ /// * `self`: the surrounding `FnCtxt` (!)
+ /// * `self_ty`: the (unadjusted) type of the self expression (`foo`)
+ /// * `segment`: the name and generic arguments of the method (`bar::<T1, ...Tn>`)
+ /// * `span`: the span for the method call
+ /// * `call_expr`: the complete method call: (`foo.bar::<T1,...Tn>(...)`)
+ /// * `self_expr`: the self expression (`foo`)
+ /// * `args`: the expressions of the arguments (`a, b + 1, ...`)
+ #[instrument(level = "debug", skip(self))]
+ pub fn lookup_method(
+ &self,
+ self_ty: Ty<'tcx>,
+ segment: &hir::PathSegment<'_>,
+ span: Span,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ self_expr: &'tcx hir::Expr<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) -> Result<MethodCallee<'tcx>, MethodError<'tcx>> {
+ let pick =
+ self.lookup_probe(span, segment.ident, self_ty, call_expr, ProbeScope::TraitsInScope)?;
+
+ self.lint_dot_call_from_2018(self_ty, segment, span, call_expr, self_expr, &pick, args);
+
+ for import_id in &pick.import_ids {
+ debug!("used_trait_import: {:?}", import_id);
+ Lrc::get_mut(&mut self.typeck_results.borrow_mut().used_trait_imports)
+ .unwrap()
+ .insert(*import_id);
+ }
+
+ self.tcx.check_stability(pick.item.def_id, Some(call_expr.hir_id), span, None);
+
+ let result =
+ self.confirm_method(span, self_expr, call_expr, self_ty, pick.clone(), segment);
+ debug!("result = {:?}", result);
+
+ if let Some(span) = result.illegal_sized_bound {
+ let mut needs_mut = false;
+ if let ty::Ref(region, t_type, mutability) = self_ty.kind() {
+ let trait_type = self
+ .tcx
+ .mk_ref(*region, ty::TypeAndMut { ty: *t_type, mutbl: mutability.invert() });
+ // We probe again to see if there might be a borrow mutability discrepancy.
+ match self.lookup_probe(
+ span,
+ segment.ident,
+ trait_type,
+ call_expr,
+ ProbeScope::TraitsInScope,
+ ) {
+ Ok(ref new_pick) if *new_pick != pick => {
+ needs_mut = true;
+ }
+ _ => {}
+ }
+ }
+
+ // We probe again, taking all traits into account (not only those in scope).
+ let mut candidates = match self.lookup_probe(
+ span,
+ segment.ident,
+ self_ty,
+ call_expr,
+ ProbeScope::AllTraits,
+ ) {
+ // If we find a different result the caller probably forgot to import a trait.
+ Ok(ref new_pick) if *new_pick != pick => vec![new_pick.item.container_id(self.tcx)],
+ Err(Ambiguity(ref sources)) => sources
+ .iter()
+ .filter_map(|source| {
+ match *source {
+ // Note: this cannot come from an inherent impl,
+ // because the first probing succeeded.
+ CandidateSource::Impl(def) => self.tcx.trait_id_of_impl(def),
+ CandidateSource::Trait(_) => None,
+ }
+ })
+ .collect(),
+ _ => Vec::new(),
+ };
+ candidates.retain(|candidate| *candidate != self.tcx.parent(result.callee.def_id));
+
+ return Err(IllegalSizedBound(candidates, needs_mut, span));
+ }
+
+ Ok(result.callee)
+ }
+
+ #[instrument(level = "debug", skip(self, call_expr))]
+ pub fn lookup_probe(
+ &self,
+ span: Span,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ scope: ProbeScope,
+ ) -> probe::PickResult<'tcx> {
+ let mode = probe::Mode::MethodCall;
+ let self_ty = self.resolve_vars_if_possible(self_ty);
+ self.probe_for_name(
+ span,
+ mode,
+ method_name,
+ IsSuggestion(false),
+ self_ty,
+ call_expr.hir_id,
+ scope,
+ )
+ }
+
+ pub(super) fn obligation_for_method(
+ &self,
+ span: Span,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ opt_input_types: Option<&[Ty<'tcx>]>,
+ ) -> (traits::Obligation<'tcx, ty::Predicate<'tcx>>, &'tcx ty::List<ty::subst::GenericArg<'tcx>>)
+ {
+ // Construct a trait-reference `self_ty : Trait<input_tys>`
+ let substs = InternalSubsts::for_item(self.tcx, trait_def_id, |param, _| {
+ match param.kind {
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => {}
+ GenericParamDefKind::Type { .. } => {
+ if param.index == 0 {
+ return self_ty.into();
+ } else if let Some(input_types) = opt_input_types {
+ return input_types[param.index as usize - 1].into();
+ }
+ }
+ }
+ self.var_for_def(span, param)
+ });
+
+ let trait_ref = ty::TraitRef::new(trait_def_id, substs);
+
+ // Construct an obligation
+ let poly_trait_ref = ty::Binder::dummy(trait_ref);
+ (
+ traits::Obligation::misc(
+ span,
+ self.body_id,
+ self.param_env,
+ poly_trait_ref.without_const().to_predicate(self.tcx),
+ ),
+ substs,
+ )
+ }
+
+ pub(super) fn obligation_for_op_method(
+ &self,
+ span: Span,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ opt_input_type: Option<Ty<'tcx>>,
+ opt_input_expr: Option<&'tcx hir::Expr<'tcx>>,
+ expected: Expectation<'tcx>,
+ ) -> (traits::Obligation<'tcx, ty::Predicate<'tcx>>, &'tcx ty::List<ty::subst::GenericArg<'tcx>>)
+ {
+ // Construct a trait-reference `self_ty : Trait<input_tys>`
+ let substs = InternalSubsts::for_item(self.tcx, trait_def_id, |param, _| {
+ match param.kind {
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => {}
+ GenericParamDefKind::Type { .. } => {
+ if param.index == 0 {
+ return self_ty.into();
+ } else if let Some(input_type) = opt_input_type {
+ return input_type.into();
+ }
+ }
+ }
+ self.var_for_def(span, param)
+ });
+
+ let trait_ref = ty::TraitRef::new(trait_def_id, substs);
+
+ // Construct an obligation
+ let poly_trait_ref = ty::Binder::dummy(trait_ref);
+ let output_ty = expected.only_has_type(self).and_then(|ty| (!ty.needs_infer()).then(|| ty));
+
+ (
+ traits::Obligation::new(
+ traits::ObligationCause::new(
+ span,
+ self.body_id,
+ traits::BinOp {
+ rhs_span: opt_input_expr.map(|expr| expr.span),
+ is_lit: opt_input_expr
+ .map_or(false, |expr| matches!(expr.kind, hir::ExprKind::Lit(_))),
+ output_ty,
+ },
+ ),
+ self.param_env,
+ poly_trait_ref.without_const().to_predicate(self.tcx),
+ ),
+ substs,
+ )
+ }
+
+ /// `lookup_method_in_trait` is used for overloaded operators.
+ /// It covers only a narrow slice of what the normal probe/confirm path does.
+ /// In particular, it doesn't really do any probing: it simply constructs
+ /// an obligation for a particular trait with the given self type and checks
+ /// whether that trait is implemented.
+ #[instrument(level = "debug", skip(self, span))]
+ pub(super) fn lookup_method_in_trait(
+ &self,
+ span: Span,
+ m_name: Ident,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ opt_input_types: Option<&[Ty<'tcx>]>,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ let (obligation, substs) =
+ self.obligation_for_method(span, trait_def_id, self_ty, opt_input_types);
+ self.construct_obligation_for_trait(
+ span,
+ m_name,
+ trait_def_id,
+ obligation,
+ substs,
+ None,
+ false,
+ )
+ }
+
+ pub(super) fn lookup_op_method_in_trait(
+ &self,
+ span: Span,
+ m_name: Ident,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ opt_input_type: Option<Ty<'tcx>>,
+ opt_input_expr: Option<&'tcx hir::Expr<'tcx>>,
+ expected: Expectation<'tcx>,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ let (obligation, substs) = self.obligation_for_op_method(
+ span,
+ trait_def_id,
+ self_ty,
+ opt_input_type,
+ opt_input_expr,
+ expected,
+ );
+ self.construct_obligation_for_trait(
+ span,
+ m_name,
+ trait_def_id,
+ obligation,
+ substs,
+ opt_input_expr,
+ true,
+ )
+ }
+
+ // FIXME(#18741): it seems likely that we can consolidate some of this
+ // code with the other method-lookup code. In particular, the second half
+ // of this method is basically the same as confirmation.
+ fn construct_obligation_for_trait(
+ &self,
+ span: Span,
+ m_name: Ident,
+ trait_def_id: DefId,
+ obligation: traits::PredicateObligation<'tcx>,
+ substs: &'tcx ty::List<ty::subst::GenericArg<'tcx>>,
+ opt_input_expr: Option<&'tcx hir::Expr<'tcx>>,
+ is_op: bool,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ debug!(?obligation);
+
+ // Now we want to know if this can be matched
+ if !self.predicate_may_hold(&obligation) {
+ debug!("--> Cannot match obligation");
+ // Cannot be matched, no such method resolution is possible.
+ return None;
+ }
+
+ // Trait must have a method named `m_name` and it should not have
+ // type parameters or early-bound regions.
+ let tcx = self.tcx;
+ let Some(method_item) = self.associated_value(trait_def_id, m_name) else {
+ tcx.sess.delay_span_bug(
+ span,
+ "operator trait does not have corresponding operator method",
+ );
+ return None;
+ };
+ let def_id = method_item.def_id;
+ let generics = tcx.generics_of(def_id);
+ assert_eq!(generics.params.len(), 0);
+
+ debug!("lookup_in_trait_adjusted: method_item={:?}", method_item);
+ let mut obligations = vec![];
+
+ // Instantiate late-bound regions and substitute the trait
+ // parameters into the method type to get the actual method type.
+ //
+ // N.B., instantiate late-bound regions first so that
+ // `instantiate_type_scheme` can normalize associated types that
+ // may reference those regions.
+ let fn_sig = tcx.bound_fn_sig(def_id);
+ let fn_sig = fn_sig.subst(self.tcx, substs);
+ let fn_sig = self.replace_bound_vars_with_fresh_vars(span, infer::FnCall, fn_sig);
+
+ let InferOk { value, obligations: o } = if is_op {
+ self.normalize_op_associated_types_in_as_infer_ok(span, fn_sig, opt_input_expr)
+ } else {
+ self.normalize_associated_types_in_as_infer_ok(span, fn_sig)
+ };
+ let fn_sig = {
+ obligations.extend(o);
+ value
+ };
+
+ // Register obligations for the parameters. This will include the
+ // `Self` parameter, which in turn has a bound of the main trait,
+ // so this also effectively registers `obligation` as well. (We
+ // used to register `obligation` explicitly, but that resulted in
+ // double error messages being reported.)
+ //
+ // Note that as the method comes from a trait, it should not have
+ // any late-bound regions appearing in its bounds.
+ let bounds = self.tcx.predicates_of(def_id).instantiate(self.tcx, substs);
+
+ let InferOk { value, obligations: o } = if is_op {
+ self.normalize_op_associated_types_in_as_infer_ok(span, bounds, opt_input_expr)
+ } else {
+ self.normalize_associated_types_in_as_infer_ok(span, bounds)
+ };
+ let bounds = {
+ obligations.extend(o);
+ value
+ };
+
+ assert!(!bounds.has_escaping_bound_vars());
+
+ let cause = if is_op {
+ ObligationCause::new(
+ span,
+ self.body_id,
+ traits::BinOp {
+ rhs_span: opt_input_expr.map(|expr| expr.span),
+ is_lit: opt_input_expr
+ .map_or(false, |expr| matches!(expr.kind, hir::ExprKind::Lit(_))),
+ output_ty: None,
+ },
+ )
+ } else {
+ traits::ObligationCause::misc(span, self.body_id)
+ };
+ let predicates_cause = cause.clone();
+ obligations.extend(traits::predicates_for_generics(
+ move |_, _| predicates_cause.clone(),
+ self.param_env,
+ bounds,
+ ));
+
+ // Also add an obligation for the method type being well-formed.
+ let method_ty = tcx.mk_fn_ptr(ty::Binder::dummy(fn_sig));
+ debug!(
+ "lookup_in_trait_adjusted: matched method method_ty={:?} obligation={:?}",
+ method_ty, obligation
+ );
+ obligations.push(traits::Obligation::new(
+ cause,
+ self.param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(method_ty.into())).to_predicate(tcx),
+ ));
+
+ let callee = MethodCallee { def_id, substs, sig: fn_sig };
+
+ debug!("callee = {:?}", callee);
+
+ Some(InferOk { obligations, value: callee })
+ }
+
+ /// Performs a [fully-qualified function call] (formerly "universal function call") lookup. If
+ /// lookup is successful, it will return the type of definition and the [`DefId`] of the found
+ /// function definition.
+ ///
+ /// [fully-qualified function call]: https://doc.rust-lang.org/reference/expressions/call-expr.html#disambiguating-function-calls
+ ///
+ /// # Arguments
+ ///
+ /// Given a function call like `Foo::bar::<T1,...Tn>(...)`:
+ ///
+ /// * `self`: the surrounding `FnCtxt` (!)
+ /// * `span`: the span of the call, excluding arguments (`Foo::bar::<T1, ...Tn>`)
+ /// * `method_name`: the identifier of the function within the container type (`bar`)
+ /// * `self_ty`: the type to search within (`Foo`)
+ /// * `self_ty_span` the span for the type being searched within (span of `Foo`)
+ /// * `expr_id`: the [`hir::HirId`] of the expression composing the entire call
+ #[instrument(level = "debug", skip(self), ret)]
+ pub fn resolve_fully_qualified_call(
+ &self,
+ span: Span,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ self_ty_span: Span,
+ expr_id: hir::HirId,
+ ) -> Result<(DefKind, DefId), MethodError<'tcx>> {
+ let tcx = self.tcx;
+
+ // Check if we have an enum variant.
+ if let ty::Adt(adt_def, _) = self_ty.kind() {
+ if adt_def.is_enum() {
+ let variant_def = adt_def
+ .variants()
+ .iter()
+ .find(|vd| tcx.hygienic_eq(method_name, vd.ident(tcx), adt_def.did()));
+ if let Some(variant_def) = variant_def {
+ // Braced variants generate unusable names in value namespace (reserved for
+ // possible future use), so variants resolved as associated items may refer to
+ // them as well. It's ok to use the variant's id as a ctor id since an
+ // error will be reported on any use of such resolution anyway.
+ let ctor_def_id = variant_def.ctor_def_id.unwrap_or(variant_def.def_id);
+ tcx.check_stability(ctor_def_id, Some(expr_id), span, Some(method_name.span));
+ return Ok((
+ DefKind::Ctor(CtorOf::Variant, variant_def.ctor_kind),
+ ctor_def_id,
+ ));
+ }
+ }
+ }
+
+ let pick = self.probe_for_name(
+ span,
+ probe::Mode::Path,
+ method_name,
+ IsSuggestion(false),
+ self_ty,
+ expr_id,
+ ProbeScope::TraitsInScope,
+ )?;
+
+ self.lint_fully_qualified_call_from_2018(
+ span,
+ method_name,
+ self_ty,
+ self_ty_span,
+ expr_id,
+ &pick,
+ );
+
+ debug!(?pick);
+ {
+ let mut typeck_results = self.typeck_results.borrow_mut();
+ let used_trait_imports = Lrc::get_mut(&mut typeck_results.used_trait_imports).unwrap();
+ for import_id in pick.import_ids {
+ debug!(used_trait_import=?import_id);
+ used_trait_imports.insert(import_id);
+ }
+ }
+
+ let def_kind = pick.item.kind.as_def_kind();
+ tcx.check_stability(pick.item.def_id, Some(expr_id), span, Some(method_name.span));
+ Ok((def_kind, pick.item.def_id))
+ }
+
+ /// Finds item with name `item_name` defined in impl/trait `def_id`
+ /// and return it, or `None`, if no such item was defined there.
+ pub fn associated_value(&self, def_id: DefId, item_name: Ident) -> Option<ty::AssocItem> {
+ self.tcx
+ .associated_items(def_id)
+ .find_by_name_and_namespace(self.tcx, item_name, Namespace::ValueNS, def_id)
+ .copied()
+ }
+}
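As a quick illustration of the two entry points documented above, the sketch below shows the call forms from the user's side; `Foo` and `bar` are made-up names used only for this example.

```rust
struct Foo;

impl Foo {
    fn bar<T: Default>(&self) -> T {
        T::default()
    }
}

fn main() {
    let foo = Foo;
    // Dot-call (optionally with a turbofish): resolved through `lookup_method`.
    let _x: u8 = foo.bar::<u8>();
    // Path-form call: resolved through `resolve_fully_qualified_call`.
    let _y: u8 = Foo::bar::<u8>(&foo);
}
```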
diff --git a/compiler/rustc_hir_typeck/src/method/prelude2021.rs b/compiler/rustc_hir_typeck/src/method/prelude2021.rs
new file mode 100644
index 000000000..3c98a2aa3
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/method/prelude2021.rs
@@ -0,0 +1,415 @@
+use crate::{
+ method::probe::{self, Pick},
+ FnCtxt,
+};
+use hir::def_id::DefId;
+use hir::HirId;
+use hir::ItemKind;
+use rustc_ast::Mutability;
+use rustc_errors::Applicability;
+use rustc_hir as hir;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::{Adt, Array, Ref, Ty};
+use rustc_session::lint::builtin::RUST_2021_PRELUDE_COLLISIONS;
+use rustc_span::symbol::kw::{Empty, Underscore};
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+use rustc_trait_selection::infer::InferCtxtExt;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub(super) fn lint_dot_call_from_2018(
+ &self,
+ self_ty: Ty<'tcx>,
+ segment: &hir::PathSegment<'_>,
+ span: Span,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ self_expr: &'tcx hir::Expr<'tcx>,
+ pick: &Pick<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) {
+ debug!(
+ "lookup(method_name={}, self_ty={:?}, call_expr={:?}, self_expr={:?})",
+ segment.ident, self_ty, call_expr, self_expr
+ );
+
+ // Rust 2021 and later is already using the new prelude
+ if span.rust_2021() {
+ return;
+ }
+
+ let prelude_or_array_lint = match segment.ident.name {
+ // `try_into` was added to the prelude in Rust 2021.
+ sym::try_into => RUST_2021_PRELUDE_COLLISIONS,
+ // `into_iter` wasn't added to the prelude,
+ // but `[T; N].into_iter()` doesn't resolve to IntoIterator::into_iter
+ // before Rust 2021, which results in the same problem.
+ // It is only a problem for arrays.
+ sym::into_iter if let Array(..) = self_ty.kind() => {
+ // In this case, it wasn't really a prelude addition that was the problem.
+ // Instead, the problem is that the array-into_iter hack will no longer apply in Rust 2021.
+ rustc_lint::ARRAY_INTO_ITER
+ }
+ _ => return,
+ };
+
+ // No need to lint if method came from std/core, as that will now be in the prelude
+ if matches!(self.tcx.crate_name(pick.item.def_id.krate), sym::std | sym::core) {
+ return;
+ }
+
+ if matches!(pick.kind, probe::PickKind::InherentImplPick | probe::PickKind::ObjectPick) {
+ // avoid repeatedly adding unneeded `&*`s
+ if pick.autoderefs == 1
+ && matches!(
+ pick.autoref_or_ptr_adjustment,
+ Some(probe::AutorefOrPtrAdjustment::Autoref { .. })
+ )
+ && matches!(self_ty.kind(), Ref(..))
+ {
+ return;
+ }
+
+ // if it's an inherent `self` method (not `&self` or `&mut self`), it will take
+ // precedence over the `TryInto` impl, and thus won't break in 2021 edition
+ if pick.autoderefs == 0 && pick.autoref_or_ptr_adjustment.is_none() {
+ return;
+ }
+
+ // For inherent impls, not relying on autoref and autoderef is enough to
+ // ensure that the trait implementation won't be used.
+ self.tcx.struct_span_lint_hir(
+ prelude_or_array_lint,
+ self_expr.hir_id,
+ self_expr.span,
+ format!("trait method `{}` will become ambiguous in Rust 2021", segment.ident.name),
+ |lint| {
+ let sp = self_expr.span;
+
+ let derefs = "*".repeat(pick.autoderefs);
+
+ let autoref = match pick.autoref_or_ptr_adjustment {
+ Some(probe::AutorefOrPtrAdjustment::Autoref {
+ mutbl: Mutability::Mut,
+ ..
+ }) => "&mut ",
+ Some(probe::AutorefOrPtrAdjustment::Autoref {
+ mutbl: Mutability::Not,
+ ..
+ }) => "&",
+ Some(probe::AutorefOrPtrAdjustment::ToConstPtr) | None => "",
+ };
+ if let Ok(self_expr) = self.sess().source_map().span_to_snippet(self_expr.span)
+ {
+ let self_adjusted = if let Some(probe::AutorefOrPtrAdjustment::ToConstPtr) =
+ pick.autoref_or_ptr_adjustment
+ {
+ format!("{}{} as *const _", derefs, self_expr)
+ } else {
+ format!("{}{}{}", autoref, derefs, self_expr)
+ };
+
+ lint.span_suggestion(
+ sp,
+ "disambiguate the method call",
+ format!("({})", self_adjusted),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ let self_adjusted = if let Some(probe::AutorefOrPtrAdjustment::ToConstPtr) =
+ pick.autoref_or_ptr_adjustment
+ {
+ format!("{}(...) as *const _", derefs)
+ } else {
+ format!("{}{}...", autoref, derefs)
+ };
+ lint.span_help(
+ sp,
+ &format!("disambiguate the method call with `({})`", self_adjusted,),
+ );
+ }
+
+ lint
+ },
+ );
+ } else {
+ // trait implementations require full disambiguation to not clash with the new prelude
+ // additions (i.e. convert from dot-call to fully-qualified call)
+ self.tcx.struct_span_lint_hir(
+ prelude_or_array_lint,
+ call_expr.hir_id,
+ call_expr.span,
+ format!("trait method `{}` will become ambiguous in Rust 2021", segment.ident.name),
+ |lint| {
+ let sp = call_expr.span;
+ let trait_name = self.trait_path_or_bare_name(
+ span,
+ call_expr.hir_id,
+ pick.item.container_id(self.tcx),
+ );
+
+ let (self_adjusted, precise) = self.adjust_expr(pick, self_expr, sp);
+ if precise {
+ let args = args
+ .iter()
+ .map(|arg| {
+ let span = arg.span.find_ancestor_inside(sp).unwrap_or_default();
+ format!(
+ ", {}",
+ self.sess().source_map().span_to_snippet(span).unwrap()
+ )
+ })
+ .collect::<String>();
+
+ lint.span_suggestion(
+ sp,
+ "disambiguate the associated function",
+ format!(
+ "{}::{}{}({}{})",
+ trait_name,
+ segment.ident.name,
+ if let Some(args) = segment.args.as_ref().and_then(|args| self
+ .sess()
+ .source_map()
+ .span_to_snippet(args.span_ext)
+ .ok())
+ {
+ // Keep turbofish.
+ format!("::{}", args)
+ } else {
+ String::new()
+ },
+ self_adjusted,
+ args,
+ ),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ lint.span_help(
+ sp,
+ &format!(
+ "disambiguate the associated function with `{}::{}(...)`",
+ trait_name, segment.ident,
+ ),
+ );
+ }
+
+ lint
+ },
+ );
+ }
+ }
+
+ pub(super) fn lint_fully_qualified_call_from_2018(
+ &self,
+ span: Span,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ self_ty_span: Span,
+ expr_id: hir::HirId,
+ pick: &Pick<'tcx>,
+ ) {
+ // Rust 2021 and later is already using the new prelude
+ if span.rust_2021() {
+ return;
+ }
+
+ // These are the fully qualified methods added to prelude in Rust 2021
+ if !matches!(method_name.name, sym::try_into | sym::try_from | sym::from_iter) {
+ return;
+ }
+
+ // No need to lint if method came from std/core, as that will now be in the prelude
+ if matches!(self.tcx.crate_name(pick.item.def_id.krate), sym::std | sym::core) {
+ return;
+ }
+
+ // For from_iter, check if the type actually implements FromIterator.
+ // If we know it does not, we don't need to warn.
+ if method_name.name == sym::from_iter {
+ if let Some(trait_def_id) = self.tcx.get_diagnostic_item(sym::FromIterator) {
+ if !self
+ .infcx
+ .type_implements_trait(
+ trait_def_id,
+ self_ty,
+ InternalSubsts::empty(),
+ self.param_env,
+ )
+ .may_apply()
+ {
+ return;
+ }
+ }
+ }
+
+ // No need to lint if this is an inherent method called on a specific type, like `Vec::foo(...)`,
+ // since such methods take precedence over trait methods.
+ if matches!(pick.kind, probe::PickKind::InherentImplPick) {
+ return;
+ }
+
+ self.tcx.struct_span_lint_hir(
+ RUST_2021_PRELUDE_COLLISIONS,
+ expr_id,
+ span,
+ format!(
+ "trait-associated function `{}` will become ambiguous in Rust 2021",
+ method_name.name
+ ),
+ |lint| {
+ // "type" refers to either a type or, more likely, a trait from which
+ // the associated function or method is from.
+ let container_id = pick.item.container_id(self.tcx);
+ let trait_path = self.trait_path_or_bare_name(span, expr_id, container_id);
+ let trait_generics = self.tcx.generics_of(container_id);
+
+ let trait_name = if trait_generics.params.len() <= trait_generics.has_self as usize
+ {
+ trait_path
+ } else {
+ let counts = trait_generics.own_counts();
+ format!(
+ "{}<{}>",
+ trait_path,
+ std::iter::repeat("'_")
+ .take(counts.lifetimes)
+ .chain(std::iter::repeat("_").take(
+ counts.types + counts.consts - trait_generics.has_self as usize
+ ))
+ .collect::<Vec<_>>()
+ .join(", ")
+ )
+ };
+
+ let mut self_ty_name = self_ty_span
+ .find_ancestor_inside(span)
+ .and_then(|span| self.sess().source_map().span_to_snippet(span).ok())
+ .unwrap_or_else(|| self_ty.to_string());
+
+ // Get the number of generics the self type has (if an Adt), unless we can determine that
+ // the user has already written the self type with generics, which we (naively) detect by
+ // looking for a "<" in `self_ty_name`.
+ if !self_ty_name.contains('<') {
+ if let Adt(def, _) = self_ty.kind() {
+ let generics = self.tcx.generics_of(def.did());
+ if !generics.params.is_empty() {
+ let counts = generics.own_counts();
+ self_ty_name += &format!(
+ "<{}>",
+ std::iter::repeat("'_")
+ .take(counts.lifetimes)
+ .chain(
+ std::iter::repeat("_").take(counts.types + counts.consts)
+ )
+ .collect::<Vec<_>>()
+ .join(", ")
+ );
+ }
+ }
+ }
+ lint.span_suggestion(
+ span,
+ "disambiguate the associated function",
+ format!("<{} as {}>::{}", self_ty_name, trait_name, method_name.name,),
+ Applicability::MachineApplicable,
+ );
+
+ lint
+ },
+ );
+ }
+
+ fn trait_path_or_bare_name(
+ &self,
+ span: Span,
+ expr_hir_id: HirId,
+ trait_def_id: DefId,
+ ) -> String {
+ self.trait_path(span, expr_hir_id, trait_def_id).unwrap_or_else(|| {
+ let key = self.tcx.def_key(trait_def_id);
+ format!("{}", key.disambiguated_data.data)
+ })
+ }
+
+ fn trait_path(&self, span: Span, expr_hir_id: HirId, trait_def_id: DefId) -> Option<String> {
+ let applicable_traits = self.tcx.in_scope_traits(expr_hir_id)?;
+ let applicable_trait = applicable_traits.iter().find(|t| t.def_id == trait_def_id)?;
+ if applicable_trait.import_ids.is_empty() {
+ // The trait was declared within the module, we only need to use its name.
+ return None;
+ }
+
+ let import_items: Vec<_> = applicable_trait
+ .import_ids
+ .iter()
+ .map(|&import_id| self.tcx.hir().expect_item(import_id))
+ .collect();
+
+ // Find an identifier with which this trait was imported (note that `_` doesn't count).
+ let any_id = import_items
+ .iter()
+ .filter_map(|item| if item.ident.name != Underscore { Some(item.ident) } else { None })
+ .next();
+ if let Some(any_id) = any_id {
+ if any_id.name == Empty {
+ // Glob import, so just use its name.
+ return None;
+ } else {
+ return Some(format!("{}", any_id));
+ }
+ }
+
+ // All that is left is `_`! We need to use the full path. It doesn't matter which one we pick,
+ // so just take the first one.
+ match import_items[0].kind {
+ ItemKind::Use(path, _) => Some(
+ path.segments
+ .iter()
+ .map(|segment| segment.ident.to_string())
+ .collect::<Vec<_>>()
+ .join("::"),
+ ),
+ _ => {
+ span_bug!(span, "unexpected item kind, expected a use: {:?}", import_items[0].kind);
+ }
+ }
+ }
+
+ /// Creates a string version of the `expr` that includes explicit adjustments.
+ /// Returns the string and also a bool indicating whether this is a *precise*
+ /// suggestion.
+ fn adjust_expr(
+ &self,
+ pick: &Pick<'tcx>,
+ expr: &hir::Expr<'tcx>,
+ outer: Span,
+ ) -> (String, bool) {
+ let derefs = "*".repeat(pick.autoderefs);
+
+ let autoref = match pick.autoref_or_ptr_adjustment {
+ Some(probe::AutorefOrPtrAdjustment::Autoref { mutbl: Mutability::Mut, .. }) => "&mut ",
+ Some(probe::AutorefOrPtrAdjustment::Autoref { mutbl: Mutability::Not, .. }) => "&",
+ Some(probe::AutorefOrPtrAdjustment::ToConstPtr) | None => "",
+ };
+
+ let (expr_text, precise) = if let Some(expr_text) = expr
+ .span
+ .find_ancestor_inside(outer)
+ .and_then(|span| self.sess().source_map().span_to_snippet(span).ok())
+ {
+ (expr_text, true)
+ } else {
+ ("(..)".to_string(), false)
+ };
+
+ let adjusted_text = if let Some(probe::AutorefOrPtrAdjustment::ToConstPtr) =
+ pick.autoref_or_ptr_adjustment
+ {
+ format!("{}{} as *const _", derefs, expr_text)
+ } else {
+ format!("{}{}{}", autoref, derefs, expr_text)
+ };
+
+ (adjusted_text, precise)
+ }
+}
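To make the lint above concrete, its suggestions steer users toward one of two shapes: a fully qualified associated-function call, or a receiver with its adjustments written out. The sketch below is only illustrative; which shape a real diagnostic picks, and its exact text, depend on the pick kind and the receiver snippet.

```rust
fn main() {
    let n: u64 = 3;
    // Shape 1: a fully qualified call, which stays unambiguous once
    // `TryInto` enters the Rust 2021 prelude.
    let _m: u32 = std::convert::TryInto::try_into(n).unwrap();

    let a = [1u32, 2, 3];
    // Shape 2: spell out the receiver adjustment, so `into_iter` keeps
    // yielding `&u32` on every edition.
    let _first = (&a).into_iter().next();
}
```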
diff --git a/compiler/rustc_hir_typeck/src/method/probe.rs b/compiler/rustc_hir_typeck/src/method/probe.rs
new file mode 100644
index 000000000..28aa2302f
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/method/probe.rs
@@ -0,0 +1,1926 @@
+use super::suggest;
+use super::CandidateSource;
+use super::MethodError;
+use super::NoMatchData;
+
+use crate::errors::MethodCallOnUnknownType;
+use crate::FnCtxt;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::Applicability;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def::Namespace;
+use rustc_infer::infer::canonical::OriginalQueryValues;
+use rustc_infer::infer::canonical::{Canonical, QueryResponse};
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::{self, InferOk, TyCtxtInferExt};
+use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
+use rustc_middle::middle::stability;
+use rustc_middle::ty::fast_reject::{simplify_type, TreatParams};
+use rustc_middle::ty::GenericParamDefKind;
+use rustc_middle::ty::{self, ParamEnvAnd, ToPredicate, Ty, TyCtxt, TypeFoldable, TypeVisitable};
+use rustc_middle::ty::{InternalSubsts, SubstsRef};
+use rustc_session::lint;
+use rustc_span::def_id::DefId;
+use rustc_span::def_id::LocalDefId;
+use rustc_span::lev_distance::{
+ find_best_match_for_name_with_substrings, lev_distance_with_substrings,
+};
+use rustc_span::symbol::sym;
+use rustc_span::{symbol::Ident, Span, Symbol, DUMMY_SP};
+use rustc_trait_selection::autoderef::{self, Autoderef};
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
+use rustc_trait_selection::traits::query::method_autoderef::MethodAutoderefBadTy;
+use rustc_trait_selection::traits::query::method_autoderef::{
+ CandidateStep, MethodAutoderefStepsResult,
+};
+use rustc_trait_selection::traits::query::CanonicalTyGoal;
+use rustc_trait_selection::traits::{self, ObligationCause};
+use std::cmp::max;
+use std::iter;
+use std::mem;
+use std::ops::Deref;
+
+use smallvec::{smallvec, SmallVec};
+
+use self::CandidateKind::*;
+pub use self::PickKind::*;
+
+/// Boolean flag used to indicate if this search is for a suggestion
+/// or not. If true, we can allow ambiguity and so forth.
+#[derive(Clone, Copy, Debug)]
+pub struct IsSuggestion(pub bool);
+
+struct ProbeContext<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ span: Span,
+ mode: Mode,
+ method_name: Option<Ident>,
+ return_type: Option<Ty<'tcx>>,
+
+ /// This is the OriginalQueryValues for the steps queries
+ /// that are answered in steps.
+ orig_steps_var_values: OriginalQueryValues<'tcx>,
+ steps: &'tcx [CandidateStep<'tcx>],
+
+ inherent_candidates: Vec<Candidate<'tcx>>,
+ extension_candidates: Vec<Candidate<'tcx>>,
+ impl_dups: FxHashSet<DefId>,
+
+ /// Collects near misses when the candidate functions are missing a `self` keyword and is only
+ /// used for error reporting
+ static_candidates: Vec<CandidateSource>,
+
+ /// When probing for names, include names that are close to the
+ /// requested name (by Levenshtein distance)
+ allow_similar_names: bool,
+
+ /// Some(candidate) if there is a private candidate
+ private_candidate: Option<(DefKind, DefId)>,
+
+ /// Collects near misses when trait bounds for type parameters are unsatisfied and is only used
+ /// for error reporting
+ unsatisfied_predicates:
+ Vec<(ty::Predicate<'tcx>, Option<ty::Predicate<'tcx>>, Option<ObligationCause<'tcx>>)>,
+
+ is_suggestion: IsSuggestion,
+
+ scope_expr_id: hir::HirId,
+}
+
+impl<'a, 'tcx> Deref for ProbeContext<'a, 'tcx> {
+ type Target = FnCtxt<'a, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ self.fcx
+ }
+}
+
+#[derive(Debug, Clone)]
+struct Candidate<'tcx> {
+ // Candidates are (I'm not quite sure, but they are mostly) basically
+ // some metadata on top of a `ty::AssocItem` (without substs).
+ //
+ // However, method probing wants to be able to evaluate the predicates
+ // for a function with the substs applied - for example, if a function
+ // has `where Self: Sized`, we don't want to consider it unless `Self`
+ // is actually `Sized`, and similarly, return-type suggestions want
+ // to consider the "actual" return type.
+ //
+ // The way this is handled is through `xform_self_ty`. It contains
+ // the receiver type of this candidate, but `xform_self_ty`,
+ // `xform_ret_ty` and `kind` (which contains the predicates) have the
+ // generic parameters of this candidate substituted with the *same set*
+ // of inference variables, which acts as some weird sort of "query".
+ //
+ // When we check out a candidate, we require `xform_self_ty` to be
+ // a subtype of the passed-in self-type, and this equates the type
+ // variables in the rest of the fields.
+ //
+ // For example, if we have this candidate:
+ // ```
+ // trait Foo {
+ // fn foo(&self) where Self: Sized;
+ // }
+ // ```
+ //
+ // Then `xform_self_ty` will be `&'erased ?X` and `kind` will contain
+ // the predicate `?X: Sized`, so if we are evaluating `Foo` for
+ // the receiver `&T`, we'll do the subtyping which will make `?X`
+ // get the right value, then when we evaluate the predicate we'll check
+ // if `T: Sized`.
+ xform_self_ty: Ty<'tcx>,
+ xform_ret_ty: Option<Ty<'tcx>>,
+ item: ty::AssocItem,
+ kind: CandidateKind<'tcx>,
+ import_ids: SmallVec<[LocalDefId; 1]>,
+}
+
+#[derive(Debug, Clone)]
+enum CandidateKind<'tcx> {
+ InherentImplCandidate(
+ SubstsRef<'tcx>,
+ // Normalize obligations
+ Vec<traits::PredicateObligation<'tcx>>,
+ ),
+ ObjectCandidate,
+ TraitCandidate(ty::TraitRef<'tcx>),
+ WhereClauseCandidate(
+ // Trait
+ ty::PolyTraitRef<'tcx>,
+ ),
+}
+
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+enum ProbeResult {
+ NoMatch,
+ BadReturnType,
+ Match,
+}
+
+/// When adjusting a receiver we often want to do one of
+///
+/// - Add a `&` (or `&mut`), converting the receiver from `T` to `&T` (or `&mut T`)
+/// - If the receiver has type `*mut T`, convert it to `*const T`
+///
+/// This type tells us which one to do.
+///
+/// Note that in principle we could do both at the same time. For example, when the receiver has
+/// type `T`, we could autoref it to `&T`, then convert to `*const T`. Or, when it has type `*mut
+/// T`, we could convert it to `*const T`, then autoref to `&*const T`. However, currently we do
+/// (at most) one of these. Either the receiver has type `T` and we convert it to `&T` (or with
+/// `mut`), or it has type `*mut T` and we convert it to `*const T`.
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum AutorefOrPtrAdjustment {
+ /// Receiver has type `T`, add `&` or `&mut` (depending on the required mutability), and maybe also "unsize" it.
+ /// Unsizing is used to convert a `[T; N]` to `[T]`, which only makes sense when autorefing.
+ Autoref {
+ mutbl: hir::Mutability,
+
+ /// Indicates that the source expression should be "unsized" to a target type.
+ /// This is special-cased for just arrays unsizing to slices.
+ unsize: bool,
+ },
+ /// Receiver has type `*mut T`, convert to `*const T`
+ ToConstPtr,
+}
+
+impl AutorefOrPtrAdjustment {
+ fn get_unsize(&self) -> bool {
+ match self {
+ AutorefOrPtrAdjustment::Autoref { mutbl: _, unsize } => *unsize,
+ AutorefOrPtrAdjustment::ToConstPtr => false,
+ }
+ }
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct Pick<'tcx> {
+ pub item: ty::AssocItem,
+ pub kind: PickKind<'tcx>,
+ pub import_ids: SmallVec<[LocalDefId; 1]>,
+
+ /// Indicates that the source expression should be autoderef'd N times
+ /// ```ignore (not-rust)
+ /// A = expr | *expr | **expr | ...
+ /// ```
+ pub autoderefs: usize,
+
+ /// Indicates that we want to add an autoref (and maybe also unsize it), or if the receiver is
+ /// `*mut T`, convert it to `*const T`.
+ pub autoref_or_ptr_adjustment: Option<AutorefOrPtrAdjustment>,
+ pub self_ty: Ty<'tcx>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum PickKind<'tcx> {
+ InherentImplPick,
+ ObjectPick,
+ TraitPick,
+ WhereClausePick(
+ // Trait
+ ty::PolyTraitRef<'tcx>,
+ ),
+}
+
+pub type PickResult<'tcx> = Result<Pick<'tcx>, MethodError<'tcx>>;
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub enum Mode {
+ // An expression of the form `receiver.method_name(...)`.
+ // Autoderefs are performed on `receiver`, lookup is done based on the
+ // `self` argument of the method, and static methods aren't considered.
+ MethodCall,
+ // An expression of the form `Type::item` or `<T>::item`.
+ // No autoderefs are performed, lookup is done based on the type each
+ // implementation is for, and static methods are included.
+ Path,
+}
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub enum ProbeScope {
+ // Assemble candidates coming only from traits in scope.
+ TraitsInScope,
+
+ // Assemble candidates coming from all traits.
+ AllTraits,
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// This is used to offer suggestions to users. It returns methods
+ /// that could have been called which have the desired return
+ /// type. Some effort is made to rule out methods that, if called,
+ /// would result in an error (basically, the same criteria we
+ /// would use to decide if a method is a plausible fit for
+ /// ambiguity purposes).
+ #[instrument(level = "debug", skip(self, candidate_filter))]
+ pub fn probe_for_return_type(
+ &self,
+ span: Span,
+ mode: Mode,
+ return_type: Ty<'tcx>,
+ self_ty: Ty<'tcx>,
+ scope_expr_id: hir::HirId,
+ candidate_filter: impl Fn(&ty::AssocItem) -> bool,
+ ) -> Vec<ty::AssocItem> {
+ let method_names = self
+ .probe_op(
+ span,
+ mode,
+ None,
+ Some(return_type),
+ IsSuggestion(true),
+ self_ty,
+ scope_expr_id,
+ ProbeScope::AllTraits,
+ |probe_cx| Ok(probe_cx.candidate_method_names(candidate_filter)),
+ )
+ .unwrap_or_default();
+ method_names
+ .iter()
+ .flat_map(|&method_name| {
+ self.probe_op(
+ span,
+ mode,
+ Some(method_name),
+ Some(return_type),
+ IsSuggestion(true),
+ self_ty,
+ scope_expr_id,
+ ProbeScope::AllTraits,
+ |probe_cx| probe_cx.pick(),
+ )
+ .ok()
+ .map(|pick| pick.item)
+ })
+ .collect()
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ pub fn probe_for_name(
+ &self,
+ span: Span,
+ mode: Mode,
+ item_name: Ident,
+ is_suggestion: IsSuggestion,
+ self_ty: Ty<'tcx>,
+ scope_expr_id: hir::HirId,
+ scope: ProbeScope,
+ ) -> PickResult<'tcx> {
+ self.probe_op(
+ span,
+ mode,
+ Some(item_name),
+ None,
+ is_suggestion,
+ self_ty,
+ scope_expr_id,
+ scope,
+ |probe_cx| probe_cx.pick(),
+ )
+ }
+
+ fn probe_op<OP, R>(
+ &'a self,
+ span: Span,
+ mode: Mode,
+ method_name: Option<Ident>,
+ return_type: Option<Ty<'tcx>>,
+ is_suggestion: IsSuggestion,
+ self_ty: Ty<'tcx>,
+ scope_expr_id: hir::HirId,
+ scope: ProbeScope,
+ op: OP,
+ ) -> Result<R, MethodError<'tcx>>
+ where
+ OP: FnOnce(ProbeContext<'a, 'tcx>) -> Result<R, MethodError<'tcx>>,
+ {
+ let mut orig_values = OriginalQueryValues::default();
+ let param_env_and_self_ty = self.canonicalize_query(
+ ParamEnvAnd { param_env: self.param_env, value: self_ty },
+ &mut orig_values,
+ );
+
+ let steps = if mode == Mode::MethodCall {
+ self.tcx.method_autoderef_steps(param_env_and_self_ty)
+ } else {
+ self.probe(|_| {
+ // Mode::Path - the deref steps are "trivial". This turns
+ // our CanonicalQuery into a "trivial" QueryResponse. This
+ // is a bit inefficient, but I don't think that writing
+ // special handling for this "trivial case" is a good idea.
+
+ let infcx = &self.infcx;
+ let (ParamEnvAnd { param_env: _, value: self_ty }, canonical_inference_vars) =
+ infcx.instantiate_canonical_with_fresh_inference_vars(
+ span,
+ &param_env_and_self_ty,
+ );
+ debug!(
+ "probe_op: Mode::Path, param_env_and_self_ty={:?} self_ty={:?}",
+ param_env_and_self_ty, self_ty
+ );
+ MethodAutoderefStepsResult {
+ steps: infcx.tcx.arena.alloc_from_iter([CandidateStep {
+ self_ty: self.make_query_response_ignoring_pending_obligations(
+ canonical_inference_vars,
+ self_ty,
+ ),
+ autoderefs: 0,
+ from_unsafe_deref: false,
+ unsize: false,
+ }]),
+ opt_bad_ty: None,
+ reached_recursion_limit: false,
+ }
+ })
+ };
+
+ // If our autoderef loop had reached the recursion limit,
+ // report an overflow error, but continue going on with
+ // the truncated autoderef list.
+ if steps.reached_recursion_limit {
+ self.probe(|_| {
+ let ty = &steps
+ .steps
+ .last()
+ .unwrap_or_else(|| span_bug!(span, "reached the recursion limit in 0 steps?"))
+ .self_ty;
+ let ty = self
+ .probe_instantiate_query_response(span, &orig_values, ty)
+ .unwrap_or_else(|_| span_bug!(span, "instantiating {:?} failed?", ty));
+ autoderef::report_autoderef_recursion_limit_error(self.tcx, span, ty.value);
+ });
+ }
+
+ // If we encountered an `_` type or an error type during autoderef, this is
+ // ambiguous.
+ if let Some(bad_ty) = &steps.opt_bad_ty {
+ if is_suggestion.0 {
+ // Ambiguity was encountered during a suggestion. Just keep going.
+ debug!("ProbeContext: encountered ambiguity in suggestion");
+ } else if bad_ty.reached_raw_pointer && !self.tcx.features().arbitrary_self_types {
+ // this case used to be allowed by the compiler,
+ // so we do a future-compat lint here for the 2015 edition
+ // (see https://github.com/rust-lang/rust/issues/46906)
+ if self.tcx.sess.rust_2018() {
+ self.tcx.sess.emit_err(MethodCallOnUnknownType { span });
+ } else {
+ self.tcx.struct_span_lint_hir(
+ lint::builtin::TYVAR_BEHIND_RAW_POINTER,
+ scope_expr_id,
+ span,
+ "type annotations needed",
+ |lint| lint,
+ );
+ }
+ } else {
+ // Encountered a real ambiguity, so abort the lookup. If `ty` is not
+ // an `Err`, report the right "type annotations needed" error pointing
+ // to it.
+ let ty = &bad_ty.ty;
+ let ty = self
+ .probe_instantiate_query_response(span, &orig_values, ty)
+ .unwrap_or_else(|_| span_bug!(span, "instantiating {:?} failed?", ty));
+ let ty = self.structurally_resolved_type(span, ty.value);
+ assert!(matches!(ty.kind(), ty::Error(_)));
+ return Err(MethodError::NoMatch(NoMatchData {
+ static_candidates: Vec::new(),
+ unsatisfied_predicates: Vec::new(),
+ out_of_scope_traits: Vec::new(),
+ lev_candidate: None,
+ mode,
+ }));
+ }
+ }
+
+ debug!("ProbeContext: steps for self_ty={:?} are {:?}", self_ty, steps);
+
+ // this creates one big transaction so that all type variables etc
+ // that we create during the probe process are removed later
+ self.probe(|_| {
+ let mut probe_cx = ProbeContext::new(
+ self,
+ span,
+ mode,
+ method_name,
+ return_type,
+ orig_values,
+ steps.steps,
+ is_suggestion,
+ scope_expr_id,
+ );
+
+ probe_cx.assemble_inherent_candidates();
+ match scope {
+ ProbeScope::TraitsInScope => {
+ probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id)
+ }
+ ProbeScope::AllTraits => probe_cx.assemble_extension_candidates_for_all_traits(),
+ };
+ op(probe_cx)
+ })
+ }
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ providers.method_autoderef_steps = method_autoderef_steps;
+}
+
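+/// Illustrative sketch of this query's output: for a receiver of type
+/// `Rc<Box<[T; 3]>>`, the candidate steps are roughly
+///
+/// ```ignore (illustrative)
+/// Rc<Box<[T; 3]>>   // autoderefs: 0
+/// Box<[T; 3]>       // autoderefs: 1
+/// [T; 3]            // autoderefs: 2
+/// [T]               // autoderefs: 2, unsize: true
+/// ```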
+fn method_autoderef_steps<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ goal: CanonicalTyGoal<'tcx>,
+) -> MethodAutoderefStepsResult<'tcx> {
+ debug!("method_autoderef_steps({:?})", goal);
+
+ let (ref infcx, goal, inference_vars) = tcx.infer_ctxt().build_with_canonical(DUMMY_SP, &goal);
+ let ParamEnvAnd { param_env, value: self_ty } = goal;
+
+ let mut autoderef =
+ Autoderef::new(infcx, param_env, hir::CRATE_HIR_ID, DUMMY_SP, self_ty, DUMMY_SP)
+ .include_raw_pointers()
+ .silence_errors();
+ let mut reached_raw_pointer = false;
+ let mut steps: Vec<_> = autoderef
+ .by_ref()
+ .map(|(ty, d)| {
+ let step = CandidateStep {
+ self_ty: infcx
+ .make_query_response_ignoring_pending_obligations(inference_vars.clone(), ty),
+ autoderefs: d,
+ from_unsafe_deref: reached_raw_pointer,
+ unsize: false,
+ };
+ if let ty::RawPtr(_) = ty.kind() {
+ // all the subsequent steps will be from_unsafe_deref
+ reached_raw_pointer = true;
+ }
+ step
+ })
+ .collect();
+
+ let final_ty = autoderef.final_ty(true);
+ let opt_bad_ty = match final_ty.kind() {
+ ty::Infer(ty::TyVar(_)) | ty::Error(_) => Some(MethodAutoderefBadTy {
+ reached_raw_pointer,
+ ty: infcx.make_query_response_ignoring_pending_obligations(inference_vars, final_ty),
+ }),
+ ty::Array(elem_ty, _) => {
+ let dereferences = steps.len() - 1;
+
+ steps.push(CandidateStep {
+ self_ty: infcx.make_query_response_ignoring_pending_obligations(
+ inference_vars,
+ infcx.tcx.mk_slice(*elem_ty),
+ ),
+ autoderefs: dereferences,
+ // this could be from an unsafe deref if we had
+ // a *mut/const [T; N]
+ from_unsafe_deref: reached_raw_pointer,
+ unsize: true,
+ });
+
+ None
+ }
+ _ => None,
+ };
+
+ debug!("method_autoderef_steps: steps={:?} opt_bad_ty={:?}", steps, opt_bad_ty);
+
+ MethodAutoderefStepsResult {
+ steps: tcx.arena.alloc_from_iter(steps),
+ opt_bad_ty: opt_bad_ty.map(|ty| &*tcx.arena.alloc(ty)),
+ reached_recursion_limit: autoderef.reached_recursion_limit(),
+ }
+}
+
+impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
+ fn new(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ span: Span,
+ mode: Mode,
+ method_name: Option<Ident>,
+ return_type: Option<Ty<'tcx>>,
+ orig_steps_var_values: OriginalQueryValues<'tcx>,
+ steps: &'tcx [CandidateStep<'tcx>],
+ is_suggestion: IsSuggestion,
+ scope_expr_id: hir::HirId,
+ ) -> ProbeContext<'a, 'tcx> {
+ ProbeContext {
+ fcx,
+ span,
+ mode,
+ method_name,
+ return_type,
+ inherent_candidates: Vec::new(),
+ extension_candidates: Vec::new(),
+ impl_dups: FxHashSet::default(),
+ orig_steps_var_values,
+ steps,
+ static_candidates: Vec::new(),
+ allow_similar_names: false,
+ private_candidate: None,
+ unsatisfied_predicates: Vec::new(),
+ is_suggestion,
+ scope_expr_id,
+ }
+ }
+
+ fn reset(&mut self) {
+ self.inherent_candidates.clear();
+ self.extension_candidates.clear();
+ self.impl_dups.clear();
+ self.static_candidates.clear();
+ self.private_candidate = None;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // CANDIDATE ASSEMBLY
+
+ fn push_candidate(&mut self, candidate: Candidate<'tcx>, is_inherent: bool) {
+ let is_accessible = if let Some(name) = self.method_name {
+ let item = candidate.item;
+ let def_scope = self
+ .tcx
+ .adjust_ident_and_get_scope(name, item.container_id(self.tcx), self.body_id)
+ .1;
+ item.visibility(self.tcx).is_accessible_from(def_scope, self.tcx)
+ } else {
+ true
+ };
+ if is_accessible {
+ if is_inherent {
+ self.inherent_candidates.push(candidate);
+ } else {
+ self.extension_candidates.push(candidate);
+ }
+ } else if self.private_candidate.is_none() {
+ self.private_candidate =
+ Some((candidate.item.kind.as_def_kind(), candidate.item.def_id));
+ }
+ }
+
+ fn assemble_inherent_candidates(&mut self) {
+ for step in self.steps.iter() {
+ self.assemble_probe(&step.self_ty);
+ }
+ }
+
+ fn assemble_probe(&mut self, self_ty: &Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>) {
+ debug!("assemble_probe: self_ty={:?}", self_ty);
+ let raw_self_ty = self_ty.value.value;
+ match *raw_self_ty.kind() {
+ ty::Dynamic(data, ..) if let Some(p) = data.principal() => {
+ // Subtle: we can't use `instantiate_query_response` here: using it will
+ // commit to all of the type equalities assumed by inference going through
+ // autoderef (see the `method-probe-no-guessing` test).
+ //
+ // However, in this code, it is OK if we end up with an object type that is
+ // "more general" than the object type that we are evaluating. For *every*
+ // object type `MY_OBJECT`, a function call that goes through a trait-ref
+ // of the form `<MY_OBJECT as SuperTraitOf(MY_OBJECT)>::func` is a valid
+ // `ObjectCandidate`, and it should be discoverable "exactly" through one
+ // of the iterations in the autoderef loop, so there is no problem with it
+ // being discoverable in another one of these iterations.
+ //
+ // Using `instantiate_canonical_with_fresh_inference_vars` on our
+ // `Canonical<QueryResponse<Ty<'tcx>>>` and then *throwing away* the
+ // `CanonicalVarValues` will exactly give us such a generalization - it
+ // will still match the original object type, but it won't pollute our
+ // type variables in any form, so just do that!
+ let (QueryResponse { value: generalized_self_ty, .. }, _ignored_var_values) =
+ self.fcx
+ .instantiate_canonical_with_fresh_inference_vars(self.span, self_ty);
+
+ self.assemble_inherent_candidates_from_object(generalized_self_ty);
+ self.assemble_inherent_impl_candidates_for_type(p.def_id());
+ if self.tcx.has_attr(p.def_id(), sym::rustc_has_incoherent_inherent_impls) {
+ self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty);
+ }
+ }
+ ty::Adt(def, _) => {
+ let def_id = def.did();
+ self.assemble_inherent_impl_candidates_for_type(def_id);
+ if self.tcx.has_attr(def_id, sym::rustc_has_incoherent_inherent_impls) {
+ self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty);
+ }
+ }
+ ty::Foreign(did) => {
+ self.assemble_inherent_impl_candidates_for_type(did);
+ if self.tcx.has_attr(did, sym::rustc_has_incoherent_inherent_impls) {
+ self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty);
+ }
+ }
+ ty::Param(p) => {
+ self.assemble_inherent_candidates_from_param(p);
+ }
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Array(..)
+ | ty::Slice(_)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::Never
+ | ty::Tuple(..) => self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty),
+ _ => {}
+ }
+ }
+
+ fn assemble_inherent_candidates_for_incoherent_ty(&mut self, self_ty: Ty<'tcx>) {
+ let Some(simp) = simplify_type(self.tcx, self_ty, TreatParams::AsInfer) else {
+ bug!("unexpected incoherent type: {:?}", self_ty)
+ };
+ for &impl_def_id in self.tcx.incoherent_impls(simp) {
+ self.assemble_inherent_impl_probe(impl_def_id);
+ }
+ }
+
+ fn assemble_inherent_impl_candidates_for_type(&mut self, def_id: DefId) {
+ let impl_def_ids = self.tcx.at(self.span).inherent_impls(def_id);
+ for &impl_def_id in impl_def_ids.iter() {
+ self.assemble_inherent_impl_probe(impl_def_id);
+ }
+ }
+
+ fn assemble_inherent_impl_probe(&mut self, impl_def_id: DefId) {
+ if !self.impl_dups.insert(impl_def_id) {
+ return; // already visited
+ }
+
+ debug!("assemble_inherent_impl_probe {:?}", impl_def_id);
+
+ for item in self.impl_or_trait_item(impl_def_id) {
+ if !self.has_applicable_self(&item) {
+ // No receiver declared. Not a candidate.
+ self.record_static_candidate(CandidateSource::Impl(impl_def_id));
+ continue;
+ }
+
+ let (impl_ty, impl_substs) = self.impl_ty_and_substs(impl_def_id);
+ let impl_ty = impl_ty.subst(self.tcx, impl_substs);
+
+ debug!("impl_ty: {:?}", impl_ty);
+
+ // Determine the receiver type that the method itself expects.
+ let (xform_self_ty, xform_ret_ty) = self.xform_self_ty(&item, impl_ty, impl_substs);
+ debug!("xform_self_ty: {:?}, xform_ret_ty: {:?}", xform_self_ty, xform_ret_ty);
+
+ // We can't use normalize_associated_types_in as it will pollute the
+ // fcx's fulfillment context after this probe is over.
+ // Note: we only normalize `xform_self_ty` here since the normalization
+ // of the return type can lead to inference results that prohibit
+ // valid candidates from being found, see issue #85671
+ // FIXME Postponing the normalization of the return type likely only hides a deeper bug,
+ // which might be caused by the `param_env` itself. The clauses of the `param_env`
+ // maybe shouldn't include `Param`s, but rather fresh variables or be canonicalized,
+ // see issue #89650
+ let cause = traits::ObligationCause::misc(self.span, self.body_id);
+ let selcx = &mut traits::SelectionContext::new(self.fcx);
+ let traits::Normalized { value: xform_self_ty, obligations } =
+ traits::normalize(selcx, self.param_env, cause, xform_self_ty);
+ debug!(
+ "assemble_inherent_impl_probe after normalization: xform_self_ty = {:?}/{:?}",
+ xform_self_ty, xform_ret_ty
+ );
+
+ self.push_candidate(
+ Candidate {
+ xform_self_ty,
+ xform_ret_ty,
+ item,
+ kind: InherentImplCandidate(impl_substs, obligations),
+ import_ids: smallvec![],
+ },
+ true,
+ );
+ }
+ }
+
+ fn assemble_inherent_candidates_from_object(&mut self, self_ty: Ty<'tcx>) {
+ debug!("assemble_inherent_candidates_from_object(self_ty={:?})", self_ty);
+
+ let principal = match self_ty.kind() {
+ ty::Dynamic(ref data, ..) => Some(data),
+ _ => None,
+ }
+ .and_then(|data| data.principal())
+ .unwrap_or_else(|| {
+ span_bug!(
+ self.span,
+ "non-object {:?} in assemble_inherent_candidates_from_object",
+ self_ty
+ )
+ });
+
+        // It is illegal to invoke a method on a trait object when the method refers to
+ // the `Self` type. An [`ObjectSafetyViolation::SupertraitSelf`] error
+ // will be reported by `object_safety.rs` if the method refers to the
+ // `Self` type anywhere other than the receiver. Here, we use a
+ // substitution that replaces `Self` with the object type itself. Hence,
+ // a `&self` method will wind up with an argument type like `&dyn Trait`.
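+        //
+        // For example (illustrative):
+        //
+        //     trait Trait { fn m(&self); }
+        //     let obj: &dyn Trait = /* ... */;
+        //     obj.m(); // the candidate's `xform_self_ty` is `&dyn Trait`, not `&Self`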
+ let trait_ref = principal.with_self_ty(self.tcx, self_ty);
+ self.elaborate_bounds(iter::once(trait_ref), |this, new_trait_ref, item| {
+ let new_trait_ref = this.erase_late_bound_regions(new_trait_ref);
+
+ let (xform_self_ty, xform_ret_ty) =
+ this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs);
+ this.push_candidate(
+ Candidate {
+ xform_self_ty,
+ xform_ret_ty,
+ item,
+ kind: ObjectCandidate,
+ import_ids: smallvec![],
+ },
+ true,
+ );
+ });
+ }
+
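+    /// Assembles `WhereClauseCandidate`s from the caller's bounds on a type parameter.
+    /// For example (illustrative), inside `fn f<T: Clone>(x: T)`, a call to `x.clone()`
+    /// gets a candidate for `Clone::clone` from the `T: Clone` where-clause.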
+ fn assemble_inherent_candidates_from_param(&mut self, param_ty: ty::ParamTy) {
+ // FIXME: do we want to commit to this behavior for param bounds?
+ debug!("assemble_inherent_candidates_from_param(param_ty={:?})", param_ty);
+
+ let bounds = self.param_env.caller_bounds().iter().filter_map(|predicate| {
+ let bound_predicate = predicate.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(trait_predicate) => {
+ match *trait_predicate.trait_ref.self_ty().kind() {
+ ty::Param(p) if p == param_ty => {
+ Some(bound_predicate.rebind(trait_predicate.trait_ref))
+ }
+ _ => None,
+ }
+ }
+ ty::PredicateKind::Subtype(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::Projection(..)
+ | ty::PredicateKind::RegionOutlives(..)
+ | ty::PredicateKind::WellFormed(..)
+ | ty::PredicateKind::ObjectSafe(..)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::TypeOutlives(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..)
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => None,
+ }
+ });
+
+ self.elaborate_bounds(bounds, |this, poly_trait_ref, item| {
+ let trait_ref = this.erase_late_bound_regions(poly_trait_ref);
+
+ let (xform_self_ty, xform_ret_ty) =
+ this.xform_self_ty(&item, trait_ref.self_ty(), trait_ref.substs);
+
+ // Because this trait derives from a where-clause, it
+ // should not contain any inference variables or other
+ // artifacts. This means it is safe to put into the
+ // `WhereClauseCandidate` and (eventually) into the
+ // `WhereClausePick`.
+ assert!(!trait_ref.substs.needs_infer());
+
+ this.push_candidate(
+ Candidate {
+ xform_self_ty,
+ xform_ret_ty,
+ item,
+ kind: WhereClauseCandidate(poly_trait_ref),
+ import_ids: smallvec![],
+ },
+ true,
+ );
+ });
+ }
+
+ // Do a search through a list of bounds, using a callback to actually
+ // create the candidates.
+ fn elaborate_bounds<F>(
+ &mut self,
+ bounds: impl Iterator<Item = ty::PolyTraitRef<'tcx>>,
+ mut mk_cand: F,
+ ) where
+ F: for<'b> FnMut(&mut ProbeContext<'b, 'tcx>, ty::PolyTraitRef<'tcx>, ty::AssocItem),
+ {
+ let tcx = self.tcx;
+ for bound_trait_ref in traits::transitive_bounds(tcx, bounds) {
+ debug!("elaborate_bounds(bound_trait_ref={:?})", bound_trait_ref);
+ for item in self.impl_or_trait_item(bound_trait_ref.def_id()) {
+ if !self.has_applicable_self(&item) {
+ self.record_static_candidate(CandidateSource::Trait(bound_trait_ref.def_id()));
+ } else {
+ mk_cand(self, bound_trait_ref, item);
+ }
+ }
+ }
+ }
+
+ fn assemble_extension_candidates_for_traits_in_scope(&mut self, expr_hir_id: hir::HirId) {
+ let mut duplicates = FxHashSet::default();
+ let opt_applicable_traits = self.tcx.in_scope_traits(expr_hir_id);
+ if let Some(applicable_traits) = opt_applicable_traits {
+ for trait_candidate in applicable_traits.iter() {
+ let trait_did = trait_candidate.def_id;
+ if duplicates.insert(trait_did) {
+ self.assemble_extension_candidates_for_trait(
+ &trait_candidate.import_ids,
+ trait_did,
+ );
+ }
+ }
+ }
+ }
+
+ fn assemble_extension_candidates_for_all_traits(&mut self) {
+ let mut duplicates = FxHashSet::default();
+ for trait_info in suggest::all_traits(self.tcx) {
+ if duplicates.insert(trait_info.def_id) {
+ self.assemble_extension_candidates_for_trait(&smallvec![], trait_info.def_id);
+ }
+ }
+ }
+
+ pub fn matches_return_type(
+ &self,
+ method: &ty::AssocItem,
+ self_ty: Option<Ty<'tcx>>,
+ expected: Ty<'tcx>,
+ ) -> bool {
+ match method.kind {
+ ty::AssocKind::Fn => {
+ let fty = self.tcx.bound_fn_sig(method.def_id);
+ self.probe(|_| {
+ let substs = self.fresh_substs_for_item(self.span, method.def_id);
+ let fty = fty.subst(self.tcx, substs);
+ let fty =
+ self.replace_bound_vars_with_fresh_vars(self.span, infer::FnCall, fty);
+
+ if let Some(self_ty) = self_ty {
+ if self
+ .at(&ObligationCause::dummy(), self.param_env)
+ .sup(fty.inputs()[0], self_ty)
+ .is_err()
+ {
+ return false;
+ }
+ }
+ self.can_sub(self.param_env, fty.output(), expected).is_ok()
+ })
+ }
+ _ => false,
+ }
+ }
+
+ fn assemble_extension_candidates_for_trait(
+ &mut self,
+ import_ids: &SmallVec<[LocalDefId; 1]>,
+ trait_def_id: DefId,
+ ) {
+ debug!("assemble_extension_candidates_for_trait(trait_def_id={:?})", trait_def_id);
+ let trait_substs = self.fresh_item_substs(trait_def_id);
+ let trait_ref = ty::TraitRef::new(trait_def_id, trait_substs);
+
+ if self.tcx.is_trait_alias(trait_def_id) {
+ // For trait aliases, assume all supertraits are relevant.
+ let bounds = iter::once(ty::Binder::dummy(trait_ref));
+ self.elaborate_bounds(bounds, |this, new_trait_ref, item| {
+ let new_trait_ref = this.erase_late_bound_regions(new_trait_ref);
+
+ let (xform_self_ty, xform_ret_ty) =
+ this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs);
+ this.push_candidate(
+ Candidate {
+ xform_self_ty,
+ xform_ret_ty,
+ item,
+ import_ids: import_ids.clone(),
+ kind: TraitCandidate(new_trait_ref),
+ },
+ false,
+ );
+ });
+ } else {
+ debug_assert!(self.tcx.is_trait(trait_def_id));
+ for item in self.impl_or_trait_item(trait_def_id) {
+ // Check whether `trait_def_id` defines a method with suitable name.
+ if !self.has_applicable_self(&item) {
+ debug!("method has inapplicable self");
+ self.record_static_candidate(CandidateSource::Trait(trait_def_id));
+ continue;
+ }
+
+ let (xform_self_ty, xform_ret_ty) =
+ self.xform_self_ty(&item, trait_ref.self_ty(), trait_substs);
+ self.push_candidate(
+ Candidate {
+ xform_self_ty,
+ xform_ret_ty,
+ item,
+ import_ids: import_ids.clone(),
+ kind: TraitCandidate(trait_ref),
+ },
+ false,
+ );
+ }
+ }
+ }
+
+ fn candidate_method_names(
+ &self,
+ candidate_filter: impl Fn(&ty::AssocItem) -> bool,
+ ) -> Vec<Ident> {
+ let mut set = FxHashSet::default();
+ let mut names: Vec<_> = self
+ .inherent_candidates
+ .iter()
+ .chain(&self.extension_candidates)
+ .filter(|candidate| candidate_filter(&candidate.item))
+ .filter(|candidate| {
+ if let Some(return_ty) = self.return_type {
+ self.matches_return_type(&candidate.item, None, return_ty)
+ } else {
+ true
+ }
+ })
+ .map(|candidate| candidate.item.ident(self.tcx))
+ .filter(|&name| set.insert(name))
+ .collect();
+
+ // Sort them by the name so we have a stable result.
+ names.sort_by(|a, b| a.as_str().partial_cmp(b.as_str()).unwrap());
+ names
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // THE ACTUAL SEARCH
+
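+    /// Walks the autoderef steps in order and, for each step, tries a by-value pick,
+    /// then an autoref'd `&`/`&mut` pick, and finally (for `*mut T` receivers) a
+    /// `*const T` pick; the first step that yields a result wins. If every step fails,
+    /// candidates are reassembled from all traits purely to build a diagnostic.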
+ fn pick(mut self) -> PickResult<'tcx> {
+ assert!(self.method_name.is_some());
+
+ if let Some(r) = self.pick_core() {
+ return r;
+ }
+
+ debug!("pick: actual search failed, assemble diagnostics");
+
+ let static_candidates = mem::take(&mut self.static_candidates);
+ let private_candidate = self.private_candidate.take();
+ let unsatisfied_predicates = mem::take(&mut self.unsatisfied_predicates);
+
+        // things failed, so let's look at all traits, for diagnostic purposes now:
+ self.reset();
+
+ let span = self.span;
+ let tcx = self.tcx;
+
+ self.assemble_extension_candidates_for_all_traits();
+
+ let out_of_scope_traits = match self.pick_core() {
+ Some(Ok(p)) => vec![p.item.container_id(self.tcx)],
+ //Some(Ok(p)) => p.iter().map(|p| p.item.container().id()).collect(),
+ Some(Err(MethodError::Ambiguity(v))) => v
+ .into_iter()
+ .map(|source| match source {
+ CandidateSource::Trait(id) => id,
+ CandidateSource::Impl(impl_id) => match tcx.trait_id_of_impl(impl_id) {
+ Some(id) => id,
+ None => span_bug!(span, "found inherent method when looking at traits"),
+ },
+ })
+ .collect(),
+ Some(Err(MethodError::NoMatch(NoMatchData {
+ out_of_scope_traits: others, ..
+ }))) => {
+ assert!(others.is_empty());
+ vec![]
+ }
+ _ => vec![],
+ };
+
+ if let Some((kind, def_id)) = private_candidate {
+ return Err(MethodError::PrivateMatch(kind, def_id, out_of_scope_traits));
+ }
+ let lev_candidate = self.probe_for_lev_candidate()?;
+
+ Err(MethodError::NoMatch(NoMatchData {
+ static_candidates,
+ unsatisfied_predicates,
+ out_of_scope_traits,
+ lev_candidate,
+ mode: self.mode,
+ }))
+ }
+
+ fn pick_core(&mut self) -> Option<PickResult<'tcx>> {
+ let mut unstable_candidates = Vec::new();
+ let pick = self.pick_all_method(Some(&mut unstable_candidates));
+
+ // In this case unstable picking is done by `pick_method`.
+ if !self.tcx.sess.opts.unstable_opts.pick_stable_methods_before_any_unstable {
+ return pick;
+ }
+
+ match pick {
+ // Emit a lint if there are unstable candidates alongside the stable ones.
+ //
+            // We suppress the warning if we're picking the method only because it is a
+ // suggestion.
+ Some(Ok(ref p)) if !self.is_suggestion.0 && !unstable_candidates.is_empty() => {
+ self.emit_unstable_name_collision_hint(p, &unstable_candidates);
+ pick
+ }
+ Some(_) => pick,
+ None => self.pick_all_method(None),
+ }
+ }
+
+ fn pick_all_method(
+ &mut self,
+ mut unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>,
+ ) -> Option<PickResult<'tcx>> {
+ let steps = self.steps.clone();
+ steps
+ .iter()
+ .filter(|step| {
+ debug!("pick_all_method: step={:?}", step);
+ // skip types that are from a type error or that would require dereferencing
+ // a raw pointer
+ !step.self_ty.references_error() && !step.from_unsafe_deref
+ })
+ .flat_map(|step| {
+ let InferOk { value: self_ty, obligations: _ } = self
+ .fcx
+ .probe_instantiate_query_response(
+ self.span,
+ &self.orig_steps_var_values,
+ &step.self_ty,
+ )
+ .unwrap_or_else(|_| {
+ span_bug!(self.span, "{:?} was applicable but now isn't?", step.self_ty)
+ });
+ self.pick_by_value_method(step, self_ty, unstable_candidates.as_deref_mut())
+ .or_else(|| {
+ self.pick_autorefd_method(
+ step,
+ self_ty,
+ hir::Mutability::Not,
+ unstable_candidates.as_deref_mut(),
+ )
+ .or_else(|| {
+ self.pick_autorefd_method(
+ step,
+ self_ty,
+ hir::Mutability::Mut,
+ unstable_candidates.as_deref_mut(),
+ )
+ })
+ .or_else(|| {
+ self.pick_const_ptr_method(
+ step,
+ self_ty,
+ unstable_candidates.as_deref_mut(),
+ )
+ })
+ })
+ })
+ .next()
+ }
+
+ /// For each type `T` in the step list, this attempts to find a method where
+    /// the (transformed) self type is exactly `T`. We do, however, make one
+ /// transformation on the adjustment: if we are passing a region pointer in,
+ /// we will potentially *reborrow* it to a shorter lifetime. This allows us
+ /// to transparently pass `&mut` pointers, in particular, without consuming
+ /// them for their entire lifetime.
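+    ///
+    /// For example (illustrative):
+    ///
+    /// ```ignore (illustrative)
+    /// let v: &mut Vec<u32> = &mut vec![];
+    /// v.push(1); // receiver is reborrowed as `&mut *v` rather than moved,
+    /// v.push(2); // so `v` remains usable afterwards
+    /// ```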
+ fn pick_by_value_method(
+ &mut self,
+ step: &CandidateStep<'tcx>,
+ self_ty: Ty<'tcx>,
+ unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>,
+ ) -> Option<PickResult<'tcx>> {
+ if step.unsize {
+ return None;
+ }
+
+ self.pick_method(self_ty, unstable_candidates).map(|r| {
+ r.map(|mut pick| {
+ pick.autoderefs = step.autoderefs;
+
+ // Insert a `&*` or `&mut *` if this is a reference type:
+ if let ty::Ref(_, _, mutbl) = *step.self_ty.value.value.kind() {
+ pick.autoderefs += 1;
+ pick.autoref_or_ptr_adjustment = Some(AutorefOrPtrAdjustment::Autoref {
+ mutbl,
+ unsize: pick.autoref_or_ptr_adjustment.map_or(false, |a| a.get_unsize()),
+ })
+ }
+
+ pick
+ })
+ })
+ }
+
+ fn pick_autorefd_method(
+ &mut self,
+ step: &CandidateStep<'tcx>,
+ self_ty: Ty<'tcx>,
+ mutbl: hir::Mutability,
+ unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>,
+ ) -> Option<PickResult<'tcx>> {
+ let tcx = self.tcx;
+
+ // In general, during probing we erase regions.
+ let region = tcx.lifetimes.re_erased;
+
+ let autoref_ty = tcx.mk_ref(region, ty::TypeAndMut { ty: self_ty, mutbl });
+ self.pick_method(autoref_ty, unstable_candidates).map(|r| {
+ r.map(|mut pick| {
+ pick.autoderefs = step.autoderefs;
+ pick.autoref_or_ptr_adjustment =
+ Some(AutorefOrPtrAdjustment::Autoref { mutbl, unsize: step.unsize });
+ pick
+ })
+ })
+ }
+
+ /// If `self_ty` is `*mut T` then this picks `*const T` methods. The reason why we have a
+ /// special case for this is because going from `*mut T` to `*const T` with autoderefs and
+ /// autorefs would require dereferencing the pointer, which is not safe.
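+    ///
+    /// For example (illustrative, using a hypothetical extension trait):
+    ///
+    /// ```ignore (illustrative)
+    /// trait ConstPtrExt { fn peek(self); }
+    /// impl<T> ConstPtrExt for *const T { fn peek(self) {} }
+    ///
+    /// let p: *mut u8 = std::ptr::null_mut();
+    /// p.peek(); // found on `*const u8` via the `*mut T -> *const T` adjustment
+    /// ```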
+ fn pick_const_ptr_method(
+ &mut self,
+ step: &CandidateStep<'tcx>,
+ self_ty: Ty<'tcx>,
+ unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>,
+ ) -> Option<PickResult<'tcx>> {
+ // Don't convert an unsized reference to ptr
+ if step.unsize {
+ return None;
+ }
+
+ let &ty::RawPtr(ty::TypeAndMut { ty, mutbl: hir::Mutability::Mut }) = self_ty.kind() else {
+ return None;
+ };
+
+ let const_self_ty = ty::TypeAndMut { ty, mutbl: hir::Mutability::Not };
+ let const_ptr_ty = self.tcx.mk_ptr(const_self_ty);
+ self.pick_method(const_ptr_ty, unstable_candidates).map(|r| {
+ r.map(|mut pick| {
+ pick.autoderefs = step.autoderefs;
+ pick.autoref_or_ptr_adjustment = Some(AutorefOrPtrAdjustment::ToConstPtr);
+ pick
+ })
+ })
+ }
+
+ fn pick_method_with_unstable(&mut self, self_ty: Ty<'tcx>) -> Option<PickResult<'tcx>> {
+ debug!("pick_method_with_unstable(self_ty={})", self.ty_to_string(self_ty));
+
+ let mut possibly_unsatisfied_predicates = Vec::new();
+ let mut unstable_candidates = Vec::new();
+
+ for (kind, candidates) in
+ &[("inherent", &self.inherent_candidates), ("extension", &self.extension_candidates)]
+ {
+ debug!("searching {} candidates", kind);
+ let res = self.consider_candidates(
+ self_ty,
+ candidates.iter(),
+ &mut possibly_unsatisfied_predicates,
+ Some(&mut unstable_candidates),
+ );
+ if let Some(pick) = res {
+ if !self.is_suggestion.0 && !unstable_candidates.is_empty() {
+ if let Ok(p) = &pick {
+ // Emit a lint if there are unstable candidates alongside the stable ones.
+ //
+                        // We suppress the warning if we're picking the method only because it is a
+ // suggestion.
+ self.emit_unstable_name_collision_hint(p, &unstable_candidates);
+ }
+ }
+ return Some(pick);
+ }
+ }
+
+ debug!("searching unstable candidates");
+ let res = self.consider_candidates(
+ self_ty,
+ unstable_candidates.iter().map(|(c, _)| c),
+ &mut possibly_unsatisfied_predicates,
+ None,
+ );
+ if res.is_none() {
+ self.unsatisfied_predicates.extend(possibly_unsatisfied_predicates);
+ }
+ res
+ }
+
+ fn pick_method(
+ &mut self,
+ self_ty: Ty<'tcx>,
+ mut unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>,
+ ) -> Option<PickResult<'tcx>> {
+ if !self.tcx.sess.opts.unstable_opts.pick_stable_methods_before_any_unstable {
+ return self.pick_method_with_unstable(self_ty);
+ }
+
+ debug!("pick_method(self_ty={})", self.ty_to_string(self_ty));
+
+ let mut possibly_unsatisfied_predicates = Vec::new();
+
+ for (kind, candidates) in
+ &[("inherent", &self.inherent_candidates), ("extension", &self.extension_candidates)]
+ {
+ debug!("searching {} candidates", kind);
+ let res = self.consider_candidates(
+ self_ty,
+ candidates.iter(),
+ &mut possibly_unsatisfied_predicates,
+ unstable_candidates.as_deref_mut(),
+ );
+ if let Some(pick) = res {
+ return Some(pick);
+ }
+ }
+
+ // `pick_method` may be called twice for the same self_ty if no stable methods
+ // match. Only extend once.
+ if unstable_candidates.is_some() {
+ self.unsatisfied_predicates.extend(possibly_unsatisfied_predicates);
+ }
+ None
+ }
+
+ fn consider_candidates<'b, ProbesIter>(
+ &self,
+ self_ty: Ty<'tcx>,
+ probes: ProbesIter,
+ possibly_unsatisfied_predicates: &mut Vec<(
+ ty::Predicate<'tcx>,
+ Option<ty::Predicate<'tcx>>,
+ Option<ObligationCause<'tcx>>,
+ )>,
+ unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>,
+ ) -> Option<PickResult<'tcx>>
+ where
+ ProbesIter: Iterator<Item = &'b Candidate<'tcx>> + Clone,
+ 'tcx: 'b,
+ {
+ let mut applicable_candidates: Vec<_> = probes
+ .clone()
+ .map(|probe| {
+ (probe, self.consider_probe(self_ty, probe, possibly_unsatisfied_predicates))
+ })
+ .filter(|&(_, status)| status != ProbeResult::NoMatch)
+ .collect();
+
+ debug!("applicable_candidates: {:?}", applicable_candidates);
+
+ if applicable_candidates.len() > 1 {
+ if let Some(pick) =
+ self.collapse_candidates_to_trait_pick(self_ty, &applicable_candidates)
+ {
+ return Some(Ok(pick));
+ }
+ }
+
+ if let Some(uc) = unstable_candidates {
+ applicable_candidates.retain(|&(p, _)| {
+ if let stability::EvalResult::Deny { feature, .. } =
+ self.tcx.eval_stability(p.item.def_id, None, self.span, None)
+ {
+ uc.push((p.clone(), feature));
+ return false;
+ }
+ true
+ });
+ }
+
+ if applicable_candidates.len() > 1 {
+ let sources = probes.map(|p| self.candidate_source(p, self_ty)).collect();
+ return Some(Err(MethodError::Ambiguity(sources)));
+ }
+
+ applicable_candidates.pop().map(|(probe, status)| {
+ if status == ProbeResult::Match {
+ Ok(probe.to_unadjusted_pick(self_ty))
+ } else {
+ Err(MethodError::BadReturnType)
+ }
+ })
+ }
+
+ fn emit_unstable_name_collision_hint(
+ &self,
+ stable_pick: &Pick<'_>,
+ unstable_candidates: &[(Candidate<'tcx>, Symbol)],
+ ) {
+ let def_kind = stable_pick.item.kind.as_def_kind();
+ self.tcx.struct_span_lint_hir(
+ lint::builtin::UNSTABLE_NAME_COLLISIONS,
+ self.scope_expr_id,
+ self.span,
+ format!(
+ "{} {} with this name may be added to the standard library in the future",
+ def_kind.article(),
+ def_kind.descr(stable_pick.item.def_id),
+ ),
+ |lint| {
+ match (stable_pick.item.kind, stable_pick.item.container) {
+ (ty::AssocKind::Fn, _) => {
+                        // FIXME: This should be a `span_suggestion` instead of `help`.
+                        // However, `self.span` only highlights the method name, so we
+                        // can't use it. Also consider reusing the code from
+                        // `report_method_error()`.
+ lint.help(&format!(
+ "call with fully qualified syntax `{}(...)` to keep using the current \
+ method",
+ self.tcx.def_path_str(stable_pick.item.def_id),
+ ));
+ }
+ (ty::AssocKind::Const, ty::AssocItemContainer::TraitContainer) => {
+ let def_id = stable_pick.item.container_id(self.tcx);
+ lint.span_suggestion(
+ self.span,
+ "use the fully qualified path to the associated const",
+ format!(
+ "<{} as {}>::{}",
+ stable_pick.self_ty,
+ self.tcx.def_path_str(def_id),
+ stable_pick.item.name
+ ),
+ Applicability::MachineApplicable,
+ );
+ }
+ _ => {}
+ }
+ if self.tcx.sess.is_nightly_build() {
+ for (candidate, feature) in unstable_candidates {
+ lint.help(&format!(
+ "add `#![feature({})]` to the crate attributes to enable `{}`",
+ feature,
+ self.tcx.def_path_str(candidate.item.def_id),
+ ));
+ }
+ }
+
+ lint
+ },
+ );
+ }
+
+ fn select_trait_candidate(
+ &self,
+ trait_ref: ty::TraitRef<'tcx>,
+ ) -> traits::SelectionResult<'tcx, traits::Selection<'tcx>> {
+ let cause = traits::ObligationCause::misc(self.span, self.body_id);
+ let predicate = ty::Binder::dummy(trait_ref).to_poly_trait_predicate();
+ let obligation = traits::Obligation::new(cause, self.param_env, predicate);
+ traits::SelectionContext::new(self).select(&obligation)
+ }
+
+ fn candidate_source(&self, candidate: &Candidate<'tcx>, self_ty: Ty<'tcx>) -> CandidateSource {
+ match candidate.kind {
+ InherentImplCandidate(..) => {
+ CandidateSource::Impl(candidate.item.container_id(self.tcx))
+ }
+ ObjectCandidate | WhereClauseCandidate(_) => {
+ CandidateSource::Trait(candidate.item.container_id(self.tcx))
+ }
+ TraitCandidate(trait_ref) => self.probe(|_| {
+ let _ = self
+ .at(&ObligationCause::dummy(), self.param_env)
+ .define_opaque_types(false)
+ .sup(candidate.xform_self_ty, self_ty);
+ match self.select_trait_candidate(trait_ref) {
+ Ok(Some(traits::ImplSource::UserDefined(ref impl_data))) => {
+ // If only a single impl matches, make the error message point
+ // to that impl.
+ CandidateSource::Impl(impl_data.impl_def_id)
+ }
+ _ => CandidateSource::Trait(candidate.item.container_id(self.tcx)),
+ }
+ }),
+ }
+ }
+
+ fn consider_probe(
+ &self,
+ self_ty: Ty<'tcx>,
+ probe: &Candidate<'tcx>,
+ possibly_unsatisfied_predicates: &mut Vec<(
+ ty::Predicate<'tcx>,
+ Option<ty::Predicate<'tcx>>,
+ Option<ObligationCause<'tcx>>,
+ )>,
+ ) -> ProbeResult {
+ debug!("consider_probe: self_ty={:?} probe={:?}", self_ty, probe);
+
+ self.probe(|_| {
+ // First check that the self type can be related.
+ let sub_obligations = match self
+ .at(&ObligationCause::dummy(), self.param_env)
+ .define_opaque_types(false)
+ .sup(probe.xform_self_ty, self_ty)
+ {
+ Ok(InferOk { obligations, value: () }) => obligations,
+ Err(err) => {
+ debug!("--> cannot relate self-types {:?}", err);
+ return ProbeResult::NoMatch;
+ }
+ };
+
+ let mut result = ProbeResult::Match;
+ let mut xform_ret_ty = probe.xform_ret_ty;
+ debug!(?xform_ret_ty);
+
+ let selcx = &mut traits::SelectionContext::new(self);
+ let cause = traits::ObligationCause::misc(self.span, self.body_id);
+
+ let mut parent_pred = None;
+
+            // If the self types can be related, impls may carry other conditions (e.g., where
+ // clauses) that must be considered. Make sure that those
+ // match as well (or at least may match, sometimes we
+ // don't have enough information to fully evaluate).
+ match probe.kind {
+ InherentImplCandidate(ref substs, ref ref_obligations) => {
+ // `xform_ret_ty` hasn't been normalized yet, only `xform_self_ty`,
+ // see the reasons mentioned in the comments in `assemble_inherent_impl_probe`
+ // for why this is necessary
+ let traits::Normalized {
+ value: normalized_xform_ret_ty,
+ obligations: normalization_obligations,
+ } = traits::normalize(selcx, self.param_env, cause.clone(), probe.xform_ret_ty);
+ xform_ret_ty = normalized_xform_ret_ty;
+ debug!("xform_ret_ty after normalization: {:?}", xform_ret_ty);
+
+ // Check whether the impl imposes obligations we have to worry about.
+ let impl_def_id = probe.item.container_id(self.tcx);
+ let impl_bounds = self.tcx.predicates_of(impl_def_id);
+ let impl_bounds = impl_bounds.instantiate(self.tcx, substs);
+ let traits::Normalized { value: impl_bounds, obligations: norm_obligations } =
+ traits::normalize(selcx, self.param_env, cause.clone(), impl_bounds);
+
+ // Convert the bounds into obligations.
+ let impl_obligations = traits::predicates_for_generics(
+ move |_, _| cause.clone(),
+ self.param_env,
+ impl_bounds,
+ );
+
+ let candidate_obligations = impl_obligations
+ .chain(norm_obligations.into_iter())
+ .chain(ref_obligations.iter().cloned())
+ .chain(normalization_obligations.into_iter());
+
+ // Evaluate those obligations to see if they might possibly hold.
+ for o in candidate_obligations {
+ let o = self.resolve_vars_if_possible(o);
+ if !self.predicate_may_hold(&o) {
+ result = ProbeResult::NoMatch;
+ possibly_unsatisfied_predicates.push((
+ o.predicate,
+ None,
+ Some(o.cause),
+ ));
+ }
+ }
+ }
+
+ ObjectCandidate | WhereClauseCandidate(..) => {
+ // These have no additional conditions to check.
+ }
+
+ TraitCandidate(trait_ref) => {
+ if let Some(method_name) = self.method_name {
+ // Some trait methods are excluded for arrays before 2021.
+ // (`array.into_iter()` wants a slice iterator for compatibility.)
+ if self_ty.is_array() && !method_name.span.rust_2021() {
+ let trait_def = self.tcx.trait_def(trait_ref.def_id);
+ if trait_def.skip_array_during_method_dispatch {
+ return ProbeResult::NoMatch;
+ }
+ }
+ }
+ let predicate =
+ ty::Binder::dummy(trait_ref).without_const().to_predicate(self.tcx);
+ parent_pred = Some(predicate);
+ let obligation = traits::Obligation::new(cause, self.param_env, predicate);
+ if !self.predicate_may_hold(&obligation) {
+ result = ProbeResult::NoMatch;
+ if self.probe(|_| {
+ match self.select_trait_candidate(trait_ref) {
+ Err(_) => return true,
+ Ok(Some(impl_source))
+ if !impl_source.borrow_nested_obligations().is_empty() =>
+ {
+ for obligation in impl_source.borrow_nested_obligations() {
+ // Determine exactly which obligation wasn't met, so
+ // that we can give more context in the error.
+ if !self.predicate_may_hold(obligation) {
+ let nested_predicate =
+ self.resolve_vars_if_possible(obligation.predicate);
+ let predicate =
+ self.resolve_vars_if_possible(predicate);
+ let p = if predicate == nested_predicate {
+ // Avoid "`MyStruct: Foo` which is required by
+ // `MyStruct: Foo`" in E0599.
+ None
+ } else {
+ Some(predicate)
+ };
+ possibly_unsatisfied_predicates.push((
+ nested_predicate,
+ p,
+ Some(obligation.cause.clone()),
+ ));
+ }
+ }
+ }
+ _ => {
+ // Some nested subobligation of this predicate
+ // failed.
+ let predicate = self.resolve_vars_if_possible(predicate);
+ possibly_unsatisfied_predicates.push((predicate, None, None));
+ }
+ }
+ false
+ }) {
+ // This candidate's primary obligation doesn't even
+ // select - don't bother registering anything in
+                        // `possibly_unsatisfied_predicates`.
+ return ProbeResult::NoMatch;
+ }
+ }
+ }
+ }
+
+ // Evaluate those obligations to see if they might possibly hold.
+ for o in sub_obligations {
+ let o = self.resolve_vars_if_possible(o);
+ if !self.predicate_may_hold(&o) {
+ result = ProbeResult::NoMatch;
+ possibly_unsatisfied_predicates.push((o.predicate, parent_pred, Some(o.cause)));
+ }
+ }
+
+ if let ProbeResult::Match = result {
+ if let (Some(return_ty), Some(xform_ret_ty)) = (self.return_type, xform_ret_ty) {
+ let xform_ret_ty = self.resolve_vars_if_possible(xform_ret_ty);
+ debug!(
+ "comparing return_ty {:?} with xform ret ty {:?}",
+ return_ty, probe.xform_ret_ty
+ );
+ if self
+ .at(&ObligationCause::dummy(), self.param_env)
+ .define_opaque_types(false)
+ .sup(return_ty, xform_ret_ty)
+ .is_err()
+ {
+ return ProbeResult::BadReturnType;
+ }
+ }
+ }
+
+ result
+ })
+ }
+
+ /// Sometimes we get in a situation where we have multiple probes that are all impls of the
+ /// same trait, but we don't know which impl to use. In this case, since in all cases the
+ /// external interface of the method can be determined from the trait, it's ok not to decide.
+ /// We can basically just collapse all of the probes for various impls into one where-clause
+ /// probe. This will result in a pending obligation so when more type-info is available we can
+ /// make the final decision.
+ ///
+ /// Example (`src/test/ui/method-two-trait-defer-resolution-1.rs`):
+ ///
+ /// ```ignore (illustrative)
+ /// trait Foo { ... }
+ /// impl Foo for Vec<i32> { ... }
+ /// impl Foo for Vec<usize> { ... }
+ /// ```
+ ///
+ /// Now imagine the receiver is `Vec<_>`. It doesn't really matter at this time which impl we
+ /// use, so it's ok to just commit to "using the method from the trait Foo".
+ fn collapse_candidates_to_trait_pick(
+ &self,
+ self_ty: Ty<'tcx>,
+ probes: &[(&Candidate<'tcx>, ProbeResult)],
+ ) -> Option<Pick<'tcx>> {
+ // Do all probes correspond to the same trait?
+ let container = probes[0].0.item.trait_container(self.tcx)?;
+ for (p, _) in &probes[1..] {
+ let p_container = p.item.trait_container(self.tcx)?;
+ if p_container != container {
+ return None;
+ }
+ }
+
+ // FIXME: check the return type here somehow.
+ // If so, just use this trait and call it a day.
+ Some(Pick {
+ item: probes[0].0.item,
+ kind: TraitPick,
+ import_ids: probes[0].0.import_ids.clone(),
+ autoderefs: 0,
+ autoref_or_ptr_adjustment: None,
+ self_ty,
+ })
+ }
+
+ /// Similarly to `probe_for_return_type`, this method attempts to find the best matching
+    /// candidate method where the method name may have been misspelled. As with other
+    /// Levenshtein-based suggestions, we provide at most one such suggestion.
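+    ///
+    /// For example (illustrative):
+    ///
+    /// ```ignore (illustrative)
+    /// let mut v = vec![1, 2];
+    /// v.pusn(3); // no method `pusn` on `Vec<i32>`; `push` is offered as a close match
+    /// ```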
+ fn probe_for_lev_candidate(&mut self) -> Result<Option<ty::AssocItem>, MethodError<'tcx>> {
+ debug!("probing for method names similar to {:?}", self.method_name);
+
+ let steps = self.steps.clone();
+ self.probe(|_| {
+ let mut pcx = ProbeContext::new(
+ self.fcx,
+ self.span,
+ self.mode,
+ self.method_name,
+ self.return_type,
+ self.orig_steps_var_values.clone(),
+ steps,
+ IsSuggestion(true),
+ self.scope_expr_id,
+ );
+ pcx.allow_similar_names = true;
+ pcx.assemble_inherent_candidates();
+
+ let method_names = pcx.candidate_method_names(|_| true);
+ pcx.allow_similar_names = false;
+ let applicable_close_candidates: Vec<ty::AssocItem> = method_names
+ .iter()
+ .filter_map(|&method_name| {
+ pcx.reset();
+ pcx.method_name = Some(method_name);
+ pcx.assemble_inherent_candidates();
+ pcx.pick_core().and_then(|pick| pick.ok()).map(|pick| pick.item)
+ })
+ .collect();
+
+ if applicable_close_candidates.is_empty() {
+ Ok(None)
+ } else {
+ let best_name = {
+ let names = applicable_close_candidates
+ .iter()
+ .map(|cand| cand.name)
+ .collect::<Vec<Symbol>>();
+ find_best_match_for_name_with_substrings(
+ &names,
+ self.method_name.unwrap().name,
+ None,
+ )
+ }
+ .unwrap();
+ Ok(applicable_close_candidates.into_iter().find(|method| method.name == best_name))
+ }
+ })
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // MISCELLANY
+ fn has_applicable_self(&self, item: &ty::AssocItem) -> bool {
+ // "Fast track" -- check for usage of sugar when in method call
+ // mode.
+ //
+ // In Path mode (i.e., resolving a value like `T::next`), consider any
+ // associated value (i.e., methods, constants) but not types.
+ match self.mode {
+ Mode::MethodCall => item.fn_has_self_parameter,
+ Mode::Path => match item.kind {
+ ty::AssocKind::Type => false,
+ ty::AssocKind::Fn | ty::AssocKind::Const => true,
+ },
+ }
+ // FIXME -- check for types that deref to `Self`,
+ // like `Rc<Self>` and so on.
+ //
+ // Note also that the current code will break if this type
+ // includes any of the type parameters defined on the method
+ // -- but this could be overcome.
+ }
+
+ fn record_static_candidate(&mut self, source: CandidateSource) {
+ self.static_candidates.push(source);
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn xform_self_ty(
+ &self,
+ item: &ty::AssocItem,
+ impl_ty: Ty<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> (Ty<'tcx>, Option<Ty<'tcx>>) {
+ if item.kind == ty::AssocKind::Fn && self.mode == Mode::MethodCall {
+ let sig = self.xform_method_sig(item.def_id, substs);
+ (sig.inputs()[0], Some(sig.output()))
+ } else {
+ (impl_ty, None)
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn xform_method_sig(&self, method: DefId, substs: SubstsRef<'tcx>) -> ty::FnSig<'tcx> {
+ let fn_sig = self.tcx.bound_fn_sig(method);
+ debug!(?fn_sig);
+
+ assert!(!substs.has_escaping_bound_vars());
+
+ // It is possible for type parameters or early-bound lifetimes
+ // to appear in the signature of `self`. The substitutions we
+ // are given do not include type/lifetime parameters for the
+ // method yet. So create fresh variables here for those too,
+ // if there are any.
+ let generics = self.tcx.generics_of(method);
+ assert_eq!(substs.len(), generics.parent_count as usize);
+
+ let xform_fn_sig = if generics.params.is_empty() {
+ fn_sig.subst(self.tcx, substs)
+ } else {
+ let substs = InternalSubsts::for_item(self.tcx, method, |param, _| {
+ let i = param.index as usize;
+ if i < substs.len() {
+ substs[i]
+ } else {
+ match param.kind {
+ GenericParamDefKind::Lifetime => {
+                            // In general, during probing we erase regions.
+ self.tcx.lifetimes.re_erased.into()
+ }
+ GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => {
+ self.var_for_def(self.span, param)
+ }
+ }
+ }
+ });
+ fn_sig.subst(self.tcx, substs)
+ };
+
+ self.erase_late_bound_regions(xform_fn_sig)
+ }
+
+    /// Gets the type of an impl and generates substitutions with inference vars.
+ fn impl_ty_and_substs(
+ &self,
+ impl_def_id: DefId,
+ ) -> (ty::EarlyBinder<Ty<'tcx>>, SubstsRef<'tcx>) {
+ (self.tcx.bound_type_of(impl_def_id), self.fresh_item_substs(impl_def_id))
+ }
+
+ fn fresh_item_substs(&self, def_id: DefId) -> SubstsRef<'tcx> {
+ InternalSubsts::for_item(self.tcx, def_id, |param, _| match param.kind {
+ GenericParamDefKind::Lifetime => self.tcx.lifetimes.re_erased.into(),
+ GenericParamDefKind::Type { .. } => self
+ .next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::SubstitutionPlaceholder,
+ span: self.tcx.def_span(def_id),
+ })
+ .into(),
+ GenericParamDefKind::Const { .. } => {
+ let span = self.tcx.def_span(def_id);
+ let origin = ConstVariableOrigin {
+ kind: ConstVariableOriginKind::SubstitutionPlaceholder,
+ span,
+ };
+ self.next_const_var(self.tcx.type_of(param.def_id), origin).into()
+ }
+ })
+ }
+
+    /// Replaces late-bound regions bound by `value` with `'static` using
+    /// `TyCtxt::erase_late_bound_regions`.
+ ///
+ /// This is only a reasonable thing to do during the *probe* phase, not the *confirm* phase, of
+ /// method matching. It is reasonable during the probe phase because we don't consider region
+ /// relationships at all. Therefore, we can just replace all the region variables with 'static
+ /// rather than creating fresh region variables. This is nice for two reasons:
+ ///
+ /// 1. Because the numbers of the region variables would otherwise be fairly unique to this
+ /// particular method call, it winds up creating fewer types overall, which helps for memory
+ /// usage. (Admittedly, this is a rather small effect, though measurable.)
+ ///
+ /// 2. It makes it easier to deal with higher-ranked trait bounds, because we can replace any
+ /// late-bound regions with 'static. Otherwise, if we were going to replace late-bound
+ /// regions with actual region variables as is proper, we'd have to ensure that the same
+ /// region got replaced with the same variable, which requires a bit more coordination
+    ///    and/or tracking the substitution, and so forth.
+ fn erase_late_bound_regions<T>(&self, value: ty::Binder<'tcx, T>) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.tcx.erase_late_bound_regions(value)
+ }
+
+ /// Finds the method with the appropriate name (or return type, as the case may be). If
+    /// `allow_similar_names` is set, finds methods with close-matching names.
+ // The length of the returned iterator is nearly always 0 or 1 and this
+ // method is fairly hot.
+ fn impl_or_trait_item(&self, def_id: DefId) -> SmallVec<[ty::AssocItem; 1]> {
+ if let Some(name) = self.method_name {
+ if self.allow_similar_names {
+ let max_dist = max(name.as_str().len(), 3) / 3;
+ self.tcx
+ .associated_items(def_id)
+ .in_definition_order()
+ .filter(|x| {
+ if x.kind.namespace() != Namespace::ValueNS {
+ return false;
+ }
+ match lev_distance_with_substrings(name.as_str(), x.name.as_str(), max_dist)
+ {
+ Some(d) => d > 0,
+ None => false,
+ }
+ })
+ .copied()
+ .collect()
+ } else {
+ self.fcx
+ .associated_value(def_id, name)
+ .map_or_else(SmallVec::new, |x| SmallVec::from_buf([x]))
+ }
+ } else {
+ self.tcx.associated_items(def_id).in_definition_order().copied().collect()
+ }
+ }
+}
+
+impl<'tcx> Candidate<'tcx> {
+ fn to_unadjusted_pick(&self, self_ty: Ty<'tcx>) -> Pick<'tcx> {
+ Pick {
+ item: self.item,
+ kind: match self.kind {
+ InherentImplCandidate(..) => InherentImplPick,
+ ObjectCandidate => ObjectPick,
+ TraitCandidate(_) => TraitPick,
+ WhereClauseCandidate(ref trait_ref) => {
+                    // Only traits derived from where-clauses should
+ // appear here, so they should not contain any
+ // inference variables or other artifacts. This
+ // means they are safe to put into the
+ // `WhereClausePick`.
+ assert!(
+ !trait_ref.skip_binder().substs.needs_infer()
+ && !trait_ref.skip_binder().substs.has_placeholders()
+ );
+
+ WhereClausePick(*trait_ref)
+ }
+ },
+ import_ids: self.import_ids.clone(),
+ autoderefs: 0,
+ autoref_or_ptr_adjustment: None,
+ self_ty,
+ }
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs
new file mode 100644
index 000000000..6c21ed902
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/method/suggest.rs
@@ -0,0 +1,2605 @@
+//! Give useful errors and suggestions to users when an item can't be
+//! found or is otherwise invalid.
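+//!
+//! For example (illustrative), a call like `Vec::<u8>::new().frobnicate()` is rejected
+//! with E0599 ("no method named `frobnicate` found ..."), and the notes and suggestions
+//! attached to that error are produced here.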
+
+use crate::errors;
+use crate::FnCtxt;
+use rustc_ast::ast::Mutability;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{
+ pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed,
+ MultiSpan,
+};
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{ExprKind, Node, QPath};
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::traits::util::supertraits;
+use rustc_middle::ty::fast_reject::{simplify_type, TreatParams};
+use rustc_middle::ty::print::with_crate_prefix;
+use rustc_middle::ty::{self, DefIdTree, ToPredicate, Ty, TyCtxt, TypeVisitable};
+use rustc_middle::ty::{IsSuggestable, ToPolyTraitRef};
+use rustc_span::symbol::{kw, sym, Ident};
+use rustc_span::Symbol;
+use rustc_span::{lev_distance, source_map, ExpnKind, FileName, MacroKind, Span};
+use rustc_trait_selection::traits::error_reporting::on_unimplemented::TypeErrCtxtExt as _;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
+use rustc_trait_selection::traits::{
+ FulfillmentError, Obligation, ObligationCause, ObligationCauseCode, OnUnimplementedNote,
+};
+
+use std::cmp::Ordering;
+use std::iter;
+
+use super::probe::{AutorefOrPtrAdjustment, IsSuggestion, Mode, ProbeScope};
+use super::{CandidateSource, MethodError, NoMatchData};
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ fn is_fn_ty(&self, ty: Ty<'tcx>, span: Span) -> bool {
+ let tcx = self.tcx;
+ match ty.kind() {
+ // Not all of these (e.g., unsafe fns) implement `FnOnce`,
+ // so we look for these beforehand.
+ ty::Closure(..) | ty::FnDef(..) | ty::FnPtr(_) => true,
+ // If it's not a simple function, look for things which implement `FnOnce`.
+ _ => {
+ let Some(fn_once) = tcx.lang_items().fn_once_trait() else {
+ return false;
+ };
+
+                // This conditional prevents us from suggesting a call on values whose
+                // type is an error or still unresolved.
+ // It might seem that we can use `predicate_must_hold_modulo_regions`,
+ // but since a Dummy binder is used to fill in the FnOnce trait's arguments,
+ // type resolution always gives a "maybe" here.
+ if self.autoderef(span, ty).any(|(ty, _)| {
+ info!("check deref {:?} error", ty);
+ matches!(ty.kind(), ty::Error(_) | ty::Infer(_))
+ }) {
+ return false;
+ }
+
+ self.autoderef(span, ty).any(|(ty, _)| {
+ info!("check deref {:?} impl FnOnce", ty);
+ self.probe(|_| {
+ let fn_once_substs = tcx.mk_substs_trait(
+ ty,
+ &[self
+ .next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span,
+ })
+ .into()],
+ );
+ let trait_ref = ty::TraitRef::new(fn_once, fn_once_substs);
+ let poly_trait_ref = ty::Binder::dummy(trait_ref);
+ let obligation = Obligation::misc(
+ span,
+ self.body_id,
+ self.param_env,
+ poly_trait_ref.without_const().to_predicate(tcx),
+ );
+ self.predicate_may_hold(&obligation)
+ })
+ })
+ }
+ }
+ }
+
+ fn is_slice_ty(&self, ty: Ty<'tcx>, span: Span) -> bool {
+ self.autoderef(span, ty).any(|(ty, _)| matches!(ty.kind(), ty::Slice(..) | ty::Array(..)))
+ }
+
+ pub fn report_method_error(
+ &self,
+ mut span: Span,
+ rcvr_ty: Ty<'tcx>,
+ item_name: Ident,
+ source: SelfSource<'tcx>,
+ error: MethodError<'tcx>,
+ args: Option<(&'tcx hir::Expr<'tcx>, &'tcx [hir::Expr<'tcx>])>,
+ ) -> Option<DiagnosticBuilder<'_, ErrorGuaranteed>> {
+ // Avoid suggestions when we don't know what's going on.
+ if rcvr_ty.references_error() {
+ return None;
+ }
+
+ let report_candidates = |span: Span,
+ err: &mut Diagnostic,
+ sources: &mut Vec<CandidateSource>,
+ sugg_span: Span| {
+ sources.sort();
+ sources.dedup();
+ // Dynamic limit to avoid hiding just one candidate, which is silly.
+ let limit = if sources.len() == 5 { 5 } else { 4 };
+
+ for (idx, source) in sources.iter().take(limit).enumerate() {
+ match *source {
+ CandidateSource::Impl(impl_did) => {
+ // Provide the best span we can. Use the item, if local to crate, else
+ // the impl, if local to crate (item may be defaulted), else nothing.
+ let Some(item) = self.associated_value(impl_did, item_name).or_else(|| {
+ let impl_trait_ref = self.tcx.impl_trait_ref(impl_did)?;
+ self.associated_value(impl_trait_ref.def_id, item_name)
+ }) else {
+ continue;
+ };
+
+ let note_span = if item.def_id.is_local() {
+ Some(self.tcx.def_span(item.def_id))
+ } else if impl_did.is_local() {
+ Some(self.tcx.def_span(impl_did))
+ } else {
+ None
+ };
+
+ let impl_ty = self.tcx.at(span).type_of(impl_did);
+
+ let insertion = match self.tcx.impl_trait_ref(impl_did) {
+ None => String::new(),
+ Some(trait_ref) => format!(
+ " of the trait `{}`",
+ self.tcx.def_path_str(trait_ref.def_id)
+ ),
+ };
+
+ let (note_str, idx) = if sources.len() > 1 {
+ (
+ format!(
+ "candidate #{} is defined in an impl{} for the type `{}`",
+ idx + 1,
+ insertion,
+ impl_ty,
+ ),
+ Some(idx + 1),
+ )
+ } else {
+ (
+ format!(
+ "the candidate is defined in an impl{} for the type `{}`",
+ insertion, impl_ty,
+ ),
+ None,
+ )
+ };
+ if let Some(note_span) = note_span {
+ // We have a span pointing to the method. Show note with snippet.
+ err.span_note(note_span, &note_str);
+ } else {
+ err.note(&note_str);
+ }
+ if let Some(trait_ref) = self.tcx.impl_trait_ref(impl_did) {
+ let path = self.tcx.def_path_str(trait_ref.def_id);
+
+ let ty = match item.kind {
+ ty::AssocKind::Const | ty::AssocKind::Type => rcvr_ty,
+ ty::AssocKind::Fn => self
+ .tcx
+ .fn_sig(item.def_id)
+ .inputs()
+ .skip_binder()
+ .get(0)
+ .filter(|ty| ty.is_region_ptr() && !rcvr_ty.is_region_ptr())
+ .copied()
+ .unwrap_or(rcvr_ty),
+ };
+ print_disambiguation_help(
+ item_name,
+ args,
+ err,
+ path,
+ ty,
+ item.kind,
+ item.def_id,
+ sugg_span,
+ idx,
+ self.tcx.sess.source_map(),
+ item.fn_has_self_parameter,
+ );
+ }
+ }
+ CandidateSource::Trait(trait_did) => {
+ let Some(item) = self.associated_value(trait_did, item_name) else { continue };
+ let item_span = self.tcx.def_span(item.def_id);
+ let idx = if sources.len() > 1 {
+ let msg = &format!(
+ "candidate #{} is defined in the trait `{}`",
+ idx + 1,
+ self.tcx.def_path_str(trait_did)
+ );
+ err.span_note(item_span, msg);
+ Some(idx + 1)
+ } else {
+ let msg = &format!(
+ "the candidate is defined in the trait `{}`",
+ self.tcx.def_path_str(trait_did)
+ );
+ err.span_note(item_span, msg);
+ None
+ };
+ let path = self.tcx.def_path_str(trait_did);
+ print_disambiguation_help(
+ item_name,
+ args,
+ err,
+ path,
+ rcvr_ty,
+ item.kind,
+ item.def_id,
+ sugg_span,
+ idx,
+ self.tcx.sess.source_map(),
+ item.fn_has_self_parameter,
+ );
+ }
+ }
+ }
+ if sources.len() > limit {
+ err.note(&format!("and {} others", sources.len() - limit));
+ }
+ };
+
+ let sugg_span = if let SelfSource::MethodCall(expr) = source {
+ // Given `foo.bar(baz)`, `expr` is `bar`, but we want to point to the whole thing.
+ self.tcx.hir().expect_expr(self.tcx.hir().get_parent_node(expr.hir_id)).span
+ } else {
+ span
+ };
+
+ match error {
+ MethodError::NoMatch(NoMatchData {
+ static_candidates: mut static_sources,
+ unsatisfied_predicates,
+ out_of_scope_traits,
+ lev_candidate,
+ mode,
+ }) => {
+ let tcx = self.tcx;
+
+ let actual = self.resolve_vars_if_possible(rcvr_ty);
+ let ty_str = self.ty_to_string(actual);
+ let is_method = mode == Mode::MethodCall;
+ let item_kind = if is_method {
+ "method"
+ } else if actual.is_enum() {
+ "variant or associated item"
+ } else {
+ match (item_name.as_str().chars().next(), actual.is_fresh_ty()) {
+ (Some(name), false) if name.is_lowercase() => "function or associated item",
+ (Some(_), false) => "associated item",
+ (Some(_), true) | (None, false) => "variant or associated item",
+ (None, true) => "variant",
+ }
+ };
+
+ if self.suggest_wrapping_range_with_parens(
+ tcx, actual, source, span, item_name, &ty_str,
+ ) || self.suggest_constraining_numerical_ty(
+ tcx, actual, source, span, item_kind, item_name, &ty_str,
+ ) {
+ return None;
+ }
+
+ span = item_name.span;
+
+ // Don't show generic arguments when the method can't be found in any implementation (#81576).
+ let mut ty_str_reported = ty_str.clone();
+ if let ty::Adt(_, generics) = actual.kind() {
+ if generics.len() > 0 {
+ let mut autoderef = self.autoderef(span, actual);
+ let candidate_found = autoderef.any(|(ty, _)| {
+ if let ty::Adt(adt_deref, _) = ty.kind() {
+ self.tcx
+ .inherent_impls(adt_deref.did())
+ .iter()
+ .filter_map(|def_id| self.associated_value(*def_id, item_name))
+ .count()
+ >= 1
+ } else {
+ false
+ }
+ });
+ let has_deref = autoderef.step_count() > 0;
+ if !candidate_found && !has_deref && unsatisfied_predicates.is_empty() {
+ if let Some((path_string, _)) = ty_str.split_once('<') {
+ ty_str_reported = path_string.to_string();
+ }
+ }
+ }
+ }
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0599,
+ "no {} named `{}` found for {} `{}` in the current scope",
+ item_kind,
+ item_name,
+ actual.prefix_string(self.tcx),
+ ty_str_reported,
+ );
+ if actual.references_error() {
+ err.downgrade_to_delayed_bug();
+ }
+
+ if let Mode::MethodCall = mode && let SelfSource::MethodCall(cal) = source {
+ self.suggest_await_before_method(
+ &mut err, item_name, actual, cal, span,
+ );
+ }
+ if let Some(span) = tcx.resolutions(()).confused_type_with_std_module.get(&span) {
+ err.span_suggestion(
+ span.shrink_to_lo(),
+ "you are looking for the module in `std`, not the primitive type",
+ "std::",
+ Applicability::MachineApplicable,
+ );
+ }
+ if let ty::RawPtr(_) = &actual.kind() {
+ err.note(
+ "try using `<*const T>::as_ref()` to get a reference to the \
+ type behind the pointer: https://doc.rust-lang.org/std/\
+ primitive.pointer.html#method.as_ref",
+ );
+ err.note(
+ "using `<*const T>::as_ref()` on a pointer which is unaligned or points \
+ to invalid or uninitialized memory is undefined behavior",
+ );
+ }
+
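+ // If the receiver is a local ADT or a type parameter, remember its definition
+ // span so the "not found for this type" label can point at the declaration.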
+ let ty_span = match actual.kind() {
+ ty::Param(param_type) => {
+ let generics = self.tcx.generics_of(self.body_id.owner.to_def_id());
+ let type_param = generics.type_param(param_type, self.tcx);
+ Some(self.tcx.def_span(type_param.def_id))
+ }
+ ty::Adt(def, _) if def.did().is_local() => Some(tcx.def_span(def.did())),
+ _ => None,
+ };
+
+ if let Some(span) = ty_span {
+ err.span_label(
+ span,
+ format!(
+ "{item_kind} `{item_name}` not found for this {}",
+ actual.prefix_string(self.tcx)
+ ),
+ );
+ }
+
+ if let SelfSource::MethodCall(rcvr_expr) = source {
+ self.suggest_fn_call(&mut err, rcvr_expr, rcvr_ty, |output_ty| {
+ let call_expr = self
+ .tcx
+ .hir()
+ .expect_expr(self.tcx.hir().get_parent_node(rcvr_expr.hir_id));
+ let probe = self.lookup_probe(
+ span,
+ item_name,
+ output_ty,
+ call_expr,
+ ProbeScope::AllTraits,
+ );
+ probe.is_ok()
+ });
+ }
+
+ let mut custom_span_label = false;
+
+ if !static_sources.is_empty() {
+ err.note(
+ "found the following associated functions; to be used as methods, \
+ functions must have a `self` parameter",
+ );
+ err.span_label(span, "this is an associated function, not a method");
+ custom_span_label = true;
+ }
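+ // With exactly one associated-function candidate, suggest rewriting the call
+ // with fully qualified syntax, e.g. `Type::item(...)` instead of `value.item(...)`.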
+ if static_sources.len() == 1 {
+ let ty_str =
+ if let Some(CandidateSource::Impl(impl_did)) = static_sources.get(0) {
+ // When the "method" is resolved through dereferencing, we really want the
+ // original type that has the associated function for accurate suggestions.
+ // (#61411)
+ let ty = tcx.at(span).type_of(*impl_did);
+ match (&ty.peel_refs().kind(), &actual.peel_refs().kind()) {
+ (ty::Adt(def, _), ty::Adt(def_actual, _)) if def == def_actual => {
+ // Use `actual` as it will have more `substs` filled in.
+ self.ty_to_value_string(actual.peel_refs())
+ }
+ _ => self.ty_to_value_string(ty.peel_refs()),
+ }
+ } else {
+ self.ty_to_value_string(actual.peel_refs())
+ };
+ if let SelfSource::MethodCall(expr) = source {
+ err.span_suggestion(
+ expr.span.to(span),
+ "use associated function syntax instead",
+ format!("{}::{}", ty_str, item_name),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.help(&format!("try with `{}::{}`", ty_str, item_name,));
+ }
+
+ report_candidates(span, &mut err, &mut static_sources, sugg_span);
+ } else if static_sources.len() > 1 {
+ report_candidates(span, &mut err, &mut static_sources, sugg_span);
+ }
+
+ let mut bound_spans = vec![];
+ let mut restrict_type_params = false;
+ let mut unsatisfied_bounds = false;
+ if item_name.name == sym::count && self.is_slice_ty(actual, span) {
+ let msg = "consider using `len` instead";
+ if let SelfSource::MethodCall(_expr) = source {
+ err.span_suggestion_short(
+ span,
+ msg,
+ "len",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_label(span, msg);
+ }
+ if let Some(iterator_trait) = self.tcx.get_diagnostic_item(sym::Iterator) {
+ let iterator_trait = self.tcx.def_path_str(iterator_trait);
+ err.note(&format!("`count` is defined on `{iterator_trait}`, which `{actual}` does not implement"));
+ }
+ } else if !unsatisfied_predicates.is_empty() {
+ let mut type_params = FxHashMap::default();
+
+ // Pick out the list of unimplemented traits on the receiver.
+ // This is used for custom error messages with the `#[rustc_on_unimplemented]` attribute.
+ let mut unimplemented_traits = FxHashMap::default();
+ let mut unimplemented_traits_only = true;
+ for (predicate, _parent_pred, cause) in &unsatisfied_predicates {
+ if let (ty::PredicateKind::Trait(p), Some(cause)) =
+ (predicate.kind().skip_binder(), cause.as_ref())
+ {
+ if p.trait_ref.self_ty() != rcvr_ty {
+ // This is necessary, not just to keep the errors clean, but also
+ // because our derived obligations can wind up with a trait ref that
+ // requires a different param_env to be correctly compared.
+ continue;
+ }
+ unimplemented_traits.entry(p.trait_ref.def_id).or_insert((
+ predicate.kind().rebind(p.trait_ref),
+ Obligation {
+ cause: cause.clone(),
+ param_env: self.param_env,
+ predicate: *predicate,
+ recursion_depth: 0,
+ },
+ ));
+ }
+ }
+
+ // Make sure that, if any traits other than the found ones were involved,
+ // we don't report an unimplemented trait.
+ // We don't want to say that `iter::Cloned` is not an iterator, just
+ // because of some non-Clone item being iterated over.
+ for (predicate, _parent_pred, _cause) in &unsatisfied_predicates {
+ match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(p)
+ if unimplemented_traits.contains_key(&p.trait_ref.def_id) => {}
+ _ => {
+ unimplemented_traits_only = false;
+ break;
+ }
+ }
+ }
+
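+ // Record, per generic parameter list, the bounds that would have to be added
+ // for the unsatisfied predicates on a type parameter to hold; these feed the
+ // "consider restricting the type parameter" suggestion emitted further down.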
+ let mut collect_type_param_suggestions =
+ |self_ty: Ty<'tcx>, parent_pred: ty::Predicate<'tcx>, obligation: &str| {
+ // We don't care about regions here, so it's fine to skip the binder here.
+ if let (ty::Param(_), ty::PredicateKind::Trait(p)) =
+ (self_ty.kind(), parent_pred.kind().skip_binder())
+ {
+ let node = match p.trait_ref.self_ty().kind() {
+ ty::Param(_) => {
+ // Account for `fn` items like in `issue-35677.rs` to
+ // suggest restricting its type params.
+ let did = self.tcx.hir().body_owner_def_id(hir::BodyId {
+ hir_id: self.body_id,
+ });
+ Some(
+ self.tcx
+ .hir()
+ .get(self.tcx.hir().local_def_id_to_hir_id(did)),
+ )
+ }
+ ty::Adt(def, _) => def.did().as_local().map(|def_id| {
+ self.tcx
+ .hir()
+ .get(self.tcx.hir().local_def_id_to_hir_id(def_id))
+ }),
+ _ => None,
+ };
+ if let Some(hir::Node::Item(hir::Item { kind, .. })) = node {
+ if let Some(g) = kind.generics() {
+ let key = (
+ g.tail_span_for_predicate_suggestion(),
+ g.add_where_or_trailing_comma(),
+ );
+ type_params
+ .entry(key)
+ .or_insert_with(FxHashSet::default)
+ .insert(obligation.to_owned());
+ }
+ }
+ }
+ };
+ let mut bound_span_label = |self_ty: Ty<'_>, obligation: &str, quiet: &str| {
+ let msg = format!(
+ "doesn't satisfy `{}`",
+ if obligation.len() > 50 { quiet } else { obligation }
+ );
+ match &self_ty.kind() {
+ // Point at the type that couldn't satisfy the bound.
+ ty::Adt(def, _) => {
+ bound_spans.push((self.tcx.def_span(def.did()), msg))
+ }
+ // Point at the trait object that couldn't satisfy the bound.
+ ty::Dynamic(preds, _, _) => {
+ for pred in preds.iter() {
+ match pred.skip_binder() {
+ ty::ExistentialPredicate::Trait(tr) => bound_spans
+ .push((self.tcx.def_span(tr.def_id), msg.clone())),
+ ty::ExistentialPredicate::Projection(_)
+ | ty::ExistentialPredicate::AutoTrait(_) => {}
+ }
+ }
+ }
+ // Point at the closure that couldn't satisfy the bound.
+ ty::Closure(def_id, _) => bound_spans.push((
+ tcx.def_span(*def_id),
+ format!("doesn't satisfy `{}`", quiet),
+ )),
+ _ => {}
+ }
+ };
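+ // Render a predicate as human-readable text (`Ty: Trait` or
+ // `<Ty as Trait>::Item = T`) and attach a "doesn't satisfy" label to the
+ // self type it mentions.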
+ let mut format_pred = |pred: ty::Predicate<'tcx>| {
+ let bound_predicate = pred.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Projection(pred) => {
+ let pred = bound_predicate.rebind(pred);
+ // `<Foo as Iterator>::Item = String`.
+ let projection_ty = pred.skip_binder().projection_ty;
+
+ let substs_with_infer_self = tcx.mk_substs(
+ iter::once(tcx.mk_ty_var(ty::TyVid::from_u32(0)).into())
+ .chain(projection_ty.substs.iter().skip(1)),
+ );
+
+ let quiet_projection_ty = ty::ProjectionTy {
+ substs: substs_with_infer_self,
+ item_def_id: projection_ty.item_def_id,
+ };
+
+ let term = pred.skip_binder().term;
+
+ let obligation = format!("{} = {}", projection_ty, term);
+ let quiet = format!("{} = {}", quiet_projection_ty, term);
+
+ bound_span_label(projection_ty.self_ty(), &obligation, &quiet);
+ Some((obligation, projection_ty.self_ty()))
+ }
+ ty::PredicateKind::Trait(poly_trait_ref) => {
+ let p = poly_trait_ref.trait_ref;
+ let self_ty = p.self_ty();
+ let path = p.print_only_trait_path();
+ let obligation = format!("{}: {}", self_ty, path);
+ let quiet = format!("_: {}", path);
+ bound_span_label(self_ty, &obligation, &quiet);
+ Some((obligation, self_ty))
+ }
+ _ => None,
+ }
+ };
+
+ // Find all the requirements that come from a local `impl` block.
+ let mut skip_list: FxHashSet<_> = Default::default();
+ let mut spanned_predicates: FxHashMap<MultiSpan, _> = Default::default();
+ for (data, p, parent_p, impl_def_id, cause) in unsatisfied_predicates
+ .iter()
+ .filter_map(|(p, parent, c)| c.as_ref().map(|c| (p, parent, c)))
+ .filter_map(|(p, parent, c)| match c.code() {
+ ObligationCauseCode::ImplDerivedObligation(ref data) => {
+ Some((&data.derived, p, parent, data.impl_def_id, data))
+ }
+ _ => None,
+ })
+ {
+ let parent_trait_ref = data.parent_trait_pred;
+ let path = parent_trait_ref.print_modifiers_and_trait_path();
+ let tr_self_ty = parent_trait_ref.skip_binder().self_ty();
+ let unsatisfied_msg = "unsatisfied trait bound introduced here";
+ let derive_msg =
+ "unsatisfied trait bound introduced in this `derive` macro";
+ match self.tcx.hir().get_if_local(impl_def_id) {
+ // Unmet obligation comes from a `derive` macro, point at it once to
+ // avoid multiple span labels pointing at the same place.
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(..),
+ ident,
+ ..
+ })) if matches!(
+ ident.span.ctxt().outer_expn_data().kind,
+ ExpnKind::Macro(MacroKind::Derive, _)
+ ) =>
+ {
+ let span = ident.span.ctxt().outer_expn_data().call_site;
+ let mut spans: MultiSpan = span.into();
+ spans.push_span_label(span, derive_msg);
+ let entry = spanned_predicates.entry(spans);
+ entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p);
+ }
+
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { of_trait, self_ty, .. }),
+ ..
+ })) if matches!(
+ self_ty.span.ctxt().outer_expn_data().kind,
+ ExpnKind::Macro(MacroKind::Derive, _)
+ ) || matches!(
+ of_trait.as_ref().map(|t| t
+ .path
+ .span
+ .ctxt()
+ .outer_expn_data()
+ .kind),
+ Some(ExpnKind::Macro(MacroKind::Derive, _))
+ ) =>
+ {
+ let span = self_ty.span.ctxt().outer_expn_data().call_site;
+ let mut spans: MultiSpan = span.into();
+ spans.push_span_label(span, derive_msg);
+ let entry = spanned_predicates.entry(spans);
+ entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p);
+ }
+
+ // Unmet obligation coming from a `trait`.
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(..),
+ ident,
+ span: item_span,
+ ..
+ })) if !matches!(
+ ident.span.ctxt().outer_expn_data().kind,
+ ExpnKind::Macro(MacroKind::Derive, _)
+ ) =>
+ {
+ if let Some(pred) = parent_p {
+ // Done to add the "doesn't satisfy" `span_label`.
+ let _ = format_pred(*pred);
+ }
+ skip_list.insert(p);
+ let mut spans = if cause.span != *item_span {
+ let mut spans: MultiSpan = cause.span.into();
+ spans.push_span_label(cause.span, unsatisfied_msg);
+ spans
+ } else {
+ ident.span.into()
+ };
+ spans.push_span_label(ident.span, "in this trait");
+ let entry = spanned_predicates.entry(spans);
+ entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p);
+ }
+
+ // Unmet obligation coming from an `impl`.
+ Some(Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Impl(hir::Impl {
+ of_trait, self_ty, generics, ..
+ }),
+ span: item_span,
+ ..
+ })) if !matches!(
+ self_ty.span.ctxt().outer_expn_data().kind,
+ ExpnKind::Macro(MacroKind::Derive, _)
+ ) && !matches!(
+ of_trait.as_ref().map(|t| t
+ .path
+ .span
+ .ctxt()
+ .outer_expn_data()
+ .kind),
+ Some(ExpnKind::Macro(MacroKind::Derive, _))
+ ) =>
+ {
+ let sized_pred =
+ unsatisfied_predicates.iter().any(|(pred, _, _)| {
+ match pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(pred) => {
+ Some(pred.def_id())
+ == self.tcx.lang_items().sized_trait()
+ && pred.polarity == ty::ImplPolarity::Positive
+ }
+ _ => false,
+ }
+ });
+ for param in generics.params {
+ if param.span == cause.span && sized_pred {
+ let (sp, sugg) = match param.colon_span {
+ Some(sp) => (sp.shrink_to_hi(), " ?Sized +"),
+ None => (param.span.shrink_to_hi(), ": ?Sized"),
+ };
+ err.span_suggestion_verbose(
+ sp,
+ "consider relaxing the type parameter's implicit \
+ `Sized` bound",
+ sugg,
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ if let Some(pred) = parent_p {
+ // Done to add the "doesn't satisfy" `span_label`.
+ let _ = format_pred(*pred);
+ }
+ skip_list.insert(p);
+ let mut spans = if cause.span != *item_span {
+ let mut spans: MultiSpan = cause.span.into();
+ spans.push_span_label(cause.span, unsatisfied_msg);
+ spans
+ } else {
+ let mut spans = Vec::with_capacity(2);
+ if let Some(trait_ref) = of_trait {
+ spans.push(trait_ref.path.span);
+ }
+ spans.push(self_ty.span);
+ spans.into()
+ };
+ if let Some(trait_ref) = of_trait {
+ spans.push_span_label(trait_ref.path.span, "");
+ }
+ spans.push_span_label(self_ty.span, "");
+
+ let entry = spanned_predicates.entry(spans);
+ entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p);
+ }
+ _ => {}
+ }
+ }
+ let mut spanned_predicates: Vec<_> = spanned_predicates.into_iter().collect();
+ spanned_predicates.sort_by_key(|(span, (_, _, _))| span.primary_span());
+ for (span, (_path, _self_ty, preds)) in spanned_predicates {
+ let mut preds: Vec<_> = preds
+ .into_iter()
+ .filter_map(|pred| format_pred(*pred))
+ .map(|(p, _)| format!("`{}`", p))
+ .collect();
+ preds.sort();
+ preds.dedup();
+ let msg = if let [pred] = &preds[..] {
+ format!("trait bound {} was not satisfied", pred)
+ } else {
+ format!(
+ "the following trait bounds were not satisfied:\n{}",
+ preds.join("\n"),
+ )
+ };
+ err.span_note(span, &msg);
+ unsatisfied_bounds = true;
+ }
+
+ // The requirements that didn't have an `impl` span to show.
+ let mut bound_list = unsatisfied_predicates
+ .iter()
+ .filter_map(|(pred, parent_pred, _cause)| {
+ format_pred(*pred).map(|(p, self_ty)| {
+ collect_type_param_suggestions(self_ty, *pred, &p);
+ (
+ match parent_pred {
+ None => format!("`{}`", &p),
+ Some(parent_pred) => match format_pred(*parent_pred) {
+ None => format!("`{}`", &p),
+ Some((parent_p, _)) => {
+ collect_type_param_suggestions(
+ self_ty,
+ *parent_pred,
+ &p,
+ );
+ format!(
+ "`{}`\nwhich is required by `{}`",
+ p, parent_p
+ )
+ }
+ },
+ },
+ *pred,
+ )
+ })
+ })
+ .filter(|(_, pred)| !skip_list.contains(&pred))
+ .map(|(t, _)| t)
+ .enumerate()
+ .collect::<Vec<(usize, String)>>();
+
+ for ((span, add_where_or_comma), obligations) in type_params.into_iter() {
+ restrict_type_params = true;
+ // #74886: Sort here so that the output is always the same.
+ let mut obligations = obligations.into_iter().collect::<Vec<_>>();
+ obligations.sort();
+ err.span_suggestion_verbose(
+ span,
+ &format!(
+ "consider restricting the type parameter{s} to satisfy the \
+ trait bound{s}",
+ s = pluralize!(obligations.len())
+ ),
+ format!("{} {}", add_where_or_comma, obligations.join(", ")),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ bound_list.sort_by(|(_, a), (_, b)| a.cmp(b)); // Sort alphabetically.
+ bound_list.dedup_by(|(_, a), (_, b)| a == b); // #35677
+ bound_list.sort_by_key(|(pos, _)| *pos); // Keep the original predicate order.
+
+ if !bound_list.is_empty() || !skip_list.is_empty() {
+ let bound_list = bound_list
+ .into_iter()
+ .map(|(_, path)| path)
+ .collect::<Vec<_>>()
+ .join("\n");
+ let actual_prefix = actual.prefix_string(self.tcx);
+ info!("unimplemented_traits.len() == {}", unimplemented_traits.len());
+ let (primary_message, label) =
+ if unimplemented_traits.len() == 1 && unimplemented_traits_only {
+ unimplemented_traits
+ .into_iter()
+ .next()
+ .map(|(_, (trait_ref, obligation))| {
+ if trait_ref.self_ty().references_error()
+ || actual.references_error()
+ {
+ // Avoid crashing.
+ return (None, None);
+ }
+ let OnUnimplementedNote { message, label, .. } = self
+ .err_ctxt()
+ .on_unimplemented_note(trait_ref, &obligation);
+ (message, label)
+ })
+ .unwrap_or((None, None))
+ } else {
+ (None, None)
+ };
+ let primary_message = primary_message.unwrap_or_else(|| format!(
+ "the {item_kind} `{item_name}` exists for {actual_prefix} `{ty_str}`, but its trait bounds were not satisfied"
+ ));
+ err.set_primary_message(&primary_message);
+ if let Some(label) = label {
+ custom_span_label = true;
+ err.span_label(span, label);
+ }
+ if !bound_list.is_empty() {
+ err.note(&format!(
+ "the following trait bounds were not satisfied:\n{bound_list}"
+ ));
+ }
+ self.suggest_derive(&mut err, &unsatisfied_predicates);
+
+ unsatisfied_bounds = true;
+ }
+ }
+
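+ // Fallback label used when no better suggestion applies: point at the item name,
+ // special-case `&str`/`String` iteration, and list other types whose inherent
+ // impls do provide the requested item.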
+ let label_span_not_found = |err: &mut Diagnostic| {
+ if unsatisfied_predicates.is_empty() {
+ err.span_label(span, format!("{item_kind} not found in `{ty_str}`"));
+ let is_string_or_ref_str = match actual.kind() {
+ ty::Ref(_, ty, _) => {
+ ty.is_str()
+ || matches!(
+ ty.kind(),
+ ty::Adt(adt, _) if self.tcx.is_diagnostic_item(sym::String, adt.did())
+ )
+ }
+ ty::Adt(adt, _) => self.tcx.is_diagnostic_item(sym::String, adt.did()),
+ _ => false,
+ };
+ if is_string_or_ref_str && item_name.name == sym::iter {
+ err.span_suggestion_verbose(
+ item_name.span,
+ "because of the in-memory representation of `&str`, to obtain \
+ an `Iterator` over each of its codepoint use method `chars`",
+ "chars",
+ Applicability::MachineApplicable,
+ );
+ }
+ if let ty::Adt(adt, _) = rcvr_ty.kind() {
+ let mut inherent_impls_candidate = self
+ .tcx
+ .inherent_impls(adt.did())
+ .iter()
+ .copied()
+ .filter(|def_id| {
+ if let Some(assoc) = self.associated_value(*def_id, item_name) {
+ // Check that the mode is the same so we avoid suggesting an
+ // incorrect associated item.
+ match (mode, assoc.fn_has_self_parameter, source) {
+ (Mode::MethodCall, true, SelfSource::MethodCall(_)) => {
+ // We check that the suggested type is actually
+ // different from the received one, so we avoid
+ // suggesting a method with a `Box<Self>` receiver,
+ // for instance.
+ self.tcx.at(span).type_of(*def_id) != actual
+ && self.tcx.at(span).type_of(*def_id) != rcvr_ty
+ }
+ (Mode::Path, false, _) => true,
+ _ => false,
+ }
+ } else {
+ false
+ }
+ })
+ .collect::<Vec<_>>();
+ if !inherent_impls_candidate.is_empty() {
+ inherent_impls_candidate.sort();
+ inherent_impls_candidate.dedup();
+
+ // Maximum number of types to show: display all of them when there are
+ // exactly 5, otherwise cap at 4 and report how many more were found.
+ let limit = if inherent_impls_candidate.len() == 5 { 5 } else { 4 };
+ let type_candidates = inherent_impls_candidate
+ .iter()
+ .take(limit)
+ .map(|impl_item| {
+ format!("- `{}`", self.tcx.at(span).type_of(*impl_item))
+ })
+ .collect::<Vec<_>>()
+ .join("\n");
+ let additional_types = if inherent_impls_candidate.len() > limit {
+ format!(
+ "\nand {} more types",
+ inherent_impls_candidate.len() - limit
+ )
+ } else {
+ "".to_string()
+ };
+ err.note(&format!(
+ "the {item_kind} was found for\n{}{}",
+ type_candidates, additional_types
+ ));
+ }
+ }
+ } else {
+ err.span_label(span, format!("{item_kind} cannot be called on `{ty_str}` due to unsatisfied trait bounds"));
+ }
+ };
+
+ // If the method name is the name of a field with a function or closure type,
+ // give a helping note that it has to be called as `(x.f)(...)`.
+ if let SelfSource::MethodCall(expr) = source {
+ if !self.suggest_field_call(span, rcvr_ty, expr, item_name, &mut err)
+ && lev_candidate.is_none()
+ && !custom_span_label
+ {
+ label_span_not_found(&mut err);
+ }
+ } else if !custom_span_label {
+ label_span_not_found(&mut err);
+ }
+
+ // Don't suggest (for example) `expr.field.method()` if `expr.method()`
+ // doesn't exist due to unsatisfied predicates.
+ if unsatisfied_predicates.is_empty() {
+ self.check_for_field_method(&mut err, source, span, actual, item_name);
+ }
+
+ self.check_for_inner_self(&mut err, source, span, actual, item_name);
+
+ bound_spans.sort();
+ bound_spans.dedup();
+ for (span, msg) in bound_spans.into_iter() {
+ err.span_label(span, &msg);
+ }
+
+ if actual.is_numeric() && actual.is_fresh() || restrict_type_params {
+ } else {
+ self.suggest_traits_to_import(
+ &mut err,
+ span,
+ rcvr_ty,
+ item_name,
+ args.map(|(_, args)| args.len() + 1),
+ source,
+ out_of_scope_traits,
+ &unsatisfied_predicates,
+ &static_sources,
+ unsatisfied_bounds,
+ );
+ }
+
+ // Don't emit a suggestion if we found an actual method
+ // that had unsatisfied trait bounds
+ if unsatisfied_predicates.is_empty() && actual.is_enum() {
+ let adt_def = actual.ty_adt_def().expect("enum is not an ADT");
+ if let Some(suggestion) = lev_distance::find_best_match_for_name(
+ &adt_def.variants().iter().map(|s| s.name).collect::<Vec<_>>(),
+ item_name.name,
+ None,
+ ) {
+ err.span_suggestion(
+ span,
+ "there is a variant with a similar name",
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ if item_name.name == sym::as_str && actual.peel_refs().is_str() {
+ let msg = "remove this method call";
+ let mut fallback_span = true;
+ if let SelfSource::MethodCall(expr) = source {
+ let call_expr =
+ self.tcx.hir().expect_expr(self.tcx.hir().get_parent_node(expr.hir_id));
+ if let Some(span) = call_expr.span.trim_start(expr.span) {
+ err.span_suggestion(span, msg, "", Applicability::MachineApplicable);
+ fallback_span = false;
+ }
+ }
+ if fallback_span {
+ err.span_label(span, msg);
+ }
+ } else if let Some(lev_candidate) = lev_candidate {
+ // Don't emit a suggestion if we found an actual method
+ // that had unsatisfied trait bounds
+ if unsatisfied_predicates.is_empty() {
+ let def_kind = lev_candidate.kind.as_def_kind();
+ // Methods are defined within the context of a struct and their first parameter is always `self`,
+ // which represents the instance of the struct the method is being called on.
+ // Associated functions don't take `self` as a parameter; they are not methods
+ // because they don't have an instance of the struct to work with.
+ if def_kind == DefKind::AssocFn && lev_candidate.fn_has_self_parameter {
+ err.span_suggestion(
+ span,
+ &format!("there is a method with a similar name",),
+ lev_candidate.name,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_suggestion(
+ span,
+ &format!(
+ "there is {} {} with a similar name",
+ def_kind.article(),
+ def_kind.descr(lev_candidate.def_id),
+ ),
+ lev_candidate.name,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
+ self.check_for_deref_method(&mut err, source, rcvr_ty, item_name);
+
+ return Some(err);
+ }
+
+ MethodError::Ambiguity(mut sources) => {
+ let mut err = struct_span_err!(
+ self.sess(),
+ item_name.span,
+ E0034,
+ "multiple applicable items in scope"
+ );
+ err.span_label(item_name.span, format!("multiple `{}` found", item_name));
+
+ report_candidates(span, &mut err, &mut sources, sugg_span);
+ err.emit();
+ }
+
+ MethodError::PrivateMatch(kind, def_id, out_of_scope_traits) => {
+ let kind = kind.descr(def_id);
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ item_name.span,
+ E0624,
+ "{} `{}` is private",
+ kind,
+ item_name
+ );
+ err.span_label(item_name.span, &format!("private {}", kind));
+ let sp = self
+ .tcx
+ .hir()
+ .span_if_local(def_id)
+ .unwrap_or_else(|| self.tcx.def_span(def_id));
+ err.span_label(sp, &format!("private {} defined here", kind));
+ self.suggest_valid_traits(&mut err, out_of_scope_traits);
+ err.emit();
+ }
+
+ MethodError::IllegalSizedBound(candidates, needs_mut, bound_span) => {
+ let msg = format!("the `{}` method cannot be invoked on a trait object", item_name);
+ let mut err = self.sess().struct_span_err(span, &msg);
+ err.span_label(bound_span, "this has a `Sized` requirement");
+ if !candidates.is_empty() {
+ let help = format!(
+ "{an}other candidate{s} {were} found in the following trait{s}, perhaps \
+ add a `use` for {one_of_them}:",
+ an = if candidates.len() == 1 { "an" } else { "" },
+ s = pluralize!(candidates.len()),
+ were = pluralize!("was", candidates.len()),
+ one_of_them = if candidates.len() == 1 { "it" } else { "one of them" },
+ );
+ self.suggest_use_candidates(&mut err, help, candidates);
+ }
+ if let ty::Ref(region, t_type, mutability) = rcvr_ty.kind() {
+ if needs_mut {
+ let trait_type = self.tcx.mk_ref(
+ *region,
+ ty::TypeAndMut { ty: *t_type, mutbl: mutability.invert() },
+ );
+ err.note(&format!("you need `{}` instead of `{}`", trait_type, rcvr_ty));
+ }
+ }
+ err.emit();
+ }
+
+ MethodError::BadReturnType => bug!("no return type expectations but got BadReturnType"),
+ }
+ None
+ }
+
+ fn suggest_field_call(
+ &self,
+ span: Span,
+ rcvr_ty: Ty<'tcx>,
+ expr: &hir::Expr<'_>,
+ item_name: Ident,
+ err: &mut Diagnostic,
+ ) -> bool {
+ let tcx = self.tcx;
+ let field_receiver = self.autoderef(span, rcvr_ty).find_map(|(ty, _)| match ty.kind() {
+ ty::Adt(def, substs) if !def.is_enum() => {
+ let variant = &def.non_enum_variant();
+ tcx.find_field_index(item_name, variant).map(|index| {
+ let field = &variant.fields[index];
+ let field_ty = field.ty(tcx, substs);
+ (field, field_ty)
+ })
+ }
+ _ => None,
+ });
+ if let Some((field, field_ty)) = field_receiver {
+ let scope = tcx.parent_module(self.body_id);
+ let is_accessible = field.vis.is_accessible_from(scope, tcx);
+
+ if is_accessible {
+ if self.is_fn_ty(field_ty, span) {
+ let expr_span = expr.span.to(item_name.span);
+ err.multipart_suggestion(
+ &format!(
+ "to call the function stored in `{}`, \
+ surround the field access with parentheses",
+ item_name,
+ ),
+ vec![
+ (expr_span.shrink_to_lo(), '('.to_string()),
+ (expr_span.shrink_to_hi(), ')'.to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ } else {
+ let call_expr = tcx.hir().expect_expr(tcx.hir().get_parent_node(expr.hir_id));
+
+ if let Some(span) = call_expr.span.trim_start(item_name.span) {
+ err.span_suggestion(
+ span,
+ "remove the arguments",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
+ let field_kind = if is_accessible { "field" } else { "private field" };
+ err.span_label(item_name.span, format!("{}, not a method", field_kind));
+ return true;
+ }
+ false
+ }
+
+ /// Suggest adding parentheses around a range; for example,
+ /// when encountering `0..1.map(|i| i + 1)` suggest `(0..1).map(|i| i + 1)`.
+ fn suggest_wrapping_range_with_parens(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ actual: Ty<'tcx>,
+ source: SelfSource<'tcx>,
+ span: Span,
+ item_name: Ident,
+ ty_str: &str,
+ ) -> bool {
+ if let SelfSource::MethodCall(expr) = source {
+ for (_, parent) in tcx.hir().parent_iter(expr.hir_id).take(5) {
+ if let Node::Expr(parent_expr) = parent {
+ let lang_item = match parent_expr.kind {
+ ExprKind::Struct(ref qpath, _, _) => match **qpath {
+ QPath::LangItem(LangItem::Range, ..) => Some(LangItem::Range),
+ QPath::LangItem(LangItem::RangeTo, ..) => Some(LangItem::RangeTo),
+ QPath::LangItem(LangItem::RangeToInclusive, ..) => {
+ Some(LangItem::RangeToInclusive)
+ }
+ _ => None,
+ },
+ ExprKind::Call(ref func, _) => match func.kind {
+ // `..=` desugars into `::std::ops::RangeInclusive::new(...)`.
+ ExprKind::Path(QPath::LangItem(LangItem::RangeInclusiveNew, ..)) => {
+ Some(LangItem::RangeInclusiveStruct)
+ }
+ _ => None,
+ },
+ _ => None,
+ };
+
+ if lang_item.is_none() {
+ continue;
+ }
+
+ let span_included = match parent_expr.kind {
+ hir::ExprKind::Struct(_, eps, _) => {
+ eps.len() > 0 && eps.last().map_or(false, |ep| ep.span.contains(span))
+ }
+ // `..=` desugars into `::std::ops::RangeInclusive::new(...)`.
+ hir::ExprKind::Call(ref func, ..) => func.span.contains(span),
+ _ => false,
+ };
+
+ if !span_included {
+ continue;
+ }
+
+ let range_def_id = self.tcx.require_lang_item(lang_item.unwrap(), None);
+ let range_ty =
+ self.tcx.bound_type_of(range_def_id).subst(self.tcx, &[actual.into()]);
+
+ let pick = self.probe_for_name(
+ span,
+ Mode::MethodCall,
+ item_name,
+ IsSuggestion(true),
+ range_ty,
+ expr.hir_id,
+ ProbeScope::AllTraits,
+ );
+ if pick.is_ok() {
+ let range_span = parent_expr.span.with_hi(expr.span.hi());
+ tcx.sess.emit_err(errors::MissingParentheseInRange {
+ span,
+ ty_str: ty_str.to_string(),
+ method_name: item_name.as_str().to_string(),
+ add_missing_parentheses: Some(errors::AddMissingParenthesesInRange {
+ func_name: item_name.name.as_str().to_string(),
+ left: range_span.shrink_to_lo(),
+ right: range_span.shrink_to_hi(),
+ }),
+ });
+ return true;
+ }
+ }
+ }
+ }
+ false
+ }
+
+ fn suggest_constraining_numerical_ty(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ actual: Ty<'tcx>,
+ source: SelfSource<'_>,
+ span: Span,
+ item_kind: &str,
+ item_name: Ident,
+ ty_str: &str,
+ ) -> bool {
+ let found_candidate = all_traits(self.tcx)
+ .into_iter()
+ .any(|info| self.associated_value(info.def_id, item_name).is_some());
+ let found_assoc = |ty: Ty<'tcx>| {
+ simplify_type(tcx, ty, TreatParams::AsInfer)
+ .and_then(|simp| {
+ tcx.incoherent_impls(simp)
+ .iter()
+ .find_map(|&id| self.associated_value(id, item_name))
+ })
+ .is_some()
+ };
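+ // Only emit E0689 when the item exists somewhere (on some trait, or on one of
+ // the concrete primitive numeric types) and the receiver is a numeric type
+ // whose concrete type has not been inferred yet.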
+ let found_candidate = found_candidate
+ || found_assoc(tcx.types.i8)
+ || found_assoc(tcx.types.i16)
+ || found_assoc(tcx.types.i32)
+ || found_assoc(tcx.types.i64)
+ || found_assoc(tcx.types.i128)
+ || found_assoc(tcx.types.u8)
+ || found_assoc(tcx.types.u16)
+ || found_assoc(tcx.types.u32)
+ || found_assoc(tcx.types.u64)
+ || found_assoc(tcx.types.u128)
+ || found_assoc(tcx.types.f32)
+ || found_assoc(tcx.types.f64);
+ if found_candidate
+ && actual.is_numeric()
+ && !actual.has_concrete_skeleton()
+ && let SelfSource::MethodCall(expr) = source
+ {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0689,
+ "can't call {} `{}` on ambiguous numeric type `{}`",
+ item_kind,
+ item_name,
+ ty_str
+ );
+ let concrete_type = if actual.is_integral() { "i32" } else { "f32" };
+ match expr.kind {
+ ExprKind::Lit(ref lit) => {
+ // numeric literal
+ let snippet = tcx
+ .sess
+ .source_map()
+ .span_to_snippet(lit.span)
+ .unwrap_or_else(|_| "<numeric literal>".to_owned());
+
+ // If this is a floating point literal that ends with '.',
+ // get rid of it to stop this from becoming a member access.
+ let snippet = snippet.strip_suffix('.').unwrap_or(&snippet);
+ err.span_suggestion(
+ lit.span,
+ &format!(
+ "you must specify a concrete type for this numeric value, \
+ like `{}`",
+ concrete_type
+ ),
+ format!("{snippet}_{concrete_type}"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ ExprKind::Path(QPath::Resolved(_, path)) => {
+ // local binding
+ if let hir::def::Res::Local(hir_id) = path.res {
+ let span = tcx.hir().span(hir_id);
+ let filename = tcx.sess.source_map().span_to_filename(span);
+
+ let parent_node =
+ self.tcx.hir().get(self.tcx.hir().get_parent_node(hir_id));
+ let msg = format!(
+ "you must specify a type for this binding, like `{}`",
+ concrete_type,
+ );
+
+ match (filename, parent_node) {
+ (
+ FileName::Real(_),
+ Node::Local(hir::Local {
+ source: hir::LocalSource::Normal,
+ ty,
+ ..
+ }),
+ ) => {
+ let type_span = ty.map(|ty| ty.span.with_lo(span.hi())).unwrap_or(span.shrink_to_hi());
+ err.span_suggestion(
+ // account for `let x: _ = 42;`
+ // ^^^
+ type_span,
+ &msg,
+ format!(": {concrete_type}"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {
+ err.span_label(span, msg);
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ err.emit();
+ return true;
+ }
+ false
+ }
+
+ fn check_for_field_method(
+ &self,
+ err: &mut Diagnostic,
+ source: SelfSource<'tcx>,
+ span: Span,
+ actual: Ty<'tcx>,
+ item_name: Ident,
+ ) {
+ if let SelfSource::MethodCall(expr) = source
+ && let mod_id = self.tcx.parent_module(expr.hir_id).to_def_id()
+ && let Some((fields, substs)) =
+ self.get_field_candidates_considering_privacy(span, actual, mod_id)
+ {
+ let call_expr = self.tcx.hir().expect_expr(self.tcx.hir().get_parent_node(expr.hir_id));
+
+ let lang_items = self.tcx.lang_items();
+ let never_mention_traits = [
+ lang_items.clone_trait(),
+ lang_items.deref_trait(),
+ lang_items.deref_mut_trait(),
+ self.tcx.get_diagnostic_item(sym::AsRef),
+ self.tcx.get_diagnostic_item(sym::AsMut),
+ self.tcx.get_diagnostic_item(sym::Borrow),
+ self.tcx.get_diagnostic_item(sym::BorrowMut),
+ ];
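+ // Suggest `expr.field.method()` only when the method found on the field's type
+ // does not come from one of the ubiquitous traits above, which are implemented
+ // for so many types that the suggestion would be pure noise.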
+ let candidate_fields: Vec<_> = fields
+ .filter_map(|candidate_field| {
+ self.check_for_nested_field_satisfying(
+ span,
+ &|_, field_ty| {
+ self.lookup_probe(
+ span,
+ item_name,
+ field_ty,
+ call_expr,
+ ProbeScope::TraitsInScope,
+ )
+ .map_or(false, |pick| {
+ !never_mention_traits
+ .iter()
+ .flatten()
+ .any(|def_id| self.tcx.parent(pick.item.def_id) == *def_id)
+ })
+ },
+ candidate_field,
+ substs,
+ vec![],
+ mod_id,
+ )
+ })
+ .map(|field_path| {
+ field_path
+ .iter()
+ .map(|id| id.name.to_ident_string())
+ .collect::<Vec<String>>()
+ .join(".")
+ })
+ .collect();
+
+ let len = candidate_fields.len();
+ if len > 0 {
+ err.span_suggestions(
+ item_name.span.shrink_to_lo(),
+ format!(
+ "{} of the expressions' fields {} a method of the same name",
+ if len > 1 { "some" } else { "one" },
+ if len > 1 { "have" } else { "has" },
+ ),
+ candidate_fields.iter().map(|path| format!("{path}.")),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
+ fn check_for_inner_self(
+ &self,
+ err: &mut Diagnostic,
+ source: SelfSource<'tcx>,
+ span: Span,
+ actual: Ty<'tcx>,
+ item_name: Ident,
+ ) {
+ let tcx = self.tcx;
+ let SelfSource::MethodCall(expr) = source else { return; };
+ let call_expr = tcx.hir().expect_expr(tcx.hir().get_parent_node(expr.hir_id));
+
+ let ty::Adt(kind, substs) = actual.kind() else { return; };
+ match kind.adt_kind() {
+ ty::AdtKind::Enum => {
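+ // Look for single-field variants (e.g. `Ok`/`Some`) whose payload type
+ // actually provides the requested method.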
+ let matching_variants: Vec<_> = kind
+ .variants()
+ .iter()
+ .flat_map(|variant| {
+ let [field] = &variant.fields[..] else { return None; };
+ let field_ty = field.ty(tcx, substs);
+
+ // Skip `_`, since that'll just lead to ambiguity.
+ if self.resolve_vars_if_possible(field_ty).is_ty_var() {
+ return None;
+ }
+
+ self.lookup_probe(
+ span,
+ item_name,
+ field_ty,
+ call_expr,
+ ProbeScope::TraitsInScope,
+ )
+ .ok()
+ .map(|pick| (variant, field, pick))
+ })
+ .collect();
+
+ let ret_ty_matches = |diagnostic_item| {
+ if let Some(ret_ty) = self
+ .ret_coercion
+ .as_ref()
+ .map(|c| self.resolve_vars_if_possible(c.borrow().expected_ty()))
+ && let ty::Adt(kind, _) = ret_ty.kind()
+ && tcx.get_diagnostic_item(diagnostic_item) == Some(kind.did())
+ {
+ true
+ } else {
+ false
+ }
+ };
+
+ match &matching_variants[..] {
+ [(_, field, pick)] => {
+ let self_ty = field.ty(tcx, substs);
+ err.span_note(
+ tcx.def_span(pick.item.def_id),
+ &format!("the method `{item_name}` exists on the type `{self_ty}`"),
+ );
+ let (article, kind, variant, question) =
+ if tcx.is_diagnostic_item(sym::Result, kind.did()) {
+ ("a", "Result", "Err", ret_ty_matches(sym::Result))
+ } else if tcx.is_diagnostic_item(sym::Option, kind.did()) {
+ ("an", "Option", "None", ret_ty_matches(sym::Option))
+ } else {
+ return;
+ };
+ if question {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "use the `?` operator to extract the `{self_ty}` value, propagating \
+ {article} `{kind}::{variant}` value to the caller"
+ ),
+ "?",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "consider using `{kind}::expect` to unwrap the `{self_ty}` value, \
+ panicking if the value is {article} `{kind}::{variant}`"
+ ),
+ ".expect(\"REASON\")",
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+ // FIXME(compiler-errors): Support suggestions for other matching enum variants
+ _ => {}
+ }
+ }
+ // Target wrapper types - types that wrap or pretend to wrap another type,
+ // perhaps this inner type is meant to be called?
+ ty::AdtKind::Struct | ty::AdtKind::Union => {
+ let [first] = ***substs else { return; };
+ let ty::GenericArgKind::Type(ty) = first.unpack() else { return; };
+ let Ok(pick) = self.lookup_probe(
+ span,
+ item_name,
+ ty,
+ call_expr,
+ ProbeScope::TraitsInScope,
+ ) else { return; };
+
+ let name = self.ty_to_value_string(actual);
+ let inner_id = kind.did();
+ let mutable = if let Some(AutorefOrPtrAdjustment::Autoref { mutbl, .. }) =
+ pick.autoref_or_ptr_adjustment
+ {
+ Some(mutbl)
+ } else {
+ None
+ };
+
+ if tcx.is_diagnostic_item(sym::LocalKey, inner_id) {
+ err.help("use `with` or `try_with` to access thread local storage");
+ } else if Some(kind.did()) == tcx.lang_items().maybe_uninit() {
+ err.help(format!(
+ "if this `{name}` has been initialized, \
+ use one of the `assume_init` methods to access the inner value"
+ ));
+ } else if tcx.is_diagnostic_item(sym::RefCell, inner_id) {
+ let (suggestion, borrow_kind, panic_if) = match mutable {
+ Some(Mutability::Not) => (".borrow()", "borrow", "a mutable borrow exists"),
+ Some(Mutability::Mut) => {
+ (".borrow_mut()", "mutably borrow", "any borrows exist")
+ }
+ None => return,
+ };
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "use `{suggestion}` to {borrow_kind} the `{ty}`, \
+ panicking if {panic_if}"
+ ),
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ } else if tcx.is_diagnostic_item(sym::Mutex, inner_id) {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "use `.lock().unwrap()` to borrow the `{ty}`, \
+ blocking the current thread until it can be acquired"
+ ),
+ ".lock().unwrap()",
+ Applicability::MaybeIncorrect,
+ );
+ } else if tcx.is_diagnostic_item(sym::RwLock, inner_id) {
+ let (suggestion, borrow_kind) = match mutable {
+ Some(Mutability::Not) => (".read().unwrap()", "borrow"),
+ Some(Mutability::Mut) => (".write().unwrap()", "mutably borrow"),
+ None => return,
+ };
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "use `{suggestion}` to {borrow_kind} the `{ty}`, \
+ blocking the current thread until it can be acquired"
+ ),
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ return;
+ };
+
+ err.span_note(
+ tcx.def_span(pick.item.def_id),
+ &format!("the method `{item_name}` exists on the type `{ty}`"),
+ );
+ }
+ }
+ }
+
+ pub(crate) fn note_unmet_impls_on_type(
+ &self,
+ err: &mut Diagnostic,
+ errors: Vec<FulfillmentError<'tcx>>,
+ ) {
+ let all_local_types_needing_impls =
+ errors.iter().all(|e| match e.obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(pred) => match pred.self_ty().kind() {
+ ty::Adt(def, _) => def.did().is_local(),
+ _ => false,
+ },
+ _ => false,
+ });
+ let mut preds: Vec<_> = errors
+ .iter()
+ .filter_map(|e| match e.obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(pred) => Some(pred),
+ _ => None,
+ })
+ .collect();
+ preds.sort_by_key(|pred| (pred.def_id(), pred.self_ty()));
+ let def_ids = preds
+ .iter()
+ .filter_map(|pred| match pred.self_ty().kind() {
+ ty::Adt(def, _) => Some(def.did()),
+ _ => None,
+ })
+ .collect::<FxHashSet<_>>();
+ let mut spans: MultiSpan = def_ids
+ .iter()
+ .filter_map(|def_id| {
+ let span = self.tcx.def_span(*def_id);
+ if span.is_dummy() { None } else { Some(span) }
+ })
+ .collect::<Vec<_>>()
+ .into();
+
+ for pred in &preds {
+ match pred.self_ty().kind() {
+ ty::Adt(def, _) if def.did().is_local() => {
+ spans.push_span_label(
+ self.tcx.def_span(def.did()),
+ format!("must implement `{}`", pred.trait_ref.print_only_trait_path()),
+ );
+ }
+ _ => {}
+ }
+ }
+
+ if all_local_types_needing_impls && spans.primary_span().is_some() {
+ let msg = if preds.len() == 1 {
+ format!(
+ "an implementation of `{}` might be missing for `{}`",
+ preds[0].trait_ref.print_only_trait_path(),
+ preds[0].self_ty()
+ )
+ } else {
+ format!(
+ "the following type{} would have to `impl` {} required trait{} for this \
+ operation to be valid",
+ pluralize!(def_ids.len()),
+ if def_ids.len() == 1 { "its" } else { "their" },
+ pluralize!(preds.len()),
+ )
+ };
+ err.span_note(spans, &msg);
+ }
+
+ let preds: Vec<_> = errors
+ .iter()
+ .map(|e| (e.obligation.predicate, None, Some(e.obligation.cause.clone())))
+ .collect();
+ self.suggest_derive(err, &preds);
+ }
+
+ fn suggest_derive(
+ &self,
+ err: &mut Diagnostic,
+ unsatisfied_predicates: &[(
+ ty::Predicate<'tcx>,
+ Option<ty::Predicate<'tcx>>,
+ Option<ObligationCause<'tcx>>,
+ )],
+ ) {
+ let mut derives = Vec::<(String, Span, Symbol)>::new();
+ let mut traits = Vec::<Span>::new();
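+ // For each unsatisfied trait predicate on a local ADT, either queue a
+ // `#[derive(...)]` suggestion (for derivable std traits) or note the trait's
+ // definition span so it can be listed as "must be implemented".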
+ for (pred, _, _) in unsatisfied_predicates {
+ let ty::PredicateKind::Trait(trait_pred) = pred.kind().skip_binder() else { continue };
+ let adt = match trait_pred.self_ty().ty_adt_def() {
+ Some(adt) if adt.did().is_local() => adt,
+ _ => continue,
+ };
+ if let Some(diagnostic_name) = self.tcx.get_diagnostic_name(trait_pred.def_id()) {
+ let can_derive = match diagnostic_name {
+ sym::Default => !adt.is_enum(),
+ sym::Eq
+ | sym::PartialEq
+ | sym::Ord
+ | sym::PartialOrd
+ | sym::Clone
+ | sym::Copy
+ | sym::Hash
+ | sym::Debug => true,
+ _ => false,
+ };
+ if can_derive {
+ let self_name = trait_pred.self_ty().to_string();
+ let self_span = self.tcx.def_span(adt.did());
+ if let Some(poly_trait_ref) = pred.to_opt_poly_trait_pred() {
+ for super_trait in supertraits(self.tcx, poly_trait_ref.to_poly_trait_ref())
+ {
+ if let Some(parent_diagnostic_name) =
+ self.tcx.get_diagnostic_name(super_trait.def_id())
+ {
+ derives.push((
+ self_name.clone(),
+ self_span,
+ parent_diagnostic_name,
+ ));
+ }
+ }
+ }
+ derives.push((self_name, self_span, diagnostic_name));
+ } else {
+ traits.push(self.tcx.def_span(trait_pred.def_id()));
+ }
+ } else {
+ traits.push(self.tcx.def_span(trait_pred.def_id()));
+ }
+ }
+ traits.sort();
+ traits.dedup();
+
+ derives.sort();
+ derives.dedup();
+
+ let mut derives_grouped = Vec::<(String, Span, String)>::new();
+ for (self_name, self_span, trait_name) in derives.into_iter() {
+ if let Some((last_self_name, _, ref mut last_trait_names)) = derives_grouped.last_mut()
+ {
+ if last_self_name == &self_name {
+ last_trait_names.push_str(format!(", {}", trait_name).as_str());
+ continue;
+ }
+ }
+ derives_grouped.push((self_name, self_span, trait_name.to_string()));
+ }
+
+ let len = traits.len();
+ if len > 0 {
+ let span: MultiSpan = traits.into();
+ err.span_note(
+ span,
+ &format!("the following trait{} must be implemented", pluralize!(len),),
+ );
+ }
+
+ for (self_name, self_span, traits) in &derives_grouped {
+ err.span_suggestion_verbose(
+ self_span.shrink_to_lo(),
+ &format!("consider annotating `{}` with `#[derive({})]`", self_name, traits),
+ format!("#[derive({})]\n", traits),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ fn check_for_deref_method(
+ &self,
+ err: &mut Diagnostic,
+ self_source: SelfSource<'tcx>,
+ rcvr_ty: Ty<'tcx>,
+ item_name: Ident,
+ ) {
+ let SelfSource::QPath(ty) = self_source else { return; };
+ for (deref_ty, _) in self.autoderef(rustc_span::DUMMY_SP, rcvr_ty).skip(1) {
+ if let Ok(pick) = self.probe_for_name(
+ ty.span,
+ Mode::Path,
+ item_name,
+ IsSuggestion(true),
+ deref_ty,
+ ty.hir_id,
+ ProbeScope::TraitsInScope,
+ ) {
+ if deref_ty.is_suggestable(self.tcx, true)
+ // If this method receives `&self`, then the provided
+ // argument _should_ coerce, so it's valid to suggest
+ // just changing the path.
+ && pick.item.fn_has_self_parameter
+ && let Some(self_ty) =
+ self.tcx.fn_sig(pick.item.def_id).inputs().skip_binder().get(0)
+ && self_ty.is_ref()
+ {
+ let suggested_path = match deref_ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Adt(_, _)
+ | ty::Str
+ | ty::Projection(_)
+ | ty::Param(_) => format!("{deref_ty}"),
+ _ => format!("<{deref_ty}>"),
+ };
+ err.span_suggestion_verbose(
+ ty.span,
+ format!("the function `{item_name}` is implemented on `{deref_ty}`"),
+ suggested_path,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_note(
+ ty.span,
+ format!("the function `{item_name}` is implemented on `{deref_ty}`"),
+ );
+ }
+ return;
+ }
+ }
+ }
+
+ /// Print out the type for use in the value namespace.
+ fn ty_to_value_string(&self, ty: Ty<'tcx>) -> String {
+ match ty.kind() {
+ ty::Adt(def, substs) => format!("{}", ty::Instance::new(def.did(), substs)),
+ _ => self.ty_to_string(ty),
+ }
+ }
+
+ fn suggest_await_before_method(
+ &self,
+ err: &mut Diagnostic,
+ item_name: Ident,
+ ty: Ty<'tcx>,
+ call: &hir::Expr<'_>,
+ span: Span,
+ ) {
+ let output_ty = match self.get_impl_future_output_ty(ty) {
+ Some(output_ty) => self.resolve_vars_if_possible(output_ty).skip_binder(),
+ _ => return,
+ };
+ let method_exists = self.method_exists(item_name, output_ty, call.hir_id, true);
+ debug!("suggest_await_before_method: is_method_exist={}", method_exists);
+ if method_exists {
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ "consider `await`ing on the `Future` and calling the method on its `Output`",
+ "await.",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ fn suggest_use_candidates(&self, err: &mut Diagnostic, msg: String, candidates: Vec<DefId>) {
+ let parent_map = self.tcx.visible_parent_map(());
+
+ // Separate out candidates that must be imported with a glob, because they are named `_`
+ // and cannot be referred with their identifier.
+ let (candidates, globs): (Vec<_>, Vec<_>) = candidates.into_iter().partition(|trait_did| {
+ if let Some(parent_did) = parent_map.get(trait_did) {
+ // If the item is re-exported as `_`, we should suggest a glob-import instead.
+ if *parent_did != self.tcx.parent(*trait_did)
+ && self
+ .tcx
+ .module_children(*parent_did)
+ .iter()
+ .filter(|child| child.res.opt_def_id() == Some(*trait_did))
+ .all(|child| child.ident.name == kw::Underscore)
+ {
+ return false;
+ }
+ }
+
+ true
+ });
+
+ let module_did = self.tcx.parent_module(self.body_id);
+ let (module, _, _) = self.tcx.hir().get_module(module_did);
+ let span = module.spans.inject_use_span;
+
+ let path_strings = candidates.iter().map(|trait_did| {
+ format!("use {};\n", with_crate_prefix!(self.tcx.def_path_str(*trait_did)),)
+ });
+
+ let glob_path_strings = globs.iter().map(|trait_did| {
+ let parent_did = parent_map.get(trait_did).unwrap();
+ format!(
+ "use {}::*; // trait {}\n",
+ with_crate_prefix!(self.tcx.def_path_str(*parent_did)),
+ self.tcx.item_name(*trait_did),
+ )
+ });
+
+ err.span_suggestions(
+ span,
+ &msg,
+ path_strings.chain(glob_path_strings),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ fn suggest_valid_traits(
+ &self,
+ err: &mut Diagnostic,
+ valid_out_of_scope_traits: Vec<DefId>,
+ ) -> bool {
+ if !valid_out_of_scope_traits.is_empty() {
+ let mut candidates = valid_out_of_scope_traits;
+ candidates.sort();
+ candidates.dedup();
+
+ // `TryFrom` and `FromIterator` have no methods
+ let edition_fix = candidates
+ .iter()
+ .find(|did| self.tcx.is_diagnostic_item(sym::TryInto, **did))
+ .copied();
+
+ err.help("items from traits can only be used if the trait is in scope");
+ let msg = format!(
+ "the following {traits_are} implemented but not in scope; \
+ perhaps add a `use` for {one_of_them}:",
+ traits_are = if candidates.len() == 1 { "trait is" } else { "traits are" },
+ one_of_them = if candidates.len() == 1 { "it" } else { "one of them" },
+ );
+
+ self.suggest_use_candidates(err, msg, candidates);
+ if let Some(did) = edition_fix {
+ err.note(&format!(
+ "'{}' is included in the prelude starting in Edition 2021",
+ with_crate_prefix!(self.tcx.def_path_str(did))
+ ));
+ }
+
+ true
+ } else {
+ false
+ }
+ }
+
+ fn suggest_traits_to_import(
+ &self,
+ err: &mut Diagnostic,
+ span: Span,
+ rcvr_ty: Ty<'tcx>,
+ item_name: Ident,
+ inputs_len: Option<usize>,
+ source: SelfSource<'tcx>,
+ valid_out_of_scope_traits: Vec<DefId>,
+ unsatisfied_predicates: &[(
+ ty::Predicate<'tcx>,
+ Option<ty::Predicate<'tcx>>,
+ Option<ObligationCause<'tcx>>,
+ )],
+ static_candidates: &[CandidateSource],
+ unsatisfied_bounds: bool,
+ ) {
+ let mut alt_rcvr_sugg = false;
+ if let (SelfSource::MethodCall(rcvr), false) = (source, unsatisfied_bounds) {
+ debug!(?span, ?item_name, ?rcvr_ty, ?rcvr);
+ let skippable = [
+ self.tcx.lang_items().clone_trait(),
+ self.tcx.lang_items().deref_trait(),
+ self.tcx.lang_items().deref_mut_trait(),
+ self.tcx.lang_items().drop_trait(),
+ self.tcx.get_diagnostic_item(sym::AsRef),
+ ];
+ // Try alternative arbitrary self types that could fulfill this call.
+ // FIXME: probe for all types that *could* be arbitrary self-types, not
+ // just this list.
+ for (rcvr_ty, post) in &[
+ (rcvr_ty, ""),
+ (self.tcx.mk_mut_ref(self.tcx.lifetimes.re_erased, rcvr_ty), "&mut "),
+ (self.tcx.mk_imm_ref(self.tcx.lifetimes.re_erased, rcvr_ty), "&"),
+ ] {
+ match self.lookup_probe(span, item_name, *rcvr_ty, rcvr, ProbeScope::AllTraits) {
+ Ok(pick) => {
+ // If the method is defined for the receiver we have, it likely wasn't `use`d.
+ // We point at the method, but we just skip the rest of the check for arbitrary
+ // self types and rely on the suggestion to `use` the trait from
+ // `suggest_valid_traits`.
+ let did = Some(pick.item.container_id(self.tcx));
+ let skip = skippable.contains(&did);
+ if pick.autoderefs == 0 && !skip {
+ err.span_label(
+ pick.item.ident(self.tcx).span,
+ &format!("the method is available for `{}` here", rcvr_ty),
+ );
+ }
+ break;
+ }
+ Err(MethodError::Ambiguity(_)) => {
+ // If the method is defined (but ambiguous) for the receiver we have, it is also
+ // likely we haven't `use`d it. It may be possible that if we `Box`/`Pin`/etc.
+ // the receiver, then it might disambiguate this method, but I think these
+ // suggestions are generally misleading (see #94218).
+ break;
+ }
+ _ => {}
+ }
+
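+ // Also try wrapping the receiver in common smart pointers; if e.g.
+ // `Pin::new(rcvr)` would make the call resolve, suggest that wrapping.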
+ for (rcvr_ty, pre) in &[
+ (self.tcx.mk_lang_item(*rcvr_ty, LangItem::OwnedBox), "Box::new"),
+ (self.tcx.mk_lang_item(*rcvr_ty, LangItem::Pin), "Pin::new"),
+ (self.tcx.mk_diagnostic_item(*rcvr_ty, sym::Arc), "Arc::new"),
+ (self.tcx.mk_diagnostic_item(*rcvr_ty, sym::Rc), "Rc::new"),
+ ] {
+ if let Some(new_rcvr_t) = *rcvr_ty
+ && let Ok(pick) = self.lookup_probe(
+ span,
+ item_name,
+ new_rcvr_t,
+ rcvr,
+ ProbeScope::AllTraits,
+ )
+ {
+ debug!("try_alt_rcvr: pick candidate {:?}", pick);
+ let did = Some(pick.item.container_id(self.tcx));
+ // We don't want to suggest a container type when the missing
+ // method is `.clone()` or `.deref()` otherwise we'd suggest
+ // `Arc::new(foo).clone()`, which is far from what the user wants.
+ // Explicitly ignore the `Pin::as_ref()` method as `Pin` does not
+ // implement the `AsRef` trait.
+ let skip = skippable.contains(&did)
+ || (("Pin::new" == *pre) && (sym::as_ref == item_name.name))
+ || inputs_len.map_or(false, |inputs_len| pick.item.kind == ty::AssocKind::Fn && self.tcx.fn_sig(pick.item.def_id).skip_binder().inputs().len() != inputs_len);
+ // Make sure the method is defined for the *actual* receiver: we don't
+ // want to treat `Box<Self>` as a receiver if it only works because of
+ // an autoderef to `&self`
+ if pick.autoderefs == 0 && !skip {
+ err.span_label(
+ pick.item.ident(self.tcx).span,
+ &format!("the method is available for `{}` here", new_rcvr_t),
+ );
+ err.multipart_suggestion(
+ "consider wrapping the receiver expression with the \
+ appropriate type",
+ vec![
+ (rcvr.span.shrink_to_lo(), format!("{}({}", pre, post)),
+ (rcvr.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ // We don't care about the other suggestions.
+ alt_rcvr_sugg = true;
+ }
+ }
+ }
+ }
+ }
+ if self.suggest_valid_traits(err, valid_out_of_scope_traits) {
+ return;
+ }
+
+ let type_is_local = self.type_derefs_to_local(span, rcvr_ty, source);
+
+ let mut arbitrary_rcvr = vec![];
+ // There are no traits implemented, so let's suggest some traits to
+ // implement by finding ones that have the item name and are
+ // legal to implement.
+ let mut candidates = all_traits(self.tcx)
+ .into_iter()
+ // Don't issue suggestions for unstable traits since they're
+ // unlikely to be implementable anyway
+ .filter(|info| match self.tcx.lookup_stability(info.def_id) {
+ Some(attr) => attr.level.is_stable(),
+ None => true,
+ })
+ .filter(|info| {
+ // Static candidates are already implemented, and known not to work
+ // Do not suggest them again
+ static_candidates.iter().all(|sc| match *sc {
+ CandidateSource::Trait(def_id) => def_id != info.def_id,
+ CandidateSource::Impl(def_id) => {
+ self.tcx.trait_id_of_impl(def_id) != Some(info.def_id)
+ }
+ })
+ })
+ .filter(|info| {
+ // We approximate the coherence rules to only suggest
+ // traits that are legal to implement by requiring that
+ // either the type or trait is local. Multi-dispatch means
+ // this isn't perfect (that is, there are cases when
+ // implementing a trait would be legal but is rejected
+ // here).
+ unsatisfied_predicates.iter().all(|(p, _, _)| {
+ match p.kind().skip_binder() {
+ // Hide traits if they are present in predicates as they can be fixed without
+ // having to implement them.
+ ty::PredicateKind::Trait(t) => t.def_id() == info.def_id,
+ ty::PredicateKind::Projection(p) => {
+ p.projection_ty.item_def_id == info.def_id
+ }
+ _ => false,
+ }
+ }) && (type_is_local || info.def_id.is_local())
+ && self
+ .associated_value(info.def_id, item_name)
+ .filter(|item| {
+ if let ty::AssocKind::Fn = item.kind {
+ let id = item
+ .def_id
+ .as_local()
+ .map(|def_id| self.tcx.hir().local_def_id_to_hir_id(def_id));
+ if let Some(hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(fn_sig, method),
+ ..
+ })) = id.map(|id| self.tcx.hir().get(id))
+ {
+ let self_first_arg = match method {
+ hir::TraitFn::Required([ident, ..]) => {
+ ident.name == kw::SelfLower
+ }
+ hir::TraitFn::Provided(body_id) => {
+ self.tcx.hir().body(*body_id).params.first().map_or(
+ false,
+ |param| {
+ matches!(
+ param.pat.kind,
+ hir::PatKind::Binding(_, _, ident, _)
+ if ident.name == kw::SelfLower
+ )
+ },
+ )
+ }
+ _ => false,
+ };
+
+ if !fn_sig.decl.implicit_self.has_implicit_self()
+ && self_first_arg
+ {
+ if let Some(ty) = fn_sig.decl.inputs.get(0) {
+ arbitrary_rcvr.push(ty.span);
+ }
+ return false;
+ }
+ }
+ }
+ // We only want to suggest public or local traits (#45781).
+ item.visibility(self.tcx).is_public() || info.def_id.is_local()
+ })
+ .is_some()
+ })
+ .collect::<Vec<_>>();
+ for span in &arbitrary_rcvr {
+ err.span_label(
+ *span,
+ "the method might not be found because of this arbitrary self type",
+ );
+ }
+ if alt_rcvr_sugg {
+ return;
+ }
+
+ if !candidates.is_empty() {
+ // Sort from most relevant to least relevant.
+ candidates.sort_by(|a, b| a.cmp(b).reverse());
+ candidates.dedup();
+
+ let param_type = match rcvr_ty.kind() {
+ ty::Param(param) => Some(param),
+ ty::Ref(_, ty, _) => match ty.kind() {
+ ty::Param(param) => Some(param),
+ _ => None,
+ },
+ _ => None,
+ };
+ err.help(if param_type.is_some() {
+ "items from traits can only be used if the type parameter is bounded by the trait"
+ } else {
+ "items from traits can only be used if the trait is implemented and in scope"
+ });
+ let candidates_len = candidates.len();
+ let message = |action| {
+ format!(
+ "the following {traits_define} an item `{name}`, perhaps you need to {action} \
+ {one_of_them}:",
+ traits_define =
+ if candidates_len == 1 { "trait defines" } else { "traits define" },
+ action = action,
+ one_of_them = if candidates_len == 1 { "it" } else { "one of them" },
+ name = item_name,
+ )
+ };
+ // Obtain the span for `param` and use it for a structured suggestion.
+ if let Some(param) = param_type {
+ let generics = self.tcx.generics_of(self.body_id.owner.to_def_id());
+ let type_param = generics.type_param(param, self.tcx);
+ let hir = self.tcx.hir();
+ if let Some(def_id) = type_param.def_id.as_local() {
+ let id = hir.local_def_id_to_hir_id(def_id);
+ // Get the `hir::Param` to verify whether it already has any bounds.
+ // We do this to avoid suggesting code that ends up as `T: FooBar`,
+ // instead we suggest `T: Foo + Bar` in that case.
+ match hir.get(id) {
+ Node::GenericParam(param) => {
+ enum Introducer {
+ Plus,
+ Colon,
+ Nothing,
+ }
+ let ast_generics = hir.get_generics(id.owner.def_id).unwrap();
+ let (sp, mut introducer) = if let Some(span) =
+ ast_generics.bounds_span_for_suggestions(def_id)
+ {
+ (span, Introducer::Plus)
+ } else if let Some(colon_span) = param.colon_span {
+ (colon_span.shrink_to_hi(), Introducer::Nothing)
+ } else {
+ (param.span.shrink_to_hi(), Introducer::Colon)
+ };
+ if matches!(
+ param.kind,
+ hir::GenericParamKind::Type { synthetic: true, .. },
+ ) {
+ introducer = Introducer::Plus
+ }
+ let trait_def_ids: FxHashSet<DefId> = ast_generics
+ .bounds_for_param(def_id)
+ .flat_map(|bp| bp.bounds.iter())
+ .filter_map(|bound| bound.trait_ref()?.trait_def_id())
+ .collect();
+ if !candidates.iter().any(|t| trait_def_ids.contains(&t.def_id)) {
+ err.span_suggestions(
+ sp,
+ &message(format!(
+ "restrict type parameter `{}` with",
+ param.name.ident(),
+ )),
+ candidates.iter().map(|t| {
+ format!(
+ "{} {}",
+ match introducer {
+ Introducer::Plus => " +",
+ Introducer::Colon => ":",
+ Introducer::Nothing => "",
+ },
+ self.tcx.def_path_str(t.def_id),
+ )
+ }),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ return;
+ }
+ Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(.., bounds, _),
+ ident,
+ ..
+ }) => {
+ let (sp, sep, article) = if bounds.is_empty() {
+ (ident.span.shrink_to_hi(), ":", "a")
+ } else {
+ (bounds.last().unwrap().span().shrink_to_hi(), " +", "another")
+ };
+ err.span_suggestions(
+ sp,
+ &message(format!("add {} supertrait for", article)),
+ candidates.iter().map(|t| {
+ format!("{} {}", sep, self.tcx.def_path_str(t.def_id),)
+ }),
+ Applicability::MaybeIncorrect,
+ );
+ return;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ let (potential_candidates, explicitly_negative) = if param_type.is_some() {
+ // FIXME: Even though negative bounds are not implemented, we could maybe handle
+ // cases where a positive bound implies a negative impl.
+ (candidates, Vec::new())
+ } else if let Some(simp_rcvr_ty) =
+ simplify_type(self.tcx, rcvr_ty, TreatParams::AsPlaceholder)
+ {
+ let mut potential_candidates = Vec::new();
+ let mut explicitly_negative = Vec::new();
+ for candidate in candidates {
+ // Check if there's a negative impl of `candidate` for `rcvr_ty`
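+                // (Illustrative: given `struct Foo;` and an explicit `impl !SomeTrait for Foo {}`,
+                // `SomeTrait` ends up in `explicitly_negative` below and is reported as
+                // "explicitly unimplemented" instead of being suggested.)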
+ if self
+ .tcx
+ .all_impls(candidate.def_id)
+ .filter(|imp_did| {
+ self.tcx.impl_polarity(*imp_did) == ty::ImplPolarity::Negative
+ })
+ .any(|imp_did| {
+ let imp = self.tcx.impl_trait_ref(imp_did).unwrap();
+ let imp_simp =
+ simplify_type(self.tcx, imp.self_ty(), TreatParams::AsPlaceholder);
+ imp_simp.map_or(false, |s| s == simp_rcvr_ty)
+ })
+ {
+ explicitly_negative.push(candidate);
+ } else {
+ potential_candidates.push(candidate);
+ }
+ }
+ (potential_candidates, explicitly_negative)
+ } else {
+            // We don't know enough about `rcvr_ty` to make proper suggestions.
+ (candidates, Vec::new())
+ };
+
+ let action = if let Some(param) = param_type {
+ format!("restrict type parameter `{}` with", param)
+ } else {
+ // FIXME: it might only need to be imported into scope, not implemented.
+ "implement".to_string()
+ };
+ match &potential_candidates[..] {
+ [] => {}
+ [trait_info] if trait_info.def_id.is_local() => {
+ err.span_note(
+ self.tcx.def_span(trait_info.def_id),
+ &format!(
+ "`{}` defines an item `{}`, perhaps you need to {} it",
+ self.tcx.def_path_str(trait_info.def_id),
+ item_name,
+ action
+ ),
+ );
+ }
+ trait_infos => {
+ let mut msg = message(action);
+ for (i, trait_info) in trait_infos.iter().enumerate() {
+ msg.push_str(&format!(
+ "\ncandidate #{}: `{}`",
+ i + 1,
+ self.tcx.def_path_str(trait_info.def_id),
+ ));
+ }
+ err.note(&msg);
+ }
+ }
+ match &explicitly_negative[..] {
+ [] => {}
+ [trait_info] => {
+ let msg = format!(
+ "the trait `{}` defines an item `{}`, but is explicitly unimplemented",
+ self.tcx.def_path_str(trait_info.def_id),
+ item_name
+ );
+ err.note(&msg);
+ }
+ trait_infos => {
+ let mut msg = format!(
+ "the following traits define an item `{}`, but are explicitly unimplemented:",
+ item_name
+ );
+ for trait_info in trait_infos {
+ msg.push_str(&format!("\n{}", self.tcx.def_path_str(trait_info.def_id)));
+ }
+ err.note(&msg);
+ }
+ }
+ }
+ }
+
+    /// Issue #102320: for `unwrap_or` with a closure as the argument, suggest `unwrap_or_else`.
+ /// FIXME: currently not working for suggesting `map_or_else`, see #102408
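+    ///
+    /// Roughly the shape of code this targets (illustrative only):
+    ///
+    /// ```ignore (illustrative)
+    /// let x: Option<i32> = None;
+    /// let _ = x.unwrap_or(|| 1); // mismatched types; help: try calling `unwrap_or_else` instead
+    /// ```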
+ pub(crate) fn suggest_else_fn_with_closure(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ found: Ty<'tcx>,
+ expected: Ty<'tcx>,
+ ) -> bool {
+ let Some((_def_id_or_name, output, _inputs)) = self.extract_callable_info(expr, found)
+ else { return false; };
+
+ if !self.can_coerce(output, expected) {
+ return false;
+ }
+
+ let parent = self.tcx.hir().get_parent_node(expr.hir_id);
+ if let Some(Node::Expr(call_expr)) = self.tcx.hir().find(parent) &&
+ let hir::ExprKind::MethodCall(
+ hir::PathSegment { ident: method_name, .. },
+ self_expr,
+ args,
+ ..,
+ ) = call_expr.kind &&
+ let Some(self_ty) = self.typeck_results.borrow().expr_ty_opt(self_expr) {
+ let new_name = Ident {
+ name: Symbol::intern(&format!("{}_else", method_name.as_str())),
+ span: method_name.span,
+ };
+ let probe = self.lookup_probe(
+ expr.span,
+ new_name,
+ self_ty,
+ self_expr,
+ ProbeScope::TraitsInScope,
+ );
+
+            // Check that the suggested `_else` method takes a compatible number of arguments.
+ if let Ok(pick) = probe &&
+ let fn_sig = self.tcx.fn_sig(pick.item.def_id) &&
+ let fn_args = fn_sig.skip_binder().inputs() &&
+ fn_args.len() == args.len() + 1 {
+ err.span_suggestion_verbose(
+ method_name.span.shrink_to_hi(),
+ &format!("try calling `{}` instead", new_name.name.as_str()),
+ "_else",
+ Applicability::MaybeIncorrect,
+ );
+ return true;
+ }
+ }
+ false
+ }
+
+ /// Checks whether there is a local type somewhere in the chain of
+ /// autoderefs of `rcvr_ty`.
+ fn type_derefs_to_local(
+ &self,
+ span: Span,
+ rcvr_ty: Ty<'tcx>,
+ source: SelfSource<'tcx>,
+ ) -> bool {
+ fn is_local(ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ ty::Adt(def, _) => def.did().is_local(),
+ ty::Foreign(did) => did.is_local(),
+ ty::Dynamic(tr, ..) => tr.principal().map_or(false, |d| d.def_id().is_local()),
+ ty::Param(_) => true,
+
+ // Everything else (primitive types, etc.) is effectively
+ // non-local (there are "edge" cases, e.g., `(LocalType,)`, but
+ // the noise from these sort of types is usually just really
+ // annoying, rather than any sort of help).
+ _ => false,
+ }
+ }
+
+ // This occurs for UFCS desugaring of `T::method`, where there is no
+ // receiver expression for the method call, and thus no autoderef.
+ if let SelfSource::QPath(_) = source {
+ return is_local(self.resolve_vars_with_obligations(rcvr_ty));
+ }
+
+ self.autoderef(span, rcvr_ty).any(|(ty, _)| is_local(ty))
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum SelfSource<'a> {
+ QPath(&'a hir::Ty<'a>),
+ MethodCall(&'a hir::Expr<'a> /* rcvr */),
+}
+
+#[derive(Copy, Clone)]
+pub struct TraitInfo {
+ pub def_id: DefId,
+}
+
+impl PartialEq for TraitInfo {
+ fn eq(&self, other: &TraitInfo) -> bool {
+ self.cmp(other) == Ordering::Equal
+ }
+}
+impl Eq for TraitInfo {}
+impl PartialOrd for TraitInfo {
+ fn partial_cmp(&self, other: &TraitInfo) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+impl Ord for TraitInfo {
+ fn cmp(&self, other: &TraitInfo) -> Ordering {
+ // Local crates are more important than remote ones (local:
+ // `cnum == 0`), and otherwise we throw in the defid for totality.
+
+ let lhs = (other.def_id.krate, other.def_id);
+ let rhs = (self.def_id.krate, self.def_id);
+ lhs.cmp(&rhs)
+ }
+}
+
+/// Retrieves all traits in this crate and any dependent crates,
+/// and wraps them into `TraitInfo` for custom sorting.
+pub fn all_traits(tcx: TyCtxt<'_>) -> Vec<TraitInfo> {
+ tcx.all_traits().map(|def_id| TraitInfo { def_id }).collect()
+}
+
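+/// Emits a structured suggestion that disambiguates which trait's item is meant.
+/// A rough sketch of the output shape (illustrative only):
+///
+/// ```ignore (illustrative)
+/// // for `x.method(a)` where two traits define `method`, suggest e.g.
+/// TraitA::method(x, a);
+/// // or, for an associated function without a `self` parameter:
+/// <Type as TraitB>::method(a);
+/// ```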
+fn print_disambiguation_help<'tcx>(
+ item_name: Ident,
+ args: Option<(&'tcx hir::Expr<'tcx>, &'tcx [hir::Expr<'tcx>])>,
+ err: &mut Diagnostic,
+ trait_name: String,
+ rcvr_ty: Ty<'_>,
+ kind: ty::AssocKind,
+ def_id: DefId,
+ span: Span,
+ candidate: Option<usize>,
+ source_map: &source_map::SourceMap,
+ fn_has_self_parameter: bool,
+) {
+ let mut applicability = Applicability::MachineApplicable;
+ let (span, sugg) = if let (ty::AssocKind::Fn, Some((receiver, args))) = (kind, args) {
+ let args = format!(
+ "({}{})",
+ if rcvr_ty.is_region_ptr() {
+ if rcvr_ty.is_mutable_ptr() { "&mut " } else { "&" }
+ } else {
+ ""
+ },
+ std::iter::once(receiver)
+ .chain(args.iter())
+ .map(|arg| source_map.span_to_snippet(arg.span).unwrap_or_else(|_| {
+ applicability = Applicability::HasPlaceholders;
+ "_".to_owned()
+ }))
+ .collect::<Vec<_>>()
+ .join(", "),
+ );
+ let trait_name = if !fn_has_self_parameter {
+ format!("<{} as {}>", rcvr_ty, trait_name)
+ } else {
+ trait_name
+ };
+ (span, format!("{}::{}{}", trait_name, item_name, args))
+ } else {
+ (span.with_hi(item_name.span.lo()), format!("<{} as {}>::", rcvr_ty, trait_name))
+ };
+ err.span_suggestion_verbose(
+ span,
+ &format!(
+ "disambiguate the {} for {}",
+ kind.as_def_kind().descr(def_id),
+ if let Some(candidate) = candidate {
+ format!("candidate #{}", candidate)
+ } else {
+ "the candidate".to_string()
+ },
+ ),
+ sugg,
+ applicability,
+ );
+}
diff --git a/compiler/rustc_hir_typeck/src/op.rs b/compiler/rustc_hir_typeck/src/op.rs
new file mode 100644
index 000000000..895739976
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/op.rs
@@ -0,0 +1,994 @@
+//! Code related to processing overloaded binary and unary operators.
+
+use super::method::MethodCallee;
+use super::{has_expected_num_generic_args, FnCtxt};
+use crate::Expectation;
+use rustc_ast as ast;
+use rustc_errors::{self, struct_span_err, Applicability, Diagnostic};
+use rustc_hir as hir;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::traits::ObligationCauseCode;
+use rustc_middle::ty::adjustment::{
+ Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability,
+};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, DefIdTree, Ty, TyCtxt, TypeFolder, TypeSuperFoldable, TypeVisitable};
+use rustc_session::errors::ExprParenthesesNeeded;
+use rustc_span::source_map::Spanned;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits::error_reporting::suggestions::TypeErrCtxtExt as _;
+use rustc_trait_selection::traits::{FulfillmentError, TraitEngine, TraitEngineExt};
+use rustc_type_ir::sty::TyKind::*;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+    /// Checks an `a <op>= b` compound assignment expression.
+ pub fn check_binop_assign(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ op: hir::BinOp,
+ lhs: &'tcx hir::Expr<'tcx>,
+ rhs: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let (lhs_ty, rhs_ty, return_ty) =
+ self.check_overloaded_binop(expr, lhs, rhs, op, IsAssign::Yes, expected);
+
+ let ty =
+ if !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() && is_builtin_binop(lhs_ty, rhs_ty, op) {
+ self.enforce_builtin_binop_types(lhs.span, lhs_ty, rhs.span, rhs_ty, op);
+ self.tcx.mk_unit()
+ } else {
+ return_ty
+ };
+
+ self.check_lhs_assignable(lhs, "E0067", op.span, |err| {
+ if let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty) {
+ if self
+ .lookup_op_method(
+ lhs_deref_ty,
+ Some(rhs_ty),
+ Some(rhs),
+ Op::Binary(op, IsAssign::Yes),
+ expected,
+ )
+ .is_ok()
+ {
+ // If LHS += RHS is an error, but *LHS += RHS is successful, then we will have
+ // emitted a better suggestion during error handling in check_overloaded_binop.
+ if self
+ .lookup_op_method(
+ lhs_ty,
+ Some(rhs_ty),
+ Some(rhs),
+ Op::Binary(op, IsAssign::Yes),
+ expected,
+ )
+ .is_err()
+ {
+ err.downgrade_to_delayed_bug();
+ } else {
+ // Otherwise, it's valid to suggest dereferencing the LHS here.
+ err.span_suggestion_verbose(
+ lhs.span.shrink_to_lo(),
+ "consider dereferencing the left-hand side of this operation",
+ "*",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ });
+
+ ty
+ }
+
+ /// Checks a potentially overloaded binary operator.
+ pub fn check_binop(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ op: hir::BinOp,
+ lhs_expr: &'tcx hir::Expr<'tcx>,
+ rhs_expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+
+ debug!(
+ "check_binop(expr.hir_id={}, expr={:?}, op={:?}, lhs_expr={:?}, rhs_expr={:?})",
+ expr.hir_id, expr, op, lhs_expr, rhs_expr
+ );
+
+ match BinOpCategory::from(op) {
+ BinOpCategory::Shortcircuit => {
+ // && and || are a simple case.
+ self.check_expr_coercable_to_type(lhs_expr, tcx.types.bool, None);
+ let lhs_diverges = self.diverges.get();
+ self.check_expr_coercable_to_type(rhs_expr, tcx.types.bool, None);
+
+                // Depending on the LHS' value, the RHS might never execute.
+ self.diverges.set(lhs_diverges);
+
+ tcx.types.bool
+ }
+ _ => {
+ // Otherwise, we always treat operators as if they are
+ // overloaded. This is the way to be most flexible w/r/t
+ // types that get inferred.
+ let (lhs_ty, rhs_ty, return_ty) = self.check_overloaded_binop(
+ expr,
+ lhs_expr,
+ rhs_expr,
+ op,
+ IsAssign::No,
+ expected,
+ );
+
+ // Supply type inference hints if relevant. Probably these
+ // hints should be enforced during select as part of the
+                // `consider_unification_despite_ambiguity` routine, but this is
+                // more convenient for now.
+ //
+ // The basic idea is to help type inference by taking
+ // advantage of things we know about how the impls for
+ // scalar types are arranged. This is important in a
+ // scenario like `1_u32 << 2`, because it lets us quickly
+ // deduce that the result type should be `u32`, even
+ // though we don't know yet what type 2 has and hence
+ // can't pin this down to a specific impl.
+ if !lhs_ty.is_ty_var()
+ && !rhs_ty.is_ty_var()
+ && is_builtin_binop(lhs_ty, rhs_ty, op)
+ {
+ let builtin_return_ty = self.enforce_builtin_binop_types(
+ lhs_expr.span,
+ lhs_ty,
+ rhs_expr.span,
+ rhs_ty,
+ op,
+ );
+ self.demand_suptype(expr.span, builtin_return_ty, return_ty);
+ }
+
+ return_ty
+ }
+ }
+ }
+
+ fn enforce_builtin_binop_types(
+ &self,
+ lhs_span: Span,
+ lhs_ty: Ty<'tcx>,
+ rhs_span: Span,
+ rhs_ty: Ty<'tcx>,
+ op: hir::BinOp,
+ ) -> Ty<'tcx> {
+ debug_assert!(is_builtin_binop(lhs_ty, rhs_ty, op));
+
+ // Special-case a single layer of referencing, so that things like `5.0 + &6.0f32` work.
+ // (See https://github.com/rust-lang/rust/issues/57447.)
+ let (lhs_ty, rhs_ty) = (deref_ty_if_possible(lhs_ty), deref_ty_if_possible(rhs_ty));
+
+ let tcx = self.tcx;
+ match BinOpCategory::from(op) {
+ BinOpCategory::Shortcircuit => {
+ self.demand_suptype(lhs_span, tcx.types.bool, lhs_ty);
+ self.demand_suptype(rhs_span, tcx.types.bool, rhs_ty);
+ tcx.types.bool
+ }
+
+ BinOpCategory::Shift => {
+ // result type is same as LHS always
+ lhs_ty
+ }
+
+ BinOpCategory::Math | BinOpCategory::Bitwise => {
+                // the LHS, RHS, and result will all have the same type
+ self.demand_suptype(rhs_span, lhs_ty, rhs_ty);
+ lhs_ty
+ }
+
+ BinOpCategory::Comparison => {
+                // the LHS and RHS will have the same type; the result of a comparison is `bool`
+ self.demand_suptype(rhs_span, lhs_ty, rhs_ty);
+ tcx.types.bool
+ }
+ }
+ }
+
+ fn check_overloaded_binop(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ lhs_expr: &'tcx hir::Expr<'tcx>,
+ rhs_expr: &'tcx hir::Expr<'tcx>,
+ op: hir::BinOp,
+ is_assign: IsAssign,
+ expected: Expectation<'tcx>,
+ ) -> (Ty<'tcx>, Ty<'tcx>, Ty<'tcx>) {
+ debug!(
+ "check_overloaded_binop(expr.hir_id={}, op={:?}, is_assign={:?})",
+ expr.hir_id, op, is_assign
+ );
+
+ let lhs_ty = match is_assign {
+ IsAssign::No => {
+ // Find a suitable supertype of the LHS expression's type, by coercing to
+ // a type variable, to pass as the `Self` to the trait, avoiding invariant
+ // trait matching creating lifetime constraints that are too strict.
+ // e.g., adding `&'a T` and `&'b T`, given `&'x T: Add<&'x T>`, will result
+ // in `&'a T <: &'x T` and `&'b T <: &'x T`, instead of `'a = 'b = 'x`.
+ let lhs_ty = self.check_expr(lhs_expr);
+ let fresh_var = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: lhs_expr.span,
+ });
+ self.demand_coerce(lhs_expr, lhs_ty, fresh_var, Some(rhs_expr), AllowTwoPhase::No)
+ }
+ IsAssign::Yes => {
+ // rust-lang/rust#52126: We have to use strict
+ // equivalence on the LHS of an assign-op like `+=`;
+ // overwritten or mutably-borrowed places cannot be
+ // coerced to a supertype.
+ self.check_expr(lhs_expr)
+ }
+ };
+ let lhs_ty = self.resolve_vars_with_obligations(lhs_ty);
+
+ // N.B., as we have not yet type-checked the RHS, we don't have the
+ // type at hand. Make a variable to represent it. The whole reason
+ // for this indirection is so that, below, we can check the expr
+ // using this variable as the expected type, which sometimes lets
+ // us do better coercions than we would be able to do otherwise,
+ // particularly for things like `String + &String`.
+ let rhs_ty_var = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: rhs_expr.span,
+ });
+
+ let result = self.lookup_op_method(
+ lhs_ty,
+ Some(rhs_ty_var),
+ Some(rhs_expr),
+ Op::Binary(op, is_assign),
+ expected,
+ );
+
+ // see `NB` above
+ let rhs_ty = self.check_expr_coercable_to_type(rhs_expr, rhs_ty_var, Some(lhs_expr));
+ let rhs_ty = self.resolve_vars_with_obligations(rhs_ty);
+
+ let return_ty = match result {
+ Ok(method) => {
+ let by_ref_binop = !op.node.is_by_value();
+ if is_assign == IsAssign::Yes || by_ref_binop {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].kind() {
+ let mutbl = match mutbl {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+ // Allow two-phase borrows for binops in initial deployment
+ // since they desugar to methods
+ allow_two_phase_borrow: AllowTwoPhase::Yes,
+ },
+ };
+ let autoref = Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
+ target: method.sig.inputs()[0],
+ };
+ self.apply_adjustments(lhs_expr, vec![autoref]);
+ }
+ }
+ if by_ref_binop {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[1].kind() {
+ let mutbl = match mutbl {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+ // Allow two-phase borrows for binops in initial deployment
+ // since they desugar to methods
+ allow_two_phase_borrow: AllowTwoPhase::Yes,
+ },
+ };
+ let autoref = Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
+ target: method.sig.inputs()[1],
+ };
+ // HACK(eddyb) Bypass checks due to reborrows being in
+ // some cases applied on the RHS, on top of which we need
+ // to autoref, which is not allowed by apply_adjustments.
+ // self.apply_adjustments(rhs_expr, vec![autoref]);
+ self.typeck_results
+ .borrow_mut()
+ .adjustments_mut()
+ .entry(rhs_expr.hir_id)
+ .or_default()
+ .push(autoref);
+ }
+ }
+ self.write_method_call(expr.hir_id, method);
+
+ method.sig.output()
+ }
+ // error types are considered "builtin"
+ Err(_) if lhs_ty.references_error() || rhs_ty.references_error() => self.tcx.ty_error(),
+ Err(errors) => {
+ let (_, trait_def_id) =
+ lang_item_for_op(self.tcx, Op::Binary(op, is_assign), op.span);
+ let missing_trait = trait_def_id
+ .map(|def_id| with_no_trimmed_paths!(self.tcx.def_path_str(def_id)));
+ let (mut err, output_def_id) = match is_assign {
+ IsAssign::Yes => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ expr.span,
+ E0368,
+ "binary assignment operation `{}=` cannot be applied to type `{}`",
+ op.node.as_str(),
+ lhs_ty,
+ );
+ err.span_label(
+ lhs_expr.span,
+ format!("cannot use `{}=` on type `{}`", op.node.as_str(), lhs_ty),
+ );
+ self.note_unmet_impls_on_type(&mut err, errors);
+ (err, None)
+ }
+ IsAssign::No => {
+ let message = match op.node {
+ hir::BinOpKind::Add => {
+ format!("cannot add `{rhs_ty}` to `{lhs_ty}`")
+ }
+ hir::BinOpKind::Sub => {
+ format!("cannot subtract `{rhs_ty}` from `{lhs_ty}`")
+ }
+ hir::BinOpKind::Mul => {
+ format!("cannot multiply `{lhs_ty}` by `{rhs_ty}`")
+ }
+ hir::BinOpKind::Div => {
+ format!("cannot divide `{lhs_ty}` by `{rhs_ty}`")
+ }
+ hir::BinOpKind::Rem => {
+ format!("cannot mod `{lhs_ty}` by `{rhs_ty}`")
+ }
+ hir::BinOpKind::BitAnd => {
+ format!("no implementation for `{lhs_ty} & {rhs_ty}`")
+ }
+ hir::BinOpKind::BitXor => {
+ format!("no implementation for `{lhs_ty} ^ {rhs_ty}`")
+ }
+ hir::BinOpKind::BitOr => {
+ format!("no implementation for `{lhs_ty} | {rhs_ty}`")
+ }
+ hir::BinOpKind::Shl => {
+ format!("no implementation for `{lhs_ty} << {rhs_ty}`")
+ }
+ hir::BinOpKind::Shr => {
+ format!("no implementation for `{lhs_ty} >> {rhs_ty}`")
+ }
+ _ => format!(
+ "binary operation `{}` cannot be applied to type `{}`",
+ op.node.as_str(),
+ lhs_ty
+ ),
+ };
+ let output_def_id = trait_def_id.and_then(|def_id| {
+ self.tcx
+ .associated_item_def_ids(def_id)
+ .iter()
+ .find(|item_def_id| {
+ self.tcx.associated_item(*item_def_id).name == sym::Output
+ })
+ .cloned()
+ });
+ let mut err = struct_span_err!(self.tcx.sess, op.span, E0369, "{message}");
+ if !lhs_expr.span.eq(&rhs_expr.span) {
+ err.span_label(lhs_expr.span, lhs_ty.to_string());
+ err.span_label(rhs_expr.span, rhs_ty.to_string());
+ }
+ self.note_unmet_impls_on_type(&mut err, errors);
+ (err, output_def_id)
+ }
+ };
+
+ let mut suggest_deref_binop = |lhs_deref_ty: Ty<'tcx>| {
+ if self
+ .lookup_op_method(
+ lhs_deref_ty,
+ Some(rhs_ty),
+ Some(rhs_expr),
+ Op::Binary(op, is_assign),
+ expected,
+ )
+ .is_ok()
+ {
+ let msg = &format!(
+ "`{}{}` can be used on `{}` if you dereference the left-hand side",
+ op.node.as_str(),
+ match is_assign {
+ IsAssign::Yes => "=",
+ IsAssign::No => "",
+ },
+ lhs_deref_ty,
+ );
+ err.span_suggestion_verbose(
+ lhs_expr.span.shrink_to_lo(),
+ msg,
+ "*",
+ rustc_errors::Applicability::MachineApplicable,
+ );
+ }
+ };
+
+ let is_compatible = |lhs_ty, rhs_ty| {
+ self.lookup_op_method(
+ lhs_ty,
+ Some(rhs_ty),
+ Some(rhs_expr),
+ Op::Binary(op, is_assign),
+ expected,
+ )
+ .is_ok()
+ };
+
+                // We should suggest `a + b` => `*a + b` if `a` is `Copy`, and suggest
+                // `a += b` => `*a += b` if `a` is a mutable reference.
+ if !op.span.can_be_used_for_suggestions() {
+                    // Suppress suggestions when the operator span can't be used for suggestions,
+                    // e.g. because it comes from a macro expansion.
+ } else if is_assign == IsAssign::Yes
+ && let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty)
+ {
+ suggest_deref_binop(lhs_deref_ty);
+ } else if is_assign == IsAssign::No
+ && let Ref(_, lhs_deref_ty, _) = lhs_ty.kind()
+ {
+ if self.type_is_copy_modulo_regions(
+ self.param_env,
+ *lhs_deref_ty,
+ lhs_expr.span,
+ ) {
+ suggest_deref_binop(*lhs_deref_ty);
+ }
+ } else if self.suggest_fn_call(&mut err, lhs_expr, lhs_ty, |lhs_ty| {
+ is_compatible(lhs_ty, rhs_ty)
+ }) || self.suggest_fn_call(&mut err, rhs_expr, rhs_ty, |rhs_ty| {
+ is_compatible(lhs_ty, rhs_ty)
+ }) || self.suggest_two_fn_call(
+ &mut err,
+ rhs_expr,
+ rhs_ty,
+ lhs_expr,
+ lhs_ty,
+ |lhs_ty, rhs_ty| is_compatible(lhs_ty, rhs_ty),
+ ) {
+ // Cool
+ }
+
+ if let Some(missing_trait) = missing_trait {
+ if op.node == hir::BinOpKind::Add
+ && self.check_str_addition(
+ lhs_expr, rhs_expr, lhs_ty, rhs_ty, &mut err, is_assign, op,
+ )
+ {
+                        // Nothing to do here: `check_str_addition` already handled the
+                        // string-concatenation case (e.g., "Hello " + "World!"), so we
+                        // don't want the note from the else clause to be emitted.
+ } else if lhs_ty.has_non_region_param() {
+ // Look for a TraitPredicate in the Fulfillment errors,
+ // and use it to generate a suggestion.
+ //
+ // Note that lookup_op_method must be called again but
+ // with a specific rhs_ty instead of a placeholder so
+ // the resulting predicate generates a more specific
+ // suggestion for the user.
+ let errors = self
+ .lookup_op_method(
+ lhs_ty,
+ Some(rhs_ty),
+ Some(rhs_expr),
+ Op::Binary(op, is_assign),
+ expected,
+ )
+ .unwrap_err();
+ if !errors.is_empty() {
+ for error in errors {
+ if let Some(trait_pred) =
+ error.obligation.predicate.to_opt_poly_trait_pred()
+ {
+ let output_associated_item = match error.obligation.cause.code()
+ {
+ ObligationCauseCode::BinOp {
+ output_ty: Some(output_ty),
+ ..
+ } => {
+ // Make sure that we're attaching `Output = ..` to the right trait predicate
+ if let Some(output_def_id) = output_def_id
+ && let Some(trait_def_id) = trait_def_id
+ && self.tcx.parent(output_def_id) == trait_def_id
+ {
+ Some(("Output", *output_ty))
+ } else {
+ None
+ }
+ }
+ _ => None,
+ };
+
+ self.err_ctxt().suggest_restricting_param_bound(
+ &mut err,
+ trait_pred,
+ output_associated_item,
+ self.body_id,
+ );
+ }
+ }
+ } else {
+ // When we know that a missing bound is responsible, we don't show
+ // this note as it is redundant.
+ err.note(&format!(
+ "the trait `{missing_trait}` is not implemented for `{lhs_ty}`"
+ ));
+ }
+ }
+ }
+ err.emit();
+ self.tcx.ty_error()
+ }
+ };
+
+ (lhs_ty, rhs_ty, return_ty)
+ }
+
+ /// Provide actionable suggestions when trying to add two strings with incorrect types,
+ /// like `&str + &str`, `String + String` and `&str + &String`.
+ ///
+    /// If this function returns `true`, it means a note was printed, so we don't need
+    /// to print the normal "implementation of `std::ops::Add` might be missing" note.
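+    ///
+    /// A sketch of the main case (illustrative only):
+    ///
+    /// ```ignore (illustrative)
+    /// let _ = "foo" + "bar";            // `+` cannot be used to concatenate two `&str` strings
+    /// let _ = "foo".to_owned() + "bar"; // suggested fix: owned `String` on the left
+    /// ```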
+ fn check_str_addition(
+ &self,
+ lhs_expr: &'tcx hir::Expr<'tcx>,
+ rhs_expr: &'tcx hir::Expr<'tcx>,
+ lhs_ty: Ty<'tcx>,
+ rhs_ty: Ty<'tcx>,
+ err: &mut Diagnostic,
+ is_assign: IsAssign,
+ op: hir::BinOp,
+ ) -> bool {
+ let str_concat_note = "string concatenation requires an owned `String` on the left";
+ let rm_borrow_msg = "remove the borrow to obtain an owned `String`";
+ let to_owned_msg = "create an owned `String` from a string reference";
+
+ let is_std_string = |ty: Ty<'tcx>| {
+ ty.ty_adt_def()
+ .map_or(false, |ty_def| self.tcx.is_diagnostic_item(sym::String, ty_def.did()))
+ };
+
+ match (lhs_ty.kind(), rhs_ty.kind()) {
+ (&Ref(_, l_ty, _), &Ref(_, r_ty, _)) // &str or &String + &str, &String or &&str
+ if (*l_ty.kind() == Str || is_std_string(l_ty))
+ && (*r_ty.kind() == Str
+ || is_std_string(r_ty)
+ || matches!(
+ r_ty.kind(), Ref(_, inner_ty, _) if *inner_ty.kind() == Str
+ )) =>
+ {
+ if let IsAssign::No = is_assign { // Do not supply this message if `&str += &str`
+ err.span_label(op.span, "`+` cannot be used to concatenate two `&str` strings");
+ err.note(str_concat_note);
+ if let hir::ExprKind::AddrOf(_, _, lhs_inner_expr) = lhs_expr.kind {
+ err.span_suggestion_verbose(
+ lhs_expr.span.until(lhs_inner_expr.span),
+ rm_borrow_msg,
+ "",
+ Applicability::MachineApplicable
+ );
+ } else {
+ err.span_suggestion_verbose(
+ lhs_expr.span.shrink_to_hi(),
+ to_owned_msg,
+ ".to_owned()",
+ Applicability::MachineApplicable
+ );
+ }
+ }
+ true
+ }
+ (&Ref(_, l_ty, _), &Adt(..)) // Handle `&str` & `&String` + `String`
+ if (*l_ty.kind() == Str || is_std_string(l_ty)) && is_std_string(rhs_ty) =>
+ {
+ err.span_label(
+ op.span,
+ "`+` cannot be used to concatenate a `&str` with a `String`",
+ );
+ match is_assign {
+ IsAssign::No => {
+ let sugg_msg;
+ let lhs_sugg = if let hir::ExprKind::AddrOf(_, _, lhs_inner_expr) = lhs_expr.kind {
+ sugg_msg = "remove the borrow on the left and add one on the right";
+ (lhs_expr.span.until(lhs_inner_expr.span), "".to_owned())
+ } else {
+ sugg_msg = "create an owned `String` on the left and add a borrow on the right";
+ (lhs_expr.span.shrink_to_hi(), ".to_owned()".to_owned())
+ };
+ let suggestions = vec![
+ lhs_sugg,
+ (rhs_expr.span.shrink_to_lo(), "&".to_owned()),
+ ];
+ err.multipart_suggestion_verbose(
+ sugg_msg,
+ suggestions,
+ Applicability::MachineApplicable,
+ );
+ }
+ IsAssign::Yes => {
+ err.note(str_concat_note);
+ }
+ }
+ true
+ }
+ _ => false,
+ }
+ }
+
+ pub fn check_user_unop(
+ &self,
+ ex: &'tcx hir::Expr<'tcx>,
+ operand_ty: Ty<'tcx>,
+ op: hir::UnOp,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ assert!(op.is_by_value());
+ match self.lookup_op_method(operand_ty, None, None, Op::Unary(op, ex.span), expected) {
+ Ok(method) => {
+ self.write_method_call(ex.hir_id, method);
+ method.sig.output()
+ }
+ Err(errors) => {
+ let actual = self.resolve_vars_if_possible(operand_ty);
+ if !actual.references_error() {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ ex.span,
+ E0600,
+ "cannot apply unary operator `{}` to type `{}`",
+ op.as_str(),
+ actual
+ );
+ err.span_label(
+ ex.span,
+ format!("cannot apply unary operator `{}`", op.as_str()),
+ );
+
+ if operand_ty.has_non_region_param() {
+ let predicates = errors.iter().filter_map(|error| {
+ error.obligation.predicate.to_opt_poly_trait_pred()
+ });
+ for pred in predicates {
+ self.err_ctxt().suggest_restricting_param_bound(
+ &mut err,
+ pred,
+ None,
+ self.body_id,
+ );
+ }
+ }
+
+ let sp = self.tcx.sess.source_map().start_point(ex.span);
+ if let Some(sp) =
+ self.tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp)
+ {
+ // If the previous expression was a block expression, suggest parentheses
+ // (turning this into a binary subtraction operation instead.)
+ // for example, `{2} - 2` -> `({2}) - 2` (see src\test\ui\parser\expr-as-stmt.rs)
+ err.subdiagnostic(ExprParenthesesNeeded::surrounding(*sp));
+ } else {
+ match actual.kind() {
+ Uint(_) if op == hir::UnOp::Neg => {
+ err.note("unsigned values cannot be negated");
+
+ if let hir::ExprKind::Unary(
+ _,
+ hir::Expr {
+ kind:
+ hir::ExprKind::Lit(Spanned {
+ node: ast::LitKind::Int(1, _),
+ ..
+ }),
+ ..
+ },
+ ) = ex.kind
+ {
+ err.span_suggestion(
+ ex.span,
+ &format!(
+ "you may have meant the maximum value of `{actual}`",
+ ),
+ format!("{actual}::MAX"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ Str | Never | Char | Tuple(_) | Array(_, _) => {}
+ Ref(_, lty, _) if *lty.kind() == Str => {}
+ _ => {
+ self.note_unmet_impls_on_type(&mut err, errors);
+ }
+ }
+ }
+ err.emit();
+ }
+ self.tcx.ty_error()
+ }
+ }
+ }
+
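+    /// Looks up the trait method that implements `op` for `lhs_ty` (e.g. `Add::add`
+    /// for `+`), returning the resolved `MethodCallee` on success or the fulfillment
+    /// errors that explain why no implementation applies.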
+ fn lookup_op_method(
+ &self,
+ lhs_ty: Ty<'tcx>,
+ other_ty: Option<Ty<'tcx>>,
+ other_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ op: Op,
+ expected: Expectation<'tcx>,
+ ) -> Result<MethodCallee<'tcx>, Vec<FulfillmentError<'tcx>>> {
+ let span = match op {
+ Op::Binary(op, _) => op.span,
+ Op::Unary(_, span) => span,
+ };
+ let (opname, trait_did) = lang_item_for_op(self.tcx, op, span);
+
+ debug!(
+ "lookup_op_method(lhs_ty={:?}, op={:?}, opname={:?}, trait_did={:?})",
+ lhs_ty, op, opname, trait_did
+ );
+
+ // Catches cases like #83893, where a lang item is declared with the
+ // wrong number of generic arguments. Should have yielded an error
+ // elsewhere by now, but we have to catch it here so that we do not
+ // index `other_tys` out of bounds (if the lang item has too many
+ // generic arguments, `other_tys` is too short).
+ if !has_expected_num_generic_args(
+ self.tcx,
+ trait_did,
+ match op {
+ // Binary ops have a generic right-hand side, unary ops don't
+ Op::Binary(..) => 1,
+ Op::Unary(..) => 0,
+ },
+ ) {
+ return Err(vec![]);
+ }
+
+ let opname = Ident::with_dummy_span(opname);
+ let method = trait_did.and_then(|trait_did| {
+ self.lookup_op_method_in_trait(
+ span,
+ opname,
+ trait_did,
+ lhs_ty,
+ other_ty,
+ other_ty_expr,
+ expected,
+ )
+ });
+
+ match (method, trait_did) {
+ (Some(ok), _) => {
+ let method = self.register_infer_ok_obligations(ok);
+ self.select_obligations_where_possible(false, |_| {});
+ Ok(method)
+ }
+ (None, None) => Err(vec![]),
+ (None, Some(trait_did)) => {
+ let (obligation, _) = self.obligation_for_op_method(
+ span,
+ trait_did,
+ lhs_ty,
+ other_ty,
+ other_ty_expr,
+ expected,
+ );
+ let mut fulfill = <dyn TraitEngine<'_>>::new(self.tcx);
+ fulfill.register_predicate_obligation(self, obligation);
+ Err(fulfill.select_where_possible(&self.infcx))
+ }
+ }
+ }
+}
+
+fn lang_item_for_op(
+ tcx: TyCtxt<'_>,
+ op: Op,
+ span: Span,
+) -> (rustc_span::Symbol, Option<hir::def_id::DefId>) {
+ let lang = tcx.lang_items();
+ if let Op::Binary(op, IsAssign::Yes) = op {
+ match op.node {
+ hir::BinOpKind::Add => (sym::add_assign, lang.add_assign_trait()),
+ hir::BinOpKind::Sub => (sym::sub_assign, lang.sub_assign_trait()),
+ hir::BinOpKind::Mul => (sym::mul_assign, lang.mul_assign_trait()),
+ hir::BinOpKind::Div => (sym::div_assign, lang.div_assign_trait()),
+ hir::BinOpKind::Rem => (sym::rem_assign, lang.rem_assign_trait()),
+ hir::BinOpKind::BitXor => (sym::bitxor_assign, lang.bitxor_assign_trait()),
+ hir::BinOpKind::BitAnd => (sym::bitand_assign, lang.bitand_assign_trait()),
+ hir::BinOpKind::BitOr => (sym::bitor_assign, lang.bitor_assign_trait()),
+ hir::BinOpKind::Shl => (sym::shl_assign, lang.shl_assign_trait()),
+ hir::BinOpKind::Shr => (sym::shr_assign, lang.shr_assign_trait()),
+ hir::BinOpKind::Lt
+ | hir::BinOpKind::Le
+ | hir::BinOpKind::Ge
+ | hir::BinOpKind::Gt
+ | hir::BinOpKind::Eq
+ | hir::BinOpKind::Ne
+ | hir::BinOpKind::And
+ | hir::BinOpKind::Or => {
+ span_bug!(span, "impossible assignment operation: {}=", op.node.as_str())
+ }
+ }
+ } else if let Op::Binary(op, IsAssign::No) = op {
+ match op.node {
+ hir::BinOpKind::Add => (sym::add, lang.add_trait()),
+ hir::BinOpKind::Sub => (sym::sub, lang.sub_trait()),
+ hir::BinOpKind::Mul => (sym::mul, lang.mul_trait()),
+ hir::BinOpKind::Div => (sym::div, lang.div_trait()),
+ hir::BinOpKind::Rem => (sym::rem, lang.rem_trait()),
+ hir::BinOpKind::BitXor => (sym::bitxor, lang.bitxor_trait()),
+ hir::BinOpKind::BitAnd => (sym::bitand, lang.bitand_trait()),
+ hir::BinOpKind::BitOr => (sym::bitor, lang.bitor_trait()),
+ hir::BinOpKind::Shl => (sym::shl, lang.shl_trait()),
+ hir::BinOpKind::Shr => (sym::shr, lang.shr_trait()),
+ hir::BinOpKind::Lt => (sym::lt, lang.partial_ord_trait()),
+ hir::BinOpKind::Le => (sym::le, lang.partial_ord_trait()),
+ hir::BinOpKind::Ge => (sym::ge, lang.partial_ord_trait()),
+ hir::BinOpKind::Gt => (sym::gt, lang.partial_ord_trait()),
+ hir::BinOpKind::Eq => (sym::eq, lang.eq_trait()),
+ hir::BinOpKind::Ne => (sym::ne, lang.eq_trait()),
+ hir::BinOpKind::And | hir::BinOpKind::Or => {
+ span_bug!(span, "&& and || are not overloadable")
+ }
+ }
+ } else if let Op::Unary(hir::UnOp::Not, _) = op {
+ (sym::not, lang.not_trait())
+ } else if let Op::Unary(hir::UnOp::Neg, _) = op {
+ (sym::neg, lang.neg_trait())
+ } else {
+ bug!("lookup_op_method: op not supported: {:?}", op)
+ }
+}
+
+// Binary operator categories. These categories summarize the behavior
+// with respect to the builtin operations supported.
+enum BinOpCategory {
+ /// &&, || -- cannot be overridden
+ Shortcircuit,
+
+ /// <<, >> -- when shifting a single integer, rhs can be any
+ /// integer type. For simd, types must match.
+ Shift,
+
+ /// +, -, etc -- takes equal types, produces same type as input,
+ /// applicable to ints/floats/simd
+ Math,
+
+ /// &, |, ^ -- takes equal types, produces same type as input,
+ /// applicable to ints/floats/simd/bool
+ Bitwise,
+
+ /// ==, !=, etc -- takes equal types, produces bools, except for simd,
+ /// which produce the input type
+ Comparison,
+}
+
+impl BinOpCategory {
+ fn from(op: hir::BinOp) -> BinOpCategory {
+ match op.node {
+ hir::BinOpKind::Shl | hir::BinOpKind::Shr => BinOpCategory::Shift,
+
+ hir::BinOpKind::Add
+ | hir::BinOpKind::Sub
+ | hir::BinOpKind::Mul
+ | hir::BinOpKind::Div
+ | hir::BinOpKind::Rem => BinOpCategory::Math,
+
+ hir::BinOpKind::BitXor | hir::BinOpKind::BitAnd | hir::BinOpKind::BitOr => {
+ BinOpCategory::Bitwise
+ }
+
+ hir::BinOpKind::Eq
+ | hir::BinOpKind::Ne
+ | hir::BinOpKind::Lt
+ | hir::BinOpKind::Le
+ | hir::BinOpKind::Ge
+ | hir::BinOpKind::Gt => BinOpCategory::Comparison,
+
+ hir::BinOpKind::And | hir::BinOpKind::Or => BinOpCategory::Shortcircuit,
+ }
+ }
+}
+
+/// Whether the binary operation is an assignment (`a += b`), or not (`a + b`)
+#[derive(Clone, Copy, Debug, PartialEq)]
+enum IsAssign {
+ No,
+ Yes,
+}
+
+#[derive(Clone, Copy, Debug)]
+enum Op {
+ Binary(hir::BinOp, IsAssign),
+ Unary(hir::UnOp, Span),
+}
+
+/// Dereferences a single level of immutable referencing.
+fn deref_ty_if_possible<'tcx>(ty: Ty<'tcx>) -> Ty<'tcx> {
+ match ty.kind() {
+ ty::Ref(_, ty, hir::Mutability::Not) => *ty,
+ _ => ty,
+ }
+}
+
+/// Returns `true` if this is a built-in arithmetic operation (e.g., u32
+/// + u32, i16x4 == i16x4) and false if these types would have to be
+/// overloaded to be legal. There are two reasons that we distinguish
+/// builtin operations from overloaded ones (vs trying to drive
+/// everything uniformly through the trait system and intrinsics or
+/// something like that):
+///
+/// 1. Builtin operations can trivially be evaluated in constants.
+/// 2. For comparison operators applied to SIMD types the result is
+/// not of type `bool`. For example, `i16x4 == i16x4` yields a
+/// type like `i16x4`. This means that the overloaded trait
+/// `PartialEq` is not applicable.
+///
+/// Reason #2 is the killer. I tried for a while to always use
+/// overloaded logic and just check the types in constants/codegen after
+/// the fact, and it worked fine, except for SIMD types. -nmatsakis
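+///
+/// For example (illustrative): `1u32 + 2u32`, `1i16 << 3u8` and `true & false` are
+/// builtin, while `String + &str` or any operator on a user-defined type goes
+/// through the overloaded path.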
+fn is_builtin_binop<'tcx>(lhs: Ty<'tcx>, rhs: Ty<'tcx>, op: hir::BinOp) -> bool {
+ // Special-case a single layer of referencing, so that things like `5.0 + &6.0f32` work.
+ // (See https://github.com/rust-lang/rust/issues/57447.)
+ let (lhs, rhs) = (deref_ty_if_possible(lhs), deref_ty_if_possible(rhs));
+
+ match BinOpCategory::from(op) {
+ BinOpCategory::Shortcircuit => true,
+
+ BinOpCategory::Shift => {
+ lhs.references_error()
+ || rhs.references_error()
+ || lhs.is_integral() && rhs.is_integral()
+ }
+
+ BinOpCategory::Math => {
+ lhs.references_error()
+ || rhs.references_error()
+ || lhs.is_integral() && rhs.is_integral()
+ || lhs.is_floating_point() && rhs.is_floating_point()
+ }
+
+ BinOpCategory::Bitwise => {
+ lhs.references_error()
+ || rhs.references_error()
+ || lhs.is_integral() && rhs.is_integral()
+ || lhs.is_floating_point() && rhs.is_floating_point()
+ || lhs.is_bool() && rhs.is_bool()
+ }
+
+ BinOpCategory::Comparison => {
+ lhs.references_error() || rhs.references_error() || lhs.is_scalar() && rhs.is_scalar()
+ }
+ }
+}
+
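+/// Type folder that replaces every `ty::Param` with a fresh type inference
+/// variable originating at the given span, leaving all other types untouched.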
+struct TypeParamEraser<'a, 'tcx>(&'a FnCtxt<'a, 'tcx>, Span);
+
+impl<'tcx> TypeFolder<'tcx> for TypeParamEraser<'_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.0.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match ty.kind() {
+ ty::Param(_) => self.0.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: self.1,
+ }),
+ _ => ty.super_fold_with(self),
+ }
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/pat.rs b/compiler/rustc_hir_typeck/src/pat.rs
new file mode 100644
index 000000000..ea90da4a6
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/pat.rs
@@ -0,0 +1,2185 @@
+use crate::FnCtxt;
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{
+ pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed,
+ MultiSpan,
+};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, DefKind, Res};
+use rustc_hir::pat_util::EnumerateAndAdjustIterator;
+use rustc_hir::{HirId, Pat, PatKind};
+use rustc_infer::infer;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::middle::stability::EvalResult;
+use rustc_middle::ty::{self, Adt, BindingMode, Ty, TypeVisitable};
+use rustc_session::lint::builtin::NON_EXHAUSTIVE_OMITTED_PATTERNS;
+use rustc_span::hygiene::DesugaringKind;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::source_map::{Span, Spanned};
+use rustc_span::symbol::{kw, sym, Ident};
+use rustc_span::{BytePos, DUMMY_SP};
+use rustc_trait_selection::autoderef::Autoderef;
+use rustc_trait_selection::traits::{ObligationCause, Pattern};
+use ty::VariantDef;
+
+use std::cmp;
+use std::collections::hash_map::Entry::{Occupied, Vacant};
+
+use super::report_unexpected_variant_res;
+
+const CANNOT_IMPLICITLY_DEREF_POINTER_TRAIT_OBJ: &str = "\
+This error indicates that a pointer to a trait type cannot be implicitly dereferenced by a \
+pattern. Every trait defines a type, but because the size of trait implementors isn't fixed, \
+this type has no compile-time size. Therefore, all accesses to trait types must be through \
+pointers. If you encounter this error you should try to avoid dereferencing the pointer.
+
+You can read more about trait objects in the Trait Objects section of the Reference: \
+https://doc.rust-lang.org/reference/types.html#trait-objects";
+
+/// Information about the expected type at the top level of type checking a pattern.
+///
+/// **NOTE:** This is only for use by diagnostics. Do NOT use for type checking logic!
+#[derive(Copy, Clone)]
+struct TopInfo<'tcx> {
+ /// The `expected` type at the top level of type checking a pattern.
+ expected: Ty<'tcx>,
+ /// Was the origin of the `span` from a scrutinee expression?
+ ///
+ /// Otherwise there is no scrutinee and it could be e.g. from the type of a formal parameter.
+ origin_expr: bool,
+ /// The span giving rise to the `expected` type, if one could be provided.
+ ///
+ /// If `origin_expr` is `true`, then this is the span of the scrutinee as in:
+ ///
+ /// - `match scrutinee { ... }`
+ /// - `let _ = scrutinee;`
+ ///
+    /// This is used to add context to type errors by pointing at this span.
+ /// In the following example, `span` corresponds to the `a + b` expression:
+ ///
+ /// ```text
+ /// error[E0308]: mismatched types
+ /// --> src/main.rs:L:C
+ /// |
+ /// L | let temp: usize = match a + b {
+ /// | ----- this expression has type `usize`
+ /// L | Ok(num) => num,
+ /// | ^^^^^^^ expected `usize`, found enum `std::result::Result`
+ /// |
+ /// = note: expected type `usize`
+ /// found type `std::result::Result<_, _>`
+ /// ```
+ span: Option<Span>,
+}
+
+impl<'tcx> FnCtxt<'_, 'tcx> {
+ fn pattern_cause(&self, ti: TopInfo<'tcx>, cause_span: Span) -> ObligationCause<'tcx> {
+ let code = Pattern { span: ti.span, root_ty: ti.expected, origin_expr: ti.origin_expr };
+ self.cause(cause_span, code)
+ }
+
+ fn demand_eqtype_pat_diag(
+ &self,
+ cause_span: Span,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ti: TopInfo<'tcx>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ self.demand_eqtype_with_origin(&self.pattern_cause(ti, cause_span), expected, actual)
+ }
+
+ fn demand_eqtype_pat(
+ &self,
+ cause_span: Span,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ti: TopInfo<'tcx>,
+ ) {
+ if let Some(mut err) = self.demand_eqtype_pat_diag(cause_span, expected, actual, ti) {
+ err.emit();
+ }
+ }
+}
+
+const INITIAL_BM: BindingMode = BindingMode::BindByValue(hir::Mutability::Not);
+
+/// Mode for adjusting the expected type and binding mode.
+enum AdjustMode {
+ /// Peel off all immediate reference types.
+ Peel,
+ /// Reset binding mode to the initial mode.
+ Reset,
+ /// Pass on the input binding mode and expected type.
+ Pass,
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Type check the given top level pattern against the `expected` type.
+ ///
+ /// If a `Some(span)` is provided and `origin_expr` holds,
+ /// then the `span` represents the scrutinee's span.
+ /// The scrutinee is found in e.g. `match scrutinee { ... }` and `let pat = scrutinee;`.
+ ///
+ /// Otherwise, `Some(span)` represents the span of a type expression
+ /// which originated the `expected` type.
+ pub fn check_pat_top(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ expected: Ty<'tcx>,
+ span: Option<Span>,
+ origin_expr: bool,
+ ) {
+ let info = TopInfo { expected, origin_expr, span };
+ self.check_pat(pat, expected, INITIAL_BM, info);
+ }
+
+ /// Type check the given `pat` against the `expected` type
+ /// with the provided `def_bm` (default binding mode).
+ ///
+ /// Outside of this module, `check_pat_top` should always be used.
+ /// Conversely, inside this module, `check_pat_top` should never be used.
+ #[instrument(level = "debug", skip(self, ti))]
+ fn check_pat(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) {
+ let path_res = match &pat.kind {
+ PatKind::Path(qpath) => {
+ Some(self.resolve_ty_and_res_fully_qualified_call(qpath, pat.hir_id, pat.span))
+ }
+ _ => None,
+ };
+ let adjust_mode = self.calc_adjust_mode(pat, path_res.map(|(res, ..)| res));
+ let (expected, def_bm) = self.calc_default_binding_mode(pat, expected, def_bm, adjust_mode);
+
+ let ty = match pat.kind {
+ PatKind::Wild => expected,
+ PatKind::Lit(lt) => self.check_pat_lit(pat.span, lt, expected, ti),
+ PatKind::Range(lhs, rhs, _) => self.check_pat_range(pat.span, lhs, rhs, expected, ti),
+ PatKind::Binding(ba, var_id, _, sub) => {
+ self.check_pat_ident(pat, ba, var_id, sub, expected, def_bm, ti)
+ }
+ PatKind::TupleStruct(ref qpath, subpats, ddpos) => {
+ self.check_pat_tuple_struct(pat, qpath, subpats, ddpos, expected, def_bm, ti)
+ }
+ PatKind::Path(ref qpath) => {
+ self.check_pat_path(pat, qpath, path_res.unwrap(), expected, ti)
+ }
+ PatKind::Struct(ref qpath, fields, has_rest_pat) => {
+ self.check_pat_struct(pat, qpath, fields, has_rest_pat, expected, def_bm, ti)
+ }
+ PatKind::Or(pats) => {
+ for pat in pats {
+ self.check_pat(pat, expected, def_bm, ti);
+ }
+ expected
+ }
+ PatKind::Tuple(elements, ddpos) => {
+ self.check_pat_tuple(pat.span, elements, ddpos, expected, def_bm, ti)
+ }
+ PatKind::Box(inner) => self.check_pat_box(pat.span, inner, expected, def_bm, ti),
+ PatKind::Ref(inner, mutbl) => {
+ self.check_pat_ref(pat, inner, mutbl, expected, def_bm, ti)
+ }
+ PatKind::Slice(before, slice, after) => {
+ self.check_pat_slice(pat.span, before, slice, after, expected, def_bm, ti)
+ }
+ };
+
+ self.write_ty(pat.hir_id, ty);
+
+ // (note_1): In most of the cases where (note_1) is referenced
+ // (literals and constants being the exception), we relate types
+ // using strict equality, even though subtyping would be sufficient.
+ // There are a few reasons for this, some of which are fairly subtle
+ // and which cost me (nmatsakis) an hour or two debugging to remember,
+ // so I thought I'd write them down this time.
+ //
+ // 1. There is no loss of expressiveness here, though it does
+ // cause some inconvenience. What we are saying is that the type
+ // of `x` becomes *exactly* what is expected. This can cause unnecessary
+ // errors in some cases, such as this one:
+ //
+ // ```
+ // fn foo<'x>(x: &'x i32) {
+ // let a = 1;
+ // let mut z = x;
+ // z = &a;
+ // }
+ // ```
+ //
+ // The reason we might get an error is that `z` might be
+ // assigned a type like `&'x i32`, and then we would have
+ // a problem when we try to assign `&a` to `z`, because
+ // the lifetime of `&a` (i.e., the enclosing block) is
+ // shorter than `'x`.
+ //
+ // HOWEVER, this code works fine. The reason is that the
+ // expected type here is whatever type the user wrote, not
+ // the initializer's type. In this case the user wrote
+ // nothing, so we are going to create a type variable `Z`.
+ // Then we will assign the type of the initializer (`&'x i32`)
+ // as a subtype of `Z`: `&'x i32 <: Z`. And hence we
+ // will instantiate `Z` as a type `&'0 i32` where `'0` is
+ // a fresh region variable, with the constraint that `'x : '0`.
+ // So basically we're all set.
+ //
+ // Note that there are two tests to check that this remains true
+ // (`regions-reassign-{match,let}-bound-pointer.rs`).
+ //
+ // 2. Things go horribly wrong if we use subtype. The reason for
+ // THIS is a fairly subtle case involving bound regions. See the
+ // `givens` field in `region_constraints`, as well as the test
+ // `regions-relate-bound-regions-on-closures-to-inference-variables.rs`,
+ // for details. Short version is that we must sometimes detect
+ // relationships between specific region variables and regions
+ // bound in a closure signature, and that detection gets thrown
+ // off when we substitute fresh region variables here to enable
+ // subtyping.
+ }
+
+ /// Compute the new expected type and default binding mode from the old ones
+ /// as well as the pattern form we are currently checking.
+ fn calc_default_binding_mode(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ adjust_mode: AdjustMode,
+ ) -> (Ty<'tcx>, BindingMode) {
+ match adjust_mode {
+ AdjustMode::Pass => (expected, def_bm),
+ AdjustMode::Reset => (expected, INITIAL_BM),
+ AdjustMode::Peel => self.peel_off_references(pat, expected, def_bm),
+ }
+ }
+
+ /// How should the binding mode and expected type be adjusted?
+ ///
+ /// When the pattern is a path pattern, `opt_path_res` must be `Some(res)`.
+ fn calc_adjust_mode(&self, pat: &'tcx Pat<'tcx>, opt_path_res: Option<Res>) -> AdjustMode {
+ // When we perform destructuring assignment, we disable default match bindings, which are
+ // unintuitive in this context.
+ if !pat.default_binding_modes {
+ return AdjustMode::Reset;
+ }
+ match &pat.kind {
+            // Type checking these product-like types successfully always requires
+            // that the expected type be of those types and not reference types.
+ PatKind::Struct(..)
+ | PatKind::TupleStruct(..)
+ | PatKind::Tuple(..)
+ | PatKind::Box(_)
+ | PatKind::Range(..)
+ | PatKind::Slice(..) => AdjustMode::Peel,
+ // String and byte-string literals result in types `&str` and `&[u8]` respectively.
+ // All other literals result in non-reference types.
+            // As a result, we allow `if let 0 = &&0 {}` but not `if let "foo" = &&"foo" {}`.
+ //
+ // Call `resolve_vars_if_possible` here for inline const blocks.
+ PatKind::Lit(lt) => match self.resolve_vars_if_possible(self.check_expr(lt)).kind() {
+ ty::Ref(..) => AdjustMode::Pass,
+ _ => AdjustMode::Peel,
+ },
+ PatKind::Path(_) => match opt_path_res.unwrap() {
+ // These constants can be of a reference type, e.g. `const X: &u8 = &0;`.
+ // Peeling the reference types too early will cause type checking failures.
+ // Although it would be possible to *also* peel the types of the constants too.
+ Res::Def(DefKind::Const | DefKind::AssocConst, _) => AdjustMode::Pass,
+                // In the `ValueNS`, we have `SelfCtor(..) | Def(DefKind::Ctor(_, CtorKind::Const), _)`
+                // remaining, which could successfully compile. The former being `Self`
+                // requires a unit struct. In either case, and unlike constants, the pattern
+                // itself cannot be a reference type, so peeling doesn't give up any expressiveness.
+ _ => AdjustMode::Peel,
+ },
+ // When encountering a `& mut? pat` pattern, reset to "by value".
+ // This is so that `x` and `y` here are by value, as they appear to be:
+ //
+ // ```
+ // match &(&22, &44) {
+ // (&x, &y) => ...
+ // }
+ // ```
+ //
+ // See issue #46688.
+ PatKind::Ref(..) => AdjustMode::Reset,
+ // A `_` pattern works with any expected type, so there's no need to do anything.
+ PatKind::Wild
+ // Bindings also work with whatever the expected type is,
+ // and moreover if we peel references off, that will give us the wrong binding type.
+ // Also, we can have a subpattern `binding @ pat`.
+ // Each side of the `@` should be treated independently (like with OR-patterns).
+ | PatKind::Binding(..)
+ // An OR-pattern just propagates to each individual alternative.
+ // This is maximally flexible, allowing e.g., `Some(mut x) | &Some(mut x)`.
+ // In that example, `Some(mut x)` results in `Peel` whereas `&Some(mut x)` in `Reset`.
+ | PatKind::Or(_) => AdjustMode::Pass,
+ }
+ }
+
+    /// Peel off as many immediately nested `& mut?` from the expected type as possible
+    /// and return the new expected type and default binding mode.
+    /// The adjustments vector, if non-empty, is stored in a table.
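+    ///
+    /// For example (illustrative), matching `Some(x)` against a scrutinee of type
+    /// `&&Option<i32>` peels both references, records two adjustments, and flips
+    /// the default binding mode to by-reference, so `x` is bound as `&i32`:
+    ///
+    /// ```ignore (illustrative)
+    /// match &&Some(5i32) {
+    ///     Some(x) => {} // `x: &i32`
+    ///     None => {}
+    /// }
+    /// ```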
+ fn peel_off_references(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ expected: Ty<'tcx>,
+ mut def_bm: BindingMode,
+ ) -> (Ty<'tcx>, BindingMode) {
+ let mut expected = self.resolve_vars_with_obligations(expected);
+
+ // Peel off as many `&` or `&mut` from the scrutinee type as possible. For example,
+ // for `match &&&mut Some(5)` the loop runs three times, aborting when it reaches
+ // the `Some(5)` which is not of type Ref.
+ //
+ // For each ampersand peeled off, update the binding mode and push the original
+ // type into the adjustments vector.
+ //
+ // See the examples in `ui/match-defbm*.rs`.
+ let mut pat_adjustments = vec![];
+ while let ty::Ref(_, inner_ty, inner_mutability) = *expected.kind() {
+ debug!("inspecting {:?}", expected);
+
+ debug!("current discriminant is Ref, inserting implicit deref");
+ // Preserve the reference type. We'll need it later during THIR lowering.
+ pat_adjustments.push(expected);
+
+ expected = inner_ty;
+ def_bm = ty::BindByReference(match def_bm {
+ // If default binding mode is by value, make it `ref` or `ref mut`
+ // (depending on whether we observe `&` or `&mut`).
+ ty::BindByValue(_) |
+ // When `ref mut`, stay a `ref mut` (on `&mut`) or downgrade to `ref` (on `&`).
+ ty::BindByReference(hir::Mutability::Mut) => inner_mutability,
+ // Once a `ref`, always a `ref`.
+ // This is because a `& &mut` cannot mutate the underlying value.
+ ty::BindByReference(m @ hir::Mutability::Not) => m,
+ });
+ }
+
+ if !pat_adjustments.is_empty() {
+ debug!("default binding mode is now {:?}", def_bm);
+ self.inh
+ .typeck_results
+ .borrow_mut()
+ .pat_adjustments_mut()
+ .insert(pat.hir_id, pat_adjustments);
+ }
+
+ (expected, def_bm)
+ }
+
+ fn check_pat_lit(
+ &self,
+ span: Span,
+ lt: &hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ // We've already computed the type above (when checking for a non-ref pat),
+ // so avoid computing it again.
+ let ty = self.node_ty(lt.hir_id);
+
+ // Byte string patterns behave the same way as array patterns
+ // They can denote both statically and dynamically-sized byte arrays.
+ let mut pat_ty = ty;
+ if let hir::ExprKind::Lit(Spanned { node: ast::LitKind::ByteStr(_), .. }) = lt.kind {
+ let expected = self.structurally_resolved_type(span, expected);
+ if let ty::Ref(_, inner_ty, _) = expected.kind()
+ && matches!(inner_ty.kind(), ty::Slice(_))
+ {
+ let tcx = self.tcx;
+ trace!(?lt.hir_id.local_id, "polymorphic byte string lit");
+ self.typeck_results
+ .borrow_mut()
+ .treat_byte_string_as_slice
+ .insert(lt.hir_id.local_id);
+ pat_ty = tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_slice(tcx.types.u8));
+ }
+ }
+
+        // Somewhat surprising: in this case, the subtyping relation goes the
+        // opposite way from the other cases. Actually what we really want is not
+        // a subtyping relation at all but rather that there exists a LUB
+        // (so that they can be compared). However, in practice, constants are
+        // always scalars or strings. For scalars subtyping is irrelevant,
+        // and for strings the type of `ty` is `&'static str`, so if we say that
+ //
+ // &'static str <: expected
+ //
+ // then that's equivalent to there existing a LUB.
+ let cause = self.pattern_cause(ti, span);
+ if let Some(mut err) = self.demand_suptype_with_origin(&cause, expected, pat_ty) {
+ err.emit_unless(
+ ti.span
+ .filter(|&s| {
+ // In the case of `if`- and `while`-expressions we've already checked
+ // that `scrutinee: bool`. We know that the pattern is `true`,
+ // so an error here would be a duplicate and from the wrong POV.
+ s.is_desugaring(DesugaringKind::CondTemporary)
+ })
+ .is_some(),
+ );
+ }
+
+ pat_ty
+ }
+
+ fn check_pat_range(
+ &self,
+ span: Span,
+ lhs: Option<&'tcx hir::Expr<'tcx>>,
+ rhs: Option<&'tcx hir::Expr<'tcx>>,
+ expected: Ty<'tcx>,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let calc_side = |opt_expr: Option<&'tcx hir::Expr<'tcx>>| match opt_expr {
+ None => None,
+ Some(expr) => {
+ let ty = self.check_expr(expr);
+                // Check that the end-point is possibly of numeric or char type.
+                // The early check here is not for correctness, but rather better
+                // diagnostics (e.g. when `&str` is being matched, `expected` will
+                // be peeled to `str` while `ty` here is still `&str`; if we don't
+                // error early here, a rather confusing unification error will be
+                // emitted instead).
+ let fail =
+ !(ty.is_numeric() || ty.is_char() || ty.is_ty_var() || ty.references_error());
+ Some((fail, ty, expr.span))
+ }
+ };
+ let mut lhs = calc_side(lhs);
+ let mut rhs = calc_side(rhs);
+
+ if let (Some((true, ..)), _) | (_, Some((true, ..))) = (lhs, rhs) {
+ // There exists a side that didn't meet our criteria that the end-point
+ // be of a numeric or char type, as checked in `calc_side` above.
+ self.emit_err_pat_range(span, lhs, rhs);
+ return self.tcx.ty_error();
+ }
+
+ // Unify each side with `expected`.
+ // Subtyping doesn't matter here, as the value is some kind of scalar.
+ let demand_eqtype = |x: &mut _, y| {
+ if let Some((ref mut fail, x_ty, x_span)) = *x
+ && let Some(mut err) = self.demand_eqtype_pat_diag(x_span, expected, x_ty, ti)
+ {
+ if let Some((_, y_ty, y_span)) = y {
+ self.endpoint_has_type(&mut err, y_span, y_ty);
+ }
+ err.emit();
+ *fail = true;
+ }
+ };
+ demand_eqtype(&mut lhs, rhs);
+ demand_eqtype(&mut rhs, lhs);
+
+ if let (Some((true, ..)), _) | (_, Some((true, ..))) = (lhs, rhs) {
+ return self.tcx.ty_error();
+ }
+
+ // Find the unified type and check if it's of numeric or char type again.
+ // This check is needed if both sides are inference variables.
+ // We require types to be resolved here so that we emit inference failure
+ // rather than "_ is not a char or numeric".
+ let ty = self.structurally_resolved_type(span, expected);
+ if !(ty.is_numeric() || ty.is_char() || ty.references_error()) {
+ if let Some((ref mut fail, _, _)) = lhs {
+ *fail = true;
+ }
+ if let Some((ref mut fail, _, _)) = rhs {
+ *fail = true;
+ }
+ self.emit_err_pat_range(span, lhs, rhs);
+ return self.tcx.ty_error();
+ }
+ ty
+ }
+
+ fn endpoint_has_type(&self, err: &mut Diagnostic, span: Span, ty: Ty<'_>) {
+ if !ty.references_error() {
+ err.span_label(span, &format!("this is of type `{}`", ty));
+ }
+ }
+
+ fn emit_err_pat_range(
+ &self,
+ span: Span,
+ lhs: Option<(bool, Ty<'tcx>, Span)>,
+ rhs: Option<(bool, Ty<'tcx>, Span)>,
+ ) {
+ let span = match (lhs, rhs) {
+ (Some((true, ..)), Some((true, ..))) => span,
+ (Some((true, _, sp)), _) => sp,
+ (_, Some((true, _, sp))) => sp,
+ _ => span_bug!(span, "emit_err_pat_range: no side failed or exists but still error?"),
+ };
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0029,
+ "only `char` and numeric types are allowed in range patterns"
+ );
+ let msg = |ty| {
+ let ty = self.resolve_vars_if_possible(ty);
+ format!("this is of type `{}` but it should be `char` or numeric", ty)
+ };
+ let mut one_side_err = |first_span, first_ty, second: Option<(bool, Ty<'tcx>, Span)>| {
+ err.span_label(first_span, &msg(first_ty));
+ if let Some((_, ty, sp)) = second {
+ let ty = self.resolve_vars_if_possible(ty);
+ self.endpoint_has_type(&mut err, sp, ty);
+ }
+ };
+ match (lhs, rhs) {
+ (Some((true, lhs_ty, lhs_sp)), Some((true, rhs_ty, rhs_sp))) => {
+ err.span_label(lhs_sp, &msg(lhs_ty));
+ err.span_label(rhs_sp, &msg(rhs_ty));
+ }
+ (Some((true, lhs_ty, lhs_sp)), rhs) => one_side_err(lhs_sp, lhs_ty, rhs),
+ (lhs, Some((true, rhs_ty, rhs_sp))) => one_side_err(rhs_sp, rhs_ty, lhs),
+ _ => span_bug!(span, "Impossible, verified above."),
+ }
+ if self.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "In a match expression, only numbers and characters can be matched \
+ against a range. This is because the compiler checks that the range \
+ is non-empty at compile-time, and is unable to evaluate arbitrary \
+ comparison functions. If you want to capture values of an orderable \
+ type between two end-points, you can use a guard.",
+ );
+ }
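+ // A guard-based rewrite, as the note above suggests (illustrative user code):
+ //     match s { x if "hello" <= x && x <= "world" => {}, _ => {} }
+ // captures an orderable value between two end-points without a range pattern.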
+ err.emit();
+ }
+
+ fn check_pat_ident(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ ba: hir::BindingAnnotation,
+ var_id: HirId,
+ sub: Option<&'tcx Pat<'tcx>>,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ // Determine the binding mode...
+ let bm = match ba {
+ hir::BindingAnnotation::NONE => def_bm,
+ _ => BindingMode::convert(ba),
+ };
+ // ...and store it in a side table:
+ self.inh.typeck_results.borrow_mut().pat_binding_modes_mut().insert(pat.hir_id, bm);
+
+ debug!("check_pat_ident: pat.hir_id={:?} bm={:?}", pat.hir_id, bm);
+
+ let local_ty = self.local_ty(pat.span, pat.hir_id).decl_ty;
+ let eq_ty = match bm {
+ ty::BindByReference(mutbl) => {
+ // If the binding is like `ref x | ref mut x`,
+ // then `x` is assigned a value of type `&M T` where M is the
+ // mutability and T is the expected type.
+ //
+ // `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)`
+ // is required. However, we use equality, which is stronger.
+ // See (note_1) for an explanation.
+ self.new_ref_ty(pat.span, mutbl, expected)
+ }
+ // Otherwise, the type of x is the expected type `T`.
+ ty::BindByValue(_) => {
+ // As above, `T <: typeof(x)` is required, but we use equality, see (note_1).
+ expected
+ }
+ };
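+ // Illustrative example (not part of the original change): for
+ // `let ref x = 0u8;` we get `bm = BindByReference(Not)` and `eq_ty = &u8`,
+ // whereas plain `let x = 0u8;` leaves `eq_ty` as the expected `u8`.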
+ self.demand_eqtype_pat(pat.span, eq_ty, local_ty, ti);
+
+ // If there are multiple arms, make sure they all agree on
+ // what the type of the binding `x` ought to be.
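+ // For instance (illustrative): in `Ok(x) | Err(x)` matched against a
+ // `Result<u32, String>`, `x` would be `u32` in one alternative and `String`
+ // in the other; `check_binding_alt_eq_ty` reports that mismatch.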
+ if var_id != pat.hir_id {
+ self.check_binding_alt_eq_ty(ba, pat.span, var_id, local_ty, ti);
+ }
+
+ if let Some(p) = sub {
+ self.check_pat(p, expected, def_bm, ti);
+ }
+
+ local_ty
+ }
+
+ fn check_binding_alt_eq_ty(
+ &self,
+ ba: hir::BindingAnnotation,
+ span: Span,
+ var_id: HirId,
+ ty: Ty<'tcx>,
+ ti: TopInfo<'tcx>,
+ ) {
+ let var_ty = self.local_ty(span, var_id).decl_ty;
+ if let Some(mut err) = self.demand_eqtype_pat_diag(span, var_ty, ty, ti) {
+ let hir = self.tcx.hir();
+ let var_ty = self.resolve_vars_with_obligations(var_ty);
+ let msg = format!("first introduced with type `{var_ty}` here");
+ err.span_label(hir.span(var_id), msg);
+ let in_match = hir.parent_iter(var_id).any(|(_, n)| {
+ matches!(
+ n,
+ hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Match(.., hir::MatchSource::Normal),
+ ..
+ })
+ )
+ });
+ let pre = if in_match { "in the same arm, " } else { "" };
+ err.note(&format!("{}a binding must have the same type in all alternatives", pre));
+ self.suggest_adding_missing_ref_or_removing_ref(
+ &mut err,
+ span,
+ var_ty,
+ self.resolve_vars_with_obligations(ty),
+ ba,
+ );
+ err.emit();
+ }
+ }
+
+ fn suggest_adding_missing_ref_or_removing_ref(
+ &self,
+ err: &mut Diagnostic,
+ span: Span,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ba: hir::BindingAnnotation,
+ ) {
+ match (expected.kind(), actual.kind(), ba) {
+ (ty::Ref(_, inner_ty, _), _, hir::BindingAnnotation::NONE)
+ if self.can_eq(self.param_env, *inner_ty, actual).is_ok() =>
+ {
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ "consider adding `ref`",
+ "ref ",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ (_, ty::Ref(_, inner_ty, _), hir::BindingAnnotation::REF)
+ if self.can_eq(self.param_env, expected, *inner_ty).is_ok() =>
+ {
+ err.span_suggestion_verbose(
+ span.with_hi(span.lo() + BytePos(4)),
+ "consider removing `ref`",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => (),
+ }
+ }
+
+ // Precondition: pat is a Ref(_) pattern
+ fn borrow_pat_suggestion(&self, err: &mut Diagnostic, pat: &Pat<'_>) {
+ let tcx = self.tcx;
+ if let PatKind::Ref(inner, mutbl) = pat.kind
+ && let PatKind::Binding(_, _, binding, ..) = inner.kind {
+ let binding_parent_id = tcx.hir().get_parent_node(pat.hir_id);
+ let binding_parent = tcx.hir().get(binding_parent_id);
+ debug!(?inner, ?pat, ?binding_parent);
+
+ let mutability = match mutbl {
+ ast::Mutability::Mut => "mut",
+ ast::Mutability::Not => "",
+ };
+
+ let mut_var_suggestion = 'block: {
+ if !matches!(mutbl, ast::Mutability::Mut) {
+ break 'block None;
+ }
+
+ let ident_kind = match binding_parent {
+ hir::Node::Param(_) => "parameter",
+ hir::Node::Local(_) => "variable",
+ hir::Node::Arm(_) => "binding",
+
+ // Provide diagnostics only if the parent pattern is struct-like,
+ // i.e. where `mut binding` makes sense
+ hir::Node::Pat(Pat { kind, .. }) => match kind {
+ PatKind::Struct(..)
+ | PatKind::TupleStruct(..)
+ | PatKind::Or(..)
+ | PatKind::Tuple(..)
+ | PatKind::Slice(..) => "binding",
+
+ PatKind::Wild
+ | PatKind::Binding(..)
+ | PatKind::Path(..)
+ | PatKind::Box(..)
+ | PatKind::Ref(..)
+ | PatKind::Lit(..)
+ | PatKind::Range(..) => break 'block None,
+ },
+
+ // Don't provide suggestions in other cases
+ _ => break 'block None,
+ };
+
+ Some((
+ pat.span,
+ format!("to declare a mutable {ident_kind} use"),
+ format!("mut {binding}"),
+ ))
+
+ };
+
+ match binding_parent {
+ // Check that there is an explicit type (i.e. this is not a closure param with an inferred type)
+ // so we don't suggest moving something to the type that does not exist
+ hir::Node::Param(hir::Param { ty_span, .. }) if binding.span != *ty_span => {
+ err.multipart_suggestion_verbose(
+ format!("to take parameter `{binding}` by reference, move `&{mutability}` to the type"),
+ vec![
+ (pat.span.until(inner.span), "".to_owned()),
+ (ty_span.shrink_to_lo(), format!("&{}", mutbl.prefix_str())),
+ ],
+ Applicability::MachineApplicable
+ );
+
+ if let Some((sp, msg, sugg)) = mut_var_suggestion {
+ err.span_note(sp, format!("{msg}: `{sugg}`"));
+ }
+ }
+ hir::Node::Param(_) | hir::Node::Arm(_) | hir::Node::Pat(_) => {
+ // rely on match ergonomics or it might be nested `&&pat`
+ err.span_suggestion_verbose(
+ pat.span.until(inner.span),
+ format!("consider removing `&{mutability}` from the pattern"),
+ "",
+ Applicability::MaybeIncorrect,
+ );
+
+ if let Some((sp, msg, sugg)) = mut_var_suggestion {
+ err.span_note(sp, format!("{msg}: `{sugg}`"));
+ }
+ }
+ _ if let Some((sp, msg, sugg)) = mut_var_suggestion => {
+ err.span_suggestion(sp, msg, sugg, Applicability::MachineApplicable);
+ }
+ _ => {} // don't provide suggestions in other cases #55175
+ }
+ }
+ }
+
+ pub fn check_dereferenceable(&self, span: Span, expected: Ty<'tcx>, inner: &Pat<'_>) -> bool {
+ if let PatKind::Binding(..) = inner.kind
+ && let Some(mt) = self.shallow_resolve(expected).builtin_deref(true)
+ && let ty::Dynamic(..) = mt.ty.kind()
+ {
+ // This is "x = SomeTrait" being reduced from
+ // "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error.
+ let type_str = self.ty_to_string(expected);
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0033,
+ "type `{}` cannot be dereferenced",
+ type_str
+ );
+ err.span_label(span, format!("type `{type_str}` cannot be dereferenced"));
+ if self.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(CANNOT_IMPLICITLY_DEREF_POINTER_TRAIT_OBJ);
+ }
+ err.emit();
+ return false;
+ }
+ true
+ }
+
+ fn check_pat_struct(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ qpath: &hir::QPath<'_>,
+ fields: &'tcx [hir::PatField<'tcx>],
+ has_rest_pat: bool,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ // Resolve the path and check the definition for errors.
+ let Some((variant, pat_ty)) = self.check_struct_path(qpath, pat.hir_id) else {
+ let err = self.tcx.ty_error();
+ for field in fields {
+ let ti = ti;
+ self.check_pat(field.pat, err, def_bm, ti);
+ }
+ return err;
+ };
+
+ // Type-check the path.
+ self.demand_eqtype_pat(pat.span, expected, pat_ty, ti);
+
+ // Type-check subpatterns.
+ if self.check_struct_pat_fields(pat_ty, &pat, variant, fields, has_rest_pat, def_bm, ti) {
+ pat_ty
+ } else {
+ self.tcx.ty_error()
+ }
+ }
+
+ fn check_pat_path(
+ &self,
+ pat: &Pat<'tcx>,
+ qpath: &hir::QPath<'_>,
+ path_resolution: (Res, Option<Ty<'tcx>>, &'tcx [hir::PathSegment<'tcx>]),
+ expected: Ty<'tcx>,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+
+ // We have already resolved the path.
+ let (res, opt_ty, segments) = path_resolution;
+ match res {
+ Res::Err => {
+ self.set_tainted_by_errors();
+ return tcx.ty_error();
+ }
+ Res::Def(DefKind::AssocFn | DefKind::Ctor(_, CtorKind::Fictive | CtorKind::Fn), _) => {
+ report_unexpected_variant_res(tcx, res, qpath, pat.span);
+ return tcx.ty_error();
+ }
+ Res::SelfCtor(..)
+ | Res::Def(
+ DefKind::Ctor(_, CtorKind::Const)
+ | DefKind::Const
+ | DefKind::AssocConst
+ | DefKind::ConstParam,
+ _,
+ ) => {} // OK
+ _ => bug!("unexpected pattern resolution: {:?}", res),
+ }
+
+ // Type-check the path.
+ let (pat_ty, pat_res) =
+ self.instantiate_value_path(segments, opt_ty, res, pat.span, pat.hir_id);
+ if let Some(err) =
+ self.demand_suptype_with_origin(&self.pattern_cause(ti, pat.span), expected, pat_ty)
+ {
+ self.emit_bad_pat_path(err, pat, res, pat_res, pat_ty, segments);
+ }
+ pat_ty
+ }
+
+ fn maybe_suggest_range_literal(
+ &self,
+ e: &mut Diagnostic,
+ opt_def_id: Option<hir::def_id::DefId>,
+ ident: Ident,
+ ) -> bool {
+ match opt_def_id {
+ Some(def_id) => match self.tcx.hir().get_if_local(def_id) {
+ Some(hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Const(_, body_id), ..
+ })) => match self.tcx.hir().get(body_id.hir_id) {
+ hir::Node::Expr(expr) => {
+ if hir::is_range_literal(expr) {
+ let span = self.tcx.hir().span(body_id.hir_id);
+ if let Ok(snip) = self.tcx.sess.source_map().span_to_snippet(span) {
+ e.span_suggestion_verbose(
+ ident.span,
+ "you may want to move the range into the match block",
+ snip,
+ Applicability::MachineApplicable,
+ );
+ return true;
+ }
+ }
+ }
+ _ => (),
+ },
+ _ => (),
+ },
+ _ => (),
+ }
+ false
+ }
+
+ fn emit_bad_pat_path(
+ &self,
+ mut e: DiagnosticBuilder<'_, ErrorGuaranteed>,
+ pat: &hir::Pat<'tcx>,
+ res: Res,
+ pat_res: Res,
+ pat_ty: Ty<'tcx>,
+ segments: &'tcx [hir::PathSegment<'tcx>],
+ ) {
+ let pat_span = pat.span;
+ if let Some(span) = self.tcx.hir().res_span(pat_res) {
+ e.span_label(span, &format!("{} defined here", res.descr()));
+ if let [hir::PathSegment { ident, .. }] = &*segments {
+ e.span_label(
+ pat_span,
+ &format!(
+ "`{}` is interpreted as {} {}, not a new binding",
+ ident,
+ res.article(),
+ res.descr(),
+ ),
+ );
+ match self.tcx.hir().get(self.tcx.hir().get_parent_node(pat.hir_id)) {
+ hir::Node::PatField(..) => {
+ e.span_suggestion_verbose(
+ ident.span.shrink_to_hi(),
+ "bind the struct field to a different name instead",
+ format!(": other_{}", ident.as_str().to_lowercase()),
+ Applicability::HasPlaceholders,
+ );
+ }
+ _ => {
+ let (type_def_id, item_def_id) = match pat_ty.kind() {
+ Adt(def, _) => match res {
+ Res::Def(DefKind::Const, def_id) => (Some(def.did()), Some(def_id)),
+ _ => (None, None),
+ },
+ _ => (None, None),
+ };
+
+ let ranges = &[
+ self.tcx.lang_items().range_struct(),
+ self.tcx.lang_items().range_from_struct(),
+ self.tcx.lang_items().range_to_struct(),
+ self.tcx.lang_items().range_full_struct(),
+ self.tcx.lang_items().range_inclusive_struct(),
+ self.tcx.lang_items().range_to_inclusive_struct(),
+ ];
+ if type_def_id != None && ranges.contains(&type_def_id) {
+ if !self.maybe_suggest_range_literal(&mut e, item_def_id, *ident) {
+ let msg = "constants only support matching by type, \
+ if you meant to match against a range of values, \
+ consider using a range pattern like `min ..= max` in the match block";
+ e.note(msg);
+ }
+ } else {
+ let msg = "introduce a new binding instead";
+ let sugg = format!("other_{}", ident.as_str().to_lowercase());
+ e.span_suggestion(
+ ident.span,
+ msg,
+ sugg,
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+ };
+ }
+ }
+ e.emit();
+ }
+
+ fn check_pat_tuple_struct(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ qpath: &'tcx hir::QPath<'tcx>,
+ subpats: &'tcx [Pat<'tcx>],
+ ddpos: hir::DotDotPos,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let on_error = || {
+ for pat in subpats {
+ self.check_pat(pat, tcx.ty_error(), def_bm, ti);
+ }
+ };
+ let report_unexpected_res = |res: Res| {
+ let sm = tcx.sess.source_map();
+ let path_str = sm
+ .span_to_snippet(sm.span_until_char(pat.span, '('))
+ .map_or_else(|_| String::new(), |s| format!(" `{}`", s.trim_end()));
+ let msg = format!(
+ "expected tuple struct or tuple variant, found {}{}",
+ res.descr(),
+ path_str
+ );
+
+ let mut err = struct_span_err!(tcx.sess, pat.span, E0164, "{msg}");
+ match res {
+ Res::Def(DefKind::Fn | DefKind::AssocFn, _) => {
+ err.span_label(pat.span, "`fn` calls are not allowed in patterns");
+ err.help(
+ "for more information, visit \
+ https://doc.rust-lang.org/book/ch18-00-patterns.html",
+ );
+ }
+ _ => {
+ err.span_label(pat.span, "not a tuple variant or struct");
+ }
+ }
+ err.emit();
+ on_error();
+ };
+
+ // Resolve the path and check the definition for errors.
+ let (res, opt_ty, segments) =
+ self.resolve_ty_and_res_fully_qualified_call(qpath, pat.hir_id, pat.span);
+ if res == Res::Err {
+ self.set_tainted_by_errors();
+ on_error();
+ return self.tcx.ty_error();
+ }
+
+ // Type-check the path.
+ let (pat_ty, res) =
+ self.instantiate_value_path(segments, opt_ty, res, pat.span, pat.hir_id);
+ if !pat_ty.is_fn() {
+ report_unexpected_res(res);
+ return tcx.ty_error();
+ }
+
+ let variant = match res {
+ Res::Err => {
+ self.set_tainted_by_errors();
+ on_error();
+ return tcx.ty_error();
+ }
+ Res::Def(DefKind::AssocConst | DefKind::AssocFn, _) => {
+ report_unexpected_res(res);
+ return tcx.ty_error();
+ }
+ Res::Def(DefKind::Ctor(_, CtorKind::Fn), _) => tcx.expect_variant_res(res),
+ _ => bug!("unexpected pattern resolution: {:?}", res),
+ };
+
+ // Replace constructor type with constructed type for tuple struct patterns.
+ let pat_ty = pat_ty.fn_sig(tcx).output();
+ let pat_ty = pat_ty.no_bound_vars().expect("expected fn type");
+
+ // Type-check the tuple struct pattern against the expected type.
+ let diag = self.demand_eqtype_pat_diag(pat.span, expected, pat_ty, ti);
+ let had_err = if let Some(mut err) = diag {
+ err.emit();
+ true
+ } else {
+ false
+ };
+
+ // Type-check subpatterns.
+ if subpats.len() == variant.fields.len()
+ || subpats.len() < variant.fields.len() && ddpos.as_opt_usize().is_some()
+ {
+ let ty::Adt(_, substs) = pat_ty.kind() else {
+ bug!("unexpected pattern type {:?}", pat_ty);
+ };
+ for (i, subpat) in subpats.iter().enumerate_and_adjust(variant.fields.len(), ddpos) {
+ let field_ty = self.field_ty(subpat.span, &variant.fields[i], substs);
+ self.check_pat(subpat, field_ty, def_bm, ti);
+
+ self.tcx.check_stability(
+ variant.fields[i].did,
+ Some(pat.hir_id),
+ subpat.span,
+ None,
+ );
+ }
+ } else {
+ // Pattern has wrong number of fields.
+ self.e0023(pat.span, res, qpath, subpats, &variant.fields, expected, had_err);
+ on_error();
+ return tcx.ty_error();
+ }
+ pat_ty
+ }
+
+ fn e0023(
+ &self,
+ pat_span: Span,
+ res: Res,
+ qpath: &hir::QPath<'_>,
+ subpats: &'tcx [Pat<'tcx>],
+ fields: &'tcx [ty::FieldDef],
+ expected: Ty<'tcx>,
+ had_err: bool,
+ ) {
+ let subpats_ending = pluralize!(subpats.len());
+ let fields_ending = pluralize!(fields.len());
+
+ let subpat_spans = if subpats.is_empty() {
+ vec![pat_span]
+ } else {
+ subpats.iter().map(|p| p.span).collect()
+ };
+ let last_subpat_span = *subpat_spans.last().unwrap();
+ let res_span = self.tcx.def_span(res.def_id());
+ let def_ident_span = self.tcx.def_ident_span(res.def_id()).unwrap_or(res_span);
+ let field_def_spans = if fields.is_empty() {
+ vec![res_span]
+ } else {
+ fields.iter().map(|f| f.ident(self.tcx).span).collect()
+ };
+ let last_field_def_span = *field_def_spans.last().unwrap();
+
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ MultiSpan::from_spans(subpat_spans),
+ E0023,
+ "this pattern has {} field{}, but the corresponding {} has {} field{}",
+ subpats.len(),
+ subpats_ending,
+ res.descr(),
+ fields.len(),
+ fields_ending,
+ );
+ err.span_label(
+ last_subpat_span,
+ &format!("expected {} field{}, found {}", fields.len(), fields_ending, subpats.len()),
+ );
+ if self.tcx.sess.source_map().is_multiline(qpath.span().between(last_subpat_span)) {
+ err.span_label(qpath.span(), "");
+ }
+ if self.tcx.sess.source_map().is_multiline(def_ident_span.between(last_field_def_span)) {
+ err.span_label(def_ident_span, format!("{} defined here", res.descr()));
+ }
+ for span in &field_def_spans[..field_def_spans.len() - 1] {
+ err.span_label(*span, "");
+ }
+ err.span_label(
+ last_field_def_span,
+ &format!("{} has {} field{}", res.descr(), fields.len(), fields_ending),
+ );
+
+ // Identify the case `Some(x, y)` where the expected type is e.g. `Option<(T, U)>`.
+ // More generally, the expected type wants a tuple variant with one field of an
+ // N-arity-tuple, e.g., `V_i((p_0, .., p_N))`. Meanwhile, the user supplied a pattern
+ // with the subpatterns directly in the tuple variant pattern, e.g., `V_i(p_0, .., p_N)`.
+ let missing_parentheses = match (&expected.kind(), fields, had_err) {
+ // #67037: only do this if we could successfully type-check the expected type against
+ // the tuple struct pattern. Otherwise the substs could get out of range on e.g.,
+ // `let P() = U;` where `P != U` with `struct P<T>(T);`.
+ (ty::Adt(_, substs), [field], false) => {
+ let field_ty = self.field_ty(pat_span, field, substs);
+ match field_ty.kind() {
+ ty::Tuple(fields) => fields.len() == subpats.len(),
+ _ => false,
+ }
+ }
+ _ => false,
+ };
+ if missing_parentheses {
+ let (left, right) = match subpats {
+ // This is the zero case; we aim to get the "hi" part of the `QPath`'s
+ // span as the "lo" and then the "hi" part of the pattern's span as the "hi".
+ // This looks like:
+ //
+ // help: missing parentheses
+ // |
+ // L | let A(()) = A(());
+ // | ^ ^
+ [] => (qpath.span().shrink_to_hi(), pat_span),
+ // Easy case. Just take the "lo" of the first sub-pattern and the "hi" of the
+ // last sub-pattern. In the case of `A(x)` the first and last may coincide.
+ // This looks like:
+ //
+ // help: missing parentheses
+ // |
+ // L | let A((x, y)) = A((1, 2));
+ // | ^ ^
+ [first, ..] => (first.span.shrink_to_lo(), subpats.last().unwrap().span),
+ };
+ err.multipart_suggestion(
+ "missing parentheses",
+ vec![(left, "(".to_string()), (right.shrink_to_hi(), ")".to_string())],
+ Applicability::MachineApplicable,
+ );
+ } else if fields.len() > subpats.len() && pat_span != DUMMY_SP {
+ let after_fields_span = pat_span.with_hi(pat_span.hi() - BytePos(1)).shrink_to_hi();
+ let all_fields_span = match subpats {
+ [] => after_fields_span,
+ [field] => field.span,
+ [first, .., last] => first.span.to(last.span),
+ };
+
+ // Check if all the fields in the pattern are wildcards.
+ let all_wildcards = subpats.iter().all(|pat| matches!(pat.kind, PatKind::Wild));
+ let first_tail_wildcard =
+ subpats.iter().enumerate().fold(None, |acc, (pos, pat)| match (acc, &pat.kind) {
+ (None, PatKind::Wild) => Some(pos),
+ (Some(_), PatKind::Wild) => acc,
+ _ => None,
+ });
+ let tail_span = match first_tail_wildcard {
+ None => after_fields_span,
+ Some(0) => subpats[0].span.to(after_fields_span),
+ Some(pos) => subpats[pos - 1].span.shrink_to_hi().to(after_fields_span),
+ };
+
+ // FIXME: heuristic-based suggestion to check current types for where to add `_`.
+ let mut wildcard_sugg = vec!["_"; fields.len() - subpats.len()].join(", ");
+ if !subpats.is_empty() {
+ wildcard_sugg = String::from(", ") + &wildcard_sugg;
+ }
+
+ err.span_suggestion_verbose(
+ after_fields_span,
+ "use `_` to explicitly ignore each field",
+ wildcard_sugg,
+ Applicability::MaybeIncorrect,
+ );
+
+ // Only suggest `..` if more than one field is missing
+ // or the pattern consists of all wildcards.
+ if fields.len() - subpats.len() > 1 || all_wildcards {
+ if subpats.is_empty() || all_wildcards {
+ err.span_suggestion_verbose(
+ all_fields_span,
+ "use `..` to ignore all fields",
+ "..",
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_suggestion_verbose(
+ tail_span,
+ "use `..` to ignore the rest of the fields",
+ ", ..",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
+ err.emit();
+ }
+
+ fn check_pat_tuple(
+ &self,
+ span: Span,
+ elements: &'tcx [Pat<'tcx>],
+ ddpos: hir::DotDotPos,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let mut expected_len = elements.len();
+ if ddpos.as_opt_usize().is_some() {
+ // Require known type only when `..` is present.
+ if let ty::Tuple(tys) = self.structurally_resolved_type(span, expected).kind() {
+ expected_len = tys.len();
+ }
+ }
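+ // Illustrative example (not part of the original change): for
+ // `let (first, ..) = (1u8, 2u8, 3u8);` the `..` forces the expected type to
+ // be resolved now, so `expected_len` becomes 3 rather than the written
+ // element count of 1.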
+ let max_len = cmp::max(expected_len, elements.len());
+
+ let element_tys_iter = (0..max_len).map(|_| {
+ self.next_ty_var(
+ // FIXME: `MiscVariable` for now -- obtaining the span and name information
+ // from all tuple elements isn't trivial.
+ TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span },
+ )
+ });
+ let element_tys = tcx.mk_type_list(element_tys_iter);
+ let pat_ty = tcx.mk_ty(ty::Tuple(element_tys));
+ if let Some(mut err) = self.demand_eqtype_pat_diag(span, expected, pat_ty, ti) {
+ err.emit();
+ // Walk subpatterns with an expected type of `err` in this case to silence
+ // further errors being emitted when using the bindings. #50333
+ let element_tys_iter = (0..max_len).map(|_| tcx.ty_error());
+ for (_, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) {
+ self.check_pat(elem, tcx.ty_error(), def_bm, ti);
+ }
+ tcx.mk_tup(element_tys_iter)
+ } else {
+ for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) {
+ self.check_pat(elem, element_tys[i], def_bm, ti);
+ }
+ pat_ty
+ }
+ }
+
+ fn check_struct_pat_fields(
+ &self,
+ adt_ty: Ty<'tcx>,
+ pat: &'tcx Pat<'tcx>,
+ variant: &'tcx ty::VariantDef,
+ fields: &'tcx [hir::PatField<'tcx>],
+ has_rest_pat: bool,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> bool {
+ let tcx = self.tcx;
+
+ let ty::Adt(adt, substs) = adt_ty.kind() else {
+ span_bug!(pat.span, "struct pattern is not an ADT");
+ };
+
+ // Index the struct fields' types.
+ let field_map = variant
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(i, field)| (field.ident(self.tcx).normalize_to_macros_2_0(), (i, field)))
+ .collect::<FxHashMap<_, _>>();
+
+ // Keep track of which fields have already appeared in the pattern.
+ let mut used_fields = FxHashMap::default();
+ let mut no_field_errors = true;
+
+ let mut inexistent_fields = vec![];
+ // Typecheck each field.
+ for field in fields {
+ let span = field.span;
+ let ident = tcx.adjust_ident(field.ident, variant.def_id);
+ let field_ty = match used_fields.entry(ident) {
+ Occupied(occupied) => {
+ self.error_field_already_bound(span, field.ident, *occupied.get());
+ no_field_errors = false;
+ tcx.ty_error()
+ }
+ Vacant(vacant) => {
+ vacant.insert(span);
+ field_map
+ .get(&ident)
+ .map(|(i, f)| {
+ self.write_field_index(field.hir_id, *i);
+ self.tcx.check_stability(f.did, Some(pat.hir_id), span, None);
+ self.field_ty(span, f, substs)
+ })
+ .unwrap_or_else(|| {
+ inexistent_fields.push(field);
+ no_field_errors = false;
+ tcx.ty_error()
+ })
+ }
+ };
+
+ self.check_pat(field.pat, field_ty, def_bm, ti);
+ }
+
+ let mut unmentioned_fields = variant
+ .fields
+ .iter()
+ .map(|field| (field, field.ident(self.tcx).normalize_to_macros_2_0()))
+ .filter(|(_, ident)| !used_fields.contains_key(ident))
+ .collect::<Vec<_>>();
+
+ let inexistent_fields_err = if !(inexistent_fields.is_empty() || variant.is_recovered())
+ && !inexistent_fields.iter().any(|field| field.ident.name == kw::Underscore)
+ {
+ Some(self.error_inexistent_fields(
+ adt.variant_descr(),
+ &inexistent_fields,
+ &mut unmentioned_fields,
+ variant,
+ substs,
+ ))
+ } else {
+ None
+ };
+
+ // Require `..` if struct has non_exhaustive attribute.
+ let non_exhaustive = variant.is_field_list_non_exhaustive() && !adt.did().is_local();
+ if non_exhaustive && !has_rest_pat {
+ self.error_foreign_non_exhaustive_spat(pat, adt.variant_descr(), fields.is_empty());
+ }
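+ // Illustrative example (hypothetical crate and type names): matching a
+ // `#[non_exhaustive]` struct from another crate requires the rest pattern,
+ // e.g. `let other_crate::Config { verbose, .. } = cfg;` is accepted, while
+ // omitting the `..` triggers the E0638 error above.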
+
+ let mut unmentioned_err = None;
+ // Report an error if an incorrect number of fields was specified.
+ if adt.is_union() {
+ if fields.len() != 1 {
+ tcx.sess
+ .struct_span_err(pat.span, "union patterns should have exactly one field")
+ .emit();
+ }
+ if has_rest_pat {
+ tcx.sess.struct_span_err(pat.span, "`..` cannot be used in union patterns").emit();
+ }
+ } else if !unmentioned_fields.is_empty() {
+ let accessible_unmentioned_fields: Vec<_> = unmentioned_fields
+ .iter()
+ .copied()
+ .filter(|(field, _)| {
+ field.vis.is_accessible_from(tcx.parent_module(pat.hir_id), tcx)
+ && !matches!(
+ tcx.eval_stability(field.did, None, DUMMY_SP, None),
+ EvalResult::Deny { .. }
+ )
+ // We only want to report the error if it is hidden and not local
+ && !(tcx.is_doc_hidden(field.did) && !field.did.is_local())
+ })
+ .collect();
+
+ if !has_rest_pat {
+ if accessible_unmentioned_fields.is_empty() {
+ unmentioned_err = Some(self.error_no_accessible_fields(pat, fields));
+ } else {
+ unmentioned_err = Some(self.error_unmentioned_fields(
+ pat,
+ &accessible_unmentioned_fields,
+ accessible_unmentioned_fields.len() != unmentioned_fields.len(),
+ fields,
+ ));
+ }
+ } else if non_exhaustive && !accessible_unmentioned_fields.is_empty() {
+ self.lint_non_exhaustive_omitted_patterns(
+ pat,
+ &accessible_unmentioned_fields,
+ adt_ty,
+ )
+ }
+ }
+ match (inexistent_fields_err, unmentioned_err) {
+ (Some(mut i), Some(mut u)) => {
+ if let Some(mut e) = self.error_tuple_variant_as_struct_pat(pat, fields, variant) {
+ // We don't want to show the nonexistent fields error when this was
+ // `Foo { a, b }` when it should have been `Foo(a, b)`.
+ i.delay_as_bug();
+ u.delay_as_bug();
+ e.emit();
+ } else {
+ i.emit();
+ u.emit();
+ }
+ }
+ (None, Some(mut u)) => {
+ if let Some(mut e) = self.error_tuple_variant_as_struct_pat(pat, fields, variant) {
+ u.delay_as_bug();
+ e.emit();
+ } else {
+ u.emit();
+ }
+ }
+ (Some(mut err), None) => {
+ err.emit();
+ }
+ (None, None) if let Some(mut err) =
+ self.error_tuple_variant_index_shorthand(variant, pat, fields) =>
+ {
+ err.emit();
+ }
+ (None, None) => {}
+ }
+ no_field_errors
+ }
+
+ fn error_tuple_variant_index_shorthand(
+ &self,
+ variant: &VariantDef,
+ pat: &'_ Pat<'_>,
+ fields: &[hir::PatField<'_>],
+ ) -> Option<DiagnosticBuilder<'_, ErrorGuaranteed>> {
+ // if this is a tuple struct, then all field names will be numbers
+ // so if any fields in a struct pattern use shorthand syntax, they will
+ // be invalid identifiers (for example, Foo { 0, 1 }).
+ if let (CtorKind::Fn, PatKind::Struct(qpath, field_patterns, ..)) =
+ (variant.ctor_kind, &pat.kind)
+ {
+ let has_shorthand_field_name = field_patterns.iter().any(|field| field.is_shorthand);
+ if has_shorthand_field_name {
+ let path = rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| {
+ s.print_qpath(qpath, false)
+ });
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ pat.span,
+ E0769,
+ "tuple variant `{path}` written as struct variant",
+ );
+ err.span_suggestion_verbose(
+ qpath.span().shrink_to_hi().to(pat.span.shrink_to_hi()),
+ "use the tuple variant pattern syntax instead",
+ format!("({})", self.get_suggested_tuple_struct_pattern(fields, variant)),
+ Applicability::MaybeIncorrect,
+ );
+ return Some(err);
+ }
+ }
+ None
+ }
+
+ fn error_foreign_non_exhaustive_spat(&self, pat: &Pat<'_>, descr: &str, no_fields: bool) {
+ let sess = self.tcx.sess;
+ let sm = sess.source_map();
+ let sp_brace = sm.end_point(pat.span);
+ let sp_comma = sm.end_point(pat.span.with_hi(sp_brace.hi()));
+ let sugg = if no_fields || sp_brace != sp_comma { ".. }" } else { ", .. }" };
+
+ let mut err = struct_span_err!(
+ sess,
+ pat.span,
+ E0638,
+ "`..` required with {descr} marked as non-exhaustive",
+ );
+ err.span_suggestion_verbose(
+ sp_comma,
+ "add `..` at the end of the field list to ignore all other fields",
+ sugg,
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ }
+
+ fn error_field_already_bound(&self, span: Span, ident: Ident, other_field: Span) {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0025,
+ "field `{}` bound multiple times in the pattern",
+ ident
+ )
+ .span_label(span, format!("multiple uses of `{ident}` in pattern"))
+ .span_label(other_field, format!("first use of `{ident}`"))
+ .emit();
+ }
+
+ fn error_inexistent_fields(
+ &self,
+ kind_name: &str,
+ inexistent_fields: &[&hir::PatField<'tcx>],
+ unmentioned_fields: &mut Vec<(&'tcx ty::FieldDef, Ident)>,
+ variant: &ty::VariantDef,
+ substs: &'tcx ty::List<ty::subst::GenericArg<'tcx>>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let tcx = self.tcx;
+ let (field_names, t, plural) = if inexistent_fields.len() == 1 {
+ (format!("a field named `{}`", inexistent_fields[0].ident), "this", "")
+ } else {
+ (
+ format!(
+ "fields named {}",
+ inexistent_fields
+ .iter()
+ .map(|field| format!("`{}`", field.ident))
+ .collect::<Vec<String>>()
+ .join(", ")
+ ),
+ "these",
+ "s",
+ )
+ };
+ let spans = inexistent_fields.iter().map(|field| field.ident.span).collect::<Vec<_>>();
+ let mut err = struct_span_err!(
+ tcx.sess,
+ spans,
+ E0026,
+ "{} `{}` does not have {}",
+ kind_name,
+ tcx.def_path_str(variant.def_id),
+ field_names
+ );
+ if let Some(pat_field) = inexistent_fields.last() {
+ err.span_label(
+ pat_field.ident.span,
+ format!(
+ "{} `{}` does not have {} field{}",
+ kind_name,
+ tcx.def_path_str(variant.def_id),
+ t,
+ plural
+ ),
+ );
+
+ if unmentioned_fields.len() == 1 {
+ let input =
+ unmentioned_fields.iter().map(|(_, field)| field.name).collect::<Vec<_>>();
+ let suggested_name = find_best_match_for_name(&input, pat_field.ident.name, None);
+ if let Some(suggested_name) = suggested_name {
+ err.span_suggestion(
+ pat_field.ident.span,
+ "a field with a similar name exists",
+ suggested_name,
+ Applicability::MaybeIncorrect,
+ );
+
+ // When we have a tuple struct used with struct syntax we don't want to suggest
+ // using the (valid) struct syntax with numeric field names. Instead we want to
+ // suggest the expected syntax. We infer that this is the case by parsing the
+ // `Ident` into an unsigned integer. The suggestion will be emitted elsewhere in
+ // `smart_resolve_context_dependent_help`.
+ if suggested_name.to_ident_string().parse::<usize>().is_err() {
+ // We don't want to throw `E0027` in case we have thrown `E0026` for them.
+ unmentioned_fields.retain(|&(_, x)| x.name != suggested_name);
+ }
+ } else if inexistent_fields.len() == 1 {
+ match pat_field.pat.kind {
+ PatKind::Lit(expr)
+ if !self.can_coerce(
+ self.typeck_results.borrow().expr_ty(expr),
+ self.field_ty(
+ unmentioned_fields[0].1.span,
+ unmentioned_fields[0].0,
+ substs,
+ ),
+ ) => {}
+ _ => {
+ let unmentioned_field = unmentioned_fields[0].1.name;
+ err.span_suggestion_short(
+ pat_field.ident.span,
+ &format!(
+ "`{}` has a field named `{}`",
+ tcx.def_path_str(variant.def_id),
+ unmentioned_field
+ ),
+ unmentioned_field.to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ }
+ }
+ if tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "This error indicates that a struct pattern attempted to \
+ extract a non-existent field from a struct. Struct fields \
+ are identified by the name used before the colon : so struct \
+ patterns should resemble the declaration of the struct type \
+ being matched.\n\n\
+ If you are using shorthand field patterns but want to refer \
+ to the struct field by a different name, you should rename \
+ it explicitly.",
+ );
+ }
+ err
+ }
+
+ fn error_tuple_variant_as_struct_pat(
+ &self,
+ pat: &Pat<'_>,
+ fields: &'tcx [hir::PatField<'tcx>],
+ variant: &ty::VariantDef,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ if let (CtorKind::Fn, PatKind::Struct(qpath, ..)) = (variant.ctor_kind, &pat.kind) {
+ let path = rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| {
+ s.print_qpath(qpath, false)
+ });
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ pat.span,
+ E0769,
+ "tuple variant `{}` written as struct variant",
+ path
+ );
+ let (sugg, appl) = if fields.len() == variant.fields.len() {
+ (
+ self.get_suggested_tuple_struct_pattern(fields, variant),
+ Applicability::MachineApplicable,
+ )
+ } else {
+ (
+ variant.fields.iter().map(|_| "_").collect::<Vec<&str>>().join(", "),
+ Applicability::MaybeIncorrect,
+ )
+ };
+ err.span_suggestion_verbose(
+ qpath.span().shrink_to_hi().to(pat.span.shrink_to_hi()),
+ "use the tuple variant pattern syntax instead",
+ format!("({})", sugg),
+ appl,
+ );
+ return Some(err);
+ }
+ None
+ }
+
+ fn get_suggested_tuple_struct_pattern(
+ &self,
+ fields: &[hir::PatField<'_>],
+ variant: &VariantDef,
+ ) -> String {
+ let variant_field_idents =
+ variant.fields.iter().map(|f| f.ident(self.tcx)).collect::<Vec<Ident>>();
+ fields
+ .iter()
+ .map(|field| {
+ match self.tcx.sess.source_map().span_to_snippet(field.pat.span) {
+ Ok(f) => {
+ // Field names are numbers, but numbers
+ // are not valid identifiers
+ if variant_field_idents.contains(&field.ident) {
+ String::from("_")
+ } else {
+ f
+ }
+ }
+ Err(_) => rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| {
+ s.print_pat(field.pat)
+ }),
+ }
+ })
+ .collect::<Vec<String>>()
+ .join(", ")
+ }
+
+ /// Returns a diagnostic reporting a struct pattern which is missing an `..` due to
+ /// inaccessible fields.
+ ///
+ /// ```text
+ /// error: pattern requires `..` due to inaccessible fields
+ /// --> src/main.rs:10:9
+ /// |
+ /// LL | let foo::Foo {} = foo::Foo::default();
+ /// | ^^^^^^^^^^^
+ /// |
+ /// help: add a `..`
+ /// |
+ /// LL | let foo::Foo { .. } = foo::Foo::default();
+ /// | ^^^^^^
+ /// ```
+ fn error_no_accessible_fields(
+ &self,
+ pat: &Pat<'_>,
+ fields: &'tcx [hir::PatField<'tcx>],
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = self
+ .tcx
+ .sess
+ .struct_span_err(pat.span, "pattern requires `..` due to inaccessible fields");
+
+ if let Some(field) = fields.last() {
+ err.span_suggestion_verbose(
+ field.span.shrink_to_hi(),
+ "ignore the inaccessible and unused fields",
+ ", ..",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ let qpath_span = if let PatKind::Struct(qpath, ..) = &pat.kind {
+ qpath.span()
+ } else {
+ bug!("`error_no_accessible_fields` called on non-struct pattern");
+ };
+
+ // Shrink the span to exclude the `foo::Foo` in `foo::Foo { }`.
+ let span = pat.span.with_lo(qpath_span.shrink_to_hi().hi());
+ err.span_suggestion_verbose(
+ span,
+ "ignore the inaccessible and unused fields",
+ " { .. }",
+ Applicability::MachineApplicable,
+ );
+ }
+ err
+ }
+
+ /// Report that a pattern for a `#[non_exhaustive]` struct marked with `non_exhaustive_omitted_patterns`
+ /// is not exhaustive enough.
+ ///
+ /// Nb: the partner lint for enums lives in `compiler/rustc_mir_build/src/thir/pattern/usefulness.rs`.
+ fn lint_non_exhaustive_omitted_patterns(
+ &self,
+ pat: &Pat<'_>,
+ unmentioned_fields: &[(&ty::FieldDef, Ident)],
+ ty: Ty<'tcx>,
+ ) {
+ fn joined_uncovered_patterns(witnesses: &[&Ident]) -> String {
+ const LIMIT: usize = 3;
+ match witnesses {
+ [] => bug!(),
+ [witness] => format!("`{}`", witness),
+ [head @ .., tail] if head.len() < LIMIT => {
+ let head: Vec<_> = head.iter().map(<_>::to_string).collect();
+ format!("`{}` and `{}`", head.join("`, `"), tail)
+ }
+ _ => {
+ let (head, tail) = witnesses.split_at(LIMIT);
+ let head: Vec<_> = head.iter().map(<_>::to_string).collect();
+ format!("`{}` and {} more", head.join("`, `"), tail.len())
+ }
+ }
+ }
+ let joined_patterns = joined_uncovered_patterns(
+ &unmentioned_fields.iter().map(|(_, i)| i).collect::<Vec<_>>(),
+ );
+
+ self.tcx.struct_span_lint_hir(NON_EXHAUSTIVE_OMITTED_PATTERNS, pat.hir_id, pat.span, "some fields are not explicitly listed", |lint| {
+ lint.span_label(pat.span, format!("field{} {} not listed", rustc_errors::pluralize!(unmentioned_fields.len()), joined_patterns));
+ lint.help(
+ "ensure that all fields are mentioned explicitly by adding the suggested fields",
+ );
+ lint.note(&format!(
+ "the pattern is of type `{}` and the `non_exhaustive_omitted_patterns` attribute was found",
+ ty,
+ ));
+
+ lint
+ });
+ }
+
+ /// Returns a diagnostic reporting a struct pattern which does not mention some fields.
+ ///
+ /// ```text
+ /// error[E0027]: pattern does not mention field `bar`
+ /// --> src/main.rs:15:9
+ /// |
+ /// LL | let foo::Foo {} = foo::Foo::new();
+ /// | ^^^^^^^^^^^ missing field `bar`
+ /// ```
+ fn error_unmentioned_fields(
+ &self,
+ pat: &Pat<'_>,
+ unmentioned_fields: &[(&ty::FieldDef, Ident)],
+ have_inaccessible_fields: bool,
+ fields: &'tcx [hir::PatField<'tcx>],
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let inaccessible = if have_inaccessible_fields { " and inaccessible fields" } else { "" };
+ let field_names = if unmentioned_fields.len() == 1 {
+ format!("field `{}`{}", unmentioned_fields[0].1, inaccessible)
+ } else {
+ let fields = unmentioned_fields
+ .iter()
+ .map(|(_, name)| format!("`{}`", name))
+ .collect::<Vec<String>>()
+ .join(", ");
+ format!("fields {}{}", fields, inaccessible)
+ };
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ pat.span,
+ E0027,
+ "pattern does not mention {}",
+ field_names
+ );
+ err.span_label(pat.span, format!("missing {}", field_names));
+ let len = unmentioned_fields.len();
+ let (prefix, postfix, sp) = match fields {
+ [] => match &pat.kind {
+ PatKind::Struct(path, [], false) => {
+ (" { ", " }", path.span().shrink_to_hi().until(pat.span.shrink_to_hi()))
+ }
+ _ => return err,
+ },
+ [.., field] => {
+ // Account for last field having a trailing comma or parse recovery at the tail of
+ // the pattern to avoid invalid suggestion (#78511).
+ let tail = field.span.shrink_to_hi().with_hi(pat.span.hi());
+ match &pat.kind {
+ PatKind::Struct(..) => (", ", " }", tail),
+ _ => return err,
+ }
+ }
+ };
+ err.span_suggestion(
+ sp,
+ &format!(
+ "include the missing field{} in the pattern{}",
+ pluralize!(len),
+ if have_inaccessible_fields { " and ignore the inaccessible fields" } else { "" }
+ ),
+ format!(
+ "{}{}{}{}",
+ prefix,
+ unmentioned_fields
+ .iter()
+ .map(|(_, name)| name.to_string())
+ .collect::<Vec<_>>()
+ .join(", "),
+ if have_inaccessible_fields { ", .." } else { "" },
+ postfix,
+ ),
+ Applicability::MachineApplicable,
+ );
+ err.span_suggestion(
+ sp,
+ &format!(
+ "if you don't care about {these} missing field{s}, you can explicitly ignore {them}",
+ these = pluralize!("this", len),
+ s = pluralize!(len),
+ them = if len == 1 { "it" } else { "them" },
+ ),
+ format!("{}..{}", prefix, postfix),
+ Applicability::MachineApplicable,
+ );
+ err
+ }
+
+ fn check_pat_box(
+ &self,
+ span: Span,
+ inner: &'tcx Pat<'tcx>,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let (box_ty, inner_ty) = if self.check_dereferenceable(span, expected, inner) {
+ // Here, `demand::subtype` is good enough, but I don't
+ // think any errors can be introduced by using `demand::eqtype`.
+ let inner_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: inner.span,
+ });
+ let box_ty = tcx.mk_box(inner_ty);
+ self.demand_eqtype_pat(span, expected, box_ty, ti);
+ (box_ty, inner_ty)
+ } else {
+ let err = tcx.ty_error();
+ (err, err)
+ };
+ self.check_pat(inner, inner_ty, def_bm, ti);
+ box_ty
+ }
+
+ // Precondition: Pat is Ref(inner)
+ fn check_pat_ref(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ inner: &'tcx Pat<'tcx>,
+ mutbl: hir::Mutability,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let expected = self.shallow_resolve(expected);
+ let (rptr_ty, inner_ty) = if self.check_dereferenceable(pat.span, expected, inner) {
+ // `demand::subtype` would be good enough, but using `eqtype` turns
+ // out to be equally general. See (note_1) for details.
+
+ // Take region, inner-type from expected type if we can,
+ // to avoid creating needless variables. This also helps with
+ // the bad interactions of the given hack detailed in (note_1).
+ debug!("check_pat_ref: expected={:?}", expected);
+ match *expected.kind() {
+ ty::Ref(_, r_ty, r_mutbl) if r_mutbl == mutbl => (expected, r_ty),
+ _ => {
+ let inner_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: inner.span,
+ });
+ let rptr_ty = self.new_ref_ty(pat.span, mutbl, inner_ty);
+ debug!("check_pat_ref: demanding {:?} = {:?}", expected, rptr_ty);
+ let err = self.demand_eqtype_pat_diag(pat.span, expected, rptr_ty, ti);
+
+ // Look for a case like `fn foo(&foo: u32)` and suggest
+ // `fn foo(foo: &u32)`
+ if let Some(mut err) = err {
+ self.borrow_pat_suggestion(&mut err, pat);
+ err.emit();
+ }
+ (rptr_ty, inner_ty)
+ }
+ }
+ } else {
+ let err = tcx.ty_error();
+ (err, err)
+ };
+ self.check_pat(inner, inner_ty, def_bm, ti);
+ rptr_ty
+ }
+
+ /// Create a reference type with a fresh region variable.
+ fn new_ref_ty(&self, span: Span, mutbl: hir::Mutability, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let region = self.next_region_var(infer::PatternRegion(span));
+ let mt = ty::TypeAndMut { ty, mutbl };
+ self.tcx.mk_ref(region, mt)
+ }
+
+ /// Type check a slice pattern.
+ ///
+ /// Syntactically, these look like `[pat_0, ..., pat_n]`.
+ /// Semantically, we are type checking a pattern with structure:
+ /// ```ignore (not-rust)
+ /// [before_0, ..., before_n, (slice, after_0, ... after_n)?]
+ /// ```
+ /// The type of `slice`, if it is present, depends on the `expected` type.
+ /// If `slice` is missing, then so is `after_i`.
+ /// If `slice` is present, it can still represent 0 elements.
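+ ///
+ /// For example (an illustrative sketch, not part of the original patch):
+ /// ```ignore (illustrative)
+ /// let arr = [1u8, 2, 3, 4, 5];
+ /// let [first, rest @ .., last] = arr;
+ /// // `before` holds `first`, `slice` is the `rest @ ..` subpattern,
+ /// // and `after` holds `last`; here `rest` ends up typed as `[u8; 3]`.
+ /// ```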
+ fn check_pat_slice(
+ &self,
+ span: Span,
+ before: &'tcx [Pat<'tcx>],
+ slice: Option<&'tcx Pat<'tcx>>,
+ after: &'tcx [Pat<'tcx>],
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let expected = self.structurally_resolved_type(span, expected);
+ let (element_ty, opt_slice_ty, inferred) = match *expected.kind() {
+ // An array, so we might have something like `let [a, b, c] = [0, 1, 2];`.
+ ty::Array(element_ty, len) => {
+ let min = before.len() as u64 + after.len() as u64;
+ let (opt_slice_ty, expected) =
+ self.check_array_pat_len(span, element_ty, expected, slice, len, min);
+ // `opt_slice_ty.is_none()` => `slice.is_none()`.
+ // Note, though, that opt_slice_ty could be `Some(error_ty)`.
+ assert!(opt_slice_ty.is_some() || slice.is_none());
+ (element_ty, opt_slice_ty, expected)
+ }
+ ty::Slice(element_ty) => (element_ty, Some(expected), expected),
+ // The expected type must be an array or slice, but was neither, so error.
+ _ => {
+ if !expected.references_error() {
+ self.error_expected_array_or_slice(span, expected, ti);
+ }
+ let err = self.tcx.ty_error();
+ (err, Some(err), err)
+ }
+ };
+
+ // Type check all the patterns before `slice`.
+ for elt in before {
+ self.check_pat(elt, element_ty, def_bm, ti);
+ }
+ // Type check the `slice`, if present, against its expected type.
+ if let Some(slice) = slice {
+ self.check_pat(slice, opt_slice_ty.unwrap(), def_bm, ti);
+ }
+ // Type check the elements after `slice`, if present.
+ for elt in after {
+ self.check_pat(elt, element_ty, def_bm, ti);
+ }
+ inferred
+ }
+
+ /// Type check the length of an array pattern.
+ ///
+ /// Returns both the type of the variable length pattern (or `None`), and the potentially
+ /// inferred array type. We only return `None` for the slice type if `slice.is_none()`.
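+ ///
+ /// For example (illustrative): checking the pattern `[a, b, ..]` against
+ /// `[i32; 5]` returns `(Some([i32; 3]), [i32; 5])`, while checking `[a, b]`
+ /// (no rest pattern) against an array whose length cannot yet be evaluated
+ /// constrains that array type to `[i32; 2]`.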
+ fn check_array_pat_len(
+ &self,
+ span: Span,
+ element_ty: Ty<'tcx>,
+ arr_ty: Ty<'tcx>,
+ slice: Option<&'tcx Pat<'tcx>>,
+ len: ty::Const<'tcx>,
+ min_len: u64,
+ ) -> (Option<Ty<'tcx>>, Ty<'tcx>) {
+ if let Some(len) = len.try_eval_usize(self.tcx, self.param_env) {
+ // Now we know the length...
+ if slice.is_none() {
+ // ...and since there is no variable-length pattern,
+ // we require an exact match between the number of elements
+ // in the array pattern and as provided by the matched type.
+ if min_len == len {
+ return (None, arr_ty);
+ }
+
+ self.error_scrutinee_inconsistent_length(span, min_len, len);
+ } else if let Some(pat_len) = len.checked_sub(min_len) {
+ // The variable-length pattern was there,
+ // so it has an array type with the remaining elements left as its size...
+ return (Some(self.tcx.mk_array(element_ty, pat_len)), arr_ty);
+ } else {
+ // ...however, in this case, there were no remaining elements.
+ // That is, the slice pattern requires more than the array type offers.
+ self.error_scrutinee_with_rest_inconsistent_length(span, min_len, len);
+ }
+ } else if slice.is_none() {
+ // We have a pattern with a fixed length,
+ // which we can use to infer the length of the array.
+ let updated_arr_ty = self.tcx.mk_array(element_ty, min_len);
+ self.demand_eqtype(span, updated_arr_ty, arr_ty);
+ return (None, updated_arr_ty);
+ } else {
+ // We have a variable-length pattern and don't know the array length.
+ // This happens if we have e.g.,
+ // `let [a, b, ..] = arr` where `arr: [T; N]` where `const N: usize`.
+ self.error_scrutinee_unfixed_length(span);
+ }
+
+ // If we get here, we must have emitted an error.
+ (Some(self.tcx.ty_error()), arr_ty)
+ }
+
+ fn error_scrutinee_inconsistent_length(&self, span: Span, min_len: u64, size: u64) {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0527,
+ "pattern requires {} element{} but array has {}",
+ min_len,
+ pluralize!(min_len),
+ size,
+ )
+ .span_label(span, format!("expected {} element{}", size, pluralize!(size)))
+ .emit();
+ }
+
+ fn error_scrutinee_with_rest_inconsistent_length(&self, span: Span, min_len: u64, size: u64) {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0528,
+ "pattern requires at least {} element{} but array has {}",
+ min_len,
+ pluralize!(min_len),
+ size,
+ )
+ .span_label(
+ span,
+ format!("pattern cannot match array of {} element{}", size, pluralize!(size),),
+ )
+ .emit();
+ }
+
+ fn error_scrutinee_unfixed_length(&self, span: Span) {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0730,
+ "cannot pattern-match on an array without a fixed length",
+ )
+ .emit();
+ }
+
+ fn error_expected_array_or_slice(&self, span: Span, expected_ty: Ty<'tcx>, ti: TopInfo<'tcx>) {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0529,
+ "expected an array or slice, found `{expected_ty}`"
+ );
+ if let ty::Ref(_, ty, _) = expected_ty.kind()
+ && let ty::Array(..) | ty::Slice(..) = ty.kind()
+ {
+ err.help("the semantics of slice patterns changed recently; see issue #62254");
+ } else if Autoderef::new(&self.infcx, self.param_env, self.body_id, span, expected_ty, span)
+ .any(|(ty, _)| matches!(ty.kind(), ty::Slice(..) | ty::Array(..)))
+ && let (Some(span), true) = (ti.span, ti.origin_expr)
+ && let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span)
+ {
+ let ty = self.resolve_vars_if_possible(ti.expected);
+ let is_slice_or_array_or_vector = self.is_slice_or_array_or_vector(&mut err, snippet.clone(), ty);
+ match is_slice_or_array_or_vector.1.kind() {
+ ty::Adt(adt_def, _)
+ if self.tcx.is_diagnostic_item(sym::Option, adt_def.did())
+ || self.tcx.is_diagnostic_item(sym::Result, adt_def.did()) =>
+ {
+ // Slicing won't work here, but `.as_deref()` might (issue #91328).
+ err.span_suggestion(
+ span,
+ "consider using `as_deref` here",
+ format!("{snippet}.as_deref()"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => ()
+ }
+ if is_slice_or_array_or_vector.0 {
+ err.span_suggestion(
+ span,
+ "consider slicing here",
+ format!("{snippet}[..]"),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ err.span_label(span, format!("pattern cannot match with input type `{expected_ty}`"));
+ err.emit();
+ }
+
+ fn is_slice_or_array_or_vector(
+ &self,
+ err: &mut Diagnostic,
+ snippet: String,
+ ty: Ty<'tcx>,
+ ) -> (bool, Ty<'tcx>) {
+ match ty.kind() {
+ ty::Adt(adt_def, _) if self.tcx.is_diagnostic_item(sym::Vec, adt_def.did()) => {
+ (true, ty)
+ }
+ ty::Ref(_, ty, _) => self.is_slice_or_array_or_vector(err, snippet, *ty),
+ ty::Slice(..) | ty::Array(..) => (true, ty),
+ _ => (false, ty),
+ }
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/place_op.rs b/compiler/rustc_hir_typeck/src/place_op.rs
new file mode 100644
index 000000000..ba8cf6926
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/place_op.rs
@@ -0,0 +1,451 @@
+use crate::method::MethodCallee;
+use crate::{has_expected_num_generic_args, FnCtxt, PlaceOp};
+use rustc_ast as ast;
+use rustc_errors::Applicability;
+use rustc_hir as hir;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::InferOk;
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, OverloadedDeref, PointerCast};
+use rustc_middle::ty::adjustment::{AllowTwoPhase, AutoBorrow, AutoBorrowMutability};
+use rustc_middle::ty::{self, Ty};
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+use rustc_trait_selection::autoderef::Autoderef;
+use std::slice;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Type-check `*oprnd_expr` with `oprnd_expr` type-checked already.
+ pub(super) fn lookup_derefing(
+ &self,
+ expr: &hir::Expr<'_>,
+ oprnd_expr: &'tcx hir::Expr<'tcx>,
+ oprnd_ty: Ty<'tcx>,
+ ) -> Option<Ty<'tcx>> {
+ if let Some(mt) = oprnd_ty.builtin_deref(true) {
+ return Some(mt.ty);
+ }
+
+ let ok = self.try_overloaded_deref(expr.span, oprnd_ty)?;
+ let method = self.register_infer_ok_obligations(ok);
+ if let ty::Ref(region, _, hir::Mutability::Not) = method.sig.inputs()[0].kind() {
+ self.apply_adjustments(
+ oprnd_expr,
+ vec![Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*region, AutoBorrowMutability::Not)),
+ target: method.sig.inputs()[0],
+ }],
+ );
+ } else {
+ span_bug!(expr.span, "input to deref is not a ref?");
+ }
+ let ty = self.make_overloaded_place_return_type(method).ty;
+ self.write_method_call(expr.hir_id, method);
+ Some(ty)
+ }
+
+ /// Type-check `*base_expr[index_expr]` with `base_expr` and `index_expr` type-checked already.
+ pub(super) fn lookup_indexing(
+ &self,
+ expr: &hir::Expr<'_>,
+ base_expr: &'tcx hir::Expr<'tcx>,
+ base_ty: Ty<'tcx>,
+ index_expr: &'tcx hir::Expr<'tcx>,
+ idx_ty: Ty<'tcx>,
+ ) -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)> {
+ // FIXME(#18741) -- this is almost but not quite the same as the
+ // autoderef that normal method probing does. They could likely be
+ // consolidated.
+
+ let mut autoderef = self.autoderef(base_expr.span, base_ty);
+ let mut result = None;
+ while result.is_none() && autoderef.next().is_some() {
+ result = self.try_index_step(expr, base_expr, &autoderef, idx_ty, index_expr);
+ }
+ self.register_predicates(autoderef.into_obligations());
+ result
+ }
+
+ fn negative_index(
+ &self,
+ ty: Ty<'tcx>,
+ span: Span,
+ base_expr: &hir::Expr<'_>,
+ ) -> Option<(Ty<'tcx>, Ty<'tcx>)> {
+ let ty = self.resolve_vars_if_possible(ty);
+ let mut err = self.tcx.sess.struct_span_err(
+ span,
+ &format!("negative integers cannot be used to index on a `{ty}`"),
+ );
+ err.span_label(span, &format!("cannot use a negative integer for indexing on `{ty}`"));
+ if let (hir::ExprKind::Path(..), Ok(snippet)) =
+ (&base_expr.kind, self.tcx.sess.source_map().span_to_snippet(base_expr.span))
+ {
+ // `foo[-1]` to `foo[foo.len() - 1]`
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ &format!(
+ "to access an element starting from the end of the `{ty}`, compute the index",
+ ),
+ format!("{snippet}.len() "),
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ Some((self.tcx.ty_error(), self.tcx.ty_error()))
+ }
+
+ /// To type-check `base_expr[index_expr]`, we progressively autoderef
+ /// (and otherwise adjust) `base_expr`, looking for a type which either
+ /// supports builtin indexing or overloaded indexing.
+ /// This loop implements one step in that search; the autoderef loop
+ /// is implemented by `lookup_indexing`.
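+ ///
+ /// For example (illustrative): for `v[i]` with `v: Vec<u8>` and `i: usize`,
+ /// the overloaded lookup finds `<Vec<u8> as Index<usize>>::index` and the
+ /// returned element type resolves to `u8`.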
+ fn try_index_step(
+ &self,
+ expr: &hir::Expr<'_>,
+ base_expr: &hir::Expr<'_>,
+ autoderef: &Autoderef<'a, 'tcx>,
+ index_ty: Ty<'tcx>,
+ index_expr: &hir::Expr<'_>,
+ ) -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)> {
+ let adjusted_ty =
+ self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false));
+ debug!(
+ "try_index_step(expr={:?}, base_expr={:?}, adjusted_ty={:?}, \
+ index_ty={:?})",
+ expr, base_expr, adjusted_ty, index_ty
+ );
+
+ if let hir::ExprKind::Unary(
+ hir::UnOp::Neg,
+ hir::Expr {
+ kind: hir::ExprKind::Lit(hir::Lit { node: ast::LitKind::Int(..), .. }),
+ ..
+ },
+ ) = index_expr.kind
+ {
+ match adjusted_ty.kind() {
+ ty::Adt(def, _) if self.tcx.is_diagnostic_item(sym::Vec, def.did()) => {
+ return self.negative_index(adjusted_ty, index_expr.span, base_expr);
+ }
+ ty::Slice(_) | ty::Array(_, _) => {
+ return self.negative_index(adjusted_ty, index_expr.span, base_expr);
+ }
+ _ => {}
+ }
+ }
+
+ for unsize in [false, true] {
+ let mut self_ty = adjusted_ty;
+ if unsize {
+ // We only unsize arrays here.
+ if let ty::Array(element_ty, _) = adjusted_ty.kind() {
+ self_ty = self.tcx.mk_slice(*element_ty);
+ } else {
+ continue;
+ }
+ }
+
+ // If some lookup succeeds, write the callee into the table and extract the
+ // index/element type from the method signature.
+ let input_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::AutoDeref,
+ span: base_expr.span,
+ });
+ let method =
+ self.try_overloaded_place_op(expr.span, self_ty, &[input_ty], PlaceOp::Index);
+
+ if let Some(result) = method {
+ debug!("try_index_step: success, using overloaded indexing");
+ let method = self.register_infer_ok_obligations(result);
+
+ let mut adjustments = self.adjust_steps(autoderef);
+ if let ty::Ref(region, _, hir::Mutability::Not) = method.sig.inputs()[0].kind() {
+ adjustments.push(Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*region, AutoBorrowMutability::Not)),
+ target: self.tcx.mk_ref(
+ *region,
+ ty::TypeAndMut { mutbl: hir::Mutability::Not, ty: adjusted_ty },
+ ),
+ });
+ } else {
+ span_bug!(expr.span, "input to index is not a ref?");
+ }
+ if unsize {
+ adjustments.push(Adjustment {
+ kind: Adjust::Pointer(PointerCast::Unsize),
+ target: method.sig.inputs()[0],
+ });
+ }
+ self.apply_adjustments(base_expr, adjustments);
+
+ self.write_method_call(expr.hir_id, method);
+
+ return Some((input_ty, self.make_overloaded_place_return_type(method).ty));
+ }
+ }
+
+ None
+ }
+
+ /// Try to resolve an overloaded place op. We only deal with the immutable
+ /// variant here (Deref/Index). In some contexts we would need the mutable
+ /// variant (DerefMut/IndexMut); those would be later converted by
+ /// `convert_place_derefs_to_mutable`.
+ pub(super) fn try_overloaded_place_op(
+ &self,
+ span: Span,
+ base_ty: Ty<'tcx>,
+ arg_tys: &[Ty<'tcx>],
+ op: PlaceOp,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ debug!("try_overloaded_place_op({:?},{:?},{:?})", span, base_ty, op);
+
+ let (imm_tr, imm_op) = match op {
+ PlaceOp::Deref => (self.tcx.lang_items().deref_trait(), sym::deref),
+ PlaceOp::Index => (self.tcx.lang_items().index_trait(), sym::index),
+ };
+
+ // If the lang item was declared incorrectly, stop here so that we don't
+ // run into an ICE (#83893). The error is reported where the lang item is
+ // declared.
+ if !has_expected_num_generic_args(
+ self.tcx,
+ imm_tr,
+ match op {
+ PlaceOp::Deref => 0,
+ PlaceOp::Index => 1,
+ },
+ ) {
+ return None;
+ }
+
+ imm_tr.and_then(|trait_did| {
+ self.lookup_method_in_trait(
+ span,
+ Ident::with_dummy_span(imm_op),
+ trait_did,
+ base_ty,
+ Some(arg_tys),
+ )
+ })
+ }
+
+ fn try_mutable_overloaded_place_op(
+ &self,
+ span: Span,
+ base_ty: Ty<'tcx>,
+ arg_tys: &[Ty<'tcx>],
+ op: PlaceOp,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ debug!("try_mutable_overloaded_place_op({:?},{:?},{:?})", span, base_ty, op);
+
+ let (mut_tr, mut_op) = match op {
+ PlaceOp::Deref => (self.tcx.lang_items().deref_mut_trait(), sym::deref_mut),
+ PlaceOp::Index => (self.tcx.lang_items().index_mut_trait(), sym::index_mut),
+ };
+
+ // If the lang item was declared incorrectly, stop here so that we don't
+ // run into an ICE (#83893). The error is reported where the lang item is
+ // declared.
+ if !has_expected_num_generic_args(
+ self.tcx,
+ mut_tr,
+ match op {
+ PlaceOp::Deref => 0,
+ PlaceOp::Index => 1,
+ },
+ ) {
+ return None;
+ }
+
+ mut_tr.and_then(|trait_did| {
+ self.lookup_method_in_trait(
+ span,
+ Ident::with_dummy_span(mut_op),
+ trait_did,
+ base_ty,
+ Some(arg_tys),
+ )
+ })
+ }
+
+ /// Convert auto-derefs, indices, etc of an expression from `Deref` and `Index`
+ /// into `DerefMut` and `IndexMut` respectively.
+ ///
+ /// This is a second pass of typechecking derefs/indices. We need this because we do not
+ /// always know whether a place needs to be mutable or not in the first pass.
+    /// This happens when there is an implicit mutable reborrow, e.g. when the type
+    /// is used as the receiver of a method call.
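+    ///
+    /// A small user-level sketch (illustrative only) of why the second pass is needed:
+    ///
+    /// ```ignore (illustrative)
+    /// let mut v = vec![1, 2, 3];
+    /// let x = v[1]; // `Index::index` from the first pass suffices here
+    /// v[0] = 10;    // whether this place must be mutable is not known in the first
+    ///               // pass, so the earlier `Index` resolution is rewritten to
+    ///               // `IndexMut::index_mut` by this second pass
+    /// ```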
+ pub fn convert_place_derefs_to_mutable(&self, expr: &hir::Expr<'_>) {
+ // Gather up expressions we want to munge.
+ let mut exprs = vec![expr];
+
+ while let hir::ExprKind::Field(ref expr, _)
+ | hir::ExprKind::Index(ref expr, _)
+ | hir::ExprKind::Unary(hir::UnOp::Deref, ref expr) = exprs.last().unwrap().kind
+ {
+ exprs.push(expr);
+ }
+
+ debug!("convert_place_derefs_to_mutable: exprs={:?}", exprs);
+
+ // Fix up autoderefs and derefs.
+ let mut inside_union = false;
+ for (i, &expr) in exprs.iter().rev().enumerate() {
+ debug!("convert_place_derefs_to_mutable: i={} expr={:?}", i, expr);
+
+ let mut source = self.node_ty(expr.hir_id);
+ if matches!(expr.kind, hir::ExprKind::Unary(hir::UnOp::Deref, _)) {
+ // Clear previous flag; after a pointer indirection it does not apply any more.
+ inside_union = false;
+ }
+ if source.is_union() {
+ inside_union = true;
+ }
+ // Fix up the autoderefs. Autorefs can only occur immediately preceding
+ // overloaded place ops, and will be fixed by them in order to get
+ // the correct region.
+ // Do not mutate adjustments in place, but rather take them,
+ // and replace them after mutating them, to avoid having the
+ // typeck results borrowed during (`deref_mut`) method resolution.
+ let previous_adjustments =
+ self.typeck_results.borrow_mut().adjustments_mut().remove(expr.hir_id);
+ if let Some(mut adjustments) = previous_adjustments {
+ for adjustment in &mut adjustments {
+ if let Adjust::Deref(Some(ref mut deref)) = adjustment.kind
+ && let Some(ok) = self.try_mutable_overloaded_place_op(
+ expr.span,
+ source,
+ &[],
+ PlaceOp::Deref,
+ )
+ {
+ let method = self.register_infer_ok_obligations(ok);
+ if let ty::Ref(region, _, mutbl) = *method.sig.output().kind() {
+ *deref = OverloadedDeref { region, mutbl, span: deref.span };
+ }
+ // If this is a union field, also throw an error for `DerefMut` of `ManuallyDrop` (see RFC 2514).
+ // This helps avoid accidental drops.
+ if inside_union
+ && source.ty_adt_def().map_or(false, |adt| adt.is_manually_drop())
+ {
+ let mut err = self.tcx.sess.struct_span_err(
+ expr.span,
+ "not automatically applying `DerefMut` on `ManuallyDrop` union field",
+ );
+ err.help(
+ "writing to this reference calls the destructor for the old value",
+ );
+ err.help("add an explicit `*` if that is desired, or call `ptr::write` to not run the destructor");
+ err.emit();
+ }
+ }
+ source = adjustment.target;
+ }
+ self.typeck_results.borrow_mut().adjustments_mut().insert(expr.hir_id, adjustments);
+ }
+
+ match expr.kind {
+ hir::ExprKind::Index(base_expr, ..) => {
+ self.convert_place_op_to_mutable(PlaceOp::Index, expr, base_expr);
+ }
+ hir::ExprKind::Unary(hir::UnOp::Deref, base_expr) => {
+ self.convert_place_op_to_mutable(PlaceOp::Deref, expr, base_expr);
+ }
+ _ => {}
+ }
+ }
+ }
+
+ fn convert_place_op_to_mutable(
+ &self,
+ op: PlaceOp,
+ expr: &hir::Expr<'_>,
+ base_expr: &hir::Expr<'_>,
+ ) {
+ debug!("convert_place_op_to_mutable({:?}, {:?}, {:?})", op, expr, base_expr);
+ if !self.typeck_results.borrow().is_method_call(expr) {
+ debug!("convert_place_op_to_mutable - builtin, nothing to do");
+ return;
+ }
+
+ // Need to deref because overloaded place ops take self by-reference.
+ let base_ty = self
+ .typeck_results
+ .borrow()
+ .expr_ty_adjusted(base_expr)
+ .builtin_deref(false)
+ .expect("place op takes something that is not a ref")
+ .ty;
+
+ let arg_ty = match op {
+ PlaceOp::Deref => None,
+ PlaceOp::Index => {
+ // We would need to recover the `T` used when we resolve `<_ as Index<T>>::index`
+ // in try_index_step. This is the subst at index 1.
+ //
+ // Note: we should *not* use `expr_ty` of index_expr here because autoderef
+ // during coercions can cause type of index_expr to differ from `T` (#72002).
+ // We also could not use `expr_ty_adjusted` of index_expr because reborrowing
+ // during coercions can also cause type of index_expr to differ from `T`,
+ // which can potentially cause regionck failure (#74933).
+ Some(self.typeck_results.borrow().node_substs(expr.hir_id).type_at(1))
+ }
+ };
+ let arg_tys = match arg_ty {
+ None => &[],
+ Some(ref ty) => slice::from_ref(ty),
+ };
+
+ let method = self.try_mutable_overloaded_place_op(expr.span, base_ty, arg_tys, op);
+ let method = match method {
+ Some(ok) => self.register_infer_ok_obligations(ok),
+ // Couldn't find the mutable variant of the place op, keep the
+ // current, immutable version.
+ None => return,
+ };
+ debug!("convert_place_op_to_mutable: method={:?}", method);
+ self.write_method_call(expr.hir_id, method);
+
+ let ty::Ref(region, _, hir::Mutability::Mut) = method.sig.inputs()[0].kind() else {
+ span_bug!(expr.span, "input to mutable place op is not a mut ref?");
+ };
+
+ // Convert the autoref in the base expr to mutable with the correct
+ // region and mutability.
+ let base_expr_ty = self.node_ty(base_expr.hir_id);
+ if let Some(adjustments) =
+ self.typeck_results.borrow_mut().adjustments_mut().get_mut(base_expr.hir_id)
+ {
+ let mut source = base_expr_ty;
+ for adjustment in &mut adjustments[..] {
+ if let Adjust::Borrow(AutoBorrow::Ref(..)) = adjustment.kind {
+ debug!("convert_place_op_to_mutable: converting autoref {:?}", adjustment);
+ let mutbl = AutoBorrowMutability::Mut {
+ // Deref/indexing can be desugared to a method call,
+ // so maybe we could use two-phase here.
+ // See the documentation of AllowTwoPhase for why that's
+ // not the case today.
+ allow_two_phase_borrow: AllowTwoPhase::No,
+ };
+ adjustment.kind = Adjust::Borrow(AutoBorrow::Ref(*region, mutbl));
+ adjustment.target = self
+ .tcx
+ .mk_ref(*region, ty::TypeAndMut { ty: source, mutbl: mutbl.into() });
+ }
+ source = adjustment.target;
+ }
+
+ // If we have an autoref followed by unsizing at the end, fix the unsize target.
+ if let [
+ ..,
+ Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(..)), .. },
+ Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), ref mut target },
+ ] = adjustments[..]
+ {
+ *target = method.sig.inputs()[0];
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/rvalue_scopes.rs b/compiler/rustc_hir_typeck/src/rvalue_scopes.rs
new file mode 100644
index 000000000..22c9e7961
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/rvalue_scopes.rs
@@ -0,0 +1,83 @@
+use super::FnCtxt;
+use hir::def_id::DefId;
+use hir::Node;
+use rustc_hir as hir;
+use rustc_middle::middle::region::{RvalueCandidateType, Scope, ScopeTree};
+use rustc_middle::ty::RvalueScopes;
+
+/// Applied to an expression `expr` if `expr` -- or something owned or partially owned by
+/// `expr` -- is going to be indirectly referenced by a variable in a let statement. In that
+/// case, the "temporary lifetime" or `expr` is extended to be the block enclosing the `let`
+/// statement.
+///
+/// More formally, if `expr` matches the grammar `ET`, record the rvalue scope of the matching
+/// `<rvalue>` as `blk_id`:
+///
+/// ```text
+/// ET = *ET
+/// | ET[...]
+/// | ET.f
+/// | (ET)
+/// | <rvalue>
+/// ```
+///
+/// Note: ET is intended to match "rvalues or places based on rvalues".
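+///
+/// A user-level sketch (illustrative only) of an expression matching `ET`, where the
+/// tuple temporary produced by `make_pair()` gets the rvalue scope of the enclosing
+/// block because the `let` borrows a place based on that rvalue:
+///
+/// ```ignore (illustrative)
+/// fn make_pair() -> (String, String) {
+///     (String::from("a"), String::from("b"))
+/// }
+///
+/// fn demo() {
+///     // `make_pair().0` matches `ET.f` with `<rvalue> = make_pair()`,
+///     // so the tuple temporary lives until the end of `demo`'s body.
+///     let first = &make_pair().0;
+///     println!("{first}");
+/// }
+/// ```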
+fn record_rvalue_scope_rec(
+ rvalue_scopes: &mut RvalueScopes,
+ mut expr: &hir::Expr<'_>,
+ lifetime: Option<Scope>,
+) {
+ loop {
+ // Note: give all the expressions matching `ET` with the
+ // extended temporary lifetime, not just the innermost rvalue,
+ // because in codegen if we must compile e.g., `*rvalue()`
+ // into a temporary, we request the temporary scope of the
+ // outer expression.
+
+ rvalue_scopes.record_rvalue_scope(expr.hir_id.local_id, lifetime);
+
+ match expr.kind {
+ hir::ExprKind::AddrOf(_, _, subexpr)
+ | hir::ExprKind::Unary(hir::UnOp::Deref, subexpr)
+ | hir::ExprKind::Field(subexpr, _)
+ | hir::ExprKind::Index(subexpr, _) => {
+ expr = subexpr;
+ }
+ _ => {
+ return;
+ }
+ }
+ }
+}
+fn record_rvalue_scope(
+ rvalue_scopes: &mut RvalueScopes,
+ expr: &hir::Expr<'_>,
+ candidate: &RvalueCandidateType,
+) {
+ debug!("resolve_rvalue_scope(expr={expr:?}, candidate={candidate:?})");
+ match candidate {
+ RvalueCandidateType::Borrow { lifetime, .. }
+ | RvalueCandidateType::Pattern { lifetime, .. } => {
+ record_rvalue_scope_rec(rvalue_scopes, expr, *lifetime)
+ } // FIXME(@dingxiangfei2009): handle the candidates in the function call arguments
+ }
+}
+
+pub fn resolve_rvalue_scopes<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ scope_tree: &'a ScopeTree,
+ def_id: DefId,
+) -> RvalueScopes {
+ let tcx = &fcx.tcx;
+ let hir_map = tcx.hir();
+ let mut rvalue_scopes = RvalueScopes::new();
+ debug!("start resolving rvalue scopes, def_id={def_id:?}");
+ debug!("rvalue_scope: rvalue_candidates={:?}", scope_tree.rvalue_candidates);
+ for (&hir_id, candidate) in &scope_tree.rvalue_candidates {
+ let Some(Node::Expr(expr)) = hir_map.find(hir_id) else {
+ bug!("hir node does not exist")
+ };
+ record_rvalue_scope(&mut rvalue_scopes, expr, candidate);
+ }
+ rvalue_scopes
+}
diff --git a/compiler/rustc_hir_typeck/src/upvar.rs b/compiler/rustc_hir_typeck/src/upvar.rs
new file mode 100644
index 000000000..4dea40829
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/upvar.rs
@@ -0,0 +1,2274 @@
+//! ### Inferring borrow kinds for upvars
+//!
+//! Whenever there is a closure expression, we need to determine how each
+//! upvar is used. We do this by initially assigning each upvar an
+//! immutable "borrow kind" (see `ty::BorrowKind` for details) and then
+//! "escalating" the kind as needed. The borrow kind proceeds according to
+//! the following lattice:
+//! ```ignore (not-rust)
+//! ty::ImmBorrow -> ty::UniqueImmBorrow -> ty::MutBorrow
+//! ```
+//! So, for example, if we see an assignment `x = 5` to an upvar `x`, we
+//! will promote its borrow kind to mutable borrow. If we see an `&mut x`
+//! we'll do the same. Naturally, this applies not just to the upvar, but
+//! to everything owned by `x`, so the result is the same for something
+//! like `x.f = 5` and so on (presuming `x` is not a borrowed pointer to a
+//! struct). These adjustments are performed in
+//! `adjust_upvar_borrow_kind()` (you can trace backwards through the code
+//! from there).
+//!
+//! The fact that we are inferring borrow kinds as we go results in a
+//! semi-hacky interaction with mem-categorization. In particular,
+//! mem-categorization will query the current borrow kind as it
+//! categorizes, and we'll return the *current* value, but this may get
+//! adjusted later. Therefore, in this module, we generally ignore the
+//! borrow kind (and derived mutabilities) that are returned from
+//! mem-categorization, since they may be inaccurate. (Another option
+//! would be to use a unification scheme, where instead of returning a
+//! concrete borrow kind like `ty::ImmBorrow`, we return a
+//! `ty::InferBorrow(upvar_id)` or something like that, but this would
+//! then mean that all later passes would have to check for these figments
+//! and report an error, and it just seems like more mess in the end.)
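+//!
+//! A brief user-level sketch (illustrative only) of the escalation described above:
+//!
+//! ```ignore (illustrative)
+//! let msg = String::from("hi");
+//! let print = || println!("{msg}"); // reading `msg` only needs `ty::ImmBorrow`
+//!
+//! let mut x = 0;
+//! let mut inc = || x += 1;          // assigning to the upvar escalates `x` to
+//! inc();                            // `ty::MutBorrow`, so the closure is `FnMut`
+//! ```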
+
+use super::FnCtxt;
+
+use crate::expr_use_visitor as euv;
+use rustc_errors::{Applicability, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_infer::infer::UpvarRegion;
+use rustc_middle::hir::place::{Place, PlaceBase, PlaceWithHirId, Projection, ProjectionKind};
+use rustc_middle::mir::FakeReadCause;
+use rustc_middle::ty::{
+ self, ClosureSizeProfileData, Ty, TyCtxt, TypeckResults, UpvarCapture, UpvarSubsts,
+};
+use rustc_session::lint;
+use rustc_span::sym;
+use rustc_span::{BytePos, Pos, Span, Symbol};
+use rustc_trait_selection::infer::InferCtxtExt;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_index::vec::Idx;
+use rustc_target::abi::VariantIdx;
+
+use std::iter;
+
+/// Describes the relationship between the paths of two places, e.g.:
+/// - `foo` is an ancestor of `foo.bar.baz`
+/// - `foo.bar.baz` is a descendant of `foo.bar`
+/// - `foo.bar` and `foo.baz` are divergent
+enum PlaceAncestryRelation {
+ Ancestor,
+ Descendant,
+ SamePlace,
+ Divergent,
+}
+
+/// Intermediate format to store a captured `Place` and associated `ty::CaptureInfo`
+/// during capture analysis. Information in this map feeds into the minimum capture
+/// analysis pass.
+type InferredCaptureInformation<'tcx> = Vec<(Place<'tcx>, ty::CaptureInfo)>;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn closure_analyze(&self, body: &'tcx hir::Body<'tcx>) {
+ InferBorrowKindVisitor { fcx: self }.visit_body(body);
+
+ // it's our job to process these.
+ assert!(self.deferred_call_resolutions.borrow().is_empty());
+ }
+}
+
+/// Intermediate format to store the hir_id pointing to the use that resulted in the
+/// corresponding place being captured and a String which contains the captured value's
+/// name (e.g. `a.b.c`).
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+enum UpvarMigrationInfo {
+ /// We previously captured all of `x`, but now we capture some sub-path.
+ CapturingPrecise { source_expr: Option<hir::HirId>, var_name: String },
+ CapturingNothing {
+ // where the variable appears in the closure (but is not captured)
+ use_span: Span,
+ },
+}
+
+/// Reasons that we might issue a migration warning.
+#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
+struct MigrationWarningReason {
+ /// When we used to capture `x` in its entirety, we implemented the auto-trait(s)
+ /// in this vec, but now we don't.
+ auto_traits: Vec<&'static str>,
+
+ /// When we used to capture `x` in its entirety, we would execute some destructors
+ /// at a different time.
+ drop_order: bool,
+}
+
+impl MigrationWarningReason {
+ fn migration_message(&self) -> String {
+ let base = "changes to closure capture in Rust 2021 will affect";
+ if !self.auto_traits.is_empty() && self.drop_order {
+ format!("{} drop order and which traits the closure implements", base)
+ } else if self.drop_order {
+ format!("{} drop order", base)
+ } else {
+ format!("{} which traits the closure implements", base)
+ }
+ }
+}
+
+/// Intermediate format to store information needed to generate a note in the migration lint.
+struct MigrationLintNote {
+ captures_info: UpvarMigrationInfo,
+
+ /// reasons why migration is needed for this capture
+ reason: MigrationWarningReason,
+}
+
+/// Intermediate format to store the hir id of the root variable and a list of
+/// `MigrationLintNote`s explaining why the root variable should be fully captured.
+struct NeededMigration {
+ var_hir_id: hir::HirId,
+ diagnostics_info: Vec<MigrationLintNote>,
+}
+
+struct InferBorrowKindVisitor<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for InferBorrowKindVisitor<'a, 'tcx> {
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ match expr.kind {
+ hir::ExprKind::Closure(&hir::Closure { capture_clause, body: body_id, .. }) => {
+ let body = self.fcx.tcx.hir().body(body_id);
+ self.visit_body(body);
+ self.fcx.analyze_closure(expr.hir_id, expr.span, body_id, body, capture_clause);
+ }
+ hir::ExprKind::ConstBlock(anon_const) => {
+ let body = self.fcx.tcx.hir().body(anon_const.body);
+ self.visit_body(body);
+ }
+ _ => {}
+ }
+
+ intravisit::walk_expr(self, expr);
+ }
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Analysis starting point.
+ #[instrument(skip(self, body), level = "debug")]
+ fn analyze_closure(
+ &self,
+ closure_hir_id: hir::HirId,
+ span: Span,
+ body_id: hir::BodyId,
+ body: &'tcx hir::Body<'tcx>,
+ capture_clause: hir::CaptureBy,
+ ) {
+ // Extract the type of the closure.
+ let ty = self.node_ty(closure_hir_id);
+ let (closure_def_id, substs) = match *ty.kind() {
+ ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
+ ty::Generator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
+ ty::Error(_) => {
+ // #51714: skip analysis when we have already encountered type errors
+ return;
+ }
+ _ => {
+ span_bug!(
+ span,
+ "type of closure expr {:?} is not a closure {:?}",
+ closure_hir_id,
+ ty
+ );
+ }
+ };
+ let closure_def_id = closure_def_id.expect_local();
+
+ let infer_kind = if let UpvarSubsts::Closure(closure_substs) = substs {
+ self.closure_kind(closure_substs).is_none().then_some(closure_substs)
+ } else {
+ None
+ };
+
+ assert_eq!(self.tcx.hir().body_owner_def_id(body.id()), closure_def_id);
+ let mut delegate = InferBorrowKind {
+ fcx: self,
+ closure_def_id,
+ capture_information: Default::default(),
+ fake_reads: Default::default(),
+ };
+ euv::ExprUseVisitor::new(
+ &mut delegate,
+ &self.infcx,
+ closure_def_id,
+ self.param_env,
+ &self.typeck_results.borrow(),
+ )
+ .consume_body(body);
+
+ debug!(
+ "For closure={:?}, capture_information={:#?}",
+ closure_def_id, delegate.capture_information
+ );
+
+ self.log_capture_analysis_first_pass(closure_def_id, &delegate.capture_information, span);
+
+ let (capture_information, closure_kind, origin) = self
+ .process_collected_capture_information(capture_clause, delegate.capture_information);
+
+ self.compute_min_captures(closure_def_id, capture_information, span);
+
+ let closure_hir_id = self.tcx.hir().local_def_id_to_hir_id(closure_def_id);
+
+ if should_do_rust_2021_incompatible_closure_captures_analysis(self.tcx, closure_hir_id) {
+ self.perform_2229_migration_anaysis(closure_def_id, body_id, capture_clause, span);
+ }
+
+ let after_feature_tys = self.final_upvar_tys(closure_def_id);
+
+        // We now fake capture information for all variables that are mentioned within the closure.
+        // We do this after handling migrations so that min_captures is computed before the fake
+        // captures are added.
+ if !enable_precise_capture(self.tcx, span) {
+ let mut capture_information: InferredCaptureInformation<'tcx> = Default::default();
+
+ if let Some(upvars) = self.tcx.upvars_mentioned(closure_def_id) {
+ for var_hir_id in upvars.keys() {
+ let place = self.place_for_root_variable(closure_def_id, *var_hir_id);
+
+ debug!("seed place {:?}", place);
+
+ let capture_kind = self.init_capture_kind_for_place(&place, capture_clause);
+ let fake_info = ty::CaptureInfo {
+ capture_kind_expr_id: None,
+ path_expr_id: None,
+ capture_kind,
+ };
+
+ capture_information.push((place, fake_info));
+ }
+ }
+
+ // This will update the min captures based on this new fake information.
+ self.compute_min_captures(closure_def_id, capture_information, span);
+ }
+
+ let before_feature_tys = self.final_upvar_tys(closure_def_id);
+
+ if let Some(closure_substs) = infer_kind {
+ // Unify the (as yet unbound) type variable in the closure
+ // substs with the kind we inferred.
+ let closure_kind_ty = closure_substs.as_closure().kind_ty();
+ self.demand_eqtype(span, closure_kind.to_ty(self.tcx), closure_kind_ty);
+
+ // If we have an origin, store it.
+ if let Some(origin) = origin {
+ let origin = if enable_precise_capture(self.tcx, span) {
+ (origin.0, origin.1)
+ } else {
+ (origin.0, Place { projections: vec![], ..origin.1 })
+ };
+
+ self.typeck_results
+ .borrow_mut()
+ .closure_kind_origins_mut()
+ .insert(closure_hir_id, origin);
+ }
+ }
+
+ self.log_closure_min_capture_info(closure_def_id, span);
+
+ // Now that we've analyzed the closure, we know how each
+ // variable is borrowed, and we know what traits the closure
+ // implements (Fn vs FnMut etc). We now have some updates to do
+ // with that information.
+ //
+ // Note that no closure type C may have an upvar of type C
+ // (though it may reference itself via a trait object). This
+ // results from the desugaring of closures to a struct like
+ // `Foo<..., UV0...UVn>`. If one of those upvars referenced
+ // C, then the type would have infinite size (and the
+ // inference algorithm will reject it).
+
+ // Equate the type variables for the upvars with the actual types.
+ let final_upvar_tys = self.final_upvar_tys(closure_def_id);
+ debug!(
+ "analyze_closure: id={:?} substs={:?} final_upvar_tys={:?}",
+ closure_hir_id, substs, final_upvar_tys
+ );
+
+ // Build a tuple (U0..Un) of the final upvar types U0..Un
+ // and unify the upvar tuple type in the closure with it:
+ let final_tupled_upvars_type = self.tcx.mk_tup(final_upvar_tys.iter());
+ self.demand_suptype(span, substs.tupled_upvars_ty(), final_tupled_upvars_type);
+
+ let fake_reads = delegate
+ .fake_reads
+ .into_iter()
+ .map(|(place, cause, hir_id)| (place, cause, hir_id))
+ .collect();
+ self.typeck_results.borrow_mut().closure_fake_reads.insert(closure_def_id, fake_reads);
+
+ if self.tcx.sess.opts.unstable_opts.profile_closures {
+ self.typeck_results.borrow_mut().closure_size_eval.insert(
+ closure_def_id,
+ ClosureSizeProfileData {
+ before_feature_tys: self.tcx.mk_tup(before_feature_tys.into_iter()),
+ after_feature_tys: self.tcx.mk_tup(after_feature_tys.into_iter()),
+ },
+ );
+ }
+
+        // If we also inferred the closure kind here,
+ // process any deferred resolutions.
+ let deferred_call_resolutions = self.remove_deferred_call_resolutions(closure_def_id);
+ for deferred_call_resolution in deferred_call_resolutions {
+ deferred_call_resolution.resolve(self);
+ }
+ }
+
+ // Returns a list of `Ty`s for each upvar.
+ fn final_upvar_tys(&self, closure_id: LocalDefId) -> Vec<Ty<'tcx>> {
+ self.typeck_results
+ .borrow()
+ .closure_min_captures_flattened(closure_id)
+ .map(|captured_place| {
+ let upvar_ty = captured_place.place.ty();
+ let capture = captured_place.info.capture_kind;
+
+ debug!(
+ "final_upvar_tys: place={:?} upvar_ty={:?} capture={:?}, mutability={:?}",
+ captured_place.place, upvar_ty, capture, captured_place.mutability,
+ );
+
+ apply_capture_kind_on_capture_ty(self.tcx, upvar_ty, capture, captured_place.region)
+ })
+ .collect()
+ }
+
+ /// Adjusts the closure capture information to ensure that the operations aren't unsafe,
+ /// and that the path can be captured with required capture kind (depending on use in closure,
+ /// move closure etc.)
+ ///
+ /// Returns the set of adjusted information along with the inferred closure kind and span
+ /// associated with the closure kind inference.
+ ///
+ /// Note that we *always* infer a minimal kind, even if
+ /// we don't always *use* that in the final result (i.e., sometimes
+ /// we've taken the closure kind from the expectations instead, and
+ /// for generators we don't even implement the closure traits
+ /// really).
+ ///
+    /// If we inferred that the closure needs to be FnMut/FnOnce, the last element of the returned tuple
+ /// contains a `Some()` with the `Place` that caused us to do so.
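+    ///
+    /// A minimal user-level sketch (illustrative only) of how capture kinds feed the
+    /// closure-kind lattice used here:
+    ///
+    /// ```ignore (illustrative)
+    /// let msg = String::from("hi");
+    /// let print = || println!("{msg}"); // only `ImmBorrow` captures -> `Fn`
+    ///
+    /// let mut n = 0;
+    /// let mut bump = || n += 1;         // a mutable borrow capture  -> `FnMut`
+    ///
+    /// let s = String::from("s");
+    /// let consume = || drop(s);         // a by-value capture        -> `FnOnce`
+    /// ```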
+ fn process_collected_capture_information(
+ &self,
+ capture_clause: hir::CaptureBy,
+ capture_information: InferredCaptureInformation<'tcx>,
+ ) -> (InferredCaptureInformation<'tcx>, ty::ClosureKind, Option<(Span, Place<'tcx>)>) {
+ let mut closure_kind = ty::ClosureKind::LATTICE_BOTTOM;
+ let mut origin: Option<(Span, Place<'tcx>)> = None;
+
+ let processed = capture_information
+ .into_iter()
+ .map(|(place, mut capture_info)| {
+ // Apply rules for safety before inferring closure kind
+ let (place, capture_kind) =
+ restrict_capture_precision(place, capture_info.capture_kind);
+
+ let (place, capture_kind) = truncate_capture_for_optimization(place, capture_kind);
+
+ let usage_span = if let Some(usage_expr) = capture_info.path_expr_id {
+ self.tcx.hir().span(usage_expr)
+ } else {
+ unreachable!()
+ };
+
+ let updated = match capture_kind {
+ ty::UpvarCapture::ByValue => match closure_kind {
+ ty::ClosureKind::Fn | ty::ClosureKind::FnMut => {
+ (ty::ClosureKind::FnOnce, Some((usage_span, place.clone())))
+ }
+ // If closure is already FnOnce, don't update
+ ty::ClosureKind::FnOnce => (closure_kind, origin.take()),
+ },
+
+ ty::UpvarCapture::ByRef(
+ ty::BorrowKind::MutBorrow | ty::BorrowKind::UniqueImmBorrow,
+ ) => {
+ match closure_kind {
+ ty::ClosureKind::Fn => {
+ (ty::ClosureKind::FnMut, Some((usage_span, place.clone())))
+ }
+ // Don't update the origin
+ ty::ClosureKind::FnMut | ty::ClosureKind::FnOnce => {
+ (closure_kind, origin.take())
+ }
+ }
+ }
+
+ _ => (closure_kind, origin.take()),
+ };
+
+ closure_kind = updated.0;
+ origin = updated.1;
+
+ let (place, capture_kind) = match capture_clause {
+ hir::CaptureBy::Value => adjust_for_move_closure(place, capture_kind),
+ hir::CaptureBy::Ref => adjust_for_non_move_closure(place, capture_kind),
+ };
+
+ // This restriction needs to be applied after we have handled adjustments for `move`
+ // closures. We want to make sure any adjustment that might make us move the place into
+ // the closure gets handled.
+ let (place, capture_kind) =
+ restrict_precision_for_drop_types(self, place, capture_kind, usage_span);
+
+ capture_info.capture_kind = capture_kind;
+ (place, capture_info)
+ })
+ .collect();
+
+ (processed, closure_kind, origin)
+ }
+
+ /// Analyzes the information collected by `InferBorrowKind` to compute the min number of
+ /// Places (and corresponding capture kind) that we need to keep track of to support all
+ /// the required captured paths.
+    ///
+ /// Note: If this function is called multiple times for the same closure, it will update
+ /// the existing min_capture map that is stored in TypeckResults.
+ ///
+ /// Eg:
+ /// ```
+ /// #[derive(Debug)]
+ /// struct Point { x: i32, y: i32 }
+ ///
+ /// let s = String::from("s"); // hir_id_s
+    /// let mut p = Point { x: 2, y: -2 }; // hir_id_p
+ /// let c = || {
+ /// println!("{s:?}"); // L1
+ /// p.x += 10; // L2
+ /// println!("{}" , p.y); // L3
+ /// println!("{p:?}"); // L4
+ /// drop(s); // L5
+ /// };
+ /// ```
+ /// and let hir_id_L1..5 be the expressions pointing to use of a captured variable on
+ /// the lines L1..5 respectively.
+ ///
+ /// InferBorrowKind results in a structure like this:
+ ///
+ /// ```ignore (illustrative)
+ /// {
+ /// Place(base: hir_id_s, projections: [], ....) -> {
+ /// capture_kind_expr: hir_id_L5,
+ /// path_expr_id: hir_id_L5,
+ /// capture_kind: ByValue
+ /// },
+ /// Place(base: hir_id_p, projections: [Field(0, 0)], ...) -> {
+ /// capture_kind_expr: hir_id_L2,
+ /// path_expr_id: hir_id_L2,
+ /// capture_kind: ByValue
+ /// },
+ /// Place(base: hir_id_p, projections: [Field(1, 0)], ...) -> {
+ /// capture_kind_expr: hir_id_L3,
+ /// path_expr_id: hir_id_L3,
+ /// capture_kind: ByValue
+ /// },
+ /// Place(base: hir_id_p, projections: [], ...) -> {
+ /// capture_kind_expr: hir_id_L4,
+ /// path_expr_id: hir_id_L4,
+ /// capture_kind: ByValue
+ /// },
+ /// }
+ /// ```
+ ///
+ /// After the min capture analysis, we get:
+ /// ```ignore (illustrative)
+ /// {
+ /// hir_id_s -> [
+ /// Place(base: hir_id_s, projections: [], ....) -> {
+ /// capture_kind_expr: hir_id_L5,
+ /// path_expr_id: hir_id_L5,
+ /// capture_kind: ByValue
+ /// },
+ /// ],
+ /// hir_id_p -> [
+ /// Place(base: hir_id_p, projections: [], ...) -> {
+ /// capture_kind_expr: hir_id_L2,
+ /// path_expr_id: hir_id_L4,
+ /// capture_kind: ByValue
+ /// },
+ /// ],
+ /// }
+ /// ```
+ fn compute_min_captures(
+ &self,
+ closure_def_id: LocalDefId,
+ capture_information: InferredCaptureInformation<'tcx>,
+ closure_span: Span,
+ ) {
+ if capture_information.is_empty() {
+ return;
+ }
+
+ let mut typeck_results = self.typeck_results.borrow_mut();
+
+ let mut root_var_min_capture_list =
+ typeck_results.closure_min_captures.remove(&closure_def_id).unwrap_or_default();
+
+ for (mut place, capture_info) in capture_information.into_iter() {
+ let var_hir_id = match place.base {
+ PlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ base => bug!("Expected upvar, found={:?}", base),
+ };
+
+ let Some(min_cap_list) = root_var_min_capture_list.get_mut(&var_hir_id) else {
+ let mutability = self.determine_capture_mutability(&typeck_results, &place);
+ let min_cap_list = vec![ty::CapturedPlace {
+ place,
+ info: capture_info,
+ mutability,
+ region: None,
+ }];
+ root_var_min_capture_list.insert(var_hir_id, min_cap_list);
+ continue;
+ };
+
+ // Go through each entry in the current list of min_captures
+            // - if ancestor is found, update its capture kind to account for current place's
+ // capture information.
+ //
+ // - if descendant is found, remove it from the list, and update the current place's
+ // capture information to account for the descendant's capture kind.
+ //
+            // We can never be in a case where the list contains both an ancestor and a descendant.
+            // Also, there can be at most one ancestor, but there may be multiple descendants.
+
+ let mut descendant_found = false;
+ let mut updated_capture_info = capture_info;
+ min_cap_list.retain(|possible_descendant| {
+ match determine_place_ancestry_relation(&place, &possible_descendant.place) {
+ // current place is ancestor of possible_descendant
+ PlaceAncestryRelation::Ancestor => {
+ descendant_found = true;
+
+ let mut possible_descendant = possible_descendant.clone();
+ let backup_path_expr_id = updated_capture_info.path_expr_id;
+
+ // Truncate the descendant (already in min_captures) to be same as the ancestor to handle any
+ // possible change in capture mode.
+ truncate_place_to_len_and_update_capture_kind(
+ &mut possible_descendant.place,
+ &mut possible_descendant.info.capture_kind,
+ place.projections.len(),
+ );
+
+ updated_capture_info =
+ determine_capture_info(updated_capture_info, possible_descendant.info);
+
+ // we need to keep the ancestor's `path_expr_id`
+ updated_capture_info.path_expr_id = backup_path_expr_id;
+ false
+ }
+
+ _ => true,
+ }
+ });
+
+ let mut ancestor_found = false;
+ if !descendant_found {
+ for possible_ancestor in min_cap_list.iter_mut() {
+ match determine_place_ancestry_relation(&place, &possible_ancestor.place) {
+ PlaceAncestryRelation::SamePlace => {
+ ancestor_found = true;
+ possible_ancestor.info = determine_capture_info(
+ possible_ancestor.info,
+ updated_capture_info,
+ );
+
+ // Only one related place will be in the list.
+ break;
+ }
+ // current place is descendant of possible_ancestor
+ PlaceAncestryRelation::Descendant => {
+ ancestor_found = true;
+ let backup_path_expr_id = possible_ancestor.info.path_expr_id;
+
+ // Truncate the descendant (current place) to be same as the ancestor to handle any
+ // possible change in capture mode.
+ truncate_place_to_len_and_update_capture_kind(
+ &mut place,
+ &mut updated_capture_info.capture_kind,
+ possible_ancestor.place.projections.len(),
+ );
+
+ possible_ancestor.info = determine_capture_info(
+ possible_ancestor.info,
+ updated_capture_info,
+ );
+
+ // we need to keep the ancestor's `path_expr_id`
+ possible_ancestor.info.path_expr_id = backup_path_expr_id;
+
+ // Only one related place will be in the list.
+ break;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ // Only need to insert when we don't have an ancestor in the existing min capture list
+ if !ancestor_found {
+ let mutability = self.determine_capture_mutability(&typeck_results, &place);
+ let captured_place = ty::CapturedPlace {
+ place,
+ info: updated_capture_info,
+ mutability,
+ region: None,
+ };
+ min_cap_list.push(captured_place);
+ }
+ }
+
+ // For each capture that is determined to be captured by ref, add region info.
+ for (_, captures) in &mut root_var_min_capture_list {
+ for capture in captures {
+ match capture.info.capture_kind {
+ ty::UpvarCapture::ByRef(_) => {
+ let PlaceBase::Upvar(upvar_id) = capture.place.base else { bug!("expected upvar") };
+ let origin = UpvarRegion(upvar_id, closure_span);
+ let upvar_region = self.next_region_var(origin);
+ capture.region = Some(upvar_region);
+ }
+ _ => (),
+ }
+ }
+ }
+
+ debug!(
+ "For closure={:?}, min_captures before sorting={:?}",
+ closure_def_id, root_var_min_capture_list
+ );
+
+ // Now that we have the minimized list of captures, sort the captures by field id.
+ // This causes the closure to capture the upvars in the same order as the fields are
+ // declared which is also the drop order. Thus, in situations where we capture all the
+ // fields of some type, the observable drop order will remain the same as it previously
+ // was even though we're dropping each capture individually.
+ // See https://github.com/rust-lang/project-rfc-2229/issues/42 and
+ // `src/test/ui/closures/2229_closure_analysis/preserve_field_drop_order.rs`.
+ for (_, captures) in &mut root_var_min_capture_list {
+ captures.sort_by(|capture1, capture2| {
+ for (p1, p2) in capture1.place.projections.iter().zip(&capture2.place.projections) {
+ // We do not need to look at the `Projection.ty` fields here because at each
+ // step of the iteration, the projections will either be the same and therefore
+ // the types must be as well or the current projection will be different and
+ // we will return the result of comparing the field indexes.
+ match (p1.kind, p2.kind) {
+ // Paths are the same, continue to next loop.
+ (ProjectionKind::Deref, ProjectionKind::Deref) => {}
+ (ProjectionKind::Field(i1, _), ProjectionKind::Field(i2, _))
+ if i1 == i2 => {}
+
+ // Fields are different, compare them.
+ (ProjectionKind::Field(i1, _), ProjectionKind::Field(i2, _)) => {
+ return i1.cmp(&i2);
+ }
+
+ // We should have either a pair of `Deref`s or a pair of `Field`s.
+ // Anything else is a bug.
+ (
+ l @ (ProjectionKind::Deref | ProjectionKind::Field(..)),
+ r @ (ProjectionKind::Deref | ProjectionKind::Field(..)),
+ ) => bug!(
+ "ProjectionKinds Deref and Field were mismatched: ({:?}, {:?})",
+ l,
+ r
+ ),
+ (
+ l @ (ProjectionKind::Index
+ | ProjectionKind::Subslice
+ | ProjectionKind::Deref
+ | ProjectionKind::Field(..)),
+ r @ (ProjectionKind::Index
+ | ProjectionKind::Subslice
+ | ProjectionKind::Deref
+ | ProjectionKind::Field(..)),
+ ) => bug!(
+ "ProjectionKinds Index or Subslice were unexpected: ({:?}, {:?})",
+ l,
+ r
+ ),
+ }
+ }
+
+ unreachable!(
+ "we captured two identical projections: capture1 = {:?}, capture2 = {:?}",
+ capture1, capture2
+ );
+ });
+ }
+
+ debug!(
+ "For closure={:?}, min_captures after sorting={:#?}",
+ closure_def_id, root_var_min_capture_list
+ );
+ typeck_results.closure_min_captures.insert(closure_def_id, root_var_min_capture_list);
+ }
+
+ /// Perform the migration analysis for RFC 2229, and emit lint
+    /// `rust_2021_incompatible_closure_captures` if needed.
+ fn perform_2229_migration_anaysis(
+ &self,
+ closure_def_id: LocalDefId,
+ body_id: hir::BodyId,
+ capture_clause: hir::CaptureBy,
+ span: Span,
+ ) {
+ let (need_migrations, reasons) = self.compute_2229_migrations(
+ closure_def_id,
+ span,
+ capture_clause,
+ self.typeck_results.borrow().closure_min_captures.get(&closure_def_id),
+ );
+
+ if !need_migrations.is_empty() {
+ let (migration_string, migrated_variables_concat) =
+ migration_suggestion_for_2229(self.tcx, &need_migrations);
+
+ let closure_hir_id = self.tcx.hir().local_def_id_to_hir_id(closure_def_id);
+ let closure_head_span = self.tcx.def_span(closure_def_id);
+ self.tcx.struct_span_lint_hir(
+ lint::builtin::RUST_2021_INCOMPATIBLE_CLOSURE_CAPTURES,
+ closure_hir_id,
+ closure_head_span,
+ reasons.migration_message(),
+ |lint| {
+ for NeededMigration { var_hir_id, diagnostics_info } in &need_migrations {
+ // Labels all the usage of the captured variable and why they are responsible
+ // for migration being needed
+ for lint_note in diagnostics_info.iter() {
+ match &lint_note.captures_info {
+ UpvarMigrationInfo::CapturingPrecise { source_expr: Some(capture_expr_id), var_name: captured_name } => {
+ let cause_span = self.tcx.hir().span(*capture_expr_id);
+ lint.span_label(cause_span, format!("in Rust 2018, this closure captures all of `{}`, but in Rust 2021, it will only capture `{}`",
+ self.tcx.hir().name(*var_hir_id),
+ captured_name,
+ ));
+ }
+ UpvarMigrationInfo::CapturingNothing { use_span } => {
+ lint.span_label(*use_span, format!("in Rust 2018, this causes the closure to capture `{}`, but in Rust 2021, it has no effect",
+ self.tcx.hir().name(*var_hir_id),
+ ));
+ }
+
+ _ => { }
+ }
+
+ // Add a label pointing to where a captured variable affected by drop order
+ // is dropped
+ if lint_note.reason.drop_order {
+ let drop_location_span = drop_location_span(self.tcx, closure_hir_id);
+
+ match &lint_note.captures_info {
+ UpvarMigrationInfo::CapturingPrecise { var_name: captured_name, .. } => {
+ lint.span_label(drop_location_span, format!("in Rust 2018, `{}` is dropped here, but in Rust 2021, only `{}` will be dropped here as part of the closure",
+ self.tcx.hir().name(*var_hir_id),
+ captured_name,
+ ));
+ }
+ UpvarMigrationInfo::CapturingNothing { use_span: _ } => {
+ lint.span_label(drop_location_span, format!("in Rust 2018, `{v}` is dropped here along with the closure, but in Rust 2021 `{v}` is not part of the closure",
+ v = self.tcx.hir().name(*var_hir_id),
+ ));
+ }
+ }
+ }
+
+ // Add a label explaining why a closure no longer implements a trait
+ for &missing_trait in &lint_note.reason.auto_traits {
+ // not capturing something anymore cannot cause a trait to fail to be implemented:
+ match &lint_note.captures_info {
+ UpvarMigrationInfo::CapturingPrecise { var_name: captured_name, .. } => {
+ let var_name = self.tcx.hir().name(*var_hir_id);
+ lint.span_label(closure_head_span, format!("\
+ in Rust 2018, this closure implements {missing_trait} \
+ as `{var_name}` implements {missing_trait}, but in Rust 2021, \
+ this closure will no longer implement {missing_trait} \
+ because `{var_name}` is not fully captured \
+ and `{captured_name}` does not implement {missing_trait}"));
+ }
+
+ // Cannot happen: if we don't capture a variable, we impl strictly more traits
+ UpvarMigrationInfo::CapturingNothing { use_span } => span_bug!(*use_span, "missing trait from not capturing something"),
+ }
+ }
+ }
+ }
+ lint.note("for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/disjoint-capture-in-closures.html>");
+
+ let diagnostic_msg = format!(
+ "add a dummy let to cause {} to be fully captured",
+ migrated_variables_concat
+ );
+
+ let closure_span = self.tcx.hir().span_with_body(closure_hir_id);
+ let mut closure_body_span = {
+ // If the body was entirely expanded from a macro
+ // invocation, i.e. the body is not contained inside the
+ // closure span, then we walk up the expansion until we
+ // find the span before the expansion.
+ let s = self.tcx.hir().span_with_body(body_id.hir_id);
+ s.find_ancestor_inside(closure_span).unwrap_or(s)
+ };
+
+ if let Ok(mut s) = self.tcx.sess.source_map().span_to_snippet(closure_body_span) {
+ if s.starts_with('$') {
+ // Looks like a macro fragment. Try to find the real block.
+ if let Some(hir::Node::Expr(&hir::Expr {
+ kind: hir::ExprKind::Block(block, ..), ..
+ })) = self.tcx.hir().find(body_id.hir_id) {
+ // If the body is a block (with `{..}`), we use the span of that block.
+ // E.g. with a `|| $body` expanded from a `m!({ .. })`, we use `{ .. }`, and not `$body`.
+ // Since we know it's a block, we know we can insert the `let _ = ..` without
+ // breaking the macro syntax.
+ if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(block.span) {
+ closure_body_span = block.span;
+ s = snippet;
+ }
+ }
+ }
+
+ let mut lines = s.lines();
+ let line1 = lines.next().unwrap_or_default();
+
+ if line1.trim_end() == "{" {
+ // This is a multi-line closure with just a `{` on the first line,
+ // so we put the `let` on its own line.
+ // We take the indentation from the next non-empty line.
+ let line2 = lines.find(|line| !line.is_empty()).unwrap_or_default();
+ let indent = line2.split_once(|c: char| !c.is_whitespace()).unwrap_or_default().0;
+ lint.span_suggestion(
+ closure_body_span.with_lo(closure_body_span.lo() + BytePos::from_usize(line1.len())).shrink_to_lo(),
+ &diagnostic_msg,
+ format!("\n{indent}{migration_string};"),
+ Applicability::MachineApplicable,
+ );
+ } else if line1.starts_with('{') {
+ // This is a closure with its body wrapped in
+ // braces, but with more than just the opening
+ // brace on the first line. We put the `let`
+ // directly after the `{`.
+ lint.span_suggestion(
+ closure_body_span.with_lo(closure_body_span.lo() + BytePos(1)).shrink_to_lo(),
+ &diagnostic_msg,
+ format!(" {migration_string};"),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ // This is a closure without braces around the body.
+ // We add braces to add the `let` before the body.
+ lint.multipart_suggestion(
+ &diagnostic_msg,
+ vec![
+ (closure_body_span.shrink_to_lo(), format!("{{ {migration_string}; ")),
+ (closure_body_span.shrink_to_hi(), " }".to_string()),
+ ],
+ Applicability::MachineApplicable
+ );
+ }
+ } else {
+ lint.span_suggestion(
+ closure_span,
+ &diagnostic_msg,
+ migration_string,
+ Applicability::HasPlaceholders
+ );
+ }
+
+ lint
+ },
+ );
+ }
+ }
+
+ /// Combines all the reasons for 2229 migrations
+ fn compute_2229_migrations_reasons(
+ &self,
+ auto_trait_reasons: FxHashSet<&'static str>,
+ drop_order: bool,
+ ) -> MigrationWarningReason {
+ let mut reasons = MigrationWarningReason::default();
+
+ reasons.auto_traits.extend(auto_trait_reasons);
+ reasons.drop_order = drop_order;
+
+ // `auto_trait_reasons` are in hashset order, so sort them to put the
+ // diagnostics we emit later in a cross-platform-consistent order.
+ reasons.auto_traits.sort_unstable();
+
+ reasons
+ }
+
+ /// Figures out the list of root variables (and their types) that aren't completely
+ /// captured by the closure when `capture_disjoint_fields` is enabled and auto-traits
+ /// differ between the root variable and the captured paths.
+ ///
+    /// Returns a HashMap from CapturesInfo to a HashSet of trait names if migration is needed
+    /// for traits for the provided var_hir_id, otherwise returns None.
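+    ///
+    /// A hedged user-level sketch (the types below are illustrative, not compiler code)
+    /// of the kind of mismatch this detects: the root variable implements one of the
+    /// checked traits, but the precisely captured path does not.
+    ///
+    /// ```ignore (illustrative)
+    /// struct NotClone;
+    /// struct S { a: i32, nc: NotClone }
+    /// impl Clone for S {
+    ///     fn clone(&self) -> S { S { a: self.a, nc: NotClone } }
+    /// }
+    ///
+    /// let s = S { a: 1, nc: NotClone };
+    /// // In Rust 2018 the closure captures all of `s` (which is `Clone`); in Rust 2021
+    /// // it captures only `s.nc`, which is not `Clone`, so the closure is no longer `Clone`.
+    /// let c = move || s.nc;
+    /// ```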
+ fn compute_2229_migrations_for_trait(
+ &self,
+ min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
+ var_hir_id: hir::HirId,
+ closure_clause: hir::CaptureBy,
+ ) -> Option<FxHashMap<UpvarMigrationInfo, FxHashSet<&'static str>>> {
+ let auto_traits_def_id = vec![
+ self.tcx.lang_items().clone_trait(),
+ self.tcx.lang_items().sync_trait(),
+ self.tcx.get_diagnostic_item(sym::Send),
+ self.tcx.lang_items().unpin_trait(),
+ self.tcx.get_diagnostic_item(sym::unwind_safe_trait),
+ self.tcx.get_diagnostic_item(sym::ref_unwind_safe_trait),
+ ];
+ const AUTO_TRAITS: [&str; 6] =
+ ["`Clone`", "`Sync`", "`Send`", "`Unpin`", "`UnwindSafe`", "`RefUnwindSafe`"];
+
+ let root_var_min_capture_list = min_captures.and_then(|m| m.get(&var_hir_id))?;
+
+ let ty = self.resolve_vars_if_possible(self.node_ty(var_hir_id));
+
+ let ty = match closure_clause {
+ hir::CaptureBy::Value => ty, // For move closure the capture kind should be by value
+ hir::CaptureBy::Ref => {
+ // For non move closure the capture kind is the max capture kind of all captures
+ // according to the ordering ImmBorrow < UniqueImmBorrow < MutBorrow < ByValue
+ let mut max_capture_info = root_var_min_capture_list.first().unwrap().info;
+ for capture in root_var_min_capture_list.iter() {
+ max_capture_info = determine_capture_info(max_capture_info, capture.info);
+ }
+
+ apply_capture_kind_on_capture_ty(
+ self.tcx,
+ ty,
+ max_capture_info.capture_kind,
+ Some(self.tcx.lifetimes.re_erased),
+ )
+ }
+ };
+
+ let mut obligations_should_hold = Vec::new();
+ // Checks if a root variable implements any of the auto traits
+ for check_trait in auto_traits_def_id.iter() {
+ obligations_should_hold.push(
+ check_trait
+ .map(|check_trait| {
+ self.infcx
+ .type_implements_trait(
+ check_trait,
+ ty,
+ self.tcx.mk_substs_trait(ty, &[]),
+ self.param_env,
+ )
+ .must_apply_modulo_regions()
+ })
+ .unwrap_or(false),
+ );
+ }
+
+ let mut problematic_captures = FxHashMap::default();
+ // Check whether captured fields also implement the trait
+ for capture in root_var_min_capture_list.iter() {
+ let ty = apply_capture_kind_on_capture_ty(
+ self.tcx,
+ capture.place.ty(),
+ capture.info.capture_kind,
+ Some(self.tcx.lifetimes.re_erased),
+ );
+
+ // Checks if a capture implements any of the auto traits
+ let mut obligations_holds_for_capture = Vec::new();
+ for check_trait in auto_traits_def_id.iter() {
+ obligations_holds_for_capture.push(
+ check_trait
+ .map(|check_trait| {
+ self.infcx
+ .type_implements_trait(
+ check_trait,
+ ty,
+ self.tcx.mk_substs_trait(ty, &[]),
+ self.param_env,
+ )
+ .must_apply_modulo_regions()
+ })
+ .unwrap_or(false),
+ );
+ }
+
+ let mut capture_problems = FxHashSet::default();
+
+ // Checks if for any of the auto traits, one or more trait is implemented
+ // by the root variable but not by the capture
+ for (idx, _) in obligations_should_hold.iter().enumerate() {
+ if !obligations_holds_for_capture[idx] && obligations_should_hold[idx] {
+ capture_problems.insert(AUTO_TRAITS[idx]);
+ }
+ }
+
+ if !capture_problems.is_empty() {
+ problematic_captures.insert(
+ UpvarMigrationInfo::CapturingPrecise {
+ source_expr: capture.info.path_expr_id,
+ var_name: capture.to_string(self.tcx),
+ },
+ capture_problems,
+ );
+ }
+ }
+ if !problematic_captures.is_empty() {
+ return Some(problematic_captures);
+ }
+ None
+ }
+
+ /// Figures out the list of root variables (and their types) that aren't completely
+ /// captured by the closure when `capture_disjoint_fields` is enabled and drop order of
+ /// some path starting at that root variable **might** be affected.
+ ///
+ /// The output list would include a root variable if:
+ /// - It would have been moved into the closure when `capture_disjoint_fields` wasn't
+ /// enabled, **and**
+ /// - It wasn't completely captured by the closure, **and**
+    /// - One of the paths starting at this root variable that is not captured needs Drop.
+ ///
+    /// This function only returns a HashSet of CapturesInfo for significant drops. If there
+    /// are no significant drops, then None is returned.
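+    ///
+    /// A hedged user-level sketch (the types below are illustrative, not compiler code)
+    /// of the drop-order change this detects:
+    ///
+    /// ```ignore (illustrative)
+    /// struct Droppy;
+    /// impl Drop for Droppy { fn drop(&mut self) {} }
+    /// struct S { used: Droppy, unused: Droppy }
+    ///
+    /// let s = S { used: Droppy, unused: Droppy };
+    /// // Only `s.used` is moved into the closure; `s.unused` needs Drop but stays
+    /// // outside, so its drop timing differs from Rust 2018 (where all of `s` moved).
+    /// let c = move || drop(s.used);
+    /// ```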
+ #[instrument(level = "debug", skip(self))]
+ fn compute_2229_migrations_for_drop(
+ &self,
+ closure_def_id: LocalDefId,
+ closure_span: Span,
+ min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
+ closure_clause: hir::CaptureBy,
+ var_hir_id: hir::HirId,
+ ) -> Option<FxHashSet<UpvarMigrationInfo>> {
+ let ty = self.resolve_vars_if_possible(self.node_ty(var_hir_id));
+
+ if !ty.has_significant_drop(self.tcx, self.tcx.param_env(closure_def_id)) {
+ debug!("does not have significant drop");
+ return None;
+ }
+
+ let Some(root_var_min_capture_list) = min_captures.and_then(|m| m.get(&var_hir_id)) else {
+ // The upvar is mentioned within the closure but no path starting from it is
+ // used. This occurs when you have (e.g.)
+ //
+ // ```
+ // let x = move || {
+ // let _ = y;
+ // });
+ // ```
+ debug!("no path starting from it is used");
+
+ match closure_clause {
+ // Only migrate if closure is a move closure
+ hir::CaptureBy::Value => {
+ let mut diagnostics_info = FxHashSet::default();
+ let upvars = self.tcx.upvars_mentioned(closure_def_id).expect("must be an upvar");
+ let upvar = upvars[&var_hir_id];
+ diagnostics_info.insert(UpvarMigrationInfo::CapturingNothing { use_span: upvar.span });
+ return Some(diagnostics_info);
+ }
+ hir::CaptureBy::Ref => {}
+ }
+
+ return None;
+ };
+ debug!(?root_var_min_capture_list);
+
+ let mut projections_list = Vec::new();
+ let mut diagnostics_info = FxHashSet::default();
+
+ for captured_place in root_var_min_capture_list.iter() {
+ match captured_place.info.capture_kind {
+ // Only care about captures that are moved into the closure
+ ty::UpvarCapture::ByValue => {
+ projections_list.push(captured_place.place.projections.as_slice());
+ diagnostics_info.insert(UpvarMigrationInfo::CapturingPrecise {
+ source_expr: captured_place.info.path_expr_id,
+ var_name: captured_place.to_string(self.tcx),
+ });
+ }
+ ty::UpvarCapture::ByRef(..) => {}
+ }
+ }
+
+ debug!(?projections_list);
+ debug!(?diagnostics_info);
+
+ let is_moved = !projections_list.is_empty();
+ debug!(?is_moved);
+
+ let is_not_completely_captured =
+ root_var_min_capture_list.iter().any(|capture| !capture.place.projections.is_empty());
+ debug!(?is_not_completely_captured);
+
+ if is_moved
+ && is_not_completely_captured
+ && self.has_significant_drop_outside_of_captures(
+ closure_def_id,
+ closure_span,
+ ty,
+ projections_list,
+ )
+ {
+ return Some(diagnostics_info);
+ }
+
+ None
+ }
+
+ /// Figures out the list of root variables (and their types) that aren't completely
+ /// captured by the closure when `capture_disjoint_fields` is enabled and either drop
+ /// order of some path starting at that root variable **might** be affected or auto-traits
+ /// differ between the root variable and the captured paths.
+ ///
+ /// The output list would include a root variable if:
+ /// - It would have been moved into the closure when `capture_disjoint_fields` wasn't
+ /// enabled, **and**
+ /// - It wasn't completely captured by the closure, **and**
+    /// - One of the paths starting at this root variable that is not captured needs Drop, **or**
+ /// - One of the paths captured does not implement all the auto-traits its root variable
+ /// implements.
+ ///
+    /// Returns a tuple containing a vector of NeededMigration, as well as a MigrationWarningReason
+    /// describing why the root variables whose HirId is contained in the vector should
+    /// be fully captured.
+ #[instrument(level = "debug", skip(self))]
+ fn compute_2229_migrations(
+ &self,
+ closure_def_id: LocalDefId,
+ closure_span: Span,
+ closure_clause: hir::CaptureBy,
+ min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
+ ) -> (Vec<NeededMigration>, MigrationWarningReason) {
+ let Some(upvars) = self.tcx.upvars_mentioned(closure_def_id) else {
+ return (Vec::new(), MigrationWarningReason::default());
+ };
+
+ let mut need_migrations = Vec::new();
+ let mut auto_trait_migration_reasons = FxHashSet::default();
+ let mut drop_migration_needed = false;
+
+ // Perform auto-trait analysis
+ for (&var_hir_id, _) in upvars.iter() {
+ let mut diagnostics_info = Vec::new();
+
+ let auto_trait_diagnostic = if let Some(diagnostics_info) =
+ self.compute_2229_migrations_for_trait(min_captures, var_hir_id, closure_clause)
+ {
+ diagnostics_info
+ } else {
+ FxHashMap::default()
+ };
+
+ let drop_reorder_diagnostic = if let Some(diagnostics_info) = self
+ .compute_2229_migrations_for_drop(
+ closure_def_id,
+ closure_span,
+ min_captures,
+ closure_clause,
+ var_hir_id,
+ ) {
+ drop_migration_needed = true;
+ diagnostics_info
+ } else {
+ FxHashSet::default()
+ };
+
+ // Combine all the captures responsible for needing migrations into one HashSet
+ let mut capture_diagnostic = drop_reorder_diagnostic.clone();
+ for key in auto_trait_diagnostic.keys() {
+ capture_diagnostic.insert(key.clone());
+ }
+
+ let mut capture_diagnostic = capture_diagnostic.into_iter().collect::<Vec<_>>();
+ capture_diagnostic.sort();
+ for captures_info in capture_diagnostic {
+ // Get the auto trait reasons of why migration is needed because of that capture, if there are any
+ let capture_trait_reasons =
+ if let Some(reasons) = auto_trait_diagnostic.get(&captures_info) {
+ reasons.clone()
+ } else {
+ FxHashSet::default()
+ };
+
+ // Check if migration is needed because of drop reorder as a result of that capture
+ let capture_drop_reorder_reason = drop_reorder_diagnostic.contains(&captures_info);
+
+ // Combine all the reasons of why the root variable should be captured as a result of
+ // auto trait implementation issues
+ auto_trait_migration_reasons.extend(capture_trait_reasons.iter().copied());
+
+ diagnostics_info.push(MigrationLintNote {
+ captures_info,
+ reason: self.compute_2229_migrations_reasons(
+ capture_trait_reasons,
+ capture_drop_reorder_reason,
+ ),
+ });
+ }
+
+ if !diagnostics_info.is_empty() {
+ need_migrations.push(NeededMigration { var_hir_id, diagnostics_info });
+ }
+ }
+ (
+ need_migrations,
+ self.compute_2229_migrations_reasons(
+ auto_trait_migration_reasons,
+ drop_migration_needed,
+ ),
+ )
+ }
+
+ /// This is a helper function to `compute_2229_migrations_precise_pass`. Provided the type
+ /// of a root variable and a list of captured paths starting at this root variable (expressed
+ /// using list of `Projection` slices), it returns true if there is a path that is not
+ /// captured starting at this root variable that implements Drop.
+ ///
+ /// The way this function works is at a given call it looks at type `base_path_ty` of some base
+ /// path say P and then list of projection slices which represent the different captures moved
+ /// into the closure starting off of P.
+ ///
+ /// This will make more sense with an example:
+ ///
+ /// ```rust
+ /// #![feature(capture_disjoint_fields)]
+ ///
+ /// struct FancyInteger(i32); // This implements Drop
+ ///
+ /// struct Point { x: FancyInteger, y: FancyInteger }
+ /// struct Color;
+ ///
+ /// struct Wrapper { p: Point, c: Color }
+ ///
+ /// fn f(w: Wrapper) {
+ /// let c = || {
+ /// // Closure captures w.p.x and w.c by move.
+ /// };
+ ///
+ /// c();
+ /// }
+ /// ```
+ ///
+ /// If `capture_disjoint_fields` wasn't enabled the closure would've moved `w` instead of the
+ /// precise paths. If we look closely `w.p.y` isn't captured which implements Drop and
+ /// therefore Drop ordering would change and we want this function to return true.
+ ///
+ /// Call stack to figure out if we need to migrate for `w` would look as follows:
+ ///
+ /// Our initial base path is just `w`, and the paths captured from it are `w[p, x]` and
+ /// `w[c]`.
+ /// Notation:
+ /// - Ty(place): Type of place
+ /// - `(a, b)`: Represents the function parameters `base_path_ty` and `captured_by_move_projs`
+ /// respectively.
+ /// ```ignore (illustrative)
+ /// (Ty(w), [ &[p, x], &[c] ])
+ /// // |
+ /// // ----------------------------
+ /// // | |
+ /// // v v
+ /// (Ty(w.p), [ &[x] ]) (Ty(w.c), [ &[] ]) // I(1)
+ /// // | |
+ /// // v v
+ /// (Ty(w.p), [ &[x] ]) false
+ /// // |
+ /// // |
+ /// // -------------------------------
+ /// // | |
+ /// // v v
+ /// (Ty((w.p).x), [ &[] ]) (Ty((w.p).y), []) // IMP 2
+ /// // | |
+ /// // v v
+ /// false NeedsSignificantDrop(Ty(w.p.y))
+ /// // |
+ /// // v
+ /// true
+ /// ```
+ ///
+ /// IMP 1 `(Ty(w.c), [ &[] ])`: Notice the single empty slice inside `captured_projs`.
+ /// This implies that the `w.c` is completely captured by the closure.
+ /// Since drop for this path will be called when the closure is
+ /// dropped we don't need to migrate for it.
+ ///
+ /// IMP 2 `(Ty((w.p).y), [])`: Notice that `captured_projs` is empty. This implies that this
+ /// path wasn't captured by the closure. Also note that even
+ /// though we didn't capture this path, the function visits it,
+ /// which is kind of the point of this function. We then return
+ /// if the type of `w.p.y` implements Drop, which in this case is
+ /// true.
+ ///
+ /// Consider another example:
+ ///
+ /// ```ignore (pseudo-rust)
+ /// struct X;
+ /// impl Drop for X {}
+ ///
+ /// struct Y(X);
+ /// impl Drop for Y {}
+ ///
+ /// fn foo() {
+ /// let y = Y(X);
+ /// let c = || move(y.0);
+ /// }
+ /// ```
+ ///
+ /// Note that `y.0` is captured by the closure. When this function is called for `y`, it will
+ /// return true, because even though all paths starting at `y` are captured, `y` itself
+ /// implements Drop which will be affected since `y` isn't completely captured.
+ fn has_significant_drop_outside_of_captures(
+ &self,
+ closure_def_id: LocalDefId,
+ closure_span: Span,
+ base_path_ty: Ty<'tcx>,
+ captured_by_move_projs: Vec<&[Projection<'tcx>]>,
+ ) -> bool {
+ let needs_drop =
+ |ty: Ty<'tcx>| ty.has_significant_drop(self.tcx, self.tcx.param_env(closure_def_id));
+
+ let is_drop_defined_for_ty = |ty: Ty<'tcx>| {
+ let drop_trait = self.tcx.require_lang_item(hir::LangItem::Drop, Some(closure_span));
+ let ty_params = self.tcx.mk_substs_trait(base_path_ty, &[]);
+ self.infcx
+ .type_implements_trait(
+ drop_trait,
+ ty,
+ ty_params,
+ self.tcx.param_env(closure_def_id),
+ )
+ .must_apply_modulo_regions()
+ };
+
+ let is_drop_defined_for_ty = is_drop_defined_for_ty(base_path_ty);
+
+ // If there is a case where no projection is applied on top of current place
+ // then there must be exactly one capture corresponding to such a case. Note that this
+ // represents the case of the path being completely captured by the variable.
+ //
+ // eg. If `a.b` is captured and we are processing `a.b`, then we can't have the closure also
+ // capture `a.b.c`, because that violates min capture.
+ let is_completely_captured = captured_by_move_projs.iter().any(|projs| projs.is_empty());
+
+ assert!(!is_completely_captured || (captured_by_move_projs.len() == 1));
+
+ if is_completely_captured {
+            // The place is captured entirely, so it doesn't matter whether it needs a dtor; it
+            // will be dropped when the closure is dropped.
+ return false;
+ }
+
+ if captured_by_move_projs.is_empty() {
+ return needs_drop(base_path_ty);
+ }
+
+ if is_drop_defined_for_ty {
+ // If drop is implemented for this type then we need it to be fully captured,
+ // and we know it is not completely captured because of the previous checks.
+
+ // Note that this is a bug in the user code that will be reported by the
+ // borrow checker, since we can't move out of drop types.
+
+ // The bug exists in the user's code pre-migration, and we don't migrate here.
+ return false;
+ }
+
+ match base_path_ty.kind() {
+ // Observations:
+ // - `captured_by_move_projs` is not empty. Therefore we can call
+ // `captured_by_move_projs.first().unwrap()` safely.
+ // - All entries in `captured_by_move_projs` have at least one projection.
+ // Therefore we can call `captured_by_move_projs.first().unwrap().first().unwrap()` safely.
+
+            // We don't capture derefs in the case of move captures, which would have to be
+            // applied to access any further paths.
+ ty::Adt(def, _) if def.is_box() => unreachable!(),
+ ty::Ref(..) => unreachable!(),
+ ty::RawPtr(..) => unreachable!(),
+
+ ty::Adt(def, substs) => {
+                // Multi-variant enums are captured in their entirety, so that case would already
+                // have been handled by the single-empty-slice check on `captured_by_move_projs`.
+ assert_eq!(def.variants().len(), 1);
+
+ // Only Field projections can be applied to a non-box Adt.
+ assert!(
+ captured_by_move_projs.iter().all(|projs| matches!(
+ projs.first().unwrap().kind,
+ ProjectionKind::Field(..)
+ ))
+ );
+ def.variants().get(VariantIdx::new(0)).unwrap().fields.iter().enumerate().any(
+ |(i, field)| {
+ let paths_using_field = captured_by_move_projs
+ .iter()
+ .filter_map(|projs| {
+ if let ProjectionKind::Field(field_idx, _) =
+ projs.first().unwrap().kind
+ {
+ if (field_idx as usize) == i { Some(&projs[1..]) } else { None }
+ } else {
+ unreachable!();
+ }
+ })
+ .collect();
+
+ let after_field_ty = field.ty(self.tcx, substs);
+ self.has_significant_drop_outside_of_captures(
+ closure_def_id,
+ closure_span,
+ after_field_ty,
+ paths_using_field,
+ )
+ },
+ )
+ }
+
+ ty::Tuple(fields) => {
+ // Only Field projections can be applied to a tuple.
+ assert!(
+ captured_by_move_projs.iter().all(|projs| matches!(
+ projs.first().unwrap().kind,
+ ProjectionKind::Field(..)
+ ))
+ );
+
+ fields.iter().enumerate().any(|(i, element_ty)| {
+ let paths_using_field = captured_by_move_projs
+ .iter()
+ .filter_map(|projs| {
+ if let ProjectionKind::Field(field_idx, _) = projs.first().unwrap().kind
+ {
+ if (field_idx as usize) == i { Some(&projs[1..]) } else { None }
+ } else {
+ unreachable!();
+ }
+ })
+ .collect();
+
+ self.has_significant_drop_outside_of_captures(
+ closure_def_id,
+ closure_span,
+ element_ty,
+ paths_using_field,
+ )
+ })
+ }
+
+ // Anything else would be completely captured and therefore handled already.
+ _ => unreachable!(),
+ }
+ }
+
+ fn init_capture_kind_for_place(
+ &self,
+ place: &Place<'tcx>,
+ capture_clause: hir::CaptureBy,
+ ) -> ty::UpvarCapture {
+ match capture_clause {
+            // In the case of a move closure, if the data is accessed through a reference we
+            // want to capture by ref to allow precise capture using reborrows.
+ //
+ // If the data will be moved out of this place, then the place will be truncated
+ // at the first Deref in `adjust_upvar_borrow_kind_for_consume` and then moved into
+ // the closure.
+ hir::CaptureBy::Value if !place.deref_tys().any(Ty::is_ref) => {
+ ty::UpvarCapture::ByValue
+ }
+ hir::CaptureBy::Value | hir::CaptureBy::Ref => ty::UpvarCapture::ByRef(ty::ImmBorrow),
+ }
+ }
+
+ fn place_for_root_variable(
+ &self,
+ closure_def_id: LocalDefId,
+ var_hir_id: hir::HirId,
+ ) -> Place<'tcx> {
+ let upvar_id = ty::UpvarId::new(var_hir_id, closure_def_id);
+
+ Place {
+ base_ty: self.node_ty(var_hir_id),
+ base: PlaceBase::Upvar(upvar_id),
+ projections: Default::default(),
+ }
+ }
+
+ fn should_log_capture_analysis(&self, closure_def_id: LocalDefId) -> bool {
+ self.tcx.has_attr(closure_def_id.to_def_id(), sym::rustc_capture_analysis)
+ }
+
+ fn log_capture_analysis_first_pass(
+ &self,
+ closure_def_id: LocalDefId,
+ capture_information: &InferredCaptureInformation<'tcx>,
+ closure_span: Span,
+ ) {
+ if self.should_log_capture_analysis(closure_def_id) {
+ let mut diag =
+ self.tcx.sess.struct_span_err(closure_span, "First Pass analysis includes:");
+ for (place, capture_info) in capture_information {
+ let capture_str = construct_capture_info_string(self.tcx, place, capture_info);
+ let output_str = format!("Capturing {capture_str}");
+
+ let span =
+ capture_info.path_expr_id.map_or(closure_span, |e| self.tcx.hir().span(e));
+ diag.span_note(span, &output_str);
+ }
+ diag.emit();
+ }
+ }
+
+ fn log_closure_min_capture_info(&self, closure_def_id: LocalDefId, closure_span: Span) {
+ if self.should_log_capture_analysis(closure_def_id) {
+ if let Some(min_captures) =
+ self.typeck_results.borrow().closure_min_captures.get(&closure_def_id)
+ {
+ let mut diag =
+ self.tcx.sess.struct_span_err(closure_span, "Min Capture analysis includes:");
+
+ for (_, min_captures_for_var) in min_captures {
+ for capture in min_captures_for_var {
+ let place = &capture.place;
+ let capture_info = &capture.info;
+
+ let capture_str =
+ construct_capture_info_string(self.tcx, place, capture_info);
+ let output_str = format!("Min Capture {capture_str}");
+
+ if capture.info.path_expr_id != capture.info.capture_kind_expr_id {
+ let path_span = capture_info
+ .path_expr_id
+ .map_or(closure_span, |e| self.tcx.hir().span(e));
+ let capture_kind_span = capture_info
+ .capture_kind_expr_id
+ .map_or(closure_span, |e| self.tcx.hir().span(e));
+
+ let mut multi_span: MultiSpan =
+ MultiSpan::from_spans(vec![path_span, capture_kind_span]);
+
+ let capture_kind_label =
+ construct_capture_kind_reason_string(self.tcx, place, capture_info);
+ let path_label = construct_path_string(self.tcx, place);
+
+ multi_span.push_span_label(path_span, path_label);
+ multi_span.push_span_label(capture_kind_span, capture_kind_label);
+
+ diag.span_note(multi_span, &output_str);
+ } else {
+ let span = capture_info
+ .path_expr_id
+ .map_or(closure_span, |e| self.tcx.hir().span(e));
+
+ diag.span_note(span, &output_str);
+ };
+ }
+ }
+ diag.emit();
+ }
+ }
+ }
+
+ /// A captured place is mutable if
+ /// 1. Projections don't include a Deref of an immut-borrow, **and**
+ /// 2. PlaceBase is mut or projections include a Deref of a mut-borrow.
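+    ///
+    /// As a rough illustrative sketch (the bindings here are hypothetical, not taken from the
+    /// surrounding code):
+    ///
+    /// ```ignore (illustrative)
+    /// let mut s = String::new();
+    /// let r = &mut s;
+    /// // Captured place `(*r)`: mutable, since the only deref is of a mut-borrow (rule 2).
+    ///
+    /// let t = (String::new(), String::new());
+    /// let ir = &t;
+    /// // Captured place `(*ir).0`: not mutable, since the projections contain a deref of an
+    /// // immut-borrow (rule 1).
+    /// ```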
+ fn determine_capture_mutability(
+ &self,
+ typeck_results: &'a TypeckResults<'tcx>,
+ place: &Place<'tcx>,
+ ) -> hir::Mutability {
+ let var_hir_id = match place.base {
+ PlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ _ => unreachable!(),
+ };
+
+ let bm = *typeck_results.pat_binding_modes().get(var_hir_id).expect("missing binding mode");
+
+ let mut is_mutbl = match bm {
+ ty::BindByValue(mutability) => mutability,
+ ty::BindByReference(_) => hir::Mutability::Not,
+ };
+
+ for pointer_ty in place.deref_tys() {
+ match pointer_ty.kind() {
+ // We don't capture derefs of raw ptrs
+ ty::RawPtr(_) => unreachable!(),
+
+ // Dereferencing a mut-ref allows us to mut the Place if we don't deref
+ // an immut-ref after on top of this.
+ ty::Ref(.., hir::Mutability::Mut) => is_mutbl = hir::Mutability::Mut,
+
+ // The place isn't mutable once we dereference an immutable reference.
+ ty::Ref(.., hir::Mutability::Not) => return hir::Mutability::Not,
+
+ // Dereferencing a box doesn't change mutability
+ ty::Adt(def, ..) if def.is_box() => {}
+
+ unexpected_ty => bug!("deref of unexpected pointer type {:?}", unexpected_ty),
+ }
+ }
+
+ is_mutbl
+ }
+}
+
+/// Truncate the capture so that the place being borrowed is in accordance with RFC 1240,
+/// which states that it's unsafe to take a reference into a struct marked `repr(packed)`.
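+///
+/// As an illustrative sketch (the struct below is hypothetical, not from the surrounding code):
+///
+/// ```ignore (illustrative)
+/// #[repr(packed)]
+/// struct Packed { x: u32, y: u8 }
+///
+/// let p = Packed { x: 1, y: 2 };
+/// // A by-ref capture of the place `p.x` would point into the packed struct, and `u32` has
+/// // alignment > 1, so the place is truncated to `p` and the whole struct is borrowed instead.
+/// // A by-ref capture of `p.y` would be left alone, since `u8` has alignment 1.
+/// ```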
+fn restrict_repr_packed_field_ref_capture<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ mut place: Place<'tcx>,
+ mut curr_borrow_kind: ty::UpvarCapture,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ let pos = place.projections.iter().enumerate().position(|(i, p)| {
+ let ty = place.ty_before_projection(i);
+
+ // Return true for fields of packed structs, unless those fields have alignment 1.
+ match p.kind {
+ ProjectionKind::Field(..) => match ty.kind() {
+ ty::Adt(def, _) if def.repr().packed() => {
+ // We erase regions here because they cannot be hashed
+ match tcx.layout_of(param_env.and(tcx.erase_regions(p.ty))) {
+ Ok(layout) if layout.align.abi.bytes() == 1 => {
+ // if the alignment is 1, the type can't be further
+ // disaligned.
+ debug!(
+ "restrict_repr_packed_field_ref_capture: ({:?}) - align = 1",
+ place
+ );
+ false
+ }
+ _ => {
+ debug!("restrict_repr_packed_field_ref_capture: ({:?}) - true", place);
+ true
+ }
+ }
+ }
+
+ _ => false,
+ },
+ _ => false,
+ }
+ });
+
+ if let Some(pos) = pos {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_borrow_kind, pos);
+ }
+
+ (place, curr_borrow_kind)
+}
+
+/// Returns a `Ty` that reflects the specified capture kind applied to the provided capture `Ty`.
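+///
+/// Rough sketch of the mapping (illustrative only):
+///
+/// ```ignore (illustrative)
+/// // ByValue:           String -> String
+/// // ByRef(ImmBorrow):  String -> &'region String
+/// // ByRef(MutBorrow):  String -> &'region mut String
+/// ```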
+fn apply_capture_kind_on_capture_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ capture_kind: UpvarCapture,
+ region: Option<ty::Region<'tcx>>,
+) -> Ty<'tcx> {
+ match capture_kind {
+ ty::UpvarCapture::ByValue => ty,
+ ty::UpvarCapture::ByRef(kind) => {
+            tcx.mk_ref(region.unwrap(), ty::TypeAndMut { ty, mutbl: kind.to_mutbl_lossy() })
+ }
+ }
+}
+
+/// Returns the Span of where the value with the provided HirId would be dropped
+fn drop_location_span<'tcx>(tcx: TyCtxt<'tcx>, hir_id: hir::HirId) -> Span {
+ let owner_id = tcx.hir().get_enclosing_scope(hir_id).unwrap();
+
+ let owner_node = tcx.hir().get(owner_id);
+ let owner_span = match owner_node {
+ hir::Node::Item(item) => match item.kind {
+ hir::ItemKind::Fn(_, _, owner_id) => tcx.hir().span(owner_id.hir_id),
+ _ => {
+ bug!("Drop location span error: need to handle more ItemKind '{:?}'", item.kind);
+ }
+ },
+ hir::Node::Block(block) => tcx.hir().span(block.hir_id),
+ hir::Node::TraitItem(item) => tcx.hir().span(item.hir_id()),
+ hir::Node::ImplItem(item) => tcx.hir().span(item.hir_id()),
+ _ => {
+ bug!("Drop location span error: need to handle more Node '{:?}'", owner_node);
+ }
+ };
+ tcx.sess.source_map().end_point(owner_span)
+}
+
+struct InferBorrowKind<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+
+ // The def-id of the closure whose kind and upvar accesses are being inferred.
+ closure_def_id: LocalDefId,
+
+ /// For each Place that is captured by the closure, we track the minimal kind of
+ /// access we need (ref, ref mut, move, etc) and the expression that resulted in such access.
+ ///
+    /// Consider a closure where `s.str1` is captured via an ImmutableBorrow and
+    /// `s.str2` via a MutableBorrow:
+ ///
+ /// ```rust,no_run
+ /// struct SomeStruct { str1: String, str2: String };
+ ///
+ /// // Assume that the HirId for the variable definition is `V1`
+ /// let mut s = SomeStruct { str1: format!("s1"), str2: format!("s2") };
+ ///
+ /// let fix_s = |new_s2| {
+ /// // Assume that the HirId for the expression `s.str1` is `E1`
+ /// println!("Updating SomeStruct with str1={0}", s.str1);
+ /// // Assume that the HirId for the expression `*s.str2` is `E2`
+ /// s.str2 = new_s2;
+ /// };
+ /// ```
+ ///
+ /// For closure `fix_s`, (at a high level) the map contains
+ ///
+ /// ```ignore (illustrative)
+ /// Place { V1, [ProjectionKind::Field(Index=0, Variant=0)] } : CaptureKind { E1, ImmutableBorrow }
+ /// Place { V1, [ProjectionKind::Field(Index=1, Variant=0)] } : CaptureKind { E2, MutableBorrow }
+ /// ```
+ capture_information: InferredCaptureInformation<'tcx>,
+ fake_reads: Vec<(Place<'tcx>, FakeReadCause, hir::HirId)>,
+}
+
+impl<'a, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'tcx> {
+ fn fake_read(
+ &mut self,
+ place: &PlaceWithHirId<'tcx>,
+ cause: FakeReadCause,
+ diag_expr_id: hir::HirId,
+ ) {
+ let PlaceBase::Upvar(_) = place.place.base else { return };
+
+ // We need to restrict Fake Read precision to avoid fake reading unsafe code,
+ // such as deref of a raw pointer.
+ let dummy_capture_kind = ty::UpvarCapture::ByRef(ty::BorrowKind::ImmBorrow);
+
+ let (place, _) = restrict_capture_precision(place.place.clone(), dummy_capture_kind);
+
+ let (place, _) = restrict_repr_packed_field_ref_capture(
+ self.fcx.tcx,
+ self.fcx.param_env,
+ place,
+ dummy_capture_kind,
+ );
+ self.fake_reads.push((place, cause, diag_expr_id));
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
+ let PlaceBase::Upvar(upvar_id) = place_with_id.place.base else { return };
+ assert_eq!(self.closure_def_id, upvar_id.closure_expr_id);
+
+ self.capture_information.push((
+ place_with_id.place.clone(),
+ ty::CaptureInfo {
+ capture_kind_expr_id: Some(diag_expr_id),
+ path_expr_id: Some(diag_expr_id),
+ capture_kind: ty::UpvarCapture::ByValue,
+ },
+ ));
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn borrow(
+ &mut self,
+ place_with_id: &PlaceWithHirId<'tcx>,
+ diag_expr_id: hir::HirId,
+ bk: ty::BorrowKind,
+ ) {
+ let PlaceBase::Upvar(upvar_id) = place_with_id.place.base else { return };
+ assert_eq!(self.closure_def_id, upvar_id.closure_expr_id);
+
+ // The region here will get discarded/ignored
+ let capture_kind = ty::UpvarCapture::ByRef(bk);
+
+ // We only want repr packed restriction to be applied to reading references into a packed
+ // struct, and not when the data is being moved. Therefore we call this method here instead
+ // of in `restrict_capture_precision`.
+ let (place, mut capture_kind) = restrict_repr_packed_field_ref_capture(
+ self.fcx.tcx,
+ self.fcx.param_env,
+ place_with_id.place.clone(),
+ capture_kind,
+ );
+
+ // Raw pointers don't inherit mutability
+ if place_with_id.place.deref_tys().any(Ty::is_unsafe_ptr) {
+ capture_kind = ty::UpvarCapture::ByRef(ty::BorrowKind::ImmBorrow);
+ }
+
+ self.capture_information.push((
+ place,
+ ty::CaptureInfo {
+ capture_kind_expr_id: Some(diag_expr_id),
+ path_expr_id: Some(diag_expr_id),
+ capture_kind,
+ },
+ ));
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
+ self.borrow(assignee_place, diag_expr_id, ty::BorrowKind::MutBorrow);
+ }
+}
+
+/// Rust doesn't permit moving fields out of a type that implements `Drop`.
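+///
+/// Illustrative sketch (the `Guard` type below is hypothetical, not from the surrounding code):
+///
+/// ```ignore (illustrative)
+/// struct Guard { name: String }
+/// impl Drop for Guard { fn drop(&mut self) {} }
+///
+/// let g = Guard { name: String::new() };
+/// let c = move || g.name;
+/// // Capturing `g.name` by value would move a field out of a `Drop` type, so the captured
+/// // place is truncated to `g` and the whole value is moved into the closure instead.
+/// ```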
+fn restrict_precision_for_drop_types<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ mut place: Place<'tcx>,
+ mut curr_mode: ty::UpvarCapture,
+ span: Span,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ let is_copy_type = fcx.infcx.type_is_copy_modulo_regions(fcx.param_env, place.ty(), span);
+
+ if let (false, UpvarCapture::ByValue) = (is_copy_type, curr_mode) {
+ for i in 0..place.projections.len() {
+ match place.ty_before_projection(i).kind() {
+ ty::Adt(def, _) if def.destructor(fcx.tcx).is_some() => {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, i);
+ break;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ (place, curr_mode)
+}
+
+/// Truncate `place` so that an `unsafe` block isn't required to capture it.
+/// - No projections are applied to raw pointers, since these require unsafe blocks. We capture
+/// them completely.
+/// - No projections are applied on top of Union ADTs, since these require unsafe blocks.
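+///
+/// Illustrative sketch (the place below is hypothetical, not from the surrounding code):
+///
+/// ```ignore (illustrative)
+/// let ptr: *const (u32, u32) = std::ptr::null();
+/// let c = || unsafe { (*ptr).0 };
+/// // The place `(*ptr).0` dereferences a raw pointer, so it is truncated to just `ptr`,
+/// // which is captured in its entirety; the deref then happens inside the closure body.
+/// ```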
+fn restrict_precision_for_unsafe<'tcx>(
+ mut place: Place<'tcx>,
+ mut curr_mode: ty::UpvarCapture,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ if place.base_ty.is_unsafe_ptr() {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, 0);
+ }
+
+ if place.base_ty.is_union() {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, 0);
+ }
+
+ for (i, proj) in place.projections.iter().enumerate() {
+ if proj.ty.is_unsafe_ptr() {
+ // Don't apply any projections on top of an unsafe ptr.
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, i + 1);
+ break;
+ }
+
+ if proj.ty.is_union() {
+ // Don't capture precise fields of a union.
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, i + 1);
+ break;
+ }
+ }
+
+ (place, curr_mode)
+}
+
+/// Truncate projections so that the following rules are obeyed by the captured `place`:
+/// - No Index projections are captured, since arrays are captured completely.
+/// - No unsafe block is required to capture `place`.
+///
+/// Returns the truncated place and the updated capture mode.
+fn restrict_capture_precision<'tcx>(
+ place: Place<'tcx>,
+ curr_mode: ty::UpvarCapture,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ let (mut place, mut curr_mode) = restrict_precision_for_unsafe(place, curr_mode);
+
+ if place.projections.is_empty() {
+ // Nothing to do here
+ return (place, curr_mode);
+ }
+
+ for (i, proj) in place.projections.iter().enumerate() {
+ match proj.kind {
+ ProjectionKind::Index => {
+ // Arrays are completely captured, so we drop Index projections
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, i);
+ return (place, curr_mode);
+ }
+ ProjectionKind::Deref => {}
+ ProjectionKind::Field(..) => {} // ignore
+ ProjectionKind::Subslice => {} // We never capture this
+ }
+ }
+
+ (place, curr_mode)
+}
+
+/// Truncate deref of any reference.
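+///
+/// Illustrative sketch (the bindings below are hypothetical, not from the surrounding code):
+///
+/// ```ignore (illustrative)
+/// let s = (String::new(), String::new());
+/// let r = &s;
+/// let c = move || (*r).0.len();
+/// // For a `move` closure, the place `(*r).0` is truncated at the first deref, so what
+/// // actually gets moved into the closure is `r` itself (by value).
+/// ```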
+fn adjust_for_move_closure<'tcx>(
+ mut place: Place<'tcx>,
+ mut kind: ty::UpvarCapture,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ let first_deref = place.projections.iter().position(|proj| proj.kind == ProjectionKind::Deref);
+
+ if let Some(idx) = first_deref {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut kind, idx);
+ }
+
+ (place, ty::UpvarCapture::ByValue)
+}
+
+/// Adjust the closure capture so that, when taking ownership of data, we only move data
+/// from the enclosing stack frame.
+fn adjust_for_non_move_closure<'tcx>(
+ mut place: Place<'tcx>,
+ mut kind: ty::UpvarCapture,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ let contains_deref =
+ place.projections.iter().position(|proj| proj.kind == ProjectionKind::Deref);
+
+ match kind {
+ ty::UpvarCapture::ByValue => {
+ if let Some(idx) = contains_deref {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut kind, idx);
+ }
+ }
+
+ ty::UpvarCapture::ByRef(..) => {}
+ }
+
+ (place, kind)
+}
+
+fn construct_place_string<'tcx>(tcx: TyCtxt<'_>, place: &Place<'tcx>) -> String {
+ let variable_name = match place.base {
+ PlaceBase::Upvar(upvar_id) => var_name(tcx, upvar_id.var_path.hir_id).to_string(),
+ _ => bug!("Capture_information should only contain upvars"),
+ };
+
+ let mut projections_str = String::new();
+ for (i, item) in place.projections.iter().enumerate() {
+ let proj = match item.kind {
+ ProjectionKind::Field(a, b) => format!("({:?}, {:?})", a, b),
+ ProjectionKind::Deref => String::from("Deref"),
+ ProjectionKind::Index => String::from("Index"),
+ ProjectionKind::Subslice => String::from("Subslice"),
+ };
+ if i != 0 {
+ projections_str.push(',');
+ }
+ projections_str.push_str(proj.as_str());
+ }
+
+ format!("{variable_name}[{projections_str}]")
+}
+
+fn construct_capture_kind_reason_string<'tcx>(
+ tcx: TyCtxt<'_>,
+ place: &Place<'tcx>,
+ capture_info: &ty::CaptureInfo,
+) -> String {
+ let place_str = construct_place_string(tcx, place);
+
+ let capture_kind_str = match capture_info.capture_kind {
+ ty::UpvarCapture::ByValue => "ByValue".into(),
+ ty::UpvarCapture::ByRef(kind) => format!("{:?}", kind),
+ };
+
+ format!("{place_str} captured as {capture_kind_str} here")
+}
+
+fn construct_path_string<'tcx>(tcx: TyCtxt<'_>, place: &Place<'tcx>) -> String {
+ let place_str = construct_place_string(tcx, place);
+
+ format!("{place_str} used here")
+}
+
+fn construct_capture_info_string<'tcx>(
+ tcx: TyCtxt<'_>,
+ place: &Place<'tcx>,
+ capture_info: &ty::CaptureInfo,
+) -> String {
+ let place_str = construct_place_string(tcx, place);
+
+ let capture_kind_str = match capture_info.capture_kind {
+ ty::UpvarCapture::ByValue => "ByValue".into(),
+ ty::UpvarCapture::ByRef(kind) => format!("{:?}", kind),
+ };
+ format!("{place_str} -> {capture_kind_str}")
+}
+
+fn var_name(tcx: TyCtxt<'_>, var_hir_id: hir::HirId) -> Symbol {
+ tcx.hir().name(var_hir_id)
+}
+
+#[instrument(level = "debug", skip(tcx))]
+fn should_do_rust_2021_incompatible_closure_captures_analysis(
+ tcx: TyCtxt<'_>,
+ closure_id: hir::HirId,
+) -> bool {
+ if tcx.sess.rust_2021() {
+ return false;
+ }
+
+ let (level, _) =
+ tcx.lint_level_at_node(lint::builtin::RUST_2021_INCOMPATIBLE_CLOSURE_CAPTURES, closure_id);
+
+ !matches!(level, lint::Level::Allow)
+}
+
+/// Returns a tuple of two strings (s1, s2):
+/// - s1: Line of code that is needed for the migration, e.g. `let _ = (&x, ...)`.
+/// - s2: Comma separated names of the variables being migrated.
+fn migration_suggestion_for_2229(
+ tcx: TyCtxt<'_>,
+ need_migrations: &[NeededMigration],
+) -> (String, String) {
+ let need_migrations_variables = need_migrations
+ .iter()
+ .map(|NeededMigration { var_hir_id: v, .. }| var_name(tcx, *v))
+ .collect::<Vec<_>>();
+
+ let migration_ref_concat =
+ need_migrations_variables.iter().map(|v| format!("&{v}")).collect::<Vec<_>>().join(", ");
+
+    let migration_string = if need_migrations.len() == 1 {
+ format!("let _ = {migration_ref_concat}")
+ } else {
+ format!("let _ = ({migration_ref_concat})")
+ };
+
+ let migrated_variables_concat =
+ need_migrations_variables.iter().map(|v| format!("`{v}`")).collect::<Vec<_>>().join(", ");
+
+ (migration_string, migrated_variables_concat)
+}
+
+/// Helper function that determines whether we need to escalate the CaptureKind from
+/// CaptureInfo A to B, returning the escalated CaptureInfo.
+/// (Note: a CaptureInfo contains a CaptureKind and the expression that led to capturing it that way.)
+///
+/// If both `CaptureKind`s are considered equivalent, then the CaptureInfo is selected based
+/// on the `CaptureInfo` containing an associated `capture_kind_expr_id`.
+///
+/// It is the caller's duty to figure out which path_expr_id to use.
+///
+/// If both the CaptureKind and Expression are considered to be equivalent,
+/// then `CaptureInfo` A is preferred. This can be useful in cases where we want to prioritize
+/// expressions reported back to the user as part of diagnostics based on which appears earlier
+/// in the closure. This can be achieved simply by calling
+/// `determine_capture_info(existing_info, current_info)`. This works out because the
+/// expressions that occur earlier in the closure body than the current expression are processed before.
+/// Consider the following example
+/// ```rust,no_run
+/// struct Point { x: i32, y: i32 }
+/// let mut p = Point { x: 10, y: 10 };
+///
+/// let c = || {
+/// p.x += 10;
+/// // ^ E1 ^
+/// // ...
+/// // More code
+/// // ...
+/// p.x += 10; // E2
+/// // ^ E2 ^
+/// };
+/// ```
+/// `CaptureKind` associated with both `E1` and `E2` will be ByRef(MutBorrow),
+/// and both have an expression associated, however for diagnostics we prefer reporting
+/// `E1` since it appears earlier in the closure body. When `E2` is being processed we
+/// would've already handled `E1`, and have an existing capture_information for it.
+/// Calling `determine_capture_info(existing_info_e1, current_info_e2)` will return
+/// `existing_info_e1` in this case, allowing us to point to `E1` in case of diagnostics.
+fn determine_capture_info(
+ capture_info_a: ty::CaptureInfo,
+ capture_info_b: ty::CaptureInfo,
+) -> ty::CaptureInfo {
+ // If the capture kind is equivalent then, we don't need to escalate and can compare the
+ // expressions.
+ let eq_capture_kind = match (capture_info_a.capture_kind, capture_info_b.capture_kind) {
+ (ty::UpvarCapture::ByValue, ty::UpvarCapture::ByValue) => true,
+ (ty::UpvarCapture::ByRef(ref_a), ty::UpvarCapture::ByRef(ref_b)) => ref_a == ref_b,
+ (ty::UpvarCapture::ByValue, _) | (ty::UpvarCapture::ByRef(_), _) => false,
+ };
+
+ if eq_capture_kind {
+ match (capture_info_a.capture_kind_expr_id, capture_info_b.capture_kind_expr_id) {
+ (Some(_), _) | (None, None) => capture_info_a,
+ (None, Some(_)) => capture_info_b,
+ }
+ } else {
+ // We select the CaptureKind which ranks higher based the following priority order:
+ // ByValue > MutBorrow > UniqueImmBorrow > ImmBorrow
+ match (capture_info_a.capture_kind, capture_info_b.capture_kind) {
+ (ty::UpvarCapture::ByValue, _) => capture_info_a,
+ (_, ty::UpvarCapture::ByValue) => capture_info_b,
+ (ty::UpvarCapture::ByRef(ref_a), ty::UpvarCapture::ByRef(ref_b)) => {
+ match (ref_a, ref_b) {
+ // Take LHS:
+ (ty::UniqueImmBorrow | ty::MutBorrow, ty::ImmBorrow)
+ | (ty::MutBorrow, ty::UniqueImmBorrow) => capture_info_a,
+
+ // Take RHS:
+ (ty::ImmBorrow, ty::UniqueImmBorrow | ty::MutBorrow)
+ | (ty::UniqueImmBorrow, ty::MutBorrow) => capture_info_b,
+
+ (ty::ImmBorrow, ty::ImmBorrow)
+ | (ty::UniqueImmBorrow, ty::UniqueImmBorrow)
+ | (ty::MutBorrow, ty::MutBorrow) => {
+ bug!("Expected unequal capture kinds");
+ }
+ }
+ }
+ }
+ }
+}
+
+/// Truncates `place` to have up to `len` projections.
+/// `curr_mode` is the current required capture kind for the place.
+/// Returns the truncated `place` and the updated required capture kind.
+///
+/// Note: Capture kind changes from `MutBorrow` to `UniqueImmBorrow` if the truncated part of the `place`
+/// contained a `Deref` of a `&mut`.
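+///
+/// Illustrative sketch of the kind adjustment (hypothetical place, not from the surrounding code):
+///
+/// ```ignore (illustrative)
+/// // Suppose `m: &mut S` and the closure mutates `(*m).x`, giving a MutBorrow of `(*m).x`.
+/// // Truncating that place to just `m` drops a `Deref` of a `&mut`, so the required capture
+/// // kind becomes UniqueImmBorrow: the closure only needs a unique borrow of `m` to write
+/// // through it, not a mutable borrow of `m` itself.
+/// ```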
+fn truncate_place_to_len_and_update_capture_kind<'tcx>(
+ place: &mut Place<'tcx>,
+ curr_mode: &mut ty::UpvarCapture,
+ len: usize,
+) {
+ let is_mut_ref = |ty: Ty<'_>| matches!(ty.kind(), ty::Ref(.., hir::Mutability::Mut));
+
+ // If the truncated part of the place contains `Deref` of a `&mut` then convert MutBorrow ->
+ // UniqueImmBorrow
+ // Note that if the place contained Deref of a raw pointer it would've not been MutBorrow, so
+ // we don't need to worry about that case here.
+ match curr_mode {
+ ty::UpvarCapture::ByRef(ty::BorrowKind::MutBorrow) => {
+ for i in len..place.projections.len() {
+ if place.projections[i].kind == ProjectionKind::Deref
+ && is_mut_ref(place.ty_before_projection(i))
+ {
+ *curr_mode = ty::UpvarCapture::ByRef(ty::BorrowKind::UniqueImmBorrow);
+ break;
+ }
+ }
+ }
+
+ ty::UpvarCapture::ByRef(..) => {}
+ ty::UpvarCapture::ByValue => {}
+ }
+
+ place.projections.truncate(len);
+}
+
+/// Determines the Ancestry relationship of Place A relative to Place B
+///
+/// `PlaceAncestryRelation::Ancestor` implies Place A is ancestor of Place B
+/// `PlaceAncestryRelation::Descendant` implies Place A is descendant of Place B
+/// `PlaceAncestryRelation::SamePlace` implies Place A and Place B are the same place.
+/// `PlaceAncestryRelation::Divergent` implies neither of them is the ancestor of the other.
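+///
+/// Illustrative sketch (hypothetical places):
+///
+/// ```ignore (illustrative)
+/// // A = a.b,   B = a.b.c  => Ancestor   (A is an ancestor of B)
+/// // A = a.b.c, B = a.b    => Descendant
+/// // A = a.b,   B = a.b    => SamePlace
+/// // A = a.b,   B = a.d    => Divergent
+/// ```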
+fn determine_place_ancestry_relation<'tcx>(
+ place_a: &Place<'tcx>,
+ place_b: &Place<'tcx>,
+) -> PlaceAncestryRelation {
+    // If Place A and Place B don't start off from the same root variable, they are divergent.
+ if place_a.base != place_b.base {
+ return PlaceAncestryRelation::Divergent;
+ }
+
+    // Assume the length of projections_a is n.
+ let projections_a = &place_a.projections;
+
+    // Assume the length of projections_b is m.
+ let projections_b = &place_b.projections;
+
+ let same_initial_projections =
+ iter::zip(projections_a, projections_b).all(|(proj_a, proj_b)| proj_a.kind == proj_b.kind);
+
+ if same_initial_projections {
+ use std::cmp::Ordering;
+
+ // First min(n, m) projections are the same
+ // Select Ancestor/Descendant
+ match projections_b.len().cmp(&projections_a.len()) {
+ Ordering::Greater => PlaceAncestryRelation::Ancestor,
+ Ordering::Equal => PlaceAncestryRelation::SamePlace,
+ Ordering::Less => PlaceAncestryRelation::Descendant,
+ }
+ } else {
+ PlaceAncestryRelation::Divergent
+ }
+}
+
+/// Reduces the precision of the captured place when the precision doesn't yield any benefit from
+/// a borrow-checking perspective, allowing us to save on the size of the capture.
+///
+/// Fields that are read through a shared reference will always be read via a shared ref or a copy,
+/// and therefore capturing precise paths yields no benefit. This optimization truncates the
+/// rightmost deref of the capture if the deref is applied to a shared ref.
+///
+/// The reason we only drop the last deref is the following edge case:
+///
+/// ```
+/// # struct A { field_of_a: Box<i32> }
+/// # struct B {}
+/// # struct C<'a>(&'a i32);
+/// struct MyStruct<'a> {
+/// a: &'static A,
+/// b: B,
+/// c: C<'a>,
+/// }
+///
+/// fn foo<'a, 'b>(m: &'a MyStruct<'b>) -> impl FnMut() + 'static {
+/// || drop(&*m.a.field_of_a)
+/// // Here we really do want to capture `*m.a` because that outlives `'static`
+///
+///     // If we captured `m`, then the closure would no longer outlive `'static`;
+///     // it would be constrained to `'a`.
+/// }
+/// ```
+fn truncate_capture_for_optimization<'tcx>(
+ mut place: Place<'tcx>,
+ mut curr_mode: ty::UpvarCapture,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ let is_shared_ref = |ty: Ty<'_>| matches!(ty.kind(), ty::Ref(.., hir::Mutability::Not));
+
+ // Find the right-most deref (if any). All the projections that come after this
+ // are fields or other "in-place pointer adjustments"; these refer therefore to
+ // data owned by whatever pointer is being dereferenced here.
+ let idx = place.projections.iter().rposition(|proj| ProjectionKind::Deref == proj.kind);
+
+ match idx {
+ // If that pointer is a shared reference, then we don't need those fields.
+ Some(idx) if is_shared_ref(place.ty_before_projection(idx)) => {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, idx + 1)
+ }
+ None | Some(_) => {}
+ }
+
+ (place, curr_mode)
+}
+
+/// Precise capture is enabled if the feature gate `capture_disjoint_fields` is enabled or if
+/// the user is using Rust Edition 2021 or higher.
+///
+/// `span` is the span of the closure.
+fn enable_precise_capture(tcx: TyCtxt<'_>, span: Span) -> bool {
+    // We check the edition of the closure's span so that, if the closure was generated by a
+    // macro with a different edition, we use the edition of that macro.
+ tcx.features().capture_disjoint_fields || span.rust_2021()
+}
diff --git a/compiler/rustc_hir_typeck/src/writeback.rs b/compiler/rustc_hir_typeck/src/writeback.rs
new file mode 100644
index 000000000..1e26daa9c
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/writeback.rs
@@ -0,0 +1,807 @@
+// Type resolution: the phase that finds all the types in the AST with
+// unresolved type variables and replaces "ty_var" types with their
+// substitutions.
+
+use crate::FnCtxt;
+use hir::def_id::LocalDefId;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_infer::infer::error_reporting::TypeAnnotationNeeded::E0282;
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::hir::place::Place as HirPlace;
+use rustc_middle::mir::FakeReadCause;
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCast};
+use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
+use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitable};
+use rustc_middle::ty::TypeckResults;
+use rustc_middle::ty::{self, ClosureSizeProfileData, Ty, TyCtxt};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+use std::mem;
+use std::ops::ControlFlow;
+
+///////////////////////////////////////////////////////////////////////////
+// Entry point
+
+// During type inference, partially inferred types are
+// represented using Type variables (ty::Infer). These don't appear in
+// the final TypeckResults since all of the types should have been
+// inferred once typeck is done.
+// While type inference is running, however, having to update the typeck
+// results every time a new type is inferred would be unreasonably slow,
+// so instead all of the replacement happens at the end in
+// resolve_type_vars_in_body, which creates a new TypeckResults that
+// doesn't contain any inference types.
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn resolve_type_vars_in_body(
+ &self,
+ body: &'tcx hir::Body<'tcx>,
+ ) -> &'tcx ty::TypeckResults<'tcx> {
+ let item_id = self.tcx.hir().body_owner(body.id());
+ let item_def_id = self.tcx.hir().local_def_id(item_id);
+
+ // This attribute causes us to dump some writeback information
+ // in the form of errors, which is used for unit tests.
+ let rustc_dump_user_substs =
+ self.tcx.has_attr(item_def_id.to_def_id(), sym::rustc_dump_user_substs);
+
+ let mut wbcx = WritebackCx::new(self, body, rustc_dump_user_substs);
+ for param in body.params {
+ wbcx.visit_node_id(param.pat.span, param.hir_id);
+ }
+ // Type only exists for constants and statics, not functions.
+ match self.tcx.hir().body_owner_kind(item_def_id) {
+ hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_) => {
+ wbcx.visit_node_id(body.value.span, item_id);
+ }
+ hir::BodyOwnerKind::Closure | hir::BodyOwnerKind::Fn => (),
+ }
+ wbcx.visit_body(body);
+ wbcx.visit_min_capture_map();
+ wbcx.eval_closure_size();
+ wbcx.visit_fake_reads_map();
+ wbcx.visit_closures();
+ wbcx.visit_liberated_fn_sigs();
+ wbcx.visit_fru_field_types();
+ wbcx.visit_opaque_types();
+ wbcx.visit_coercion_casts();
+ wbcx.visit_user_provided_tys();
+ wbcx.visit_user_provided_sigs();
+ wbcx.visit_generator_interior_types();
+
+ wbcx.typeck_results.rvalue_scopes =
+ mem::take(&mut self.typeck_results.borrow_mut().rvalue_scopes);
+
+ let used_trait_imports =
+ mem::take(&mut self.typeck_results.borrow_mut().used_trait_imports);
+ debug!("used_trait_imports({:?}) = {:?}", item_def_id, used_trait_imports);
+ wbcx.typeck_results.used_trait_imports = used_trait_imports;
+
+ wbcx.typeck_results.treat_byte_string_as_slice =
+ mem::take(&mut self.typeck_results.borrow_mut().treat_byte_string_as_slice);
+
+ if self.is_tainted_by_errors() {
+ // FIXME(eddyb) keep track of `ErrorGuaranteed` from where the error was emitted.
+ wbcx.typeck_results.tainted_by_errors =
+ Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
+ }
+
+ debug!("writeback: typeck results for {:?} are {:#?}", item_def_id, wbcx.typeck_results);
+
+ self.tcx.arena.alloc(wbcx.typeck_results)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// The Writeback context. This visitor walks the HIR, checking the
+// fn-specific typeck results to find references to types or regions. It
+// resolves those regions to remove inference variables and writes the
+// final result back into the master typeck results in the tcx. Here and
+// there, it applies a few ad-hoc checks that were not convenient to
+// do elsewhere.
+
+struct WritebackCx<'cx, 'tcx> {
+ fcx: &'cx FnCtxt<'cx, 'tcx>,
+
+ typeck_results: ty::TypeckResults<'tcx>,
+
+ body: &'tcx hir::Body<'tcx>,
+
+ rustc_dump_user_substs: bool,
+}
+
+impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
+ fn new(
+ fcx: &'cx FnCtxt<'cx, 'tcx>,
+ body: &'tcx hir::Body<'tcx>,
+ rustc_dump_user_substs: bool,
+ ) -> WritebackCx<'cx, 'tcx> {
+ let owner = body.id().hir_id.owner;
+
+ WritebackCx {
+ fcx,
+ typeck_results: ty::TypeckResults::new(owner),
+ body,
+ rustc_dump_user_substs,
+ }
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.fcx.tcx
+ }
+
+ fn write_ty_to_typeck_results(&mut self, hir_id: hir::HirId, ty: Ty<'tcx>) {
+ debug!("write_ty_to_typeck_results({:?}, {:?})", hir_id, ty);
+ assert!(!ty.needs_infer() && !ty.has_placeholders() && !ty.has_free_regions());
+ self.typeck_results.node_types_mut().insert(hir_id, ty);
+ }
+
+ // Hacky hack: During type-checking, we treat *all* operators
+ // as potentially overloaded. But then, during writeback, if
+ // we observe that something like `a+b` is (known to be)
+ // operating on scalars, we clear the overload.
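+    //
+    // For example (illustrative): in `1u32 + 2u32` both operand types resolve to scalars, so
+    // the recorded method resolution (`Add::add`) and its substs are removed here, and later
+    // phases treat the expression as a primitive addition.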
+ fn fix_scalar_builtin_expr(&mut self, e: &hir::Expr<'_>) {
+ match e.kind {
+ hir::ExprKind::Unary(hir::UnOp::Neg | hir::UnOp::Not, inner) => {
+ let inner_ty = self.fcx.node_ty(inner.hir_id);
+ let inner_ty = self.fcx.resolve_vars_if_possible(inner_ty);
+
+ if inner_ty.is_scalar() {
+ let mut typeck_results = self.fcx.typeck_results.borrow_mut();
+ typeck_results.type_dependent_defs_mut().remove(e.hir_id);
+ typeck_results.node_substs_mut().remove(e.hir_id);
+ }
+ }
+ hir::ExprKind::Binary(ref op, lhs, rhs) | hir::ExprKind::AssignOp(ref op, lhs, rhs) => {
+ let lhs_ty = self.fcx.node_ty(lhs.hir_id);
+ let lhs_ty = self.fcx.resolve_vars_if_possible(lhs_ty);
+
+ let rhs_ty = self.fcx.node_ty(rhs.hir_id);
+ let rhs_ty = self.fcx.resolve_vars_if_possible(rhs_ty);
+
+ if lhs_ty.is_scalar() && rhs_ty.is_scalar() {
+ let mut typeck_results = self.fcx.typeck_results.borrow_mut();
+ typeck_results.type_dependent_defs_mut().remove(e.hir_id);
+ typeck_results.node_substs_mut().remove(e.hir_id);
+
+ match e.kind {
+ hir::ExprKind::Binary(..) => {
+ if !op.node.is_by_value() {
+ let mut adjustments = typeck_results.adjustments_mut();
+ if let Some(a) = adjustments.get_mut(lhs.hir_id) {
+ a.pop();
+ }
+ if let Some(a) = adjustments.get_mut(rhs.hir_id) {
+ a.pop();
+ }
+ }
+ }
+ hir::ExprKind::AssignOp(..)
+ if let Some(a) = typeck_results.adjustments_mut().get_mut(lhs.hir_id) =>
+ {
+ a.pop();
+ }
+ _ => {}
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+
+    // (ouz-a 1005988): Normally `[T] : std::ops::Index<usize>` should be normalized
+    // into `[T]`, but currently a `Where` clause stops the normalization process for it.
+    // Here we compare the types of the expr and the base: in code without a `Where` clause
+    // they would be equal, and if they are not we don't modify the expr, hence we bypass the ICE.
+ fn is_builtin_index(
+ &mut self,
+ typeck_results: &TypeckResults<'tcx>,
+ e: &hir::Expr<'_>,
+ base_ty: Ty<'tcx>,
+ index_ty: Ty<'tcx>,
+ ) -> bool {
+ if let Some(elem_ty) = base_ty.builtin_index() {
+ let Some(exp_ty) = typeck_results.expr_ty_opt(e) else {return false;};
+ let resolved_exp_ty = self.resolve(exp_ty, &e.span);
+
+ elem_ty == resolved_exp_ty && index_ty == self.fcx.tcx.types.usize
+ } else {
+ false
+ }
+ }
+
+ // Similar to operators, indexing is always assumed to be overloaded
+ // Here, correct cases where an indexing expression can be simplified
+ // to use builtin indexing because the index type is known to be
+ // usize-ish
+ fn fix_index_builtin_expr(&mut self, e: &hir::Expr<'_>) {
+ if let hir::ExprKind::Index(ref base, ref index) = e.kind {
+ let mut typeck_results = self.fcx.typeck_results.borrow_mut();
+
+ // All valid indexing looks like this; might encounter non-valid indexes at this point.
+ let base_ty = typeck_results
+ .expr_ty_adjusted_opt(base)
+ .map(|t| self.fcx.resolve_vars_if_possible(t).kind());
+ if base_ty.is_none() {
+ // When encountering `return [0][0]` outside of a `fn` body we can encounter a base
+ // that isn't in the type table. We assume more relevant errors have already been
+ // emitted, so we delay an ICE if none have. (#64638)
+ self.tcx().sess.delay_span_bug(e.span, &format!("bad base: `{:?}`", base));
+ }
+ if let Some(ty::Ref(_, base_ty, _)) = base_ty {
+ let index_ty = typeck_results.expr_ty_adjusted_opt(index).unwrap_or_else(|| {
+                    // When encountering `return [0][0]` outside of a `fn` body we would attempt
+                    // to access a nonexistent index. We assume that more relevant errors will
+ // already have been emitted, so we only gate on this with an ICE if no
+ // error has been emitted. (#64638)
+ self.fcx.tcx.ty_error_with_message(
+ e.span,
+ &format!("bad index {:?} for base: `{:?}`", index, base),
+ )
+ });
+ let index_ty = self.fcx.resolve_vars_if_possible(index_ty);
+ let resolved_base_ty = self.resolve(*base_ty, &base.span);
+
+ if self.is_builtin_index(&typeck_results, e, resolved_base_ty, index_ty) {
+ // Remove the method call record
+ typeck_results.type_dependent_defs_mut().remove(e.hir_id);
+ typeck_results.node_substs_mut().remove(e.hir_id);
+
+ if let Some(a) = typeck_results.adjustments_mut().get_mut(base.hir_id) {
+ // Discard the need for a mutable borrow
+
+ // Extra adjustment made when indexing causes a drop
+ // of size information - we need to get rid of it
+ // Since this is "after" the other adjustment to be
+ // discarded, we do an extra `pop()`
+ if let Some(Adjustment {
+ kind: Adjust::Pointer(PointerCast::Unsize), ..
+ }) = a.pop()
+ {
+ // So the borrow discard actually happens here
+ a.pop();
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Impl of Visitor for WritebackCx
+//
+// This is the master code which walks the HIR. It delegates most of
+// the heavy lifting to the generic visit and resolve functions
+// below. In general, a function is made into a `visitor` if it must
+// traffic in node-ids or update typeck results in the type context etc.
+
+impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> {
+ fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
+ self.fix_scalar_builtin_expr(e);
+ self.fix_index_builtin_expr(e);
+
+ match e.kind {
+ hir::ExprKind::Closure(&hir::Closure { body, .. }) => {
+ let body = self.fcx.tcx.hir().body(body);
+ for param in body.params {
+ self.visit_node_id(e.span, param.hir_id);
+ }
+
+ self.visit_body(body);
+ }
+ hir::ExprKind::Struct(_, fields, _) => {
+ for field in fields {
+ self.visit_field_id(field.hir_id);
+ }
+ }
+ hir::ExprKind::Field(..) => {
+ self.visit_field_id(e.hir_id);
+ }
+ hir::ExprKind::ConstBlock(anon_const) => {
+ self.visit_node_id(e.span, anon_const.hir_id);
+
+ let body = self.tcx().hir().body(anon_const.body);
+ self.visit_body(body);
+ }
+ _ => {}
+ }
+
+ self.visit_node_id(e.span, e.hir_id);
+ intravisit::walk_expr(self, e);
+ }
+
+ fn visit_generic_param(&mut self, p: &'tcx hir::GenericParam<'tcx>) {
+ match &p.kind {
+ hir::GenericParamKind::Lifetime { .. } => {
+ // Nothing to write back here
+ }
+ hir::GenericParamKind::Type { .. } | hir::GenericParamKind::Const { .. } => {
+ self.tcx().sess.delay_span_bug(p.span, format!("unexpected generic param: {p:?}"));
+ }
+ }
+ }
+
+ fn visit_block(&mut self, b: &'tcx hir::Block<'tcx>) {
+ self.visit_node_id(b.span, b.hir_id);
+ intravisit::walk_block(self, b);
+ }
+
+ fn visit_pat(&mut self, p: &'tcx hir::Pat<'tcx>) {
+ match p.kind {
+ hir::PatKind::Binding(..) => {
+ let typeck_results = self.fcx.typeck_results.borrow();
+ if let Some(bm) =
+ typeck_results.extract_binding_mode(self.tcx().sess, p.hir_id, p.span)
+ {
+ self.typeck_results.pat_binding_modes_mut().insert(p.hir_id, bm);
+ }
+ }
+ hir::PatKind::Struct(_, fields, _) => {
+ for field in fields {
+ self.visit_field_id(field.hir_id);
+ }
+ }
+ _ => {}
+ };
+
+ self.visit_pat_adjustments(p.span, p.hir_id);
+
+ self.visit_node_id(p.span, p.hir_id);
+ intravisit::walk_pat(self, p);
+ }
+
+ fn visit_local(&mut self, l: &'tcx hir::Local<'tcx>) {
+ intravisit::walk_local(self, l);
+ let var_ty = self.fcx.local_ty(l.span, l.hir_id).decl_ty;
+ let var_ty = self.resolve(var_ty, &l.span);
+ self.write_ty_to_typeck_results(l.hir_id, var_ty);
+ }
+
+ fn visit_ty(&mut self, hir_ty: &'tcx hir::Ty<'tcx>) {
+ intravisit::walk_ty(self, hir_ty);
+ let ty = self.fcx.node_ty(hir_ty.hir_id);
+ let ty = self.resolve(ty, &hir_ty.span);
+ self.write_ty_to_typeck_results(hir_ty.hir_id, ty);
+ }
+
+ fn visit_infer(&mut self, inf: &'tcx hir::InferArg) {
+ intravisit::walk_inf(self, inf);
+ // Ignore cases where the inference is a const.
+ if let Some(ty) = self.fcx.node_ty_opt(inf.hir_id) {
+ let ty = self.resolve(ty, &inf.span);
+ self.write_ty_to_typeck_results(inf.hir_id, ty);
+ }
+ }
+}
+
+impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
+ fn eval_closure_size(&mut self) {
+ let mut res: FxHashMap<LocalDefId, ClosureSizeProfileData<'tcx>> = Default::default();
+ for (&closure_def_id, data) in self.fcx.typeck_results.borrow().closure_size_eval.iter() {
+ let closure_hir_id = self.tcx().hir().local_def_id_to_hir_id(closure_def_id);
+
+ let data = self.resolve(*data, &closure_hir_id);
+
+ res.insert(closure_def_id, data);
+ }
+
+ self.typeck_results.closure_size_eval = res;
+ }
+ fn visit_min_capture_map(&mut self) {
+ let mut min_captures_wb = ty::MinCaptureInformationMap::with_capacity_and_hasher(
+ self.fcx.typeck_results.borrow().closure_min_captures.len(),
+ Default::default(),
+ );
+ for (&closure_def_id, root_min_captures) in
+ self.fcx.typeck_results.borrow().closure_min_captures.iter()
+ {
+ let mut root_var_map_wb = ty::RootVariableMinCaptureList::with_capacity_and_hasher(
+ root_min_captures.len(),
+ Default::default(),
+ );
+ for (var_hir_id, min_list) in root_min_captures.iter() {
+ let min_list_wb = min_list
+ .iter()
+ .map(|captured_place| {
+ let locatable = captured_place.info.path_expr_id.unwrap_or_else(|| {
+ self.tcx().hir().local_def_id_to_hir_id(closure_def_id)
+ });
+
+ self.resolve(captured_place.clone(), &locatable)
+ })
+ .collect();
+ root_var_map_wb.insert(*var_hir_id, min_list_wb);
+ }
+ min_captures_wb.insert(closure_def_id, root_var_map_wb);
+ }
+
+ self.typeck_results.closure_min_captures = min_captures_wb;
+ }
+
+ fn visit_fake_reads_map(&mut self) {
+ let mut resolved_closure_fake_reads: FxHashMap<
+ LocalDefId,
+ Vec<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>,
+ > = Default::default();
+ for (&closure_def_id, fake_reads) in
+ self.fcx.typeck_results.borrow().closure_fake_reads.iter()
+ {
+ let mut resolved_fake_reads = Vec::<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>::new();
+ for (place, cause, hir_id) in fake_reads.iter() {
+ let locatable = self.tcx().hir().local_def_id_to_hir_id(closure_def_id);
+
+ let resolved_fake_read = self.resolve(place.clone(), &locatable);
+ resolved_fake_reads.push((resolved_fake_read, *cause, *hir_id));
+ }
+ resolved_closure_fake_reads.insert(closure_def_id, resolved_fake_reads);
+ }
+ self.typeck_results.closure_fake_reads = resolved_closure_fake_reads;
+ }
+
+ fn visit_closures(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+ let common_hir_owner = fcx_typeck_results.hir_owner;
+
+ for (id, origin) in fcx_typeck_results.closure_kind_origins().iter() {
+ let hir_id = hir::HirId { owner: common_hir_owner, local_id: *id };
+ let place_span = origin.0;
+ let place = self.resolve(origin.1.clone(), &place_span);
+ self.typeck_results.closure_kind_origins_mut().insert(hir_id, (place_span, place));
+ }
+ }
+
+ fn visit_coercion_casts(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ let fcx_coercion_casts = fcx_typeck_results.coercion_casts();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+
+ for local_id in fcx_coercion_casts {
+ self.typeck_results.set_coercion_cast(*local_id);
+ }
+ }
+
+ fn visit_user_provided_tys(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+ let common_hir_owner = fcx_typeck_results.hir_owner;
+
+ let mut errors_buffer = Vec::new();
+ for (&local_id, c_ty) in fcx_typeck_results.user_provided_types().iter() {
+ let hir_id = hir::HirId { owner: common_hir_owner, local_id };
+
+ if cfg!(debug_assertions) && c_ty.needs_infer() {
+ span_bug!(
+ hir_id.to_span(self.fcx.tcx),
+ "writeback: `{:?}` has inference variables",
+ c_ty
+ );
+ };
+
+ self.typeck_results.user_provided_types_mut().insert(hir_id, *c_ty);
+
+ if let ty::UserType::TypeOf(_, user_substs) = c_ty.value {
+ if self.rustc_dump_user_substs {
+ // This is a unit-testing mechanism.
+ let span = self.tcx().hir().span(hir_id);
+ // We need to buffer the errors in order to guarantee a consistent
+ // order when emitting them.
+ let err = self
+ .tcx()
+ .sess
+ .struct_span_err(span, &format!("user substs: {:?}", user_substs));
+ err.buffer(&mut errors_buffer);
+ }
+ }
+ }
+
+ if !errors_buffer.is_empty() {
+ errors_buffer.sort_by_key(|diag| diag.span.primary_span());
+ for mut diag in errors_buffer {
+ self.tcx().sess.diagnostic().emit_diagnostic(&mut diag);
+ }
+ }
+ }
+
+ fn visit_user_provided_sigs(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+
+ for (&def_id, c_sig) in fcx_typeck_results.user_provided_sigs.iter() {
+ if cfg!(debug_assertions) && c_sig.needs_infer() {
+ span_bug!(
+ self.fcx.tcx.hir().span_if_local(def_id).unwrap(),
+ "writeback: `{:?}` has inference variables",
+ c_sig
+ );
+ };
+
+ self.typeck_results.user_provided_sigs.insert(def_id, *c_sig);
+ }
+ }
+
+ fn visit_generator_interior_types(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+ self.typeck_results.generator_interior_types =
+ fcx_typeck_results.generator_interior_types.clone();
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn visit_opaque_types(&mut self) {
+ let opaque_types =
+ self.fcx.infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+ for (opaque_type_key, decl) in opaque_types {
+ let hidden_type = self.resolve(decl.hidden_type, &decl.hidden_type.span);
+ let opaque_type_key = self.resolve(opaque_type_key, &decl.hidden_type.span);
+
+ struct RecursionChecker {
+ def_id: LocalDefId,
+ }
+ impl<'tcx> ty::TypeVisitor<'tcx> for RecursionChecker {
+ type BreakTy = ();
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::Opaque(def_id, _) = *t.kind() {
+ if def_id == self.def_id.to_def_id() {
+ return ControlFlow::Break(());
+ }
+ }
+ t.super_visit_with(self)
+ }
+ }
+ if hidden_type
+ .visit_with(&mut RecursionChecker { def_id: opaque_type_key.def_id })
+ .is_break()
+ {
+ continue;
+ }
+
+ let hidden_type = hidden_type.remap_generic_params_to_declaration_params(
+ opaque_type_key,
+ self.fcx.infcx.tcx,
+ true,
+ decl.origin,
+ );
+
+ self.typeck_results.concrete_opaque_types.insert(opaque_type_key.def_id, hidden_type);
+ }
+ }
+
+ fn visit_field_id(&mut self, hir_id: hir::HirId) {
+ if let Some(index) = self.fcx.typeck_results.borrow_mut().field_indices_mut().remove(hir_id)
+ {
+ self.typeck_results.field_indices_mut().insert(hir_id, index);
+ }
+ }
+
+ #[instrument(skip(self, span), level = "debug")]
+ fn visit_node_id(&mut self, span: Span, hir_id: hir::HirId) {
+ // Export associated path extensions and method resolutions.
+ if let Some(def) =
+ self.fcx.typeck_results.borrow_mut().type_dependent_defs_mut().remove(hir_id)
+ {
+ self.typeck_results.type_dependent_defs_mut().insert(hir_id, def);
+ }
+
+ // Resolve any borrowings for the node with id `node_id`
+ self.visit_adjustments(span, hir_id);
+
+ // Resolve the type of the node with id `node_id`
+ let n_ty = self.fcx.node_ty(hir_id);
+ let n_ty = self.resolve(n_ty, &span);
+ self.write_ty_to_typeck_results(hir_id, n_ty);
+ debug!(?n_ty);
+
+ // Resolve any substitutions
+ if let Some(substs) = self.fcx.typeck_results.borrow().node_substs_opt(hir_id) {
+ let substs = self.resolve(substs, &span);
+ debug!("write_substs_to_tcx({:?}, {:?})", hir_id, substs);
+ assert!(!substs.needs_infer() && !substs.has_placeholders());
+ self.typeck_results.node_substs_mut().insert(hir_id, substs);
+ }
+ }
+
+ #[instrument(skip(self, span), level = "debug")]
+ fn visit_adjustments(&mut self, span: Span, hir_id: hir::HirId) {
+ let adjustment = self.fcx.typeck_results.borrow_mut().adjustments_mut().remove(hir_id);
+ match adjustment {
+ None => {
+ debug!("no adjustments for node");
+ }
+
+ Some(adjustment) => {
+ let resolved_adjustment = self.resolve(adjustment, &span);
+ debug!(?resolved_adjustment);
+ self.typeck_results.adjustments_mut().insert(hir_id, resolved_adjustment);
+ }
+ }
+ }
+
+ #[instrument(skip(self, span), level = "debug")]
+ fn visit_pat_adjustments(&mut self, span: Span, hir_id: hir::HirId) {
+ let adjustment = self.fcx.typeck_results.borrow_mut().pat_adjustments_mut().remove(hir_id);
+ match adjustment {
+ None => {
+ debug!("no pat_adjustments for node");
+ }
+
+ Some(adjustment) => {
+ let resolved_adjustment = self.resolve(adjustment, &span);
+ debug!(?resolved_adjustment);
+ self.typeck_results.pat_adjustments_mut().insert(hir_id, resolved_adjustment);
+ }
+ }
+ }
+
+ fn visit_liberated_fn_sigs(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+ let common_hir_owner = fcx_typeck_results.hir_owner;
+
+ for (&local_id, &fn_sig) in fcx_typeck_results.liberated_fn_sigs().iter() {
+ let hir_id = hir::HirId { owner: common_hir_owner, local_id };
+ let fn_sig = self.resolve(fn_sig, &hir_id);
+ self.typeck_results.liberated_fn_sigs_mut().insert(hir_id, fn_sig);
+ }
+ }
+
+ fn visit_fru_field_types(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+ let common_hir_owner = fcx_typeck_results.hir_owner;
+
+ for (&local_id, ftys) in fcx_typeck_results.fru_field_types().iter() {
+ let hir_id = hir::HirId { owner: common_hir_owner, local_id };
+ let ftys = self.resolve(ftys.clone(), &hir_id);
+ self.typeck_results.fru_field_types_mut().insert(hir_id, ftys);
+ }
+ }
+
+ fn resolve<T>(&mut self, x: T, span: &dyn Locatable) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let mut resolver = Resolver::new(self.fcx, span, self.body);
+ let x = x.fold_with(&mut resolver);
+ if cfg!(debug_assertions) && x.needs_infer() {
+ span_bug!(span.to_span(self.fcx.tcx), "writeback: `{:?}` has inference variables", x);
+ }
+
+ // We may have introduced e.g. `ty::Error`, if inference failed, make sure
+ // to mark the `TypeckResults` as tainted in that case, so that downstream
+ // users of the typeck results don't produce extra errors, or worse, ICEs.
+ if resolver.replaced_with_error {
+ // FIXME(eddyb) keep track of `ErrorGuaranteed` from where the error was emitted.
+ self.typeck_results.tainted_by_errors =
+ Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
+ }
+
+ x
+ }
+}
+
+pub(crate) trait Locatable {
+ fn to_span(&self, tcx: TyCtxt<'_>) -> Span;
+}
+
+impl Locatable for Span {
+ fn to_span(&self, _: TyCtxt<'_>) -> Span {
+ *self
+ }
+}
+
+impl Locatable for hir::HirId {
+ fn to_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.hir().span(*self)
+ }
+}
+
+/// The Resolver. This is the type folding engine that detects
+/// unresolved types and so forth.
+struct Resolver<'cx, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ infcx: &'cx InferCtxt<'tcx>,
+ span: &'cx dyn Locatable,
+ body: &'tcx hir::Body<'tcx>,
+
+ /// Set to `true` if any `Ty` or `ty::Const` had to be replaced with an `Error`.
+ replaced_with_error: bool,
+}
+
+impl<'cx, 'tcx> Resolver<'cx, 'tcx> {
+ fn new(
+ fcx: &'cx FnCtxt<'cx, 'tcx>,
+ span: &'cx dyn Locatable,
+ body: &'tcx hir::Body<'tcx>,
+ ) -> Resolver<'cx, 'tcx> {
+ Resolver { tcx: fcx.tcx, infcx: fcx, span, body, replaced_with_error: false }
+ }
+
+ fn report_error(&self, p: impl Into<ty::GenericArg<'tcx>>) {
+        if self.tcx.sess.has_errors().is_none() {
+ self.infcx
+ .err_ctxt()
+ .emit_inference_failure_err(
+ Some(self.body.id()),
+ self.span.to_span(self.tcx),
+ p.into(),
+ E0282,
+ false,
+ )
+ .emit();
+ }
+ }
+}
+
+struct EraseEarlyRegions<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> TypeFolder<'tcx> for EraseEarlyRegions<'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if ty.has_type_flags(ty::TypeFlags::HAS_FREE_REGIONS) {
+ ty.super_fold_with(self)
+ } else {
+ ty
+ }
+ }
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ if r.is_late_bound() { r } else { self.tcx.lifetimes.re_erased }
+ }
+}
+
+impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ match self.infcx.fully_resolve(t) {
+ Ok(t) => {
+ // Do not anonymize late-bound regions
+ // (e.g. keep `for<'a>` named `for<'a>`).
+ // This allows NLL to generate error messages that
+ // refer to the higher-ranked lifetime names written by the user.
+ EraseEarlyRegions { tcx: self.tcx }.fold_ty(t)
+ }
+ Err(_) => {
+ debug!("Resolver::fold_ty: input type `{:?}` not fully resolvable", t);
+ self.report_error(t);
+ self.replaced_with_error = true;
+ self.tcx().ty_error()
+ }
+ }
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ debug_assert!(!r.is_late_bound(), "Should not be resolving bound region.");
+ self.tcx.lifetimes.re_erased
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ match self.infcx.fully_resolve(ct) {
+ Ok(ct) => self.tcx.erase_regions(ct),
+ Err(_) => {
+ debug!("Resolver::fold_const: input const `{:?}` not fully resolvable", ct);
+ self.report_error(ct);
+ self.replaced_with_error = true;
+ self.tcx().const_error(ct.ty())
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// During type check, we store promises with the result of trait
+// lookup rather than the actual results (because the results are not
+// necessarily available immediately). These routines unwind the
+// promises. It is expected that we will have already reported any
+// errors that may be encountered, so if the promises store an error,
+// a dummy result is returned.