From 698f8c2f01ea549d77d7dc3338a12e04c11057b9 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 17 Apr 2024 14:02:58 +0200
Subject: Adding upstream version 1.64.0+dfsg1.

Signed-off-by: Daniel Baumann
---
 compiler/rustc_typeck/Cargo.toml | 32 +
 compiler/rustc_typeck/README.md | 5 +
 compiler/rustc_typeck/src/astconv/errors.rs | 410 +++
 compiler/rustc_typeck/src/astconv/generics.rs | 664 ++++
 compiler/rustc_typeck/src/astconv/mod.rs | 3091 ++++++++++++++++++
 compiler/rustc_typeck/src/bounds.rs | 90 +
 compiler/rustc_typeck/src/check/_match.rs | 529 +++
 compiler/rustc_typeck/src/check/autoderef.rs | 78 +
 compiler/rustc_typeck/src/check/callee.rs | 675 ++++
 compiler/rustc_typeck/src/check/cast.rs | 1072 +++++++
 compiler/rustc_typeck/src/check/check.rs | 1712 ++++++++++
 compiler/rustc_typeck/src/check/closure.rs | 805 +++++
 compiler/rustc_typeck/src/check/coercion.rs | 1804 +++++++++++
 compiler/rustc_typeck/src/check/compare_method.rs | 1547 +++++++++
 compiler/rustc_typeck/src/check/demand.rs | 1442 +++++++++
 compiler/rustc_typeck/src/check/diverges.rs | 78 +
 compiler/rustc_typeck/src/check/dropck.rs | 327 ++
 compiler/rustc_typeck/src/check/expectation.rs | 122 +
 compiler/rustc_typeck/src/check/expr.rs | 2824 ++++++++++++++++
 compiler/rustc_typeck/src/check/fallback.rs | 398 +++
 compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs | 1510 +++++++++
 .../rustc_typeck/src/check/fn_ctxt/arg_matrix.rs | 376 +++
 compiler/rustc_typeck/src/check/fn_ctxt/checks.rs | 1900 +++++++++++
 compiler/rustc_typeck/src/check/fn_ctxt/mod.rs | 296 ++
 .../rustc_typeck/src/check/fn_ctxt/suggestions.rs | 912 ++++++
 compiler/rustc_typeck/src/check/gather_locals.rs | 160 +
 .../rustc_typeck/src/check/generator_interior.rs | 632 ++++
 .../src/check/generator_interior/drop_ranges.rs | 309 ++
 .../generator_interior/drop_ranges/cfg_build.rs | 560 ++++
 .../drop_ranges/cfg_propagate.rs | 92 +
 .../drop_ranges/cfg_visualize.rs | 91 +
 .../drop_ranges/record_consumed_borrow.rs | 232 ++
 compiler/rustc_typeck/src/check/inherited.rs | 183 ++
 compiler/rustc_typeck/src/check/intrinsic.rs | 517 +++
 compiler/rustc_typeck/src/check/intrinsicck.rs | 530 +++
 compiler/rustc_typeck/src/check/method/confirm.rs | 582 ++++
 compiler/rustc_typeck/src/check/method/mod.rs | 658 ++++
 .../rustc_typeck/src/check/method/prelude2021.rs | 419 +++
 compiler/rustc_typeck/src/check/method/probe.rs | 1932 +++++++++++
 compiler/rustc_typeck/src/check/method/suggest.rs | 2286 +++++++++++++
 compiler/rustc_typeck/src/check/mod.rs | 970 ++++++
 compiler/rustc_typeck/src/check/op.rs | 1076 +++++++
 compiler/rustc_typeck/src/check/pat.rs | 2142 +++++++++++++
 compiler/rustc_typeck/src/check/place_op.rs | 451 +++
 compiler/rustc_typeck/src/check/region.rs | 837 +++++
 compiler/rustc_typeck/src/check/regionck.rs | 47 +
 compiler/rustc_typeck/src/check/rvalue_scopes.rs | 83 +
 compiler/rustc_typeck/src/check/upvar.rs | 2272 +++++++++++++
 compiler/rustc_typeck/src/check/wfcheck.rs | 1973 ++++++++++++
 compiler/rustc_typeck/src/check/writeback.rs | 783 +++++
 compiler/rustc_typeck/src/check_unused.rs | 196 ++
 compiler/rustc_typeck/src/coherence/builtin.rs | 603 ++++
 .../rustc_typeck/src/coherence/inherent_impls.rs | 249 ++
 .../src/coherence/inherent_impls_overlap.rs | 307 ++
 compiler/rustc_typeck/src/coherence/mod.rs | 237 ++
 compiler/rustc_typeck/src/coherence/orphan.rs | 507 +++
 compiler/rustc_typeck/src/coherence/unsafety.rs | 66 +
 compiler/rustc_typeck/src/collect.rs | 3361 ++++++++++++++++++++
 compiler/rustc_typeck/src/collect/item_bounds.rs | 102 +
 compiler/rustc_typeck/src/collect/type_of.rs | 877 +++++
 .../rustc_typeck/src/constrained_generic_params.rs | 221 ++
 compiler/rustc_typeck/src/errors.rs | 326 ++
 compiler/rustc_typeck/src/expr_use_visitor.rs | 914 ++++++
 compiler/rustc_typeck/src/hir_wf_check.rs | 188 ++
 compiler/rustc_typeck/src/impl_wf_check.rs | 228 ++
 .../src/impl_wf_check/min_specialization.rs | 439 +++
 compiler/rustc_typeck/src/lib.rs | 579 ++++
 compiler/rustc_typeck/src/mem_categorization.rs | 786 +++++
 compiler/rustc_typeck/src/outlives/explicit.rs | 69 +
 .../rustc_typeck/src/outlives/implicit_infer.rs | 300 ++
 compiler/rustc_typeck/src/outlives/mod.rs | 130 +
 .../rustc_typeck/src/outlives/outlives_bounds.rs | 90 +
 compiler/rustc_typeck/src/outlives/test.rs | 21 +
 compiler/rustc_typeck/src/outlives/utils.rs | 175 +
 compiler/rustc_typeck/src/structured_errors.rs | 42 +
 .../missing_cast_for_variadic_arg.rs | 61 +
 .../src/structured_errors/sized_unsized_cast.rs | 62 +
 .../wrong_number_of_generic_args.rs | 890 ++++++
 compiler/rustc_typeck/src/variance/constraints.rs | 449 +++
 compiler/rustc_typeck/src/variance/mod.rs | 63 +
 compiler/rustc_typeck/src/variance/solve.rs | 135 +
 compiler/rustc_typeck/src/variance/terms.rs | 145 +
 compiler/rustc_typeck/src/variance/test.rs | 14 +
 compiler/rustc_typeck/src/variance/xform.rs | 22 +
 84 files changed, 55370 insertions(+)
 create mode 100644 compiler/rustc_typeck/Cargo.toml
 create mode 100644 compiler/rustc_typeck/README.md
 create mode 100644 compiler/rustc_typeck/src/astconv/errors.rs
 create mode 100644 compiler/rustc_typeck/src/astconv/generics.rs
 create mode 100644 compiler/rustc_typeck/src/astconv/mod.rs
 create mode 100644 compiler/rustc_typeck/src/bounds.rs
 create mode 100644 compiler/rustc_typeck/src/check/_match.rs
 create mode 100644 compiler/rustc_typeck/src/check/autoderef.rs
 create mode 100644 compiler/rustc_typeck/src/check/callee.rs
 create mode 100644 compiler/rustc_typeck/src/check/cast.rs
 create mode 100644 compiler/rustc_typeck/src/check/check.rs
 create mode 100644 compiler/rustc_typeck/src/check/closure.rs
 create mode 100644 compiler/rustc_typeck/src/check/coercion.rs
 create mode 100644 compiler/rustc_typeck/src/check/compare_method.rs
 create mode 100644 compiler/rustc_typeck/src/check/demand.rs
 create mode 100644 compiler/rustc_typeck/src/check/diverges.rs
 create mode 100644 compiler/rustc_typeck/src/check/dropck.rs
 create mode 100644 compiler/rustc_typeck/src/check/expectation.rs
 create mode 100644 compiler/rustc_typeck/src/check/expr.rs
 create mode 100644 compiler/rustc_typeck/src/check/fallback.rs
 create mode 100644 compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs
 create mode 100644 compiler/rustc_typeck/src/check/fn_ctxt/arg_matrix.rs
 create mode 100644 compiler/rustc_typeck/src/check/fn_ctxt/checks.rs
 create mode 100644 compiler/rustc_typeck/src/check/fn_ctxt/mod.rs
 create mode 100644 compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs
 create mode 100644 compiler/rustc_typeck/src/check/gather_locals.rs
 create mode 100644 compiler/rustc_typeck/src/check/generator_interior.rs
 create mode 100644 compiler/rustc_typeck/src/check/generator_interior/drop_ranges.rs
 create mode 100644 compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_build.rs
 create mode 100644 compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_propagate.rs
 create mode 100644 compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_visualize.rs
 create mode 100644 compiler/rustc_typeck/src/check/generator_interior/drop_ranges/record_consumed_borrow.rs
 create mode 100644 compiler/rustc_typeck/src/check/inherited.rs
 create mode 100644 compiler/rustc_typeck/src/check/intrinsic.rs
 create mode 100644 compiler/rustc_typeck/src/check/intrinsicck.rs
 create mode 100644 compiler/rustc_typeck/src/check/method/confirm.rs
 create mode 100644 compiler/rustc_typeck/src/check/method/mod.rs
 create mode 100644 compiler/rustc_typeck/src/check/method/prelude2021.rs
 create mode 100644 compiler/rustc_typeck/src/check/method/probe.rs
 create mode 100644 compiler/rustc_typeck/src/check/method/suggest.rs
 create mode 100644 compiler/rustc_typeck/src/check/mod.rs
 create mode 100644 compiler/rustc_typeck/src/check/op.rs
 create mode 100644 compiler/rustc_typeck/src/check/pat.rs
 create mode 100644 compiler/rustc_typeck/src/check/place_op.rs
 create mode 100644 compiler/rustc_typeck/src/check/region.rs
 create mode 100644 compiler/rustc_typeck/src/check/regionck.rs
 create mode 100644 compiler/rustc_typeck/src/check/rvalue_scopes.rs
 create mode 100644 compiler/rustc_typeck/src/check/upvar.rs
 create mode 100644 compiler/rustc_typeck/src/check/wfcheck.rs
 create mode 100644 compiler/rustc_typeck/src/check/writeback.rs
 create mode 100644 compiler/rustc_typeck/src/check_unused.rs
 create mode 100644 compiler/rustc_typeck/src/coherence/builtin.rs
 create mode 100644 compiler/rustc_typeck/src/coherence/inherent_impls.rs
 create mode 100644 compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs
 create mode 100644 compiler/rustc_typeck/src/coherence/mod.rs
 create mode 100644 compiler/rustc_typeck/src/coherence/orphan.rs
 create mode 100644 compiler/rustc_typeck/src/coherence/unsafety.rs
 create mode 100644 compiler/rustc_typeck/src/collect.rs
 create mode 100644 compiler/rustc_typeck/src/collect/item_bounds.rs
 create mode 100644 compiler/rustc_typeck/src/collect/type_of.rs
 create mode 100644 compiler/rustc_typeck/src/constrained_generic_params.rs
 create mode 100644 compiler/rustc_typeck/src/errors.rs
 create mode 100644 compiler/rustc_typeck/src/expr_use_visitor.rs
 create mode 100644 compiler/rustc_typeck/src/hir_wf_check.rs
 create mode 100644 compiler/rustc_typeck/src/impl_wf_check.rs
 create mode 100644 compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs
 create mode 100644 compiler/rustc_typeck/src/lib.rs
 create mode 100644 compiler/rustc_typeck/src/mem_categorization.rs
 create mode 100644 compiler/rustc_typeck/src/outlives/explicit.rs
 create mode 100644 compiler/rustc_typeck/src/outlives/implicit_infer.rs
 create mode 100644 compiler/rustc_typeck/src/outlives/mod.rs
 create mode 100644 compiler/rustc_typeck/src/outlives/outlives_bounds.rs
 create mode 100644 compiler/rustc_typeck/src/outlives/test.rs
 create mode 100644 compiler/rustc_typeck/src/outlives/utils.rs
 create mode 100644 compiler/rustc_typeck/src/structured_errors.rs
 create mode 100644 compiler/rustc_typeck/src/structured_errors/missing_cast_for_variadic_arg.rs
 create mode 100644 compiler/rustc_typeck/src/structured_errors/sized_unsized_cast.rs
 create mode 100644 compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs
 create mode 100644 compiler/rustc_typeck/src/variance/constraints.rs
 create mode 100644 compiler/rustc_typeck/src/variance/mod.rs
 create mode 100644 compiler/rustc_typeck/src/variance/solve.rs
 create mode 100644 compiler/rustc_typeck/src/variance/terms.rs
 create mode 100644 compiler/rustc_typeck/src/variance/test.rs
 create mode 100644 compiler/rustc_typeck/src/variance/xform.rs

diff --git a/compiler/rustc_typeck/Cargo.toml
b/compiler/rustc_typeck/Cargo.toml new file mode 100644 index 000000000..faf52e269 --- /dev/null +++ b/compiler/rustc_typeck/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "rustc_typeck" +version = "0.0.0" +edition = "2021" + +[lib] +test = false +doctest = false + +[dependencies] +rustc_arena = { path = "../rustc_arena" } +tracing = "0.1" +rustc_macros = { path = "../rustc_macros" } +rustc_middle = { path = "../rustc_middle" } +rustc_attr = { path = "../rustc_attr" } +rustc_data_structures = { path = "../rustc_data_structures" } +rustc_errors = { path = "../rustc_errors" } +rustc_graphviz = { path = "../rustc_graphviz" } +rustc_hir = { path = "../rustc_hir" } +rustc_hir_pretty = { path = "../rustc_hir_pretty" } +rustc_target = { path = "../rustc_target" } +rustc_session = { path = "../rustc_session" } +smallvec = { version = "1.8.1", features = ["union", "may_dangle"] } +rustc_ast = { path = "../rustc_ast" } +rustc_span = { path = "../rustc_span" } +rustc_index = { path = "../rustc_index" } +rustc_infer = { path = "../rustc_infer" } +rustc_trait_selection = { path = "../rustc_trait_selection" } +rustc_ty_utils = { path = "../rustc_ty_utils" } +rustc_lint = { path = "../rustc_lint" } +rustc_serialize = { path = "../rustc_serialize" } +rustc_type_ir = { path = "../rustc_type_ir" } diff --git a/compiler/rustc_typeck/README.md b/compiler/rustc_typeck/README.md new file mode 100644 index 000000000..b61dbd8c9 --- /dev/null +++ b/compiler/rustc_typeck/README.md @@ -0,0 +1,5 @@ +For high-level intro to how type checking works in rustc, see the +[type checking] chapter of the [rustc dev guide]. + +[type checking]: https://rustc-dev-guide.rust-lang.org/type-checking.html +[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/ diff --git a/compiler/rustc_typeck/src/astconv/errors.rs b/compiler/rustc_typeck/src/astconv/errors.rs new file mode 100644 index 000000000..ff39bf361 --- /dev/null +++ b/compiler/rustc_typeck/src/astconv/errors.rs @@ -0,0 +1,410 @@ +use crate::astconv::AstConv; +use crate::errors::{ManualImplementation, MissingTypeParams}; +use rustc_data_structures::fx::FxHashMap; +use rustc_errors::{pluralize, struct_span_err, Applicability, ErrorGuaranteed}; +use rustc_hir as hir; +use rustc_hir::def_id::DefId; +use rustc_middle::ty; +use rustc_session::parse::feature_err; +use rustc_span::lev_distance::find_best_match_for_name; +use rustc_span::symbol::{sym, Ident}; +use rustc_span::{Span, Symbol, DUMMY_SP}; + +use std::collections::BTreeSet; + +impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { + /// On missing type parameters, emit an E0393 error and provide a structured suggestion using + /// the type parameter's name as a placeholder. + pub(crate) fn complain_about_missing_type_params( + &self, + missing_type_params: Vec, + def_id: DefId, + span: Span, + empty_generic_args: bool, + ) { + if missing_type_params.is_empty() { + return; + } + + self.tcx().sess.emit_err(MissingTypeParams { + span, + def_span: self.tcx().def_span(def_id), + missing_type_params, + empty_generic_args, + }); + } + + /// When the code is using the `Fn` traits directly, instead of the `Fn(A) -> B` syntax, emit + /// an error and attempt to build a reasonable structured suggestion. 
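+    ///
+    /// A minimal illustrative sketch (hypothetical user code, assuming
+    /// `#![feature(unboxed_closures)]` is *not* enabled) of the input this fires on and
+    /// the parenthetical rewrite it suggests:
+    ///
+    /// ```ignore (illustrative)
+    /// fn call_it<F: Fn<(u32,), Output = u32>>(f: F) -> u32 { f(1) }
+    /// // suggested: fn call_it<F: Fn(u32) -> u32>(f: F) -> u32 { f(1) }
+    /// ```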
+ pub(crate) fn complain_about_internal_fn_trait( + &self, + span: Span, + trait_def_id: DefId, + trait_segment: &'_ hir::PathSegment<'_>, + is_impl: bool, + ) { + if self.tcx().features().unboxed_closures { + return; + } + + let trait_def = self.tcx().trait_def(trait_def_id); + if !trait_def.paren_sugar { + if trait_segment.args().parenthesized { + // For now, require that parenthetical notation be used only with `Fn()` etc. + let mut err = feature_err( + &self.tcx().sess.parse_sess, + sym::unboxed_closures, + span, + "parenthetical notation is only stable when used with `Fn`-family traits", + ); + err.emit(); + } + + return; + } + + let sess = self.tcx().sess; + + if !trait_segment.args().parenthesized { + // For now, require that parenthetical notation be used only with `Fn()` etc. + let mut err = feature_err( + &sess.parse_sess, + sym::unboxed_closures, + span, + "the precise format of `Fn`-family traits' type parameters is subject to change", + ); + // Do not suggest the other syntax if we are in trait impl: + // the desugaring would contain an associated type constraint. + if !is_impl { + let args = trait_segment + .args + .as_ref() + .and_then(|args| args.args.get(0)) + .and_then(|arg| match arg { + hir::GenericArg::Type(ty) => match ty.kind { + hir::TyKind::Tup(t) => t + .iter() + .map(|e| sess.source_map().span_to_snippet(e.span)) + .collect::, _>>() + .map(|a| a.join(", ")), + _ => sess.source_map().span_to_snippet(ty.span), + } + .map(|s| format!("({})", s)) + .ok(), + _ => None, + }) + .unwrap_or_else(|| "()".to_string()); + let ret = trait_segment + .args() + .bindings + .iter() + .find_map(|b| match (b.ident.name == sym::Output, &b.kind) { + (true, hir::TypeBindingKind::Equality { term }) => { + let span = match term { + hir::Term::Ty(ty) => ty.span, + hir::Term::Const(c) => self.tcx().hir().span(c.hir_id), + }; + sess.source_map().span_to_snippet(span).ok() + } + _ => None, + }) + .unwrap_or_else(|| "()".to_string()); + err.span_suggestion( + span, + "use parenthetical notation instead", + format!("{}{} -> {}", trait_segment.ident, args, ret), + Applicability::MaybeIncorrect, + ); + } + err.emit(); + } + + if is_impl { + let trait_name = self.tcx().def_path_str(trait_def_id); + self.tcx().sess.emit_err(ManualImplementation { span, trait_name }); + } + } + + pub(crate) fn complain_about_assoc_type_not_found( + &self, + all_candidates: impl Fn() -> I, + ty_param_name: &str, + assoc_name: Ident, + span: Span, + ) -> ErrorGuaranteed + where + I: Iterator>, + { + // The fallback span is needed because `assoc_name` might be an `Fn()`'s `Output` without a + // valid span, so we point at the whole path segment instead. 
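+        //
+        // Illustrative sketch (hypothetical user code, not from the test suite) of what
+        // reaches this diagnostic and what the lev-distance lookup below ends up suggesting:
+        //
+        //     fn sum<I: Iterator>(_: I::Itme) {}
+        //     // error[E0220]: associated type `Itme` not found for `I`
+        //     // help: there is an associated type with a similar name: `Item`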
+ let span = if assoc_name.span != DUMMY_SP { assoc_name.span } else { span }; + let mut err = struct_span_err!( + self.tcx().sess, + span, + E0220, + "associated type `{}` not found for `{}`", + assoc_name, + ty_param_name + ); + + let all_candidate_names: Vec<_> = all_candidates() + .flat_map(|r| self.tcx().associated_items(r.def_id()).in_definition_order()) + .filter_map( + |item| if item.kind == ty::AssocKind::Type { Some(item.name) } else { None }, + ) + .collect(); + + if let (Some(suggested_name), true) = ( + find_best_match_for_name(&all_candidate_names, assoc_name.name, None), + assoc_name.span != DUMMY_SP, + ) { + err.span_suggestion( + assoc_name.span, + "there is an associated type with a similar name", + suggested_name, + Applicability::MaybeIncorrect, + ); + return err.emit(); + } + + // If we didn't find a good item in the supertraits (or couldn't get + // the supertraits), like in ItemCtxt, then look more generally from + // all visible traits. If there's one clear winner, just suggest that. + + let visible_traits: Vec<_> = self + .tcx() + .all_traits() + .filter(|trait_def_id| { + let viz = self.tcx().visibility(*trait_def_id); + if let Some(def_id) = self.item_def_id() { + viz.is_accessible_from(def_id, self.tcx()) + } else { + viz.is_visible_locally() + } + }) + .collect(); + + let wider_candidate_names: Vec<_> = visible_traits + .iter() + .flat_map(|trait_def_id| { + self.tcx().associated_items(*trait_def_id).in_definition_order() + }) + .filter_map( + |item| if item.kind == ty::AssocKind::Type { Some(item.name) } else { None }, + ) + .collect(); + + if let (Some(suggested_name), true) = ( + find_best_match_for_name(&wider_candidate_names, assoc_name.name, None), + assoc_name.span != DUMMY_SP, + ) { + if let [best_trait] = visible_traits + .iter() + .filter(|trait_def_id| { + self.tcx() + .associated_items(*trait_def_id) + .filter_by_name_unhygienic(suggested_name) + .any(|item| item.kind == ty::AssocKind::Type) + }) + .collect::>()[..] + { + err.span_label( + assoc_name.span, + format!( + "there is a similarly named associated type `{suggested_name}` in the trait `{}`", + self.tcx().def_path_str(*best_trait) + ), + ); + return err.emit(); + } + } + + err.span_label(span, format!("associated type `{}` not found", assoc_name)); + err.emit() + } + + /// When there are any missing associated types, emit an E0191 error and attempt to supply a + /// reasonable suggestion on how to write it. For the case of multiple associated types in the + /// same trait bound have the same name (as they come from different supertraits), we instead + /// emit a generic note suggesting using a `where` clause to constraint instead. + pub(crate) fn complain_about_missing_associated_types( + &self, + associated_types: FxHashMap>, + potential_assoc_types: Vec, + trait_bounds: &[hir::PolyTraitRef<'_>], + ) { + if associated_types.values().all(|v| v.is_empty()) { + return; + } + let tcx = self.tcx(); + // FIXME: Marked `mut` so that we can replace the spans further below with a more + // appropriate one, but this should be handled earlier in the span assignment. + let mut associated_types: FxHashMap> = associated_types + .into_iter() + .map(|(span, def_ids)| { + (span, def_ids.into_iter().map(|did| tcx.associated_item(did)).collect()) + }) + .collect(); + let mut names = vec![]; + + // Account for things like `dyn Foo + 'a`, like in tests `issue-22434.rs` and + // `issue-22560.rs`. 
+ let mut trait_bound_spans: Vec = vec![]; + for (span, items) in &associated_types { + if !items.is_empty() { + trait_bound_spans.push(*span); + } + for assoc_item in items { + let trait_def_id = assoc_item.container_id(tcx); + names.push(format!( + "`{}` (from trait `{}`)", + assoc_item.name, + tcx.def_path_str(trait_def_id), + )); + } + } + if let ([], [bound]) = (&potential_assoc_types[..], &trait_bounds) { + match bound.trait_ref.path.segments { + // FIXME: `trait_ref.path.span` can point to a full path with multiple + // segments, even though `trait_ref.path.segments` is of length `1`. Work + // around that bug here, even though it should be fixed elsewhere. + // This would otherwise cause an invalid suggestion. For an example, look at + // `src/test/ui/issues/issue-28344.rs` where instead of the following: + // + // error[E0191]: the value of the associated type `Output` + // (from trait `std::ops::BitXor`) must be specified + // --> $DIR/issue-28344.rs:4:17 + // | + // LL | let x: u8 = BitXor::bitor(0 as u8, 0 as u8); + // | ^^^^^^ help: specify the associated type: + // | `BitXor` + // + // we would output: + // + // error[E0191]: the value of the associated type `Output` + // (from trait `std::ops::BitXor`) must be specified + // --> $DIR/issue-28344.rs:4:17 + // | + // LL | let x: u8 = BitXor::bitor(0 as u8, 0 as u8); + // | ^^^^^^^^^^^^^ help: specify the associated type: + // | `BitXor::bitor` + [segment] if segment.args.is_none() => { + trait_bound_spans = vec![segment.ident.span]; + associated_types = associated_types + .into_iter() + .map(|(_, items)| (segment.ident.span, items)) + .collect(); + } + _ => {} + } + } + names.sort(); + trait_bound_spans.sort(); + let mut err = struct_span_err!( + tcx.sess, + trait_bound_spans, + E0191, + "the value of the associated type{} {} must be specified", + pluralize!(names.len()), + names.join(", "), + ); + let mut suggestions = vec![]; + let mut types_count = 0; + let mut where_constraints = vec![]; + let mut already_has_generics_args_suggestion = false; + for (span, assoc_items) in &associated_types { + let mut names: FxHashMap<_, usize> = FxHashMap::default(); + for item in assoc_items { + types_count += 1; + *names.entry(item.name).or_insert(0) += 1; + } + let mut dupes = false; + for item in assoc_items { + let prefix = if names[&item.name] > 1 { + let trait_def_id = item.container_id(tcx); + dupes = true; + format!("{}::", tcx.def_path_str(trait_def_id)) + } else { + String::new() + }; + if let Some(sp) = tcx.hir().span_if_local(item.def_id) { + err.span_label(sp, format!("`{}{}` defined here", prefix, item.name)); + } + } + if potential_assoc_types.len() == assoc_items.len() { + // When the amount of missing associated types equals the number of + // extra type arguments present. A suggesting to replace the generic args with + // associated types is already emitted. + already_has_generics_args_suggestion = true; + } else if let (Ok(snippet), false) = + (tcx.sess.source_map().span_to_snippet(*span), dupes) + { + let types: Vec<_> = + assoc_items.iter().map(|item| format!("{} = Type", item.name)).collect(); + let code = if snippet.ends_with('>') { + // The user wrote `Trait<'a>` or similar and we don't have a type we can + // suggest, but at least we can clue them to the correct syntax + // `Trait<'a, Item = Type>` while accounting for the `<'a>` in the + // suggestion. 
+ format!("{}, {}>", &snippet[..snippet.len() - 1], types.join(", ")) + } else { + // The user wrote `Iterator`, so we don't have a type we can suggest, but at + // least we can clue them to the correct syntax `Iterator`. + format!("{}<{}>", snippet, types.join(", ")) + }; + suggestions.push((*span, code)); + } else if dupes { + where_constraints.push(*span); + } + } + let where_msg = "consider introducing a new type parameter, adding `where` constraints \ + using the fully-qualified path to the associated types"; + if !where_constraints.is_empty() && suggestions.is_empty() { + // If there are duplicates associated type names and a single trait bound do not + // use structured suggestion, it means that there are multiple supertraits with + // the same associated type name. + err.help(where_msg); + } + if suggestions.len() != 1 || already_has_generics_args_suggestion { + // We don't need this label if there's an inline suggestion, show otherwise. + for (span, assoc_items) in &associated_types { + let mut names: FxHashMap<_, usize> = FxHashMap::default(); + for item in assoc_items { + types_count += 1; + *names.entry(item.name).or_insert(0) += 1; + } + let mut label = vec![]; + for item in assoc_items { + let postfix = if names[&item.name] > 1 { + let trait_def_id = item.container_id(tcx); + format!(" (from trait `{}`)", tcx.def_path_str(trait_def_id)) + } else { + String::new() + }; + label.push(format!("`{}`{}", item.name, postfix)); + } + if !label.is_empty() { + err.span_label( + *span, + format!( + "associated type{} {} must be specified", + pluralize!(label.len()), + label.join(", "), + ), + ); + } + } + } + if !suggestions.is_empty() { + err.multipart_suggestion( + &format!("specify the associated type{}", pluralize!(types_count)), + suggestions, + Applicability::HasPlaceholders, + ); + if !where_constraints.is_empty() { + err.span_help(where_constraints, where_msg); + } + } + err.emit(); + } +} diff --git a/compiler/rustc_typeck/src/astconv/generics.rs b/compiler/rustc_typeck/src/astconv/generics.rs new file mode 100644 index 000000000..40aa27a29 --- /dev/null +++ b/compiler/rustc_typeck/src/astconv/generics.rs @@ -0,0 +1,664 @@ +use super::IsMethodCall; +use crate::astconv::{ + AstConv, CreateSubstsForGenericArgsCtxt, ExplicitLateBound, GenericArgCountMismatch, + GenericArgCountResult, GenericArgPosition, +}; +use crate::errors::AssocTypeBindingNotAllowed; +use crate::structured_errors::{GenericArgsInfo, StructuredDiagnostic, WrongNumberOfGenericArgs}; +use rustc_ast::ast::ParamKindOrd; +use rustc_errors::{struct_span_err, Applicability, Diagnostic, MultiSpan}; +use rustc_hir as hir; +use rustc_hir::def::{DefKind, Res}; +use rustc_hir::def_id::DefId; +use rustc_hir::GenericArg; +use rustc_infer::infer::TyCtxtInferExt; +use rustc_middle::ty::{ + self, subst, subst::SubstsRef, GenericParamDef, GenericParamDefKind, IsSuggestable, Ty, TyCtxt, +}; +use rustc_session::lint::builtin::LATE_BOUND_LIFETIME_ARGUMENTS; +use rustc_span::{symbol::kw, Span}; +use smallvec::SmallVec; + +impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { + /// Report an error that a generic argument did not match the generic parameter that was + /// expected. + fn generic_arg_mismatch_err( + tcx: TyCtxt<'_>, + arg: &GenericArg<'_>, + param: &GenericParamDef, + possible_ordering_error: bool, + help: Option<&str>, + ) { + let sess = tcx.sess; + let mut err = struct_span_err!( + sess, + arg.span(), + E0747, + "{} provided when a {} was expected", + arg.descr(), + param.kind.descr(), + ); + + if let GenericParamDefKind::Const { .. 
} = param.kind { + if matches!(arg, GenericArg::Type(hir::Ty { kind: hir::TyKind::Infer, .. })) { + err.help("const arguments cannot yet be inferred with `_`"); + if sess.is_nightly_build() { + err.help( + "add `#![feature(generic_arg_infer)]` to the crate attributes to enable", + ); + } + } + } + + let add_braces_suggestion = |arg: &GenericArg<'_>, err: &mut Diagnostic| { + let suggestions = vec![ + (arg.span().shrink_to_lo(), String::from("{ ")), + (arg.span().shrink_to_hi(), String::from(" }")), + ]; + err.multipart_suggestion( + "if this generic argument was intended as a const parameter, \ + surround it with braces", + suggestions, + Applicability::MaybeIncorrect, + ); + }; + + // Specific suggestion set for diagnostics + match (arg, ¶m.kind) { + ( + GenericArg::Type(hir::Ty { + kind: hir::TyKind::Path(rustc_hir::QPath::Resolved(_, path)), + .. + }), + GenericParamDefKind::Const { .. }, + ) => match path.res { + Res::Err => { + add_braces_suggestion(arg, &mut err); + err.set_primary_message( + "unresolved item provided when a constant was expected", + ) + .emit(); + return; + } + Res::Def(DefKind::TyParam, src_def_id) => { + if let Some(param_local_id) = param.def_id.as_local() { + let param_name = tcx.hir().ty_param_name(param_local_id); + let param_type = tcx.infer_ctxt().enter(|infcx| { + infcx.resolve_numeric_literals_with_default(tcx.type_of(param.def_id)) + }); + if param_type.is_suggestable(tcx, false) { + err.span_suggestion( + tcx.def_span(src_def_id), + "consider changing this type parameter to be a `const` generic", + format!("const {}: {}", param_name, param_type), + Applicability::MaybeIncorrect, + ); + }; + } + } + _ => add_braces_suggestion(arg, &mut err), + }, + ( + GenericArg::Type(hir::Ty { kind: hir::TyKind::Path(_), .. }), + GenericParamDefKind::Const { .. }, + ) => add_braces_suggestion(arg, &mut err), + ( + GenericArg::Type(hir::Ty { kind: hir::TyKind::Array(_, len), .. }), + GenericParamDefKind::Const { .. }, + ) if tcx.type_of(param.def_id) == tcx.types.usize => { + let snippet = sess.source_map().span_to_snippet(tcx.hir().span(len.hir_id())); + if let Ok(snippet) = snippet { + err.span_suggestion( + arg.span(), + "array type provided where a `usize` was expected, try", + format!("{{ {} }}", snippet), + Applicability::MaybeIncorrect, + ); + } + } + (GenericArg::Const(cnst), GenericParamDefKind::Type { .. }) => { + let body = tcx.hir().body(cnst.value.body); + if let rustc_hir::ExprKind::Path(rustc_hir::QPath::Resolved(_, path)) = + body.value.kind + { + if let Res::Def(DefKind::Fn { .. }, id) = path.res { + err.help(&format!( + "`{}` is a function item, not a type", + tcx.item_name(id) + )); + err.help("function item types cannot be named directly"); + } + } + } + _ => {} + } + + let kind_ord = param.kind.to_ord(); + let arg_ord = arg.to_ord(); + + // This note is only true when generic parameters are strictly ordered by their kind. + if possible_ordering_error && kind_ord.cmp(&arg_ord) != core::cmp::Ordering::Equal { + let (first, last) = if kind_ord < arg_ord { + (param.kind.descr(), arg.descr()) + } else { + (arg.descr(), param.kind.descr()) + }; + err.note(&format!("{} arguments must be provided before {} arguments", first, last)); + if let Some(help) = help { + err.help(help); + } + } + + err.emit(); + } + + /// Creates the relevant generic argument substitutions + /// corresponding to a set of generic parameters. This is a + /// rather complex function. 
Let us try to explain the role + /// of each of its parameters: + /// + /// To start, we are given the `def_id` of the thing we are + /// creating the substitutions for, and a partial set of + /// substitutions `parent_substs`. In general, the substitutions + /// for an item begin with substitutions for all the "parents" of + /// that item -- e.g., for a method it might include the + /// parameters from the impl. + /// + /// Therefore, the method begins by walking down these parents, + /// starting with the outermost parent and proceed inwards until + /// it reaches `def_id`. For each parent `P`, it will check `parent_substs` + /// first to see if the parent's substitutions are listed in there. If so, + /// we can append those and move on. Otherwise, it invokes the + /// three callback functions: + /// + /// - `args_for_def_id`: given the `DefId` `P`, supplies back the + /// generic arguments that were given to that parent from within + /// the path; so e.g., if you have `::Bar`, the `DefId` + /// might refer to the trait `Foo`, and the arguments might be + /// `[T]`. The boolean value indicates whether to infer values + /// for arguments whose values were not explicitly provided. + /// - `provided_kind`: given the generic parameter and the value from `args_for_def_id`, + /// instantiate a `GenericArg`. + /// - `inferred_kind`: if no parameter was provided, and inference is enabled, then + /// creates a suitable inference variable. + pub fn create_substs_for_generic_args<'a>( + tcx: TyCtxt<'tcx>, + def_id: DefId, + parent_substs: &[subst::GenericArg<'tcx>], + has_self: bool, + self_ty: Option>, + arg_count: &GenericArgCountResult, + ctx: &mut impl CreateSubstsForGenericArgsCtxt<'a, 'tcx>, + ) -> SubstsRef<'tcx> { + // Collect the segments of the path; we need to substitute arguments + // for parameters throughout the entire path (wherever there are + // generic parameters). + let mut parent_defs = tcx.generics_of(def_id); + let count = parent_defs.count(); + let mut stack = vec![(def_id, parent_defs)]; + while let Some(def_id) = parent_defs.parent { + parent_defs = tcx.generics_of(def_id); + stack.push((def_id, parent_defs)); + } + + // We manually build up the substitution, rather than using convenience + // methods in `subst.rs`, so that we can iterate over the arguments and + // parameters in lock-step linearly, instead of trying to match each pair. + let mut substs: SmallVec<[subst::GenericArg<'tcx>; 8]> = SmallVec::with_capacity(count); + // Iterate over each segment of the path. + while let Some((def_id, defs)) = stack.pop() { + let mut params = defs.params.iter().peekable(); + + // If we have already computed substitutions for parents, we can use those directly. + while let Some(¶m) = params.peek() { + if let Some(&kind) = parent_substs.get(param.index as usize) { + substs.push(kind); + params.next(); + } else { + break; + } + } + + // `Self` is handled first, unless it's been handled in `parent_substs`. + if has_self { + if let Some(¶m) = params.peek() { + if param.index == 0 { + if let GenericParamDefKind::Type { .. } = param.kind { + substs.push( + self_ty + .map(|ty| ty.into()) + .unwrap_or_else(|| ctx.inferred_kind(None, param, true)), + ); + params.next(); + } + } + } + } + + // Check whether this segment takes generic arguments and the user has provided any. 
+ let (generic_args, infer_args) = ctx.args_for_def_id(def_id); + + let args_iter = generic_args.iter().flat_map(|generic_args| generic_args.args.iter()); + let mut args = args_iter.clone().peekable(); + + // If we encounter a type or const when we expect a lifetime, we infer the lifetimes. + // If we later encounter a lifetime, we know that the arguments were provided in the + // wrong order. `force_infer_lt` records the type or const that forced lifetimes to be + // inferred, so we can use it for diagnostics later. + let mut force_infer_lt = None; + + loop { + // We're going to iterate through the generic arguments that the user + // provided, matching them with the generic parameters we expect. + // Mismatches can occur as a result of elided lifetimes, or for malformed + // input. We try to handle both sensibly. + match (args.peek(), params.peek()) { + (Some(&arg), Some(¶m)) => { + match (arg, ¶m.kind, arg_count.explicit_late_bound) { + (GenericArg::Lifetime(_), GenericParamDefKind::Lifetime, _) + | ( + GenericArg::Type(_) | GenericArg::Infer(_), + GenericParamDefKind::Type { .. }, + _, + ) + | ( + GenericArg::Const(_) | GenericArg::Infer(_), + GenericParamDefKind::Const { .. }, + _, + ) => { + substs.push(ctx.provided_kind(param, arg)); + args.next(); + params.next(); + } + ( + GenericArg::Infer(_) | GenericArg::Type(_) | GenericArg::Const(_), + GenericParamDefKind::Lifetime, + _, + ) => { + // We expected a lifetime argument, but got a type or const + // argument. That means we're inferring the lifetimes. + substs.push(ctx.inferred_kind(None, param, infer_args)); + force_infer_lt = Some((arg, param)); + params.next(); + } + (GenericArg::Lifetime(_), _, ExplicitLateBound::Yes) => { + // We've come across a lifetime when we expected something else in + // the presence of explicit late bounds. This is most likely + // due to the presence of the explicit bound so we're just going to + // ignore it. + args.next(); + } + (_, _, _) => { + // We expected one kind of parameter, but the user provided + // another. This is an error. However, if we already know that + // the arguments don't match up with the parameters, we won't issue + // an additional error, as the user already knows what's wrong. + if arg_count.correct.is_ok() { + // We're going to iterate over the parameters to sort them out, and + // show that order to the user as a possible order for the parameters + let mut param_types_present = defs + .params + .clone() + .into_iter() + .map(|param| (param.kind.to_ord(), param)) + .collect::>(); + param_types_present.sort_by_key(|(ord, _)| *ord); + let (mut param_types_present, ordered_params): ( + Vec, + Vec, + ) = param_types_present.into_iter().unzip(); + param_types_present.dedup(); + + Self::generic_arg_mismatch_err( + tcx, + arg, + param, + !args_iter.clone().is_sorted_by_key(|arg| arg.to_ord()), + Some(&format!( + "reorder the arguments: {}: `<{}>`", + param_types_present + .into_iter() + .map(|ord| format!("{}s", ord)) + .collect::>() + .join(", then "), + ordered_params + .into_iter() + .filter_map(|param| { + if param.name == kw::SelfUpper { + None + } else { + Some(param.name.to_string()) + } + }) + .collect::>() + .join(", ") + )), + ); + } + + // We've reported the error, but we want to make sure that this + // problem doesn't bubble down and create additional, irrelevant + // errors. In this case, we're simply going to ignore the argument + // and any following arguments. The rest of the parameters will be + // inferred. 
+ while args.next().is_some() {} + } + } + } + + (Some(&arg), None) => { + // We should never be able to reach this point with well-formed input. + // There are three situations in which we can encounter this issue. + // + // 1. The number of arguments is incorrect. In this case, an error + // will already have been emitted, and we can ignore it. + // 2. There are late-bound lifetime parameters present, yet the + // lifetime arguments have also been explicitly specified by the + // user. + // 3. We've inferred some lifetimes, which have been provided later (i.e. + // after a type or const). We want to throw an error in this case. + + if arg_count.correct.is_ok() + && arg_count.explicit_late_bound == ExplicitLateBound::No + { + let kind = arg.descr(); + assert_eq!(kind, "lifetime"); + let (provided_arg, param) = + force_infer_lt.expect("lifetimes ought to have been inferred"); + Self::generic_arg_mismatch_err(tcx, provided_arg, param, false, None); + } + + break; + } + + (None, Some(¶m)) => { + // If there are fewer arguments than parameters, it means + // we're inferring the remaining arguments. + substs.push(ctx.inferred_kind(Some(&substs), param, infer_args)); + params.next(); + } + + (None, None) => break, + } + } + } + + tcx.intern_substs(&substs) + } + + /// Checks that the correct number of generic arguments have been provided. + /// Used specifically for function calls. + pub fn check_generic_arg_count_for_call( + tcx: TyCtxt<'_>, + span: Span, + def_id: DefId, + generics: &ty::Generics, + seg: &hir::PathSegment<'_>, + is_method_call: IsMethodCall, + ) -> GenericArgCountResult { + let empty_args = hir::GenericArgs::none(); + let gen_args = seg.args.unwrap_or(&empty_args); + let gen_pos = if is_method_call == IsMethodCall::Yes { + GenericArgPosition::MethodCall + } else { + GenericArgPosition::Value + }; + let has_self = generics.parent.is_none() && generics.has_self; + + Self::check_generic_arg_count( + tcx, + span, + def_id, + seg, + generics, + gen_args, + gen_pos, + has_self, + seg.infer_args, + ) + } + + /// Checks that the correct number of generic arguments have been provided. + /// This is used both for datatypes and function calls. + #[instrument(skip(tcx, gen_pos), level = "debug")] + pub(crate) fn check_generic_arg_count( + tcx: TyCtxt<'_>, + span: Span, + def_id: DefId, + seg: &hir::PathSegment<'_>, + gen_params: &ty::Generics, + gen_args: &hir::GenericArgs<'_>, + gen_pos: GenericArgPosition, + has_self: bool, + infer_args: bool, + ) -> GenericArgCountResult { + let default_counts = gen_params.own_defaults(); + let param_counts = gen_params.own_counts(); + + // Subtracting from param count to ensure type params synthesized from `impl Trait` + // cannot be explicitly specified. + let synth_type_param_count = gen_params + .params + .iter() + .filter(|param| { + matches!(param.kind, ty::GenericParamDefKind::Type { synthetic: true, .. 
}) + }) + .count(); + let named_type_param_count = + param_counts.types - has_self as usize - synth_type_param_count; + let infer_lifetimes = + (gen_pos != GenericArgPosition::Type || infer_args) && !gen_args.has_lifetime_params(); + + if gen_pos != GenericArgPosition::Type && !gen_args.bindings.is_empty() { + Self::prohibit_assoc_ty_binding(tcx, gen_args.bindings[0].span); + } + + let explicit_late_bound = + Self::prohibit_explicit_late_bound_lifetimes(tcx, gen_params, gen_args, gen_pos); + + let mut invalid_args = vec![]; + + let mut check_lifetime_args = + |min_expected_args: usize, + max_expected_args: usize, + provided_args: usize, + late_bounds_ignore: bool| { + if (min_expected_args..=max_expected_args).contains(&provided_args) { + return Ok(()); + } + + if late_bounds_ignore { + return Ok(()); + } + + if provided_args > max_expected_args { + invalid_args.extend( + gen_args.args[max_expected_args..provided_args] + .iter() + .map(|arg| arg.span()), + ); + }; + + let gen_args_info = if provided_args > min_expected_args { + invalid_args.extend( + gen_args.args[min_expected_args..provided_args] + .iter() + .map(|arg| arg.span()), + ); + let num_redundant_args = provided_args - min_expected_args; + GenericArgsInfo::ExcessLifetimes { num_redundant_args } + } else { + let num_missing_args = min_expected_args - provided_args; + GenericArgsInfo::MissingLifetimes { num_missing_args } + }; + + let reported = WrongNumberOfGenericArgs::new( + tcx, + gen_args_info, + seg, + gen_params, + has_self as usize, + gen_args, + def_id, + ) + .diagnostic() + .emit(); + + Err(reported) + }; + + let min_expected_lifetime_args = if infer_lifetimes { 0 } else { param_counts.lifetimes }; + let max_expected_lifetime_args = param_counts.lifetimes; + let num_provided_lifetime_args = gen_args.num_lifetime_params(); + + let lifetimes_correct = check_lifetime_args( + min_expected_lifetime_args, + max_expected_lifetime_args, + num_provided_lifetime_args, + explicit_late_bound == ExplicitLateBound::Yes, + ); + + let mut check_types_and_consts = |expected_min, + expected_max, + expected_max_with_synth, + provided, + params_offset, + args_offset| { + debug!( + ?expected_min, + ?expected_max, + ?provided, + ?params_offset, + ?args_offset, + "check_types_and_consts" + ); + if (expected_min..=expected_max).contains(&provided) { + return Ok(()); + } + + let num_default_params = expected_max - expected_min; + + let gen_args_info = if provided > expected_max { + invalid_args.extend( + gen_args.args[args_offset + expected_max..args_offset + provided] + .iter() + .map(|arg| arg.span()), + ); + let num_redundant_args = provided - expected_max; + + // Provide extra note if synthetic arguments like `impl Trait` are specified. 
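+                // (Illustrative, hypothetical example: given `fn take(x: impl Sized)`, a call
+                // written as `take::<u32>(0)` supplies an argument for the synthetic type
+                // parameter introduced by `impl Sized`; `synth_provided` below captures that
+                // situation so the diagnostic can mention it.)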
+ let synth_provided = provided <= expected_max_with_synth; + + GenericArgsInfo::ExcessTypesOrConsts { + num_redundant_args, + num_default_params, + args_offset, + synth_provided, + } + } else { + let num_missing_args = expected_max - provided; + + GenericArgsInfo::MissingTypesOrConsts { + num_missing_args, + num_default_params, + args_offset, + } + }; + + debug!(?gen_args_info); + + let reported = WrongNumberOfGenericArgs::new( + tcx, + gen_args_info, + seg, + gen_params, + params_offset, + gen_args, + def_id, + ) + .diagnostic() + .emit_unless(gen_args.has_err()); + + Err(reported) + }; + + let args_correct = { + let expected_min = if infer_args { + 0 + } else { + param_counts.consts + named_type_param_count + - default_counts.types + - default_counts.consts + }; + debug!(?expected_min); + debug!(arg_counts.lifetimes=?gen_args.num_lifetime_params()); + + check_types_and_consts( + expected_min, + param_counts.consts + named_type_param_count, + param_counts.consts + named_type_param_count + synth_type_param_count, + gen_args.num_generic_params(), + param_counts.lifetimes + has_self as usize, + gen_args.num_lifetime_params(), + ) + }; + + GenericArgCountResult { + explicit_late_bound, + correct: lifetimes_correct.and(args_correct).map_err(|reported| { + GenericArgCountMismatch { reported: Some(reported), invalid_args } + }), + } + } + + /// Emits an error regarding forbidden type binding associations + pub fn prohibit_assoc_ty_binding(tcx: TyCtxt<'_>, span: Span) { + tcx.sess.emit_err(AssocTypeBindingNotAllowed { span }); + } + + /// Prohibits explicit lifetime arguments if late-bound lifetime parameters + /// are present. This is used both for datatypes and function calls. + pub(crate) fn prohibit_explicit_late_bound_lifetimes( + tcx: TyCtxt<'_>, + def: &ty::Generics, + args: &hir::GenericArgs<'_>, + position: GenericArgPosition, + ) -> ExplicitLateBound { + let param_counts = def.own_counts(); + let infer_lifetimes = position != GenericArgPosition::Type && !args.has_lifetime_params(); + + if infer_lifetimes { + return ExplicitLateBound::No; + } + + if let Some(span_late) = def.has_late_bound_regions { + let msg = "cannot specify lifetime arguments explicitly \ + if late bound lifetime parameters are present"; + let note = "the late bound lifetime parameter is introduced here"; + let span = args.args[0].span(); + + if position == GenericArgPosition::Value + && args.num_lifetime_params() != param_counts.lifetimes + { + let mut err = tcx.sess.struct_span_err(span, msg); + err.span_note(span_late, note); + err.emit(); + } else { + let mut multispan = MultiSpan::from_span(span); + multispan.push_span_label(span_late, note); + tcx.struct_span_lint_hir( + LATE_BOUND_LIFETIME_ARGUMENTS, + args.args[0].id(), + multispan, + |lint| { + lint.build(msg).emit(); + }, + ); + } + + ExplicitLateBound::Yes + } else { + ExplicitLateBound::No + } + } +} diff --git a/compiler/rustc_typeck/src/astconv/mod.rs b/compiler/rustc_typeck/src/astconv/mod.rs new file mode 100644 index 000000000..8a5c7fee6 --- /dev/null +++ b/compiler/rustc_typeck/src/astconv/mod.rs @@ -0,0 +1,3091 @@ +//! Conversion from AST representation of types to the `ty.rs` representation. +//! The main routine here is `ast_ty_to_ty()`; each use is parameterized by an +//! instance of `AstConv`. 
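+//!
+//! A rough illustrative sketch of the lowering (schematic only, not literal
+//! constructor syntax; the real `ty::Ty` values are interned through `TyCtxt`):
+//!
+//! ```ignore (illustrative)
+//! // written type (HIR)        lowered `ty::Ty`, schematically
+//! // &'a [u8]              =>  Ref('a, Slice(u8), Not)
+//! // Vec<T>                =>  Adt(Vec, [T])
+//! // fn(i32) -> bool       =>  FnPtr(fn(i32) -> bool)
+//! ```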
+ +mod errors; +mod generics; + +use crate::bounds::Bounds; +use crate::collect::HirPlaceholderCollector; +use crate::errors::{ + AmbiguousLifetimeBound, MultipleRelaxedDefaultBounds, TraitObjectDeclaredWithNoTraits, + TypeofReservedKeywordUsed, ValueOfAssociatedStructAlreadySpecified, +}; +use crate::middle::resolve_lifetime as rl; +use crate::require_c_abi_if_c_variadic; +use rustc_ast::TraitObjectSyntax; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_errors::{ + struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed, FatalError, MultiSpan, +}; +use rustc_hir as hir; +use rustc_hir::def::{CtorOf, DefKind, Namespace, Res}; +use rustc_hir::def_id::{DefId, LocalDefId}; +use rustc_hir::intravisit::{walk_generics, Visitor as _}; +use rustc_hir::lang_items::LangItem; +use rustc_hir::{GenericArg, GenericArgs, OpaqueTyOrigin}; +use rustc_middle::middle::stability::AllowUnstable; +use rustc_middle::ty::subst::{self, GenericArgKind, InternalSubsts, Subst, SubstsRef}; +use rustc_middle::ty::GenericParamDefKind; +use rustc_middle::ty::{ + self, Const, DefIdTree, EarlyBinder, IsSuggestable, Ty, TyCtxt, TypeVisitable, +}; +use rustc_session::lint::builtin::{AMBIGUOUS_ASSOCIATED_ITEMS, BARE_TRAIT_OBJECTS}; +use rustc_span::edition::Edition; +use rustc_span::lev_distance::find_best_match_for_name; +use rustc_span::symbol::{kw, Ident, Symbol}; +use rustc_span::{Span, DUMMY_SP}; +use rustc_target::spec::abi; +use rustc_trait_selection::traits; +use rustc_trait_selection::traits::astconv_object_safety_violations; +use rustc_trait_selection::traits::error_reporting::{ + report_object_safety_error, suggestions::NextTypeParamName, +}; +use rustc_trait_selection::traits::wf::object_region_bounds; + +use smallvec::SmallVec; +use std::collections::BTreeSet; +use std::slice; + +#[derive(Debug)] +pub struct PathSeg(pub DefId, pub usize); + +pub trait AstConv<'tcx> { + fn tcx<'a>(&'a self) -> TyCtxt<'tcx>; + + fn item_def_id(&self) -> Option; + + /// Returns predicates in scope of the form `X: Foo`, where `X` + /// is a type parameter `X` with the given id `def_id` and T + /// matches `assoc_name`. This is a subset of the full set of + /// predicates. + /// + /// This is used for one specific purpose: resolving "short-hand" + /// associated type references like `T::Item`. In principle, we + /// would do that by first getting the full set of predicates in + /// scope and then filtering down to find those that apply to `T`, + /// but this can lead to cycle errors. The problem is that we have + /// to do this resolution *in order to create the predicates in + /// the first place*. Hence, we have this "special pass". + fn get_type_parameter_bounds( + &self, + span: Span, + def_id: DefId, + assoc_name: Ident, + ) -> ty::GenericPredicates<'tcx>; + + /// Returns the lifetime to use when a lifetime is omitted (and not elided). + fn re_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) + -> Option>; + + /// Returns the type to use when a type is omitted. + fn ty_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx>; + + /// Returns `true` if `_` is allowed in type signatures in the current context. + fn allow_ty_infer(&self) -> bool; + + /// Returns the const to use when a const is omitted. 
+ fn ct_infer( + &self, + ty: Ty<'tcx>, + param: Option<&ty::GenericParamDef>, + span: Span, + ) -> Const<'tcx>; + + /// Projecting an associated type from a (potentially) + /// higher-ranked trait reference is more complicated, because of + /// the possibility of late-bound regions appearing in the + /// associated type binding. This is not legal in function + /// signatures for that reason. In a function body, we can always + /// handle it because we can use inference variables to remove the + /// late-bound regions. + fn projected_ty_from_poly_trait_ref( + &self, + span: Span, + item_def_id: DefId, + item_segment: &hir::PathSegment<'_>, + poly_trait_ref: ty::PolyTraitRef<'tcx>, + ) -> Ty<'tcx>; + + /// Normalize an associated type coming from the user. + fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx>; + + /// Invoked when we encounter an error from some prior pass + /// (e.g., resolve) that is translated into a ty-error. This is + /// used to help suppress derived errors typeck might otherwise + /// report. + fn set_tainted_by_errors(&self); + + fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, span: Span); +} + +#[derive(Debug)] +struct ConvertedBinding<'a, 'tcx> { + hir_id: hir::HirId, + item_name: Ident, + kind: ConvertedBindingKind<'a, 'tcx>, + gen_args: &'a GenericArgs<'a>, + span: Span, +} + +#[derive(Debug)] +enum ConvertedBindingKind<'a, 'tcx> { + Equality(ty::Term<'tcx>), + Constraint(&'a [hir::GenericBound<'a>]), +} + +/// New-typed boolean indicating whether explicit late-bound lifetimes +/// are present in a set of generic arguments. +/// +/// For example if we have some method `fn f<'a>(&'a self)` implemented +/// for some type `T`, although `f` is generic in the lifetime `'a`, `'a` +/// is late-bound so should not be provided explicitly. Thus, if `f` is +/// instantiated with some generic arguments providing `'a` explicitly, +/// we taint those arguments with `ExplicitLateBound::Yes` so that we +/// can provide an appropriate diagnostic later. +#[derive(Copy, Clone, PartialEq)] +pub enum ExplicitLateBound { + Yes, + No, +} + +#[derive(Copy, Clone, PartialEq)] +pub enum IsMethodCall { + Yes, + No, +} + +/// Denotes the "position" of a generic argument, indicating if it is a generic type, +/// generic function or generic method call. +#[derive(Copy, Clone, PartialEq)] +pub(crate) enum GenericArgPosition { + Type, + Value, // e.g., functions + MethodCall, +} + +/// A marker denoting that the generic arguments that were +/// provided did not match the respective generic parameters. +#[derive(Clone, Default)] +pub struct GenericArgCountMismatch { + /// Indicates whether a fatal error was reported (`Some`), or just a lint (`None`). + pub reported: Option, + /// A list of spans of arguments provided that were not valid. + pub invalid_args: Vec, +} + +/// Decorates the result of a generic argument count mismatch +/// check with whether explicit late bounds were provided. 
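+///
+/// Illustrative sketch (hypothetical code): for a method `fn f<'a>(&'a self)` on some
+/// type `S`, the lifetime `'a` is late-bound, so a call that names it explicitly, such
+/// as `s.f::<'static>()`, is recorded with `ExplicitLateBound::Yes` and reported via
+/// the `late_bound_lifetime_arguments` lint rather than as a count mismatch.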
+#[derive(Clone)] +pub struct GenericArgCountResult { + pub explicit_late_bound: ExplicitLateBound, + pub correct: Result<(), GenericArgCountMismatch>, +} + +pub trait CreateSubstsForGenericArgsCtxt<'a, 'tcx> { + fn args_for_def_id(&mut self, def_id: DefId) -> (Option<&'a GenericArgs<'a>>, bool); + + fn provided_kind( + &mut self, + param: &ty::GenericParamDef, + arg: &GenericArg<'_>, + ) -> subst::GenericArg<'tcx>; + + fn inferred_kind( + &mut self, + substs: Option<&[subst::GenericArg<'tcx>]>, + param: &ty::GenericParamDef, + infer_args: bool, + ) -> subst::GenericArg<'tcx>; +} + +impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { + #[tracing::instrument(level = "debug", skip(self))] + pub fn ast_region_to_region( + &self, + lifetime: &hir::Lifetime, + def: Option<&ty::GenericParamDef>, + ) -> ty::Region<'tcx> { + let tcx = self.tcx(); + let lifetime_name = |def_id| tcx.hir().name(tcx.hir().local_def_id_to_hir_id(def_id)); + + let r = match tcx.named_region(lifetime.hir_id) { + Some(rl::Region::Static) => tcx.lifetimes.re_static, + + Some(rl::Region::LateBound(debruijn, index, def_id)) => { + let name = lifetime_name(def_id.expect_local()); + let br = ty::BoundRegion { + var: ty::BoundVar::from_u32(index), + kind: ty::BrNamed(def_id, name), + }; + tcx.mk_region(ty::ReLateBound(debruijn, br)) + } + + Some(rl::Region::EarlyBound(index, id)) => { + let name = lifetime_name(id.expect_local()); + tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { def_id: id, index, name })) + } + + Some(rl::Region::Free(scope, id)) => { + let name = lifetime_name(id.expect_local()); + tcx.mk_region(ty::ReFree(ty::FreeRegion { + scope, + bound_region: ty::BrNamed(id, name), + })) + + // (*) -- not late-bound, won't change + } + + None => { + self.re_infer(def, lifetime.span).unwrap_or_else(|| { + debug!(?lifetime, "unelided lifetime in signature"); + + // This indicates an illegal lifetime + // elision. `resolve_lifetime` should have + // reported an error in this case -- but if + // not, let's error out. + tcx.sess.delay_span_bug(lifetime.span, "unelided lifetime in signature"); + + // Supply some dummy value. We don't have an + // `re_error`, annoyingly, so use `'static`. + tcx.lifetimes.re_static + }) + } + }; + + debug!("ast_region_to_region(lifetime={:?}) yields {:?}", lifetime, r); + + r + } + + /// Given a path `path` that refers to an item `I` with the declared generics `decl_generics`, + /// returns an appropriate set of substitutions for this particular reference to `I`. + pub fn ast_path_substs_for_ty( + &self, + span: Span, + def_id: DefId, + item_segment: &hir::PathSegment<'_>, + ) -> SubstsRef<'tcx> { + let (substs, _) = self.create_substs_for_ast_path( + span, + def_id, + &[], + item_segment, + item_segment.args(), + item_segment.infer_args, + None, + ); + let assoc_bindings = self.create_assoc_bindings_for_generic_args(item_segment.args()); + + if let Some(b) = assoc_bindings.first() { + Self::prohibit_assoc_ty_binding(self.tcx(), b.span); + } + + substs + } + + /// Given the type/lifetime/const arguments provided to some path (along with + /// an implicit `Self`, if this is a trait reference), returns the complete + /// set of substitutions. This may involve applying defaulted type parameters. + /// Constraints on associated types are created from `create_assoc_bindings_for_generic_args`. + /// + /// Example: + /// + /// ```ignore (illustrative) + /// T: std::ops::Index + /// // ^1 ^^^^^^^^^^^^^^2 ^^^^3 ^^^^^^^^^^^4 + /// ``` + /// + /// 1. The `self_ty` here would refer to the type `T`. + /// 2. 
The path in question is the path to the trait `std::ops::Index`, + /// which will have been resolved to a `def_id` + /// 3. The `generic_args` contains info on the `<...>` contents. The `usize` type + /// parameters are returned in the `SubstsRef`, the associated type bindings like + /// `Output = u32` are returned from `create_assoc_bindings_for_generic_args`. + /// + /// Note that the type listing given here is *exactly* what the user provided. + /// + /// For (generic) associated types + /// + /// ```ignore (illustrative) + /// as Iterable>::Iter::<'a> + /// ``` + /// + /// We have the parent substs are the substs for the parent trait: + /// `[Vec, u8]` and `generic_args` are the arguments for the associated + /// type itself: `['a]`. The returned `SubstsRef` concatenates these two + /// lists: `[Vec, u8, 'a]`. + #[tracing::instrument(level = "debug", skip(self, span))] + fn create_substs_for_ast_path<'a>( + &self, + span: Span, + def_id: DefId, + parent_substs: &[subst::GenericArg<'tcx>], + seg: &hir::PathSegment<'_>, + generic_args: &'a hir::GenericArgs<'_>, + infer_args: bool, + self_ty: Option>, + ) -> (SubstsRef<'tcx>, GenericArgCountResult) { + // If the type is parameterized by this region, then replace this + // region with the current anon region binding (in other words, + // whatever & would get replaced with). + + let tcx = self.tcx(); + let generics = tcx.generics_of(def_id); + debug!("generics: {:?}", generics); + + if generics.has_self { + if generics.parent.is_some() { + // The parent is a trait so it should have at least one subst + // for the `Self` type. + assert!(!parent_substs.is_empty()) + } else { + // This item (presumably a trait) needs a self-type. + assert!(self_ty.is_some()); + } + } else { + assert!(self_ty.is_none() && parent_substs.is_empty()); + } + + let arg_count = Self::check_generic_arg_count( + tcx, + span, + def_id, + seg, + generics, + generic_args, + GenericArgPosition::Type, + self_ty.is_some(), + infer_args, + ); + + // Skip processing if type has no generic parameters. + // Traits always have `Self` as a generic parameter, which means they will not return early + // here and so associated type bindings will be handled regardless of whether there are any + // non-`Self` generic parameters. + if generics.params.is_empty() { + return (tcx.intern_substs(&[]), arg_count); + } + + let is_object = self_ty.map_or(false, |ty| ty == self.tcx().types.trait_object_dummy_self); + + struct SubstsForAstPathCtxt<'a, 'tcx> { + astconv: &'a (dyn AstConv<'tcx> + 'a), + def_id: DefId, + generic_args: &'a GenericArgs<'a>, + span: Span, + missing_type_params: Vec, + inferred_params: Vec, + infer_args: bool, + is_object: bool, + } + + impl<'tcx, 'a> SubstsForAstPathCtxt<'tcx, 'a> { + fn default_needs_object_self(&mut self, param: &ty::GenericParamDef) -> bool { + let tcx = self.astconv.tcx(); + if let GenericParamDefKind::Type { has_default, .. } = param.kind { + if self.is_object && has_default { + let default_ty = tcx.at(self.span).type_of(param.def_id); + let self_param = tcx.types.self_param; + if default_ty.walk().any(|arg| arg == self_param.into()) { + // There is no suitable inference default for a type parameter + // that references self, in an object type. 
+ return true; + } + } + } + + false + } + } + + impl<'a, 'tcx> CreateSubstsForGenericArgsCtxt<'a, 'tcx> for SubstsForAstPathCtxt<'a, 'tcx> { + fn args_for_def_id(&mut self, did: DefId) -> (Option<&'a GenericArgs<'a>>, bool) { + if did == self.def_id { + (Some(self.generic_args), self.infer_args) + } else { + // The last component of this tuple is unimportant. + (None, false) + } + } + + fn provided_kind( + &mut self, + param: &ty::GenericParamDef, + arg: &GenericArg<'_>, + ) -> subst::GenericArg<'tcx> { + let tcx = self.astconv.tcx(); + + let mut handle_ty_args = |has_default, ty: &hir::Ty<'_>| { + if has_default { + tcx.check_optional_stability( + param.def_id, + Some(arg.id()), + arg.span(), + None, + AllowUnstable::No, + |_, _| { + // Default generic parameters may not be marked + // with stability attributes, i.e. when the + // default parameter was defined at the same time + // as the rest of the type. As such, we ignore missing + // stability attributes. + }, + ); + } + if let (hir::TyKind::Infer, false) = (&ty.kind, self.astconv.allow_ty_infer()) { + self.inferred_params.push(ty.span); + tcx.ty_error().into() + } else { + self.astconv.ast_ty_to_ty(ty).into() + } + }; + + match (¶m.kind, arg) { + (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => { + self.astconv.ast_region_to_region(lt, Some(param)).into() + } + (&GenericParamDefKind::Type { has_default, .. }, GenericArg::Type(ty)) => { + handle_ty_args(has_default, ty) + } + (&GenericParamDefKind::Type { has_default, .. }, GenericArg::Infer(inf)) => { + handle_ty_args(has_default, &inf.to_ty()) + } + (GenericParamDefKind::Const { .. }, GenericArg::Const(ct)) => { + ty::Const::from_opt_const_arg_anon_const( + tcx, + ty::WithOptConstParam { + did: tcx.hir().local_def_id(ct.value.hir_id), + const_param_did: Some(param.def_id), + }, + ) + .into() + } + (&GenericParamDefKind::Const { .. }, hir::GenericArg::Infer(inf)) => { + let ty = tcx.at(self.span).type_of(param.def_id); + if self.astconv.allow_ty_infer() { + self.astconv.ct_infer(ty, Some(param), inf.span).into() + } else { + self.inferred_params.push(inf.span); + tcx.const_error(ty).into() + } + } + _ => unreachable!(), + } + } + + fn inferred_kind( + &mut self, + substs: Option<&[subst::GenericArg<'tcx>]>, + param: &ty::GenericParamDef, + infer_args: bool, + ) -> subst::GenericArg<'tcx> { + let tcx = self.astconv.tcx(); + match param.kind { + GenericParamDefKind::Lifetime => self + .astconv + .re_infer(Some(param), self.span) + .unwrap_or_else(|| { + debug!(?param, "unelided lifetime in signature"); + + // This indicates an illegal lifetime in a non-assoc-trait position + tcx.sess.delay_span_bug(self.span, "unelided lifetime in signature"); + + // Supply some dummy value. We don't have an + // `re_error`, annoyingly, so use `'static`. + tcx.lifetimes.re_static + }) + .into(), + GenericParamDefKind::Type { has_default, .. } => { + if !infer_args && has_default { + // No type parameter provided, but a default exists. + + // If we are converting an object type, then the + // `Self` parameter is unknown. However, some of the + // other type parameters may reference `Self` in their + // defaults. This will lead to an ICE if we are not + // careful! + if self.default_needs_object_self(param) { + self.missing_type_params.push(param.name); + tcx.ty_error().into() + } else { + // This is a default type parameter. 
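+ // For example (illustrative; `Map` is a made-up type, not from this code):
+ //
+ // ```
+ // struct Map<K, V = Vec<K>> { key: K, val: V }
+ // ```
+ //
+ // The written type `Map<u32>` reaches this branch for `V`; the default
+ // `Vec<K>` is instantiated with the substitutions gathered so far
+ // (`[u32]`), yielding `Vec<u32>`. That is why the partially-built `substs`
+ // are checked for type errors just below before substituting.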
+ let substs = substs.unwrap(); + if substs.iter().any(|arg| match arg.unpack() { + GenericArgKind::Type(ty) => ty.references_error(), + _ => false, + }) { + // Avoid ICE #86756 when type error recovery goes awry. + return tcx.ty_error().into(); + } + self.astconv + .normalize_ty( + self.span, + EarlyBinder(tcx.at(self.span).type_of(param.def_id)) + .subst(tcx, substs), + ) + .into() + } + } else if infer_args { + // No type parameters were provided, we can infer all. + let param = if !self.default_needs_object_self(param) { + Some(param) + } else { + None + }; + self.astconv.ty_infer(param, self.span).into() + } else { + // We've already errored above about the mismatch. + tcx.ty_error().into() + } + } + GenericParamDefKind::Const { has_default } => { + let ty = tcx.at(self.span).type_of(param.def_id); + if !infer_args && has_default { + tcx.bound_const_param_default(param.def_id) + .subst(tcx, substs.unwrap()) + .into() + } else { + if infer_args { + self.astconv.ct_infer(ty, Some(param), self.span).into() + } else { + // We've already errored above about the mismatch. + tcx.const_error(ty).into() + } + } + } + } + } + } + + let mut substs_ctx = SubstsForAstPathCtxt { + astconv: self, + def_id, + span, + generic_args, + missing_type_params: vec![], + inferred_params: vec![], + infer_args, + is_object, + }; + let substs = Self::create_substs_for_generic_args( + tcx, + def_id, + parent_substs, + self_ty.is_some(), + self_ty, + &arg_count, + &mut substs_ctx, + ); + + self.complain_about_missing_type_params( + substs_ctx.missing_type_params, + def_id, + span, + generic_args.args.is_empty(), + ); + + debug!( + "create_substs_for_ast_path(generic_params={:?}, self_ty={:?}) -> {:?}", + generics, self_ty, substs + ); + + (substs, arg_count) + } + + fn create_assoc_bindings_for_generic_args<'a>( + &self, + generic_args: &'a hir::GenericArgs<'_>, + ) -> Vec> { + // Convert associated-type bindings or constraints into a separate vector. + // Example: Given this: + // + // T: Iterator + // + // The `T` is passed in as a self-type; the `Item = u32` is + // not a "type parameter" of the `Iterator` trait, but rather + // a restriction on `::Item`, so it is passed + // back separately. 
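+ // Each binding becomes either an equality binding (`Item = u32` above,
+ // restricting `<T as Iterator>::Item`; equality bindings on associated
+ // consts take the same form) or a constraint binding such as `Item: Debug`,
+ // mirroring the two `ConvertedBindingKind` variants built below.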
+ let assoc_bindings = generic_args + .bindings + .iter() + .map(|binding| { + let kind = match binding.kind { + hir::TypeBindingKind::Equality { ref term } => match term { + hir::Term::Ty(ref ty) => { + ConvertedBindingKind::Equality(self.ast_ty_to_ty(ty).into()) + } + hir::Term::Const(ref c) => { + let local_did = self.tcx().hir().local_def_id(c.hir_id); + let c = Const::from_anon_const(self.tcx(), local_did); + ConvertedBindingKind::Equality(c.into()) + } + }, + hir::TypeBindingKind::Constraint { ref bounds } => { + ConvertedBindingKind::Constraint(bounds) + } + }; + ConvertedBinding { + hir_id: binding.hir_id, + item_name: binding.ident, + kind, + gen_args: binding.gen_args, + span: binding.span, + } + }) + .collect(); + + assoc_bindings + } + + pub(crate) fn create_substs_for_associated_item( + &self, + tcx: TyCtxt<'tcx>, + span: Span, + item_def_id: DefId, + item_segment: &hir::PathSegment<'_>, + parent_substs: SubstsRef<'tcx>, + ) -> SubstsRef<'tcx> { + debug!( + "create_substs_for_associated_item(span: {:?}, item_def_id: {:?}, item_segment: {:?}", + span, item_def_id, item_segment + ); + if tcx.generics_of(item_def_id).params.is_empty() { + self.prohibit_generics(slice::from_ref(item_segment).iter(), |_| {}); + + parent_substs + } else { + self.create_substs_for_ast_path( + span, + item_def_id, + parent_substs, + item_segment, + item_segment.args(), + item_segment.infer_args, + None, + ) + .0 + } + } + + /// Instantiates the path for the given trait reference, assuming that it's + /// bound to a valid trait type. Returns the `DefId` of the defining trait. + /// The type _cannot_ be a type other than a trait type. + /// + /// If the `projections` argument is `None`, then assoc type bindings like `Foo` + /// are disallowed. Otherwise, they are pushed onto the vector given. + pub fn instantiate_mono_trait_ref( + &self, + trait_ref: &hir::TraitRef<'_>, + self_ty: Ty<'tcx>, + ) -> ty::TraitRef<'tcx> { + self.prohibit_generics(trait_ref.path.segments.split_last().unwrap().1.iter(), |_| {}); + + self.ast_path_to_mono_trait_ref( + trait_ref.path.span, + trait_ref.trait_def_id().unwrap_or_else(|| FatalError.raise()), + self_ty, + trait_ref.path.segments.last().unwrap(), + true, + ) + } + + fn instantiate_poly_trait_ref_inner( + &self, + hir_id: hir::HirId, + span: Span, + binding_span: Option, + constness: ty::BoundConstness, + bounds: &mut Bounds<'tcx>, + speculative: bool, + trait_ref_span: Span, + trait_def_id: DefId, + trait_segment: &hir::PathSegment<'_>, + args: &GenericArgs<'_>, + infer_args: bool, + self_ty: Ty<'tcx>, + ) -> GenericArgCountResult { + let (substs, arg_count) = self.create_substs_for_ast_path( + trait_ref_span, + trait_def_id, + &[], + trait_segment, + args, + infer_args, + Some(self_ty), + ); + + let tcx = self.tcx(); + let bound_vars = tcx.late_bound_vars(hir_id); + debug!(?bound_vars); + + let assoc_bindings = self.create_assoc_bindings_for_generic_args(args); + + let poly_trait_ref = + ty::Binder::bind_with_vars(ty::TraitRef::new(trait_def_id, substs), bound_vars); + + debug!(?poly_trait_ref, ?assoc_bindings); + bounds.trait_bounds.push((poly_trait_ref, span, constness)); + + let mut dup_bindings = FxHashMap::default(); + for binding in &assoc_bindings { + // Specify type to assert that error was already reported in `Err` case. 
+ let _: Result<_, ErrorGuaranteed> = self.add_predicates_for_ast_type_binding( + hir_id, + poly_trait_ref, + binding, + bounds, + speculative, + &mut dup_bindings, + binding_span.unwrap_or(binding.span), + ); + // Okay to ignore `Err` because of `ErrorGuaranteed` (see above). + } + + arg_count + } + + /// Given a trait bound like `Debug`, applies that trait bound the given self-type to construct + /// a full trait reference. The resulting trait reference is returned. This may also generate + /// auxiliary bounds, which are added to `bounds`. + /// + /// Example: + /// + /// ```ignore (illustrative) + /// poly_trait_ref = Iterator + /// self_ty = Foo + /// ``` + /// + /// this would return `Foo: Iterator` and add `::Item = u32` into `bounds`. + /// + /// **A note on binders:** against our usual convention, there is an implied bounder around + /// the `self_ty` and `poly_trait_ref` parameters here. So they may reference bound regions. + /// If for example you had `for<'a> Foo<'a>: Bar<'a>`, then the `self_ty` would be `Foo<'a>` + /// where `'a` is a bound region at depth 0. Similarly, the `poly_trait_ref` would be + /// `Bar<'a>`. The returned poly-trait-ref will have this binder instantiated explicitly, + /// however. + #[tracing::instrument(level = "debug", skip(self, span, constness, bounds, speculative))] + pub(crate) fn instantiate_poly_trait_ref( + &self, + trait_ref: &hir::TraitRef<'_>, + span: Span, + constness: ty::BoundConstness, + self_ty: Ty<'tcx>, + bounds: &mut Bounds<'tcx>, + speculative: bool, + ) -> GenericArgCountResult { + let hir_id = trait_ref.hir_ref_id; + let binding_span = None; + let trait_ref_span = trait_ref.path.span; + let trait_def_id = trait_ref.trait_def_id().unwrap_or_else(|| FatalError.raise()); + let trait_segment = trait_ref.path.segments.last().unwrap(); + let args = trait_segment.args(); + let infer_args = trait_segment.infer_args; + + self.prohibit_generics(trait_ref.path.segments.split_last().unwrap().1.iter(), |_| {}); + self.complain_about_internal_fn_trait(span, trait_def_id, trait_segment, false); + + self.instantiate_poly_trait_ref_inner( + hir_id, + span, + binding_span, + constness, + bounds, + speculative, + trait_ref_span, + trait_def_id, + trait_segment, + args, + infer_args, + self_ty, + ) + } + + pub(crate) fn instantiate_lang_item_trait_ref( + &self, + lang_item: hir::LangItem, + span: Span, + hir_id: hir::HirId, + args: &GenericArgs<'_>, + self_ty: Ty<'tcx>, + bounds: &mut Bounds<'tcx>, + ) { + let binding_span = Some(span); + let constness = ty::BoundConstness::NotConst; + let speculative = false; + let trait_ref_span = span; + let trait_def_id = self.tcx().require_lang_item(lang_item, Some(span)); + let trait_segment = &hir::PathSegment::invalid(); + let infer_args = false; + + self.instantiate_poly_trait_ref_inner( + hir_id, + span, + binding_span, + constness, + bounds, + speculative, + trait_ref_span, + trait_def_id, + trait_segment, + args, + infer_args, + self_ty, + ); + } + + fn ast_path_to_mono_trait_ref( + &self, + span: Span, + trait_def_id: DefId, + self_ty: Ty<'tcx>, + trait_segment: &hir::PathSegment<'_>, + is_impl: bool, + ) -> ty::TraitRef<'tcx> { + let (substs, _) = self.create_substs_for_ast_trait_ref( + span, + trait_def_id, + self_ty, + trait_segment, + is_impl, + ); + let assoc_bindings = self.create_assoc_bindings_for_generic_args(trait_segment.args()); + if let Some(b) = assoc_bindings.first() { + Self::prohibit_assoc_ty_binding(self.tcx(), b.span); + } + ty::TraitRef::new(trait_def_id, substs) + } + + 
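+ // For example (illustrative; `Counter` is a made-up type, not from this
+ // code): `ast_path_to_mono_trait_ref` above handles, among other things,
+ // the trait reference of an `impl` header, where an associated type
+ // binding is rejected:
+ //
+ // ```
+ // struct Counter;
+ // impl Iterator for Counter {
+ //     type Item = u32;
+ //     fn next(&mut self) -> Option<u32> { None }
+ // }
+ // // `impl Iterator<Item = u32> for Counter { .. }` is an error instead
+ // // (E0229, emitted through `prohibit_assoc_ty_binding`).
+ // ```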
#[tracing::instrument(level = "debug", skip(self, span))] + fn create_substs_for_ast_trait_ref<'a>( + &self, + span: Span, + trait_def_id: DefId, + self_ty: Ty<'tcx>, + trait_segment: &'a hir::PathSegment<'a>, + is_impl: bool, + ) -> (SubstsRef<'tcx>, GenericArgCountResult) { + self.complain_about_internal_fn_trait(span, trait_def_id, trait_segment, is_impl); + + self.create_substs_for_ast_path( + span, + trait_def_id, + &[], + trait_segment, + trait_segment.args(), + trait_segment.infer_args, + Some(self_ty), + ) + } + + fn trait_defines_associated_type_named(&self, trait_def_id: DefId, assoc_name: Ident) -> bool { + self.tcx() + .associated_items(trait_def_id) + .find_by_name_and_kind(self.tcx(), assoc_name, ty::AssocKind::Type, trait_def_id) + .is_some() + } + fn trait_defines_associated_const_named(&self, trait_def_id: DefId, assoc_name: Ident) -> bool { + self.tcx() + .associated_items(trait_def_id) + .find_by_name_and_kind(self.tcx(), assoc_name, ty::AssocKind::Const, trait_def_id) + .is_some() + } + + // Sets `implicitly_sized` to true on `Bounds` if necessary + pub(crate) fn add_implicitly_sized<'hir>( + &self, + bounds: &mut Bounds<'hir>, + ast_bounds: &'hir [hir::GenericBound<'hir>], + self_ty_where_predicates: Option<(hir::HirId, &'hir [hir::WherePredicate<'hir>])>, + span: Span, + ) { + let tcx = self.tcx(); + + // Try to find an unbound in bounds. + let mut unbound = None; + let mut search_bounds = |ast_bounds: &'hir [hir::GenericBound<'hir>]| { + for ab in ast_bounds { + if let hir::GenericBound::Trait(ptr, hir::TraitBoundModifier::Maybe) = ab { + if unbound.is_none() { + unbound = Some(&ptr.trait_ref); + } else { + tcx.sess.emit_err(MultipleRelaxedDefaultBounds { span }); + } + } + } + }; + search_bounds(ast_bounds); + if let Some((self_ty, where_clause)) = self_ty_where_predicates { + let self_ty_def_id = tcx.hir().local_def_id(self_ty).to_def_id(); + for clause in where_clause { + if let hir::WherePredicate::BoundPredicate(pred) = clause { + if pred.is_param_bound(self_ty_def_id) { + search_bounds(pred.bounds); + } + } + } + } + + let sized_def_id = tcx.lang_items().require(LangItem::Sized); + match (&sized_def_id, unbound) { + (Ok(sized_def_id), Some(tpb)) + if tpb.path.res == Res::Def(DefKind::Trait, *sized_def_id) => + { + // There was in fact a `?Sized` bound, return without doing anything + return; + } + (_, Some(_)) => { + // There was a `?Trait` bound, but it was not `?Sized`; warn. + tcx.sess.span_warn( + span, + "default bound relaxed for a type parameter, but \ + this does nothing because the given bound is not \ + a default; only `?Sized` is supported", + ); + // Otherwise, add implicitly sized if `Sized` is available. + } + _ => { + // There was no `?Sized` bound; add implicitly sized if `Sized` is available. + } + } + if sized_def_id.is_err() { + // No lang item for `Sized`, so we can't add it as a bound. + return; + } + bounds.implicitly_sized = Some(span); + } + + /// This helper takes a *converted* parameter type (`param_ty`) + /// and an *unconverted* list of bounds: + /// + /// ```text + /// fn foo + /// ^ ^^^^^ `ast_bounds` parameter, in HIR form + /// | + /// `param_ty`, in ty form + /// ``` + /// + /// It adds these `ast_bounds` into the `bounds` structure. + /// + /// **A note on binders:** there is an implied binder around + /// `param_ty` and `ast_bounds`. See `instantiate_poly_trait_ref` + /// for more details. 
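+ ///
+ /// For illustration, the bound shapes handled below include:
+ ///
+ /// ```ignore (illustrative)
+ /// T: Clone           // plain trait bound
+ /// T: ~const Default  // `MaybeConst` modifier
+ /// T: ?Sized          // `Maybe` modifier, skipped here (see `add_implicitly_sized`)
+ /// T: 'static         // outlives bound
+ /// ```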
+ #[tracing::instrument(level = "debug", skip(self, ast_bounds, bounds))] + pub(crate) fn add_bounds<'hir, I: Iterator>>( + &self, + param_ty: Ty<'tcx>, + ast_bounds: I, + bounds: &mut Bounds<'tcx>, + bound_vars: &'tcx ty::List, + ) { + for ast_bound in ast_bounds { + match ast_bound { + hir::GenericBound::Trait(poly_trait_ref, modifier) => { + let constness = match modifier { + hir::TraitBoundModifier::MaybeConst => ty::BoundConstness::ConstIfConst, + hir::TraitBoundModifier::None => ty::BoundConstness::NotConst, + hir::TraitBoundModifier::Maybe => continue, + }; + + let _ = self.instantiate_poly_trait_ref( + &poly_trait_ref.trait_ref, + poly_trait_ref.span, + constness, + param_ty, + bounds, + false, + ); + } + &hir::GenericBound::LangItemTrait(lang_item, span, hir_id, args) => { + self.instantiate_lang_item_trait_ref( + lang_item, span, hir_id, args, param_ty, bounds, + ); + } + hir::GenericBound::Outlives(lifetime) => { + let region = self.ast_region_to_region(lifetime, None); + bounds + .region_bounds + .push((ty::Binder::bind_with_vars(region, bound_vars), lifetime.span)); + } + } + } + } + + /// Translates a list of bounds from the HIR into the `Bounds` data structure. + /// The self-type for the bounds is given by `param_ty`. + /// + /// Example: + /// + /// ```ignore (illustrative) + /// fn foo() { } + /// // ^ ^^^^^^^^^ ast_bounds + /// // param_ty + /// ``` + /// + /// The `sized_by_default` parameter indicates if, in this context, the `param_ty` should be + /// considered `Sized` unless there is an explicit `?Sized` bound. This would be true in the + /// example above, but is not true in supertrait listings like `trait Foo: Bar + Baz`. + /// + /// `span` should be the declaration size of the parameter. + pub(crate) fn compute_bounds( + &self, + param_ty: Ty<'tcx>, + ast_bounds: &[hir::GenericBound<'_>], + ) -> Bounds<'tcx> { + self.compute_bounds_inner(param_ty, ast_bounds) + } + + /// Convert the bounds in `ast_bounds` that refer to traits which define an associated type + /// named `assoc_name` into ty::Bounds. Ignore the rest. + pub(crate) fn compute_bounds_that_match_assoc_type( + &self, + param_ty: Ty<'tcx>, + ast_bounds: &[hir::GenericBound<'_>], + assoc_name: Ident, + ) -> Bounds<'tcx> { + let mut result = Vec::new(); + + for ast_bound in ast_bounds { + if let Some(trait_ref) = ast_bound.trait_ref() + && let Some(trait_did) = trait_ref.trait_def_id() + && self.tcx().trait_may_define_assoc_type(trait_did, assoc_name) + { + result.push(ast_bound.clone()); + } + } + + self.compute_bounds_inner(param_ty, &result) + } + + fn compute_bounds_inner( + &self, + param_ty: Ty<'tcx>, + ast_bounds: &[hir::GenericBound<'_>], + ) -> Bounds<'tcx> { + let mut bounds = Bounds::default(); + + self.add_bounds(param_ty, ast_bounds.iter(), &mut bounds, ty::List::empty()); + debug!(?bounds); + + bounds + } + + /// Given an HIR binding like `Item = Foo` or `Item: Foo`, pushes the corresponding predicates + /// onto `bounds`. + /// + /// **A note on binders:** given something like `T: for<'a> Iterator`, the + /// `trait_ref` here will be `for<'a> T: Iterator`. The `binding` data however is from *inside* + /// the binder (e.g., `&'a u32`) and hence may reference bound regions. 
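+ ///
+ /// Written out in full (illustrative), such a bound looks like:
+ ///
+ /// ```ignore (illustrative)
+ /// T: for<'a> Iterator<Item = &'a u32>
+ /// ```
+ ///
+ /// where `Item = &'a u32` is the `binding` handled here.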
+ #[tracing::instrument( + level = "debug", + skip(self, bounds, speculative, dup_bindings, path_span) + )] + fn add_predicates_for_ast_type_binding( + &self, + hir_ref_id: hir::HirId, + trait_ref: ty::PolyTraitRef<'tcx>, + binding: &ConvertedBinding<'_, 'tcx>, + bounds: &mut Bounds<'tcx>, + speculative: bool, + dup_bindings: &mut FxHashMap, + path_span: Span, + ) -> Result<(), ErrorGuaranteed> { + // Given something like `U: SomeTrait`, we want to produce a + // predicate like `::T = X`. This is somewhat + // subtle in the event that `T` is defined in a supertrait of + // `SomeTrait`, because in that case we need to upcast. + // + // That is, consider this case: + // + // ``` + // trait SubTrait: SuperTrait { } + // trait SuperTrait { type T; } + // + // ... B: SubTrait ... + // ``` + // + // We want to produce `>::T == foo`. + + let tcx = self.tcx(); + + let candidate = + if self.trait_defines_associated_type_named(trait_ref.def_id(), binding.item_name) { + // Simple case: X is defined in the current trait. + trait_ref + } else { + // Otherwise, we have to walk through the supertraits to find + // those that do. + self.one_bound_for_assoc_type( + || traits::supertraits(tcx, trait_ref), + || trait_ref.print_only_trait_path().to_string(), + binding.item_name, + path_span, + || match binding.kind { + ConvertedBindingKind::Equality(ty) => Some(ty.to_string()), + _ => None, + }, + )? + }; + + let (assoc_ident, def_scope) = + tcx.adjust_ident_and_get_scope(binding.item_name, candidate.def_id(), hir_ref_id); + + // We have already adjusted the item name above, so compare with `ident.normalize_to_macros_2_0()` instead + // of calling `filter_by_name_and_kind`. + let find_item_of_kind = |kind| { + tcx.associated_items(candidate.def_id()) + .filter_by_name_unhygienic(assoc_ident.name) + .find(|i| i.kind == kind && i.ident(tcx).normalize_to_macros_2_0() == assoc_ident) + }; + let assoc_item = find_item_of_kind(ty::AssocKind::Type) + .or_else(|| find_item_of_kind(ty::AssocKind::Const)) + .expect("missing associated type"); + + if !assoc_item.visibility(tcx).is_accessible_from(def_scope, tcx) { + tcx.sess + .struct_span_err( + binding.span, + &format!("{} `{}` is private", assoc_item.kind, binding.item_name), + ) + .span_label(binding.span, &format!("private {}", assoc_item.kind)) + .emit(); + } + tcx.check_stability(assoc_item.def_id, Some(hir_ref_id), binding.span, None); + + if !speculative { + dup_bindings + .entry(assoc_item.def_id) + .and_modify(|prev_span| { + self.tcx().sess.emit_err(ValueOfAssociatedStructAlreadySpecified { + span: binding.span, + prev_span: *prev_span, + item_name: binding.item_name, + def_path: tcx.def_path_str(assoc_item.container_id(tcx)), + }); + }) + .or_insert(binding.span); + } + + // Include substitutions for generic parameters of associated types + let projection_ty = candidate.map_bound(|trait_ref| { + let ident = Ident::new(assoc_item.name, binding.item_name.span); + let item_segment = hir::PathSegment { + ident, + hir_id: Some(binding.hir_id), + res: None, + args: Some(binding.gen_args), + infer_args: false, + }; + + let substs_trait_ref_and_assoc_item = self.create_substs_for_associated_item( + tcx, + path_span, + assoc_item.def_id, + &item_segment, + trait_ref.substs, + ); + + debug!( + "add_predicates_for_ast_type_binding: substs for trait-ref and assoc_item: {:?}", + substs_trait_ref_and_assoc_item + ); + + ty::ProjectionTy { + item_def_id: assoc_item.def_id, + substs: substs_trait_ref_and_assoc_item, + } + }); + + if !speculative { + // Find any late-bound 
regions declared in `ty` that are not + // declared in the trait-ref or assoc_item. These are not well-formed. + // + // Example: + // + // for<'a> ::Item = &'a str // <-- 'a is bad + // for<'a> >::Output = &'a str // <-- 'a is ok + if let ConvertedBindingKind::Equality(ty) = binding.kind { + let late_bound_in_trait_ref = + tcx.collect_constrained_late_bound_regions(&projection_ty); + let late_bound_in_ty = + tcx.collect_referenced_late_bound_regions(&trait_ref.rebind(ty)); + debug!("late_bound_in_trait_ref = {:?}", late_bound_in_trait_ref); + debug!("late_bound_in_ty = {:?}", late_bound_in_ty); + + // FIXME: point at the type params that don't have appropriate lifetimes: + // struct S1 Fn(&i32, &i32) -> &'a i32>(F); + // ---- ---- ^^^^^^^ + self.validate_late_bound_regions( + late_bound_in_trait_ref, + late_bound_in_ty, + |br_name| { + struct_span_err!( + tcx.sess, + binding.span, + E0582, + "binding for associated type `{}` references {}, \ + which does not appear in the trait input types", + binding.item_name, + br_name + ) + }, + ); + } + } + + match binding.kind { + ConvertedBindingKind::Equality(mut term) => { + // "Desugar" a constraint like `T: Iterator` this to + // the "projection predicate" for: + // + // `::Item = u32` + let assoc_item_def_id = projection_ty.skip_binder().item_def_id; + let def_kind = tcx.def_kind(assoc_item_def_id); + match (def_kind, term) { + (hir::def::DefKind::AssocTy, ty::Term::Ty(_)) + | (hir::def::DefKind::AssocConst, ty::Term::Const(_)) => (), + (_, _) => { + let got = if let ty::Term::Ty(_) = term { "type" } else { "constant" }; + let expected = def_kind.descr(assoc_item_def_id); + tcx.sess + .struct_span_err( + binding.span, + &format!("expected {expected} bound, found {got}"), + ) + .span_note( + tcx.def_span(assoc_item_def_id), + &format!("{expected} defined here"), + ) + .emit(); + term = match def_kind { + hir::def::DefKind::AssocTy => tcx.ty_error().into(), + hir::def::DefKind::AssocConst => tcx + .const_error( + tcx.bound_type_of(assoc_item_def_id) + .subst(tcx, projection_ty.skip_binder().substs), + ) + .into(), + _ => unreachable!(), + }; + } + } + bounds.projection_bounds.push(( + projection_ty.map_bound(|projection_ty| ty::ProjectionPredicate { + projection_ty, + term: term, + }), + binding.span, + )); + } + ConvertedBindingKind::Constraint(ast_bounds) => { + // "Desugar" a constraint like `T: Iterator` to + // + // `::Item: Debug` + // + // Calling `skip_binder` is okay, because `add_bounds` expects the `param_ty` + // parameter to have a skipped binder. + let param_ty = tcx.mk_ty(ty::Projection(projection_ty.skip_binder())); + self.add_bounds(param_ty, ast_bounds.iter(), bounds, candidate.bound_vars()); + } + } + Ok(()) + } + + fn ast_path_to_ty( + &self, + span: Span, + did: DefId, + item_segment: &hir::PathSegment<'_>, + ) -> Ty<'tcx> { + let substs = self.ast_path_substs_for_ty(span, did, item_segment); + self.normalize_ty( + span, + EarlyBinder(self.tcx().at(span).type_of(did)).subst(self.tcx(), substs), + ) + } + + fn conv_object_ty_poly_trait_ref( + &self, + span: Span, + trait_bounds: &[hir::PolyTraitRef<'_>], + lifetime: &hir::Lifetime, + borrowed: bool, + ) -> Ty<'tcx> { + let tcx = self.tcx(); + + let mut bounds = Bounds::default(); + let mut potential_assoc_types = Vec::new(); + let dummy_self = self.tcx().types.trait_object_dummy_self; + for trait_bound in trait_bounds.iter().rev() { + if let GenericArgCountResult { + correct: + Err(GenericArgCountMismatch { invalid_args: cur_potential_assoc_types, .. }), + .. 
+ } = self.instantiate_poly_trait_ref( + &trait_bound.trait_ref, + trait_bound.span, + ty::BoundConstness::NotConst, + dummy_self, + &mut bounds, + false, + ) { + potential_assoc_types.extend(cur_potential_assoc_types); + } + } + + // Expand trait aliases recursively and check that only one regular (non-auto) trait + // is used and no 'maybe' bounds are used. + let expanded_traits = + traits::expand_trait_aliases(tcx, bounds.trait_bounds.iter().map(|&(a, b, _)| (a, b))); + let (mut auto_traits, regular_traits): (Vec<_>, Vec<_>) = expanded_traits + .filter(|i| i.trait_ref().self_ty().skip_binder() == dummy_self) + .partition(|i| tcx.trait_is_auto(i.trait_ref().def_id())); + if regular_traits.len() > 1 { + let first_trait = ®ular_traits[0]; + let additional_trait = ®ular_traits[1]; + let mut err = struct_span_err!( + tcx.sess, + additional_trait.bottom().1, + E0225, + "only auto traits can be used as additional traits in a trait object" + ); + additional_trait.label_with_exp_info( + &mut err, + "additional non-auto trait", + "additional use", + ); + first_trait.label_with_exp_info(&mut err, "first non-auto trait", "first use"); + err.help(&format!( + "consider creating a new trait with all of these as supertraits and using that \ + trait here instead: `trait NewTrait: {} {{}}`", + regular_traits + .iter() + .map(|t| t.trait_ref().print_only_trait_path().to_string()) + .collect::>() + .join(" + "), + )); + err.note( + "auto-traits like `Send` and `Sync` are traits that have special properties; \ + for more information on them, visit \ + ", + ); + err.emit(); + } + + if regular_traits.is_empty() && auto_traits.is_empty() { + let trait_alias_span = bounds + .trait_bounds + .iter() + .map(|&(trait_ref, _, _)| trait_ref.def_id()) + .find(|&trait_ref| tcx.is_trait_alias(trait_ref)) + .map(|trait_ref| tcx.def_span(trait_ref)); + tcx.sess.emit_err(TraitObjectDeclaredWithNoTraits { span, trait_alias_span }); + return tcx.ty_error(); + } + + // Check that there are no gross object safety violations; + // most importantly, that the supertraits don't contain `Self`, + // to avoid ICEs. + for item in ®ular_traits { + let object_safety_violations = + astconv_object_safety_violations(tcx, item.trait_ref().def_id()); + if !object_safety_violations.is_empty() { + report_object_safety_error( + tcx, + span, + item.trait_ref().def_id(), + &object_safety_violations, + ) + .emit(); + return tcx.ty_error(); + } + } + + // Use a `BTreeSet` to keep output in a more consistent order. 
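+ // `associated_types` collects, per bound span, the associated types that the
+ // regular (non-auto) trait bounds require; those constrained by an explicit
+ // projection bound are removed again further down, and anything left over is
+ // reported by `complain_about_missing_associated_types`.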
+ let mut associated_types: FxHashMap> = FxHashMap::default(); + + let regular_traits_refs_spans = bounds + .trait_bounds + .into_iter() + .filter(|(trait_ref, _, _)| !tcx.trait_is_auto(trait_ref.def_id())); + + for (base_trait_ref, span, constness) in regular_traits_refs_spans { + assert_eq!(constness, ty::BoundConstness::NotConst); + + for obligation in traits::elaborate_trait_ref(tcx, base_trait_ref) { + debug!( + "conv_object_ty_poly_trait_ref: observing object predicate `{:?}`", + obligation.predicate + ); + + let bound_predicate = obligation.predicate.kind(); + match bound_predicate.skip_binder() { + ty::PredicateKind::Trait(pred) => { + let pred = bound_predicate.rebind(pred); + associated_types.entry(span).or_default().extend( + tcx.associated_items(pred.def_id()) + .in_definition_order() + .filter(|item| item.kind == ty::AssocKind::Type) + .map(|item| item.def_id), + ); + } + ty::PredicateKind::Projection(pred) => { + let pred = bound_predicate.rebind(pred); + // A `Self` within the original bound will be substituted with a + // `trait_object_dummy_self`, so check for that. + let references_self = match pred.skip_binder().term { + ty::Term::Ty(ty) => ty.walk().any(|arg| arg == dummy_self.into()), + ty::Term::Const(c) => c.ty().walk().any(|arg| arg == dummy_self.into()), + }; + + // If the projection output contains `Self`, force the user to + // elaborate it explicitly to avoid a lot of complexity. + // + // The "classically useful" case is the following: + // ``` + // trait MyTrait: FnMut() -> ::MyOutput { + // type MyOutput; + // } + // ``` + // + // Here, the user could theoretically write `dyn MyTrait`, + // but actually supporting that would "expand" to an infinitely-long type + // `fix $ τ → dyn MyTrait::MyOutput`. + // + // Instead, we force the user to write + // `dyn MyTrait`, which is uglier but works. See + // the discussion in #56288 for alternatives. + if !references_self { + // Include projections defined on supertraits. + bounds.projection_bounds.push((pred, span)); + } + } + _ => (), + } + } + } + + for (projection_bound, _) in &bounds.projection_bounds { + for def_ids in associated_types.values_mut() { + def_ids.remove(&projection_bound.projection_def_id()); + } + } + + self.complain_about_missing_associated_types( + associated_types, + potential_assoc_types, + trait_bounds, + ); + + // De-duplicate auto traits so that, e.g., `dyn Trait + Send + Send` is the same as + // `dyn Trait + Send`. + // We remove duplicates by inserting into a `FxHashSet` to avoid re-ordering + // the bounds + let mut duplicates = FxHashSet::default(); + auto_traits.retain(|i| duplicates.insert(i.trait_ref().def_id())); + debug!("regular_traits: {:?}", regular_traits); + debug!("auto_traits: {:?}", auto_traits); + + // Erase the `dummy_self` (`trait_object_dummy_self`) used above. + let existential_trait_refs = regular_traits.iter().map(|i| { + i.trait_ref().map_bound(|trait_ref: ty::TraitRef<'tcx>| { + if trait_ref.self_ty() != dummy_self { + // FIXME: There appears to be a missing filter on top of `expand_trait_aliases`, + // which picks up non-supertraits where clauses - but also, the object safety + // completely ignores trait aliases, which could be object safety hazards. We + // `delay_span_bug` here to avoid an ICE in stable even when the feature is + // disabled. 
(#66420) + tcx.sess.delay_span_bug( + DUMMY_SP, + &format!( + "trait_ref_to_existential called on {:?} with non-dummy Self", + trait_ref, + ), + ); + } + ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref) + }) + }); + let existential_projections = bounds.projection_bounds.iter().map(|(bound, _)| { + bound.map_bound(|b| { + if b.projection_ty.self_ty() != dummy_self { + tcx.sess.delay_span_bug( + DUMMY_SP, + &format!("trait_ref_to_existential called on {:?} with non-dummy Self", b), + ); + } + ty::ExistentialProjection::erase_self_ty(tcx, b) + }) + }); + + let regular_trait_predicates = existential_trait_refs + .map(|trait_ref| trait_ref.map_bound(ty::ExistentialPredicate::Trait)); + let auto_trait_predicates = auto_traits.into_iter().map(|trait_ref| { + ty::Binder::dummy(ty::ExistentialPredicate::AutoTrait(trait_ref.trait_ref().def_id())) + }); + // N.b. principal, projections, auto traits + // FIXME: This is actually wrong with multiple principals in regards to symbol mangling + let mut v = regular_trait_predicates + .chain( + existential_projections.map(|x| x.map_bound(ty::ExistentialPredicate::Projection)), + ) + .chain(auto_trait_predicates) + .collect::>(); + v.sort_by(|a, b| a.skip_binder().stable_cmp(tcx, &b.skip_binder())); + v.dedup(); + let existential_predicates = tcx.mk_poly_existential_predicates(v.into_iter()); + + // Use explicitly-specified region bound. + let region_bound = if !lifetime.is_elided() { + self.ast_region_to_region(lifetime, None) + } else { + self.compute_object_lifetime_bound(span, existential_predicates).unwrap_or_else(|| { + if tcx.named_region(lifetime.hir_id).is_some() { + self.ast_region_to_region(lifetime, None) + } else { + self.re_infer(None, span).unwrap_or_else(|| { + let mut err = struct_span_err!( + tcx.sess, + span, + E0228, + "the lifetime bound for this object type cannot be deduced \ + from context; please supply an explicit bound" + ); + if borrowed { + // We will have already emitted an error E0106 complaining about a + // missing named lifetime in `&dyn Trait`, so we elide this one. + err.delay_as_bug(); + } else { + err.emit(); + } + tcx.lifetimes.re_static + }) + } + }) + }; + debug!("region_bound: {:?}", region_bound); + + let ty = tcx.mk_dynamic(existential_predicates, region_bound); + debug!("trait_object_type: {:?}", ty); + ty + } + + fn report_ambiguous_associated_type( + &self, + span: Span, + type_str: &str, + trait_str: &str, + name: Symbol, + ) -> ErrorGuaranteed { + let mut err = struct_span_err!(self.tcx().sess, span, E0223, "ambiguous associated type"); + if self + .tcx() + .resolutions(()) + .confused_type_with_std_module + .keys() + .any(|full_span| full_span.contains(span)) + { + err.span_suggestion( + span.shrink_to_lo(), + "you are looking for the module in `std`, not the primitive type", + "std::", + Applicability::MachineApplicable, + ); + } else { + err.span_suggestion( + span, + "use fully-qualified syntax", + format!("<{} as {}>::{}", type_str, trait_str, name), + Applicability::HasPlaceholders, + ); + } + err.emit() + } + + // Search for a bound on a type parameter which includes the associated item + // given by `assoc_name`. `ty_param_def_id` is the `DefId` of the type parameter + // This function will fail if there are no suitable bounds or there is + // any ambiguity. 
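+ //
+ // For example (illustrative):
+ //
+ // ```
+ // fn f<T: Iterator>(x: T::Item) {}
+ // ```
+ //
+ // Resolving `T::Item` searches the bounds of `T` for a trait defining an
+ // associated type named `Item` (here, `Iterator`). A signature like
+ // `fn f<T: Iterator + IntoIterator>(x: T::Item)` is ambiguous instead, and
+ // is rejected via `one_bound_for_assoc_type`.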
+ fn find_bound_for_assoc_item( + &self, + ty_param_def_id: LocalDefId, + assoc_name: Ident, + span: Span, + ) -> Result, ErrorGuaranteed> { + let tcx = self.tcx(); + + debug!( + "find_bound_for_assoc_item(ty_param_def_id={:?}, assoc_name={:?}, span={:?})", + ty_param_def_id, assoc_name, span, + ); + + let predicates = &self + .get_type_parameter_bounds(span, ty_param_def_id.to_def_id(), assoc_name) + .predicates; + + debug!("find_bound_for_assoc_item: predicates={:#?}", predicates); + + let param_name = tcx.hir().ty_param_name(ty_param_def_id); + self.one_bound_for_assoc_type( + || { + traits::transitive_bounds_that_define_assoc_type( + tcx, + predicates.iter().filter_map(|(p, _)| { + Some(p.to_opt_poly_trait_pred()?.map_bound(|t| t.trait_ref)) + }), + assoc_name, + ) + }, + || param_name.to_string(), + assoc_name, + span, + || None, + ) + } + + // Checks that `bounds` contains exactly one element and reports appropriate + // errors otherwise. + fn one_bound_for_assoc_type( + &self, + all_candidates: impl Fn() -> I, + ty_param_name: impl Fn() -> String, + assoc_name: Ident, + span: Span, + is_equality: impl Fn() -> Option, + ) -> Result, ErrorGuaranteed> + where + I: Iterator>, + { + let mut matching_candidates = all_candidates() + .filter(|r| self.trait_defines_associated_type_named(r.def_id(), assoc_name)); + let mut const_candidates = all_candidates() + .filter(|r| self.trait_defines_associated_const_named(r.def_id(), assoc_name)); + + let (bound, next_cand) = match (matching_candidates.next(), const_candidates.next()) { + (Some(bound), _) => (bound, matching_candidates.next()), + (None, Some(bound)) => (bound, const_candidates.next()), + (None, None) => { + let reported = self.complain_about_assoc_type_not_found( + all_candidates, + &ty_param_name(), + assoc_name, + span, + ); + return Err(reported); + } + }; + debug!("one_bound_for_assoc_type: bound = {:?}", bound); + + if let Some(bound2) = next_cand { + debug!("one_bound_for_assoc_type: bound2 = {:?}", bound2); + + let is_equality = is_equality(); + let bounds = IntoIterator::into_iter([bound, bound2]).chain(matching_candidates); + let mut err = if is_equality.is_some() { + // More specific Error Index entry. 
+ struct_span_err!( + self.tcx().sess, + span, + E0222, + "ambiguous associated type `{}` in bounds of `{}`", + assoc_name, + ty_param_name() + ) + } else { + struct_span_err!( + self.tcx().sess, + span, + E0221, + "ambiguous associated type `{}` in bounds of `{}`", + assoc_name, + ty_param_name() + ) + }; + err.span_label(span, format!("ambiguous associated type `{}`", assoc_name)); + + let mut where_bounds = vec![]; + for bound in bounds { + let bound_id = bound.def_id(); + let bound_span = self + .tcx() + .associated_items(bound_id) + .find_by_name_and_kind(self.tcx(), assoc_name, ty::AssocKind::Type, bound_id) + .and_then(|item| self.tcx().hir().span_if_local(item.def_id)); + + if let Some(bound_span) = bound_span { + err.span_label( + bound_span, + format!( + "ambiguous `{}` from `{}`", + assoc_name, + bound.print_only_trait_path(), + ), + ); + if let Some(constraint) = &is_equality { + where_bounds.push(format!( + " T: {trait}::{assoc} = {constraint}", + trait=bound.print_only_trait_path(), + assoc=assoc_name, + constraint=constraint, + )); + } else { + err.span_suggestion_verbose( + span.with_hi(assoc_name.span.lo()), + "use fully qualified syntax to disambiguate", + format!( + "<{} as {}>::", + ty_param_name(), + bound.print_only_trait_path(), + ), + Applicability::MaybeIncorrect, + ); + } + } else { + err.note(&format!( + "associated type `{}` could derive from `{}`", + ty_param_name(), + bound.print_only_trait_path(), + )); + } + } + if !where_bounds.is_empty() { + err.help(&format!( + "consider introducing a new type parameter `T` and adding `where` constraints:\ + \n where\n T: {},\n{}", + ty_param_name(), + where_bounds.join(",\n"), + )); + } + let reported = err.emit(); + if !where_bounds.is_empty() { + return Err(reported); + } + } + + Ok(bound) + } + + // Create a type from a path to an associated type. + // For a path `A::B::C::D`, `qself_ty` and `qself_def` are the type and def for `A::B::C` + // and item_segment is the path segment for `D`. We return a type and a def for + // the whole path. + // Will fail except for `T::A` and `Self::A`; i.e., if `qself_ty`/`qself_def` are not a type + // parameter or `Self`. + // NOTE: When this function starts resolving `Trait::AssocTy` successfully + // it should also start reporting the `BARE_TRAIT_OBJECTS` lint. + pub fn associated_path_to_ty( + &self, + hir_ref_id: hir::HirId, + span: Span, + qself_ty: Ty<'tcx>, + qself: &hir::Ty<'_>, + assoc_segment: &hir::PathSegment<'_>, + permit_variants: bool, + ) -> Result<(Ty<'tcx>, DefKind, DefId), ErrorGuaranteed> { + let tcx = self.tcx(); + let assoc_ident = assoc_segment.ident; + let qself_res = if let hir::TyKind::Path(hir::QPath::Resolved(_, ref path)) = qself.kind { + path.res + } else { + Res::Err + }; + + debug!("associated_path_to_ty: {:?}::{}", qself_ty, assoc_ident); + + // Check if we have an enum variant. 
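+ // For example (illustrative): in a path such as `Option::None`, the enum is
+ // `qself_ty` and `None` is the associated segment. With `permit_variants`
+ // set, the variant is accepted here directly; otherwise it is only recorded
+ // in `variant_resolution`, to be reported either as "expected type, found
+ // variant" or through the `AMBIGUOUS_ASSOCIATED_ITEMS` lint further down.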
+ let mut variant_resolution = None; + if let ty::Adt(adt_def, _) = qself_ty.kind() { + if adt_def.is_enum() { + let variant_def = adt_def + .variants() + .iter() + .find(|vd| tcx.hygienic_eq(assoc_ident, vd.ident(tcx), adt_def.did())); + if let Some(variant_def) = variant_def { + if permit_variants { + tcx.check_stability(variant_def.def_id, Some(hir_ref_id), span, None); + self.prohibit_generics(slice::from_ref(assoc_segment).iter(), |err| { + err.note("enum variants can't have type parameters"); + let type_name = tcx.item_name(adt_def.did()); + let msg = format!( + "you might have meant to specity type parameters on enum \ + `{type_name}`" + ); + let Some(args) = assoc_segment.args else { return; }; + // Get the span of the generics args *including* the leading `::`. + let args_span = assoc_segment.ident.span.shrink_to_hi().to(args.span_ext); + if tcx.generics_of(adt_def.did()).count() == 0 { + // FIXME(estebank): we could also verify that the arguments being + // work for the `enum`, instead of just looking if it takes *any*. + err.span_suggestion_verbose( + args_span, + &format!("{type_name} doesn't have generic parameters"), + "", + Applicability::MachineApplicable, + ); + return; + } + let Ok(snippet) = tcx.sess.source_map().span_to_snippet(args_span) else { + err.note(&msg); + return; + }; + let (qself_sugg_span, is_self) = if let hir::TyKind::Path( + hir::QPath::Resolved(_, ref path) + ) = qself.kind { + // If the path segment already has type params, we want to overwrite + // them. + match &path.segments[..] { + // `segment` is the previous to last element on the path, + // which would normally be the `enum` itself, while the last + // `_` `PathSegment` corresponds to the variant. + [.., hir::PathSegment { + ident, + args, + res: Some(Res::Def(DefKind::Enum, _)), + .. + }, _] => ( + // We need to include the `::` in `Type::Variant::` + // to point the span to `::`, not just ``. + ident.span.shrink_to_hi().to(args.map_or( + ident.span.shrink_to_hi(), + |a| a.span_ext)), + false, + ), + [segment] => ( + // We need to include the `::` in `Type::Variant::` + // to point the span to `::`, not just ``. + segment.ident.span.shrink_to_hi().to(segment.args.map_or( + segment.ident.span.shrink_to_hi(), + |a| a.span_ext)), + kw::SelfUpper == segment.ident.name, + ), + _ => { + err.note(&msg); + return; + } + } + } else { + err.note(&msg); + return; + }; + let suggestion = vec![ + if is_self { + // Account for people writing `Self::Variant::`, where + // `Self` is the enum, and suggest replacing `Self` with the + // appropriate type: `Type::::Variant`. + (qself.span, format!("{type_name}{snippet}")) + } else { + (qself_sugg_span, snippet) + }, + (args_span, String::new()), + ]; + err.multipart_suggestion_verbose( + &msg, + suggestion, + Applicability::MaybeIncorrect, + ); + }); + return Ok((qself_ty, DefKind::Variant, variant_def.def_id)); + } else { + variant_resolution = Some(variant_def.def_id); + } + } + } + } + + // Find the type of the associated item, and the trait where the associated + // item is declared. + let bound = match (&qself_ty.kind(), qself_res) { + (_, Res::SelfTy { trait_: Some(_), alias_to: Some((impl_def_id, _)) }) => { + // `Self` in an impl of a trait -- we have a concrete self type and a + // trait reference. + let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) else { + // A cycle error occurred, most likely. 
+ let guar = tcx.sess.delay_span_bug(span, "expected cycle error"); + return Err(guar); + }; + + self.one_bound_for_assoc_type( + || traits::supertraits(tcx, ty::Binder::dummy(trait_ref)), + || "Self".to_string(), + assoc_ident, + span, + || None, + )? + } + ( + &ty::Param(_), + Res::SelfTy { trait_: Some(param_did), alias_to: None } + | Res::Def(DefKind::TyParam, param_did), + ) => self.find_bound_for_assoc_item(param_did.expect_local(), assoc_ident, span)?, + _ => { + let reported = if variant_resolution.is_some() { + // Variant in type position + let msg = format!("expected type, found variant `{}`", assoc_ident); + tcx.sess.span_err(span, &msg) + } else if qself_ty.is_enum() { + let mut err = struct_span_err!( + tcx.sess, + assoc_ident.span, + E0599, + "no variant named `{}` found for enum `{}`", + assoc_ident, + qself_ty, + ); + + let adt_def = qself_ty.ty_adt_def().expect("enum is not an ADT"); + if let Some(suggested_name) = find_best_match_for_name( + &adt_def + .variants() + .iter() + .map(|variant| variant.name) + .collect::>(), + assoc_ident.name, + None, + ) { + err.span_suggestion( + assoc_ident.span, + "there is a variant with a similar name", + suggested_name, + Applicability::MaybeIncorrect, + ); + } else { + err.span_label( + assoc_ident.span, + format!("variant not found in `{}`", qself_ty), + ); + } + + if let Some(sp) = tcx.hir().span_if_local(adt_def.did()) { + err.span_label(sp, format!("variant `{}` not found here", assoc_ident)); + } + + err.emit() + } else if let Some(reported) = qself_ty.error_reported() { + reported + } else { + // Don't print `TyErr` to the user. + self.report_ambiguous_associated_type( + span, + &qself_ty.to_string(), + "Trait", + assoc_ident.name, + ) + }; + return Err(reported); + } + }; + + let trait_did = bound.def_id(); + let (assoc_ident, def_scope) = + tcx.adjust_ident_and_get_scope(assoc_ident, trait_did, hir_ref_id); + + // We have already adjusted the item name above, so compare with `ident.normalize_to_macros_2_0()` instead + // of calling `filter_by_name_and_kind`. + let item = tcx.associated_items(trait_did).in_definition_order().find(|i| { + i.kind.namespace() == Namespace::TypeNS + && i.ident(tcx).normalize_to_macros_2_0() == assoc_ident + }); + // Assume that if it's not matched, there must be a const defined with the same name + // but it was used in a type position. 
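+ // For example (illustrative; `Tr` is a made-up trait):
+ //
+ // ```
+ // trait Tr { const N: usize; }
+ // fn f<T: Tr>() -> T::N { todo!() } // ERROR: `N` is an associated const
+ // ```
+ //
+ // Here `N` names an associated const but appears where a type is expected,
+ // which is exactly the error emitted just below.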
+ let Some(item) = item else { + let msg = format!("found associated const `{assoc_ident}` when type was expected"); + let guar = tcx.sess.struct_span_err(span, &msg).emit(); + return Err(guar); + }; + + let ty = self.projected_ty_from_poly_trait_ref(span, item.def_id, assoc_segment, bound); + let ty = self.normalize_ty(span, ty); + + let kind = DefKind::AssocTy; + if !item.visibility(tcx).is_accessible_from(def_scope, tcx) { + let kind = kind.descr(item.def_id); + let msg = format!("{} `{}` is private", kind, assoc_ident); + tcx.sess + .struct_span_err(span, &msg) + .span_label(span, &format!("private {}", kind)) + .emit(); + } + tcx.check_stability(item.def_id, Some(hir_ref_id), span, None); + + if let Some(variant_def_id) = variant_resolution { + tcx.struct_span_lint_hir(AMBIGUOUS_ASSOCIATED_ITEMS, hir_ref_id, span, |lint| { + let mut err = lint.build("ambiguous associated item"); + let mut could_refer_to = |kind: DefKind, def_id, also| { + let note_msg = format!( + "`{}` could{} refer to the {} defined here", + assoc_ident, + also, + kind.descr(def_id) + ); + err.span_note(tcx.def_span(def_id), ¬e_msg); + }; + + could_refer_to(DefKind::Variant, variant_def_id, ""); + could_refer_to(kind, item.def_id, " also"); + + err.span_suggestion( + span, + "use fully-qualified syntax", + format!("<{} as {}>::{}", qself_ty, tcx.item_name(trait_did), assoc_ident), + Applicability::MachineApplicable, + ); + + err.emit(); + }); + } + Ok((ty, kind, item.def_id)) + } + + fn qpath_to_ty( + &self, + span: Span, + opt_self_ty: Option>, + item_def_id: DefId, + trait_segment: &hir::PathSegment<'_>, + item_segment: &hir::PathSegment<'_>, + ) -> Ty<'tcx> { + let tcx = self.tcx(); + + let trait_def_id = tcx.parent(item_def_id); + + debug!("qpath_to_ty: trait_def_id={:?}", trait_def_id); + + let Some(self_ty) = opt_self_ty else { + let path_str = tcx.def_path_str(trait_def_id); + + let def_id = self.item_def_id(); + + debug!("qpath_to_ty: self.item_def_id()={:?}", def_id); + + let parent_def_id = def_id + .and_then(|def_id| { + def_id.as_local().map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id)) + }) + .map(|hir_id| tcx.hir().get_parent_item(hir_id).to_def_id()); + + debug!("qpath_to_ty: parent_def_id={:?}", parent_def_id); + + // If the trait in segment is the same as the trait defining the item, + // use the `` syntax in the error. 
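+ // That is, prefer suggesting `<Self as Trait>::Assoc` over
+ // `<Type as Trait>::Assoc` in the E0223 "ambiguous associated type" message
+ // (with `Trait`/`Assoc` standing in for the actual trait and item names).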
+ let is_part_of_self_trait_constraints = def_id == Some(trait_def_id); + let is_part_of_fn_in_self_trait = parent_def_id == Some(trait_def_id); + + let type_name = if is_part_of_self_trait_constraints || is_part_of_fn_in_self_trait { + "Self" + } else { + "Type" + }; + + self.report_ambiguous_associated_type( + span, + type_name, + &path_str, + item_segment.ident.name, + ); + return tcx.ty_error(); + }; + + debug!("qpath_to_ty: self_type={:?}", self_ty); + + let trait_ref = + self.ast_path_to_mono_trait_ref(span, trait_def_id, self_ty, trait_segment, false); + + let item_substs = self.create_substs_for_associated_item( + tcx, + span, + item_def_id, + item_segment, + trait_ref.substs, + ); + + debug!("qpath_to_ty: trait_ref={:?}", trait_ref); + + self.normalize_ty(span, tcx.mk_projection(item_def_id, item_substs)) + } + + pub fn prohibit_generics<'a>( + &self, + segments: impl Iterator> + Clone, + extend: impl Fn(&mut DiagnosticBuilder<'tcx, ErrorGuaranteed>), + ) -> bool { + let args = segments.clone().flat_map(|segment| segment.args().args); + + let (lt, ty, ct, inf) = + args.clone().fold((false, false, false, false), |(lt, ty, ct, inf), arg| match arg { + hir::GenericArg::Lifetime(_) => (true, ty, ct, inf), + hir::GenericArg::Type(_) => (lt, true, ct, inf), + hir::GenericArg::Const(_) => (lt, ty, true, inf), + hir::GenericArg::Infer(_) => (lt, ty, ct, true), + }); + let mut emitted = false; + if lt || ty || ct || inf { + let types_and_spans: Vec<_> = segments + .clone() + .flat_map(|segment| { + segment.res.and_then(|res| { + if segment.args().args.is_empty() { + None + } else { + Some(( + match res { + Res::PrimTy(ty) => format!("{} `{}`", res.descr(), ty.name()), + Res::Def(_, def_id) + if let Some(name) = self.tcx().opt_item_name(def_id) => { + format!("{} `{name}`", res.descr()) + } + Res::Err => "this type".to_string(), + _ => res.descr().to_string(), + }, + segment.ident.span, + )) + } + }) + }) + .collect(); + let this_type = match &types_and_spans[..] { + [.., _, (last, _)] => format!( + "{} and {last}", + types_and_spans[..types_and_spans.len() - 1] + .iter() + .map(|(x, _)| x.as_str()) + .intersperse(&", ") + .collect::() + ), + [(only, _)] => only.to_string(), + [] => "this type".to_string(), + }; + + let arg_spans: Vec = args.map(|arg| arg.span()).collect(); + + let mut kinds = Vec::with_capacity(4); + if lt { + kinds.push("lifetime"); + } + if ty { + kinds.push("type"); + } + if ct { + kinds.push("const"); + } + if inf { + kinds.push("generic"); + } + let (kind, s) = match kinds[..] { + [.., _, last] => ( + format!( + "{} and {last}", + kinds[..kinds.len() - 1] + .iter() + .map(|&x| x) + .intersperse(", ") + .collect::() + ), + "s", + ), + [only] => (format!("{only}"), ""), + [] => unreachable!(), + }; + let last_span = *arg_spans.last().unwrap(); + let span: MultiSpan = arg_spans.into(); + let mut err = struct_span_err!( + self.tcx().sess, + span, + E0109, + "{kind} arguments are not allowed on {this_type}", + ); + err.span_label(last_span, format!("{kind} argument{s} not allowed")); + for (what, span) in types_and_spans { + err.span_label(span, format!("not allowed on {what}")); + } + extend(&mut err); + err.emit(); + emitted = true; + } + + for segment in segments { + // Only emit the first error to avoid overloading the user with error messages. + if let [binding, ..] = segment.args().bindings { + Self::prohibit_assoc_ty_binding(self.tcx(), binding.span); + return true; + } + } + emitted + } + + // FIXME(eddyb, varkor) handle type paths here too, not just value ones. 
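+ // A "value path" here is a path to an item in the value namespace, e.g.
+ // `Foo::<u8>::new` in `let x = Foo::<u8>::new();`, as opposed to a type path
+ // such as `Foo<u8>` in `let x: Foo<u8> = ...;` (`Foo` being any illustrative
+ // type with a `new` constructor).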
+ pub fn def_ids_for_value_path_segments( + &self, + segments: &[hir::PathSegment<'_>], + self_ty: Option>, + kind: DefKind, + def_id: DefId, + ) -> Vec { + // We need to extract the type parameters supplied by the user in + // the path `path`. Due to the current setup, this is a bit of a + // tricky-process; the problem is that resolve only tells us the + // end-point of the path resolution, and not the intermediate steps. + // Luckily, we can (at least for now) deduce the intermediate steps + // just from the end-point. + // + // There are basically five cases to consider: + // + // 1. Reference to a constructor of a struct: + // + // struct Foo(...) + // + // In this case, the parameters are declared in the type space. + // + // 2. Reference to a constructor of an enum variant: + // + // enum E { Foo(...) } + // + // In this case, the parameters are defined in the type space, + // but may be specified either on the type or the variant. + // + // 3. Reference to a fn item or a free constant: + // + // fn foo() { } + // + // In this case, the path will again always have the form + // `a::b::foo::` where only the final segment should have + // type parameters. However, in this case, those parameters are + // declared on a value, and hence are in the `FnSpace`. + // + // 4. Reference to a method or an associated constant: + // + // impl SomeStruct { + // fn foo(...) + // } + // + // Here we can have a path like + // `a::b::SomeStruct::::foo::`, in which case parameters + // may appear in two places. The penultimate segment, + // `SomeStruct::`, contains parameters in TypeSpace, and the + // final segment, `foo::` contains parameters in fn space. + // + // The first step then is to categorize the segments appropriately. + + let tcx = self.tcx(); + + assert!(!segments.is_empty()); + let last = segments.len() - 1; + + let mut path_segs = vec![]; + + match kind { + // Case 1. Reference to a struct constructor. + DefKind::Ctor(CtorOf::Struct, ..) => { + // Everything but the final segment should have no + // parameters at all. + let generics = tcx.generics_of(def_id); + // Variant and struct constructors use the + // generics of their parent type definition. + let generics_def_id = generics.parent.unwrap_or(def_id); + path_segs.push(PathSeg(generics_def_id, last)); + } + + // Case 2. Reference to a variant constructor. + DefKind::Ctor(CtorOf::Variant, ..) | DefKind::Variant => { + let adt_def = self_ty.map(|t| t.ty_adt_def().unwrap()); + let (generics_def_id, index) = if let Some(adt_def) = adt_def { + debug_assert!(adt_def.is_enum()); + (adt_def.did(), last) + } else if last >= 1 && segments[last - 1].args.is_some() { + // Everything but the penultimate segment should have no + // parameters at all. + let mut def_id = def_id; + + // `DefKind::Ctor` -> `DefKind::Variant` + if let DefKind::Ctor(..) = kind { + def_id = tcx.parent(def_id); + } + + // `DefKind::Variant` -> `DefKind::Enum` + let enum_def_id = tcx.parent(def_id); + (enum_def_id, last - 1) + } else { + // FIXME: lint here recommending `Enum::<...>::Variant` form + // instead of `Enum::Variant::<...>` form. + + // Everything but the final segment should have no + // parameters at all. + let generics = tcx.generics_of(def_id); + // Variant and struct constructors use the + // generics of their parent type definition. + (generics.parent.unwrap_or(def_id), last) + }; + path_segs.push(PathSeg(generics_def_id, index)); + } + + // Case 3. Reference to a top-level value. 
+ DefKind::Fn | DefKind::Const | DefKind::ConstParam | DefKind::Static(_) => { + path_segs.push(PathSeg(def_id, last)); + } + + // Case 4. Reference to a method or associated const. + DefKind::AssocFn | DefKind::AssocConst => { + if segments.len() >= 2 { + let generics = tcx.generics_of(def_id); + path_segs.push(PathSeg(generics.parent.unwrap(), last - 1)); + } + path_segs.push(PathSeg(def_id, last)); + } + + kind => bug!("unexpected definition kind {:?} for {:?}", kind, def_id), + } + + debug!("path_segs = {:?}", path_segs); + + path_segs + } + + // Check a type `Path` and convert it to a `Ty`. + pub fn res_to_ty( + &self, + opt_self_ty: Option>, + path: &hir::Path<'_>, + permit_variants: bool, + ) -> Ty<'tcx> { + let tcx = self.tcx(); + + debug!( + "res_to_ty(res={:?}, opt_self_ty={:?}, path_segments={:?})", + path.res, opt_self_ty, path.segments + ); + + let span = path.span; + match path.res { + Res::Def(DefKind::OpaqueTy, did) => { + // Check for desugared `impl Trait`. + assert!(ty::is_impl_trait_defn(tcx, did).is_none()); + let item_segment = path.segments.split_last().unwrap(); + self.prohibit_generics(item_segment.1.iter(), |err| { + err.note("`impl Trait` types can't have type parameters"); + }); + let substs = self.ast_path_substs_for_ty(span, did, item_segment.0); + self.normalize_ty(span, tcx.mk_opaque(did, substs)) + } + Res::Def( + DefKind::Enum + | DefKind::TyAlias + | DefKind::Struct + | DefKind::Union + | DefKind::ForeignTy, + did, + ) => { + assert_eq!(opt_self_ty, None); + self.prohibit_generics(path.segments.split_last().unwrap().1.iter(), |_| {}); + self.ast_path_to_ty(span, did, path.segments.last().unwrap()) + } + Res::Def(kind @ DefKind::Variant, def_id) if permit_variants => { + // Convert "variant type" as if it were a real type. + // The resulting `Ty` is type of the variant's enum for now. + assert_eq!(opt_self_ty, None); + + let path_segs = + self.def_ids_for_value_path_segments(path.segments, None, kind, def_id); + let generic_segs: FxHashSet<_> = + path_segs.iter().map(|PathSeg(_, index)| index).collect(); + self.prohibit_generics( + path.segments.iter().enumerate().filter_map(|(index, seg)| { + if !generic_segs.contains(&index) { Some(seg) } else { None } + }), + |err| { + err.note("enum variants can't have type parameters"); + }, + ); + + let PathSeg(def_id, index) = path_segs.last().unwrap(); + self.ast_path_to_ty(span, *def_id, &path.segments[*index]) + } + Res::Def(DefKind::TyParam, def_id) => { + assert_eq!(opt_self_ty, None); + self.prohibit_generics(path.segments.iter(), |err| { + if let Some(span) = tcx.def_ident_span(def_id) { + let name = tcx.item_name(def_id); + err.span_note(span, &format!("type parameter `{name}` defined here")); + } + }); + + let def_id = def_id.expect_local(); + let item_def_id = tcx.hir().ty_param_owner(def_id); + let generics = tcx.generics_of(item_def_id); + let index = generics.param_def_id_to_index[&def_id.to_def_id()]; + tcx.mk_ty_param(index, tcx.hir().ty_param_name(def_id)) + } + Res::SelfTy { trait_: Some(_), alias_to: None } => { + // `Self` in trait or type alias. + assert_eq!(opt_self_ty, None); + self.prohibit_generics(path.segments.iter(), |err| { + if let [hir::PathSegment { args: Some(args), ident, .. }] = &path.segments[..] 
{ + err.span_suggestion_verbose( + ident.span.shrink_to_hi().to(args.span_ext), + "the `Self` type doesn't accept type parameters", + "", + Applicability::MaybeIncorrect, + ); + } + }); + tcx.types.self_param + } + Res::SelfTy { trait_: _, alias_to: Some((def_id, forbid_generic)) } => { + // `Self` in impl (we know the concrete type). + assert_eq!(opt_self_ty, None); + // Try to evaluate any array length constants. + let ty = tcx.at(span).type_of(def_id); + let span_of_impl = tcx.span_of_impl(def_id); + self.prohibit_generics(path.segments.iter(), |err| { + let def_id = match *ty.kind() { + ty::Adt(self_def, _) => self_def.did(), + _ => return, + }; + + let type_name = tcx.item_name(def_id); + let span_of_ty = tcx.def_ident_span(def_id); + let generics = tcx.generics_of(def_id).count(); + + let msg = format!("`Self` is of type `{ty}`"); + if let (Ok(i_sp), Some(t_sp)) = (span_of_impl, span_of_ty) { + let mut span: MultiSpan = vec![t_sp].into(); + span.push_span_label( + i_sp, + &format!("`Self` is on type `{type_name}` in this `impl`"), + ); + let mut postfix = ""; + if generics == 0 { + postfix = ", which doesn't have generic parameters"; + } + span.push_span_label( + t_sp, + &format!("`Self` corresponds to this type{postfix}"), + ); + err.span_note(span, &msg); + } else { + err.note(&msg); + } + for segment in path.segments { + if let Some(args) = segment.args && segment.ident.name == kw::SelfUpper { + if generics == 0 { + // FIXME(estebank): we could also verify that the arguments being + // work for the `enum`, instead of just looking if it takes *any*. + err.span_suggestion_verbose( + segment.ident.span.shrink_to_hi().to(args.span_ext), + "the `Self` type doesn't accept type parameters", + "", + Applicability::MachineApplicable, + ); + return; + } else { + err.span_suggestion_verbose( + segment.ident.span, + format!( + "the `Self` type doesn't accept type parameters, use the \ + concrete type's name `{type_name}` instead if you want to \ + specify its type parameters" + ), + type_name, + Applicability::MaybeIncorrect, + ); + } + } + } + }); + // HACK(min_const_generics): Forbid generic `Self` types + // here as we can't easily do that during nameres. + // + // We do this before normalization as we otherwise allow + // ```rust + // trait AlwaysApplicable { type Assoc; } + // impl AlwaysApplicable for T { type Assoc = usize; } + // + // trait BindsParam { + // type ArrayTy; + // } + // impl BindsParam for ::Assoc { + // type ArrayTy = [u8; Self::MAX]; + // } + // ``` + // Note that the normalization happens in the param env of + // the anon const, which is empty. This is why the + // `AlwaysApplicable` impl needs a `T: ?Sized` bound for + // this to compile if we were to normalize here. + if forbid_generic && ty.needs_subst() { + let mut err = tcx.sess.struct_span_err( + path.span, + "generic `Self` types are currently not permitted in anonymous constants", + ); + if let Some(hir::Node::Item(&hir::Item { + kind: hir::ItemKind::Impl(ref impl_), + .. 
+ })) = tcx.hir().get_if_local(def_id) + { + err.span_note(impl_.self_ty.span, "not a concrete type"); + } + err.emit(); + tcx.ty_error() + } else { + self.normalize_ty(span, ty) + } + } + Res::Def(DefKind::AssocTy, def_id) => { + debug_assert!(path.segments.len() >= 2); + self.prohibit_generics(path.segments[..path.segments.len() - 2].iter(), |_| {}); + self.qpath_to_ty( + span, + opt_self_ty, + def_id, + &path.segments[path.segments.len() - 2], + path.segments.last().unwrap(), + ) + } + Res::PrimTy(prim_ty) => { + assert_eq!(opt_self_ty, None); + self.prohibit_generics(path.segments.iter(), |err| { + let name = prim_ty.name_str(); + for segment in path.segments { + if let Some(args) = segment.args { + err.span_suggestion_verbose( + segment.ident.span.shrink_to_hi().to(args.span_ext), + &format!("primitive type `{name}` doesn't have generic parameters"), + "", + Applicability::MaybeIncorrect, + ); + } + } + }); + match prim_ty { + hir::PrimTy::Bool => tcx.types.bool, + hir::PrimTy::Char => tcx.types.char, + hir::PrimTy::Int(it) => tcx.mk_mach_int(ty::int_ty(it)), + hir::PrimTy::Uint(uit) => tcx.mk_mach_uint(ty::uint_ty(uit)), + hir::PrimTy::Float(ft) => tcx.mk_mach_float(ty::float_ty(ft)), + hir::PrimTy::Str => tcx.types.str_, + } + } + Res::Err => { + self.set_tainted_by_errors(); + self.tcx().ty_error() + } + _ => span_bug!(span, "unexpected resolution: {:?}", path.res), + } + } + + /// Parses the programmer's textual representation of a type into our + /// internal notion of a type. + pub fn ast_ty_to_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> { + self.ast_ty_to_ty_inner(ast_ty, false, false) + } + + /// Parses the programmer's textual representation of a type into our + /// internal notion of a type. This is meant to be used within a path. + pub fn ast_ty_to_ty_in_path(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> { + self.ast_ty_to_ty_inner(ast_ty, false, true) + } + + /// Turns a `hir::Ty` into a `Ty`. For diagnostics' purposes we keep track of whether trait + /// objects are borrowed like `&dyn Trait` to avoid emitting redundant errors. 
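+    ///
+    /// Illustrative (hypothetical user code, not taken from this patch): the
+    /// trait object in
+    ///
+    /// ```ignore (illustrative)
+    /// fn describe(value: &dyn std::fmt::Debug) {}
+    /// ```
+    ///
+    /// is lowered through a nested call with `borrowed == true`, since it sits
+    /// behind a `&`, which the diagnostics use to avoid a redundant error.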
+ #[tracing::instrument(level = "debug", skip(self))] + fn ast_ty_to_ty_inner(&self, ast_ty: &hir::Ty<'_>, borrowed: bool, in_path: bool) -> Ty<'tcx> { + let tcx = self.tcx(); + + let result_ty = match ast_ty.kind { + hir::TyKind::Slice(ref ty) => tcx.mk_slice(self.ast_ty_to_ty(ty)), + hir::TyKind::Ptr(ref mt) => { + tcx.mk_ptr(ty::TypeAndMut { ty: self.ast_ty_to_ty(mt.ty), mutbl: mt.mutbl }) + } + hir::TyKind::Rptr(ref region, ref mt) => { + let r = self.ast_region_to_region(region, None); + debug!(?r); + let t = self.ast_ty_to_ty_inner(mt.ty, true, false); + tcx.mk_ref(r, ty::TypeAndMut { ty: t, mutbl: mt.mutbl }) + } + hir::TyKind::Never => tcx.types.never, + hir::TyKind::Tup(fields) => tcx.mk_tup(fields.iter().map(|t| self.ast_ty_to_ty(t))), + hir::TyKind::BareFn(bf) => { + require_c_abi_if_c_variadic(tcx, bf.decl, bf.abi, ast_ty.span); + + tcx.mk_fn_ptr(self.ty_of_fn( + ast_ty.hir_id, + bf.unsafety, + bf.abi, + bf.decl, + None, + Some(ast_ty), + )) + } + hir::TyKind::TraitObject(bounds, ref lifetime, _) => { + self.maybe_lint_bare_trait(ast_ty, in_path); + self.conv_object_ty_poly_trait_ref(ast_ty.span, bounds, lifetime, borrowed) + } + hir::TyKind::Path(hir::QPath::Resolved(ref maybe_qself, ref path)) => { + debug!(?maybe_qself, ?path); + let opt_self_ty = maybe_qself.as_ref().map(|qself| self.ast_ty_to_ty(qself)); + self.res_to_ty(opt_self_ty, path, false) + } + hir::TyKind::OpaqueDef(item_id, lifetimes) => { + let opaque_ty = tcx.hir().item(item_id); + let def_id = item_id.def_id.to_def_id(); + + match opaque_ty.kind { + hir::ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => { + self.impl_trait_ty_to_ty(def_id, lifetimes, origin) + } + ref i => bug!("`impl Trait` pointed to non-opaque type?? {:#?}", i), + } + } + hir::TyKind::Path(hir::QPath::TypeRelative(ref qself, ref segment)) => { + debug!(?qself, ?segment); + let ty = self.ast_ty_to_ty_inner(qself, false, true); + self.associated_path_to_ty(ast_ty.hir_id, ast_ty.span, ty, qself, segment, false) + .map(|(ty, _, _)| ty) + .unwrap_or_else(|_| tcx.ty_error()) + } + hir::TyKind::Path(hir::QPath::LangItem(lang_item, span, _)) => { + let def_id = tcx.require_lang_item(lang_item, Some(span)); + let (substs, _) = self.create_substs_for_ast_path( + span, + def_id, + &[], + &hir::PathSegment::invalid(), + &GenericArgs::none(), + true, + None, + ); + EarlyBinder(self.normalize_ty(span, tcx.at(span).type_of(def_id))) + .subst(tcx, substs) + } + hir::TyKind::Array(ref ty, ref length) => { + let length = match length { + &hir::ArrayLen::Infer(_, span) => self.ct_infer(tcx.types.usize, None, span), + hir::ArrayLen::Body(constant) => { + let length_def_id = tcx.hir().local_def_id(constant.hir_id); + ty::Const::from_anon_const(tcx, length_def_id) + } + }; + + let array_ty = tcx.mk_ty(ty::Array(self.ast_ty_to_ty(ty), length)); + self.normalize_ty(ast_ty.span, array_ty) + } + hir::TyKind::Typeof(ref e) => { + let ty = tcx.type_of(tcx.hir().local_def_id(e.hir_id)); + let span = ast_ty.span; + tcx.sess.emit_err(TypeofReservedKeywordUsed { + span, + ty, + opt_sugg: Some((span, Applicability::MachineApplicable)) + .filter(|_| ty.is_suggestable(tcx, false)), + }); + + ty + } + hir::TyKind::Infer => { + // Infer also appears as the type of arguments or return + // values in an ExprKind::Closure, or as + // the type of local variables. Both of these cases are + // handled specially and will not descend into this routine. 
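+                //
+                // Illustrative (hypothetical user code): a `_` written inside
+                // a type, as in
+                //
+                //     let xs: Vec<_> = vec![1u8, 2, 3];
+                //
+                // typically does reach this arm and becomes a fresh
+                // type-inference variable.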
+ self.ty_infer(None, ast_ty.span) + } + hir::TyKind::Err => tcx.ty_error(), + }; + + debug!(?result_ty); + + self.record_ty(ast_ty.hir_id, result_ty, ast_ty.span); + result_ty + } + + fn impl_trait_ty_to_ty( + &self, + def_id: DefId, + lifetimes: &[hir::GenericArg<'_>], + origin: OpaqueTyOrigin, + ) -> Ty<'tcx> { + debug!("impl_trait_ty_to_ty(def_id={:?}, lifetimes={:?})", def_id, lifetimes); + let tcx = self.tcx(); + + let generics = tcx.generics_of(def_id); + + debug!("impl_trait_ty_to_ty: generics={:?}", generics); + let substs = InternalSubsts::for_item(tcx, def_id, |param, _| { + if let Some(i) = (param.index as usize).checked_sub(generics.parent_count) { + // Our own parameters are the resolved lifetimes. + if let GenericParamDefKind::Lifetime = param.kind { + if let hir::GenericArg::Lifetime(lifetime) = &lifetimes[i] { + self.ast_region_to_region(lifetime, None).into() + } else { + bug!() + } + } else { + bug!() + } + } else { + match param.kind { + // For RPIT (return position impl trait), only lifetimes + // mentioned in the impl Trait predicate are captured by + // the opaque type, so the lifetime parameters from the + // parent item need to be replaced with `'static`. + // + // For `impl Trait` in the types of statics, constants, + // locals and type aliases. These capture all parent + // lifetimes, so they can use their identity subst. + GenericParamDefKind::Lifetime + if matches!( + origin, + hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..) + ) => + { + tcx.lifetimes.re_static.into() + } + _ => tcx.mk_param_from_def(param), + } + } + }); + debug!("impl_trait_ty_to_ty: substs={:?}", substs); + + let ty = tcx.mk_opaque(def_id, substs); + debug!("impl_trait_ty_to_ty: {}", ty); + ty + } + + pub fn ty_of_arg(&self, ty: &hir::Ty<'_>, expected_ty: Option>) -> Ty<'tcx> { + match ty.kind { + hir::TyKind::Infer if expected_ty.is_some() => { + self.record_ty(ty.hir_id, expected_ty.unwrap(), ty.span); + expected_ty.unwrap() + } + _ => self.ast_ty_to_ty(ty), + } + } + + pub fn ty_of_fn( + &self, + hir_id: hir::HirId, + unsafety: hir::Unsafety, + abi: abi::Abi, + decl: &hir::FnDecl<'_>, + generics: Option<&hir::Generics<'_>>, + hir_ty: Option<&hir::Ty<'_>>, + ) -> ty::PolyFnSig<'tcx> { + debug!("ty_of_fn"); + + let tcx = self.tcx(); + let bound_vars = tcx.late_bound_vars(hir_id); + debug!(?bound_vars); + + // We proactively collect all the inferred type params to emit a single error per fn def. + let mut visitor = HirPlaceholderCollector::default(); + let mut infer_replacements = vec![]; + + if let Some(generics) = generics { + walk_generics(&mut visitor, generics); + } + + let input_tys: Vec<_> = decl + .inputs + .iter() + .enumerate() + .map(|(i, a)| { + if let hir::TyKind::Infer = a.kind && !self.allow_ty_infer() { + if let Some(suggested_ty) = + self.suggest_trait_fn_ty_for_impl_fn_infer(hir_id, Some(i)) + { + infer_replacements.push((a.span, suggested_ty.to_string())); + return suggested_ty; + } + } + + // Only visit the type looking for `_` if we didn't fix the type above + visitor.visit_ty(a); + self.ty_of_arg(a, None) + }) + .collect(); + + let output_ty = match decl.output { + hir::FnRetTy::Return(output) => { + if let hir::TyKind::Infer = output.kind + && !self.allow_ty_infer() + && let Some(suggested_ty) = + self.suggest_trait_fn_ty_for_impl_fn_infer(hir_id, None) + { + infer_replacements.push((output.span, suggested_ty.to_string())); + suggested_ty + } else { + visitor.visit_ty(output); + self.ast_ty_to_ty(output) + } + } + hir::FnRetTy::DefaultReturn(..) 
=> tcx.mk_unit(), + }; + + debug!("ty_of_fn: output_ty={:?}", output_ty); + + let fn_ty = tcx.mk_fn_sig(input_tys.into_iter(), output_ty, decl.c_variadic, unsafety, abi); + let bare_fn_ty = ty::Binder::bind_with_vars(fn_ty, bound_vars); + + if !self.allow_ty_infer() && !(visitor.0.is_empty() && infer_replacements.is_empty()) { + // We always collect the spans for placeholder types when evaluating `fn`s, but we + // only want to emit an error complaining about them if infer types (`_`) are not + // allowed. `allow_ty_infer` gates this behavior. We check for the presence of + // `ident_span` to not emit an error twice when we have `fn foo(_: fn() -> _)`. + + let mut diag = crate::collect::placeholder_type_error_diag( + tcx, + generics, + visitor.0, + infer_replacements.iter().map(|(s, _)| *s).collect(), + true, + hir_ty, + "function", + ); + + if !infer_replacements.is_empty() { + diag.multipart_suggestion(&format!( + "try replacing `_` with the type{} in the corresponding trait method signature", + rustc_errors::pluralize!(infer_replacements.len()), + ), infer_replacements, Applicability::MachineApplicable); + } + + diag.emit(); + } + + // Find any late-bound regions declared in return type that do + // not appear in the arguments. These are not well-formed. + // + // Example: + // for<'a> fn() -> &'a str <-- 'a is bad + // for<'a> fn(&'a String) -> &'a str <-- 'a is ok + let inputs = bare_fn_ty.inputs(); + let late_bound_in_args = + tcx.collect_constrained_late_bound_regions(&inputs.map_bound(|i| i.to_owned())); + let output = bare_fn_ty.output(); + let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(&output); + + self.validate_late_bound_regions(late_bound_in_args, late_bound_in_ret, |br_name| { + struct_span_err!( + tcx.sess, + decl.output.span(), + E0581, + "return type references {}, which is not constrained by the fn input types", + br_name + ) + }); + + bare_fn_ty + } + + /// Given a fn_hir_id for a impl function, suggest the type that is found on the + /// corresponding function in the trait that the impl implements, if it exists. + /// If arg_idx is Some, then it corresponds to an input type index, otherwise it + /// corresponds to the return type. + fn suggest_trait_fn_ty_for_impl_fn_infer( + &self, + fn_hir_id: hir::HirId, + arg_idx: Option, + ) -> Option> { + let tcx = self.tcx(); + let hir = tcx.hir(); + + let hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Fn(..), ident, .. }) = + hir.get(fn_hir_id) else { return None }; + let hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(i), .. 
}) = + hir.get(hir.get_parent_node(fn_hir_id)) else { bug!("ImplItem should have Impl parent") }; + + let trait_ref = + self.instantiate_mono_trait_ref(i.of_trait.as_ref()?, self.ast_ty_to_ty(i.self_ty)); + + let assoc = tcx.associated_items(trait_ref.def_id).find_by_name_and_kind( + tcx, + *ident, + ty::AssocKind::Fn, + trait_ref.def_id, + )?; + + let fn_sig = tcx.bound_fn_sig(assoc.def_id).subst( + tcx, + trait_ref.substs.extend_to(tcx, assoc.def_id, |param, _| tcx.mk_param_from_def(param)), + ); + + let ty = if let Some(arg_idx) = arg_idx { fn_sig.input(arg_idx) } else { fn_sig.output() }; + + Some(tcx.liberate_late_bound_regions(fn_hir_id.expect_owner().to_def_id(), ty)) + } + + fn validate_late_bound_regions( + &self, + constrained_regions: FxHashSet, + referenced_regions: FxHashSet, + generate_err: impl Fn(&str) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>, + ) { + for br in referenced_regions.difference(&constrained_regions) { + let br_name = match *br { + ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon(_) | ty::BrEnv => { + "an anonymous lifetime".to_string() + } + ty::BrNamed(_, name) => format!("lifetime `{}`", name), + }; + + let mut err = generate_err(&br_name); + + if let ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon(_) = *br { + // The only way for an anonymous lifetime to wind up + // in the return type but **also** be unconstrained is + // if it only appears in "associated types" in the + // input. See #47511 and #62200 for examples. In this case, + // though we can easily give a hint that ought to be + // relevant. + err.note( + "lifetimes appearing in an associated type are not considered constrained", + ); + } + + err.emit(); + } + } + + /// Given the bounds on an object, determines what single region bound (if any) we can + /// use to summarize this type. The basic idea is that we will use the bound the user + /// provided, if they provided one, and otherwise search the supertypes of trait bounds + /// for region bounds. It may be that we can derive no bound at all, in which case + /// we return `None`. + fn compute_object_lifetime_bound( + &self, + span: Span, + existential_predicates: &'tcx ty::List>>, + ) -> Option> // if None, use the default + { + let tcx = self.tcx(); + + debug!("compute_opt_region_bound(existential_predicates={:?})", existential_predicates); + + // No explicit region bound specified. Therefore, examine trait + // bounds and see if we can derive region bounds from those. + let derived_region_bounds = object_region_bounds(tcx, existential_predicates); + + // If there are no derived region bounds, then report back that we + // can find no region bound. The caller will use the default. + if derived_region_bounds.is_empty() { + return None; + } + + // If any of the derived region bounds are 'static, that is always + // the best choice. + if derived_region_bounds.iter().any(|r| r.is_static()) { + return Some(tcx.lifetimes.re_static); + } + + // Determine whether there is exactly one unique region in the set + // of derived region bounds. If so, use that. Otherwise, report an + // error. + let r = derived_region_bounds[0]; + if derived_region_bounds[1..].iter().any(|r1| r != *r1) { + tcx.sess.emit_err(AmbiguousLifetimeBound { span }); + } + Some(r) + } + + /// Make sure that we are in the condition to suggest the blanket implementation. 
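+    ///
+    /// Illustrative (hypothetical user code, not taken from this patch):
+    ///
+    /// ```ignore (illustrative)
+    /// trait Greet { fn hi(&self); }
+    /// impl Greet for std::fmt::Debug { fn hi(&self) {} }
+    /// ```
+    ///
+    /// Here the bare-trait-object diagnostic can additionally suggest the
+    /// blanket form `impl<T: std::fmt::Debug> Greet for T { .. }`.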
+ fn maybe_lint_blanket_trait_impl( + &self, + self_ty: &hir::Ty<'_>, + diag: &mut DiagnosticBuilder<'_, T>, + ) { + let tcx = self.tcx(); + let parent_id = tcx.hir().get_parent_item(self_ty.hir_id); + if let hir::Node::Item(hir::Item { + kind: + hir::ItemKind::Impl(hir::Impl { + self_ty: impl_self_ty, of_trait: Some(of_trait_ref), generics, .. + }), + .. + }) = tcx.hir().get_by_def_id(parent_id) && self_ty.hir_id == impl_self_ty.hir_id + { + if !of_trait_ref.trait_def_id().map_or(false, |def_id| def_id.is_local()) { + return; + } + let of_trait_span = of_trait_ref.path.span; + // make sure that we are not calling unwrap to abort during the compilation + let Ok(impl_trait_name) = tcx.sess.source_map().span_to_snippet(self_ty.span) else { return; }; + let Ok(of_trait_name) = tcx.sess.source_map().span_to_snippet(of_trait_span) else { return; }; + // check if the trait has generics, to make a correct suggestion + let param_name = generics.params.next_type_param_name(None); + + let add_generic_sugg = if let Some(span) = generics.span_for_param_suggestion() { + (span, format!(", {}: {}", param_name, impl_trait_name)) + } else { + (generics.span, format!("<{}: {}>", param_name, impl_trait_name)) + }; + diag.multipart_suggestion( + format!("alternatively use a blanket \ + implementation to implement `{of_trait_name}` for \ + all types that also implement `{impl_trait_name}`"), + vec![ + (self_ty.span, param_name), + add_generic_sugg, + ], + Applicability::MaybeIncorrect, + ); + } + } + + fn maybe_lint_bare_trait(&self, self_ty: &hir::Ty<'_>, in_path: bool) { + let tcx = self.tcx(); + if let hir::TyKind::TraitObject([poly_trait_ref, ..], _, TraitObjectSyntax::None) = + self_ty.kind + { + let needs_bracket = in_path + && !tcx + .sess + .source_map() + .span_to_prev_source(self_ty.span) + .ok() + .map_or(false, |s| s.trim_end().ends_with('<')); + + let is_global = poly_trait_ref.trait_ref.path.is_global(); + let sugg = Vec::from_iter([ + ( + self_ty.span.shrink_to_lo(), + format!( + "{}dyn {}", + if needs_bracket { "<" } else { "" }, + if is_global { "(" } else { "" }, + ), + ), + ( + self_ty.span.shrink_to_hi(), + format!( + "{}{}", + if is_global { ")" } else { "" }, + if needs_bracket { ">" } else { "" }, + ), + ), + ]); + if self_ty.span.edition() >= Edition::Edition2021 { + let msg = "trait objects must include the `dyn` keyword"; + let label = "add `dyn` keyword before this trait"; + let mut diag = + rustc_errors::struct_span_err!(tcx.sess, self_ty.span, E0782, "{}", msg); + diag.multipart_suggestion_verbose(label, sugg, Applicability::MachineApplicable); + // check if the impl trait that we are considering is a impl of a local trait + self.maybe_lint_blanket_trait_impl(&self_ty, &mut diag); + diag.emit(); + } else { + let msg = "trait objects without an explicit `dyn` are deprecated"; + tcx.struct_span_lint_hir( + BARE_TRAIT_OBJECTS, + self_ty.hir_id, + self_ty.span, + |lint| { + let mut diag = lint.build(msg); + diag.multipart_suggestion_verbose( + "use `dyn`", + sugg, + Applicability::MachineApplicable, + ); + self.maybe_lint_blanket_trait_impl::<()>(&self_ty, &mut diag); + diag.emit(); + }, + ); + } + } + } +} diff --git a/compiler/rustc_typeck/src/bounds.rs b/compiler/rustc_typeck/src/bounds.rs new file mode 100644 index 000000000..6a28bb16a --- /dev/null +++ b/compiler/rustc_typeck/src/bounds.rs @@ -0,0 +1,90 @@ +//! Bounds are restrictions applied to some types after they've been converted into the +//! `ty` form from the HIR. 
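+//!
+//! Illustrative (hypothetical bound list): in `fn f<T: 'static + Clone>()`,
+//! the `'static + Clone` bounds are gathered into a `Bounds` value before
+//! being flattened into predicates.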
+ +use rustc_middle::ty::{self, ToPredicate, Ty, TyCtxt}; +use rustc_span::Span; + +/// Collects together a list of type bounds. These lists of bounds occur in many places +/// in Rust's syntax: +/// +/// ```text +/// trait Foo: Bar + Baz { } +/// ^^^^^^^^^ supertrait list bounding the `Self` type parameter +/// +/// fn foo() { } +/// ^^^^^^^^^ bounding the type parameter `T` +/// +/// impl dyn Bar + Baz +/// ^^^^^^^^^ bounding the forgotten dynamic type +/// ``` +/// +/// Our representation is a bit mixed here -- in some cases, we +/// include the self type (e.g., `trait_bounds`) but in others we do not +#[derive(Default, PartialEq, Eq, Clone, Debug)] +pub struct Bounds<'tcx> { + /// A list of region bounds on the (implicit) self type. So if you + /// had `T: 'a + 'b` this might would be a list `['a, 'b]` (but + /// the `T` is not explicitly included). + pub region_bounds: Vec<(ty::Binder<'tcx, ty::Region<'tcx>>, Span)>, + + /// A list of trait bounds. So if you had `T: Debug` this would be + /// `T: Debug`. Note that the self-type is explicit here. + pub trait_bounds: Vec<(ty::PolyTraitRef<'tcx>, Span, ty::BoundConstness)>, + + /// A list of projection equality bounds. So if you had `T: + /// Iterator` this would include `::Item => u32`. Note that the self-type is explicit + /// here. + pub projection_bounds: Vec<(ty::PolyProjectionPredicate<'tcx>, Span)>, + + /// `Some` if there is *no* `?Sized` predicate. The `span` + /// is the location in the source of the `T` declaration which can + /// be cited as the source of the `T: Sized` requirement. + pub implicitly_sized: Option, +} + +impl<'tcx> Bounds<'tcx> { + /// Converts a bounds list into a flat set of predicates (like + /// where-clauses). Because some of our bounds listings (e.g., + /// regions) don't include the self-type, you must supply the + /// self-type here (the `param_ty` parameter). + pub fn predicates<'out, 's>( + &'s self, + tcx: TyCtxt<'tcx>, + param_ty: Ty<'tcx>, + // the output must live shorter than the duration of the borrow of self and 'tcx. + ) -> impl Iterator, Span)> + 'out + where + 'tcx: 'out, + 's: 'out, + { + // If it could be sized, and is, add the `Sized` predicate. 
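+        // Illustrative (hypothetical bound list): bounds gathered from
+        // `T: 'a + std::fmt::Debug` yield an outlives predicate `T: 'a`, a
+        // trait predicate `T: Debug`, and, unless `?Sized` was written, the
+        // implicit `T: Sized` predicate built just below.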
+ let sized_predicate = self.implicitly_sized.and_then(|span| { + tcx.lang_items().sized_trait().map(move |sized| { + let trait_ref = ty::Binder::dummy(ty::TraitRef { + def_id: sized, + substs: tcx.mk_substs_trait(param_ty, &[]), + }); + (trait_ref.without_const().to_predicate(tcx), span) + }) + }); + + let region_preds = self.region_bounds.iter().map(move |&(region_bound, span)| { + let pred = region_bound + .map_bound(|region_bound| ty::OutlivesPredicate(param_ty, region_bound)) + .to_predicate(tcx); + (pred, span) + }); + let trait_bounds = + self.trait_bounds.iter().map(move |&(bound_trait_ref, span, constness)| { + let predicate = bound_trait_ref.with_constness(constness).to_predicate(tcx); + (predicate, span) + }); + let projection_bounds = self + .projection_bounds + .iter() + .map(move |&(projection, span)| (projection.to_predicate(tcx), span)); + + sized_predicate.into_iter().chain(region_preds).chain(trait_bounds).chain(projection_bounds) + } +} diff --git a/compiler/rustc_typeck/src/check/_match.rs b/compiler/rustc_typeck/src/check/_match.rs new file mode 100644 index 000000000..1b13c98e4 --- /dev/null +++ b/compiler/rustc_typeck/src/check/_match.rs @@ -0,0 +1,529 @@ +use crate::check::coercion::{AsCoercionSite, CoerceMany}; +use crate::check::{Diverges, Expectation, FnCtxt, Needs}; +use rustc_errors::{Applicability, MultiSpan}; +use rustc_hir::{self as hir, ExprKind}; +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_infer::traits::Obligation; +use rustc_middle::ty::{self, ToPredicate, Ty, TypeVisitable}; +use rustc_span::Span; +use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt; +use rustc_trait_selection::traits::{ + IfExpressionCause, MatchExpressionArmCause, ObligationCause, ObligationCauseCode, +}; + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + #[instrument(skip(self), level = "debug")] + pub fn check_match( + &self, + expr: &'tcx hir::Expr<'tcx>, + scrut: &'tcx hir::Expr<'tcx>, + arms: &'tcx [hir::Arm<'tcx>], + orig_expected: Expectation<'tcx>, + match_src: hir::MatchSource, + ) -> Ty<'tcx> { + let tcx = self.tcx; + + let acrb = arms_contain_ref_bindings(arms); + let scrutinee_ty = self.demand_scrutinee_type(scrut, acrb, arms.is_empty()); + debug!(?scrutinee_ty); + + // If there are no arms, that is a diverging match; a special case. + if arms.is_empty() { + self.diverges.set(self.diverges.get() | Diverges::always(expr.span)); + return tcx.types.never; + } + + self.warn_arms_when_scrutinee_diverges(arms); + + // Otherwise, we have to union together the types that the arms produce and so forth. + let scrut_diverges = self.diverges.replace(Diverges::Maybe); + + // #55810: Type check patterns first so we get types for all bindings. + let scrut_span = scrut.span.find_ancestor_inside(expr.span).unwrap_or(scrut.span); + for arm in arms { + self.check_pat_top(&arm.pat, scrutinee_ty, Some(scrut_span), true); + } + + // Now typecheck the blocks. + // + // The result of the match is the common supertype of all the + // arms. Start out the value as bottom, since it's the, well, + // bottom the type lattice, and we'll be moving up the lattice as + // we process each arm. 
(Note that any match with 0 arms is matching + // on any empty type and is therefore unreachable; should the flow + // of execution reach it, we will panic, so bottom is an appropriate + // type in that case) + let mut all_arms_diverge = Diverges::WarnedAlways; + + let expected = orig_expected.adjust_for_branches(self); + debug!(?expected); + + let mut coercion = { + let coerce_first = match expected { + // We don't coerce to `()` so that if the match expression is a + // statement it's branches can have any consistent type. That allows + // us to give better error messages (pointing to a usually better + // arm for inconsistent arms or to the whole match when a `()` type + // is required). + Expectation::ExpectHasType(ety) if ety != self.tcx.mk_unit() => ety, + _ => self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::MiscVariable, + span: expr.span, + }), + }; + CoerceMany::with_coercion_sites(coerce_first, arms) + }; + + let mut other_arms = vec![]; // Used only for diagnostics. + let mut prior_arm = None; + for arm in arms { + if let Some(g) = &arm.guard { + self.diverges.set(Diverges::Maybe); + match g { + hir::Guard::If(e) => { + self.check_expr_has_type_or_error(e, tcx.types.bool, |_| {}); + } + hir::Guard::IfLet(l) => { + self.check_expr_let(l); + } + }; + } + + self.diverges.set(Diverges::Maybe); + + let arm_ty = self.check_expr_with_expectation(&arm.body, expected); + all_arms_diverge &= self.diverges.get(); + + let opt_suggest_box_span = self.opt_suggest_box_span(arm_ty, orig_expected); + + let (arm_block_id, arm_span) = if let hir::ExprKind::Block(blk, _) = arm.body.kind { + (Some(blk.hir_id), self.find_block_span(blk)) + } else { + (None, arm.body.span) + }; + + let (span, code) = match prior_arm { + // The reason for the first arm to fail is not that the match arms diverge, + // but rather that there's a prior obligation that doesn't hold. + None => (arm_span, ObligationCauseCode::BlockTailExpression(arm.body.hir_id)), + Some((prior_arm_block_id, prior_arm_ty, prior_arm_span)) => ( + expr.span, + ObligationCauseCode::MatchExpressionArm(Box::new(MatchExpressionArmCause { + arm_block_id, + arm_span, + arm_ty, + prior_arm_block_id, + prior_arm_ty, + prior_arm_span, + scrut_span: scrut.span, + source: match_src, + prior_arms: other_arms.clone(), + scrut_hir_id: scrut.hir_id, + opt_suggest_box_span, + })), + ), + }; + let cause = self.cause(span, code); + + // This is the moral equivalent of `coercion.coerce(self, cause, arm.body, arm_ty)`. + // We use it this way to be able to expand on the potential error and detect when a + // `match` tail statement could be a tail expression instead. If so, we suggest + // removing the stray semicolon. + coercion.coerce_inner( + self, + &cause, + Some(&arm.body), + arm_ty, + Some(&mut |err| { + let Some(ret) = self.ret_type_span else { + return; + }; + let Expectation::IsLast(stmt) = orig_expected else { + return + }; + let can_coerce_to_return_ty = match self.ret_coercion.as_ref() { + Some(ret_coercion) if self.in_tail_expr => { + let ret_ty = ret_coercion.borrow().expected_ty(); + let ret_ty = self.inh.infcx.shallow_resolve(ret_ty); + self.can_coerce(arm_ty, ret_ty) + && prior_arm.map_or(true, |(_, t, _)| self.can_coerce(t, ret_ty)) + // The match arms need to unify for the case of `impl Trait`. 
+ && !matches!(ret_ty.kind(), ty::Opaque(..)) + } + _ => false, + }; + if !can_coerce_to_return_ty { + return; + } + + let semi_span = expr.span.shrink_to_hi().with_hi(stmt.hi()); + let mut ret_span: MultiSpan = semi_span.into(); + ret_span.push_span_label( + expr.span, + "this could be implicitly returned but it is a statement, not a \ + tail expression", + ); + ret_span + .push_span_label(ret, "the `match` arms can conform to this return type"); + ret_span.push_span_label( + semi_span, + "the `match` is a statement because of this semicolon, consider \ + removing it", + ); + err.span_note( + ret_span, + "you might have meant to return the `match` expression", + ); + err.tool_only_span_suggestion( + semi_span, + "remove this semicolon", + "", + Applicability::MaybeIncorrect, + ); + }), + false, + ); + + other_arms.push(arm_span); + if other_arms.len() > 5 { + other_arms.remove(0); + } + + prior_arm = Some((arm_block_id, arm_ty, arm_span)); + } + + // If all of the arms in the `match` diverge, + // and we're dealing with an actual `match` block + // (as opposed to a `match` desugared from something else'), + // we can emit a better note. Rather than pointing + // at a diverging expression in an arbitrary arm, + // we can point at the entire `match` expression + if let (Diverges::Always { .. }, hir::MatchSource::Normal) = (all_arms_diverge, match_src) { + all_arms_diverge = Diverges::Always { + span: expr.span, + custom_note: Some( + "any code following this `match` expression is unreachable, as all arms diverge", + ), + }; + } + + // We won't diverge unless the scrutinee or all arms diverge. + self.diverges.set(scrut_diverges | all_arms_diverge); + + let match_ty = coercion.complete(self); + debug!(?match_ty); + match_ty + } + + /// When the previously checked expression (the scrutinee) diverges, + /// warn the user about the match arms being unreachable. + fn warn_arms_when_scrutinee_diverges(&self, arms: &'tcx [hir::Arm<'tcx>]) { + for arm in arms { + self.warn_if_unreachable(arm.body.hir_id, arm.body.span, "arm"); + } + } + + /// Handle the fallback arm of a desugared if(-let) like a missing else. + /// + /// Returns `true` if there was an error forcing the coercion to the `()` type. + pub(super) fn if_fallback_coercion( + &self, + span: Span, + then_expr: &'tcx hir::Expr<'tcx>, + coercion: &mut CoerceMany<'tcx, '_, T>, + ) -> bool + where + T: AsCoercionSite, + { + // If this `if` expr is the parent's function return expr, + // the cause of the type coercion is the return type, point at it. 
(#25228) + let ret_reason = self.maybe_get_coercion_reason(then_expr.hir_id, span); + let cause = self.cause(span, ObligationCauseCode::IfExpressionWithNoElse); + let mut error = false; + coercion.coerce_forced_unit( + self, + &cause, + &mut |err| { + if let Some((span, msg)) = &ret_reason { + err.span_label(*span, msg); + } else if let ExprKind::Block(block, _) = &then_expr.kind + && let Some(expr) = &block.expr + { + err.span_label(expr.span, "found here"); + } + err.note("`if` expressions without `else` evaluate to `()`"); + err.help("consider adding an `else` block that evaluates to the expected type"); + error = true; + }, + ret_reason.is_none(), + ); + error + } + + fn maybe_get_coercion_reason(&self, hir_id: hir::HirId, sp: Span) -> Option<(Span, String)> { + let node = { + let rslt = self.tcx.hir().get_parent_node(self.tcx.hir().get_parent_node(hir_id)); + self.tcx.hir().get(rslt) + }; + if let hir::Node::Block(block) = node { + // check that the body's parent is an fn + let parent = self + .tcx + .hir() + .get(self.tcx.hir().get_parent_node(self.tcx.hir().get_parent_node(block.hir_id))); + if let (Some(expr), hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(..), .. })) = + (&block.expr, parent) + { + // check that the `if` expr without `else` is the fn body's expr + if expr.span == sp { + return self.get_fn_decl(hir_id).and_then(|(fn_decl, _)| { + let span = fn_decl.output.span(); + let snippet = self.tcx.sess.source_map().span_to_snippet(span).ok()?; + Some((span, format!("expected `{snippet}` because of this return type"))) + }); + } + } + } + if let hir::Node::Local(hir::Local { ty: Some(_), pat, .. }) = node { + return Some((pat.span, "expected because of this assignment".to_string())); + } + None + } + + pub(crate) fn if_cause( + &self, + span: Span, + cond_span: Span, + then_expr: &'tcx hir::Expr<'tcx>, + else_expr: &'tcx hir::Expr<'tcx>, + then_ty: Ty<'tcx>, + else_ty: Ty<'tcx>, + opt_suggest_box_span: Option, + ) -> ObligationCause<'tcx> { + let mut outer_span = if self.tcx.sess.source_map().is_multiline(span) { + // The `if`/`else` isn't in one line in the output, include some context to make it + // clear it is an if/else expression: + // ``` + // LL | let x = if true { + // | _____________- + // LL || 10i32 + // || ----- expected because of this + // LL || } else { + // LL || 10u32 + // || ^^^^^ expected `i32`, found `u32` + // LL || }; + // ||_____- `if` and `else` have incompatible types + // ``` + Some(span) + } else { + // The entire expression is in one line, only point at the arms + // ``` + // LL | let x = if true { 10i32 } else { 10u32 }; + // | ----- ^^^^^ expected `i32`, found `u32` + // | | + // | expected because of this + // ``` + None + }; + + let (error_sp, else_id) = if let ExprKind::Block(block, _) = &else_expr.kind { + let block = block.innermost_block(); + + // Avoid overlapping spans that aren't as readable: + // ``` + // 2 | let x = if true { + // | _____________- + // 3 | | 3 + // | | - expected because of this + // 4 | | } else { + // | |____________^ + // 5 | || + // 6 | || }; + // | || ^ + // | ||_____| + // | |______if and else have incompatible types + // | expected integer, found `()` + // ``` + // by not pointing at the entire expression: + // ``` + // 2 | let x = if true { + // | ------- `if` and `else` have incompatible types + // 3 | 3 + // | - expected because of this + // 4 | } else { + // | ____________^ + // 5 | | + // 6 | | }; + // | |_____^ expected integer, found `()` + // ``` + if block.expr.is_none() && block.stmts.is_empty() 
+ && let Some(outer_span) = &mut outer_span + && let Some(cond_span) = cond_span.find_ancestor_inside(*outer_span) + { + *outer_span = outer_span.with_hi(cond_span.hi()) + } + + (self.find_block_span(block), block.hir_id) + } else { + (else_expr.span, else_expr.hir_id) + }; + + let then_id = if let ExprKind::Block(block, _) = &then_expr.kind { + let block = block.innermost_block(); + // Exclude overlapping spans + if block.expr.is_none() && block.stmts.is_empty() { + outer_span = None; + } + block.hir_id + } else { + then_expr.hir_id + }; + + // Finally construct the cause: + self.cause( + error_sp, + ObligationCauseCode::IfExpression(Box::new(IfExpressionCause { + else_id, + then_id, + then_ty, + else_ty, + outer_span, + opt_suggest_box_span, + })), + ) + } + + pub(super) fn demand_scrutinee_type( + &self, + scrut: &'tcx hir::Expr<'tcx>, + contains_ref_bindings: Option, + no_arms: bool, + ) -> Ty<'tcx> { + // Not entirely obvious: if matches may create ref bindings, we want to + // use the *precise* type of the scrutinee, *not* some supertype, as + // the "scrutinee type" (issue #23116). + // + // arielb1 [writes here in this comment thread][c] that there + // is certainly *some* potential danger, e.g., for an example + // like: + // + // [c]: https://github.com/rust-lang/rust/pull/43399#discussion_r130223956 + // + // ``` + // let Foo(x) = f()[0]; + // ``` + // + // Then if the pattern matches by reference, we want to match + // `f()[0]` as a lexpr, so we can't allow it to be + // coerced. But if the pattern matches by value, `f()[0]` is + // still syntactically a lexpr, but we *do* want to allow + // coercions. + // + // However, *likely* we are ok with allowing coercions to + // happen if there are no explicit ref mut patterns - all + // implicit ref mut patterns must occur behind a reference, so + // they will have the "correct" variance and lifetime. + // + // This does mean that the following pattern would be legal: + // + // ``` + // struct Foo(Bar); + // struct Bar(u32); + // impl Deref for Foo { + // type Target = Bar; + // fn deref(&self) -> &Bar { &self.0 } + // } + // impl DerefMut for Foo { + // fn deref_mut(&mut self) -> &mut Bar { &mut self.0 } + // } + // fn foo(x: &mut Foo) { + // { + // let Bar(z): &mut Bar = x; + // *z = 42; + // } + // assert_eq!(foo.0.0, 42); + // } + // ``` + // + // FIXME(tschottdorf): don't call contains_explicit_ref_binding, which + // is problematic as the HIR is being scraped, but ref bindings may be + // implicit after #42640. We need to make sure that pat_adjustments + // (once introduced) is populated by the time we get here. + // + // See #44848. + if let Some(m) = contains_ref_bindings { + self.check_expr_with_needs(scrut, Needs::maybe_mut_place(m)) + } else if no_arms { + self.check_expr(scrut) + } else { + // ...but otherwise we want to use any supertype of the + // scrutinee. This is sort of a workaround, see note (*) in + // `check_pat` for some details. + let scrut_ty = self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::TypeInference, + span: scrut.span, + }); + self.check_expr_has_type_or_error(scrut, scrut_ty, |_| {}); + scrut_ty + } + } + + // When we have a `match` as a tail expression in a `fn` with a returned `impl Trait` + // we check if the different arms would work with boxed trait objects instead and + // provide a structured suggestion in that case. 
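+    //
+    // Illustrative (hypothetical user code, not taken from this patch):
+    //
+    //     fn make(flag: bool) -> impl std::fmt::Debug {
+    //         match flag {
+    //             true => 1u32,
+    //             false => "one", // incompatible arm types
+    //         }
+    //     }
+    //
+    // Both arm types implement `Debug`, so the mismatch error can carry a
+    // structured suggestion to box each arm and return `Box<dyn std::fmt::Debug>`.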
+ pub(crate) fn opt_suggest_box_span( + &self, + outer_ty: Ty<'tcx>, + orig_expected: Expectation<'tcx>, + ) -> Option { + match orig_expected { + Expectation::ExpectHasType(expected) + if self.in_tail_expr + && self.ret_coercion.as_ref()?.borrow().merged_ty().has_opaque_types() + && self.can_coerce(outer_ty, expected) => + { + let obligations = self.fulfillment_cx.borrow().pending_obligations(); + let mut suggest_box = !obligations.is_empty(); + for o in obligations { + match o.predicate.kind().skip_binder() { + ty::PredicateKind::Trait(t) => { + let pred = + ty::Binder::dummy(ty::PredicateKind::Trait(ty::TraitPredicate { + trait_ref: ty::TraitRef { + def_id: t.def_id(), + substs: self.tcx.mk_substs_trait(outer_ty, &[]), + }, + constness: t.constness, + polarity: t.polarity, + })); + let obl = Obligation::new( + o.cause.clone(), + self.param_env, + pred.to_predicate(self.tcx), + ); + suggest_box &= self.predicate_must_hold_modulo_regions(&obl); + if !suggest_box { + // We've encountered some obligation that didn't hold, so the + // return expression can't just be boxed. We don't need to + // evaluate the rest of the obligations. + break; + } + } + _ => {} + } + } + // If all the obligations hold (or there are no obligations) the tail expression + // we can suggest to return a boxed trait object instead of an opaque type. + if suggest_box { self.ret_type_span } else { None } + } + _ => None, + } + } +} + +fn arms_contain_ref_bindings<'tcx>(arms: &'tcx [hir::Arm<'tcx>]) -> Option { + arms.iter().filter_map(|a| a.pat.contains_explicit_ref_binding()).max_by_key(|m| match *m { + hir::Mutability::Mut => 1, + hir::Mutability::Not => 0, + }) +} diff --git a/compiler/rustc_typeck/src/check/autoderef.rs b/compiler/rustc_typeck/src/check/autoderef.rs new file mode 100644 index 000000000..59c366ad7 --- /dev/null +++ b/compiler/rustc_typeck/src/check/autoderef.rs @@ -0,0 +1,78 @@ +//! Some helper functions for `AutoDeref` +use super::method::MethodCallee; +use super::{FnCtxt, PlaceOp}; + +use rustc_infer::infer::InferOk; +use rustc_middle::ty::adjustment::{Adjust, Adjustment, OverloadedDeref}; +use rustc_middle::ty::{self, Ty}; +use rustc_span::Span; +use rustc_trait_selection::autoderef::{Autoderef, AutoderefKind}; + +use std::iter; + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + pub fn autoderef(&'a self, span: Span, base_ty: Ty<'tcx>) -> Autoderef<'a, 'tcx> { + Autoderef::new(self, self.param_env, self.body_id, span, base_ty, span) + } + + /// Like `autoderef`, but provides a custom `Span` to use for calls to + /// an overloaded `Deref` operator + pub fn autoderef_overloaded_span( + &'a self, + span: Span, + base_ty: Ty<'tcx>, + overloaded_span: Span, + ) -> Autoderef<'a, 'tcx> { + Autoderef::new(self, self.param_env, self.body_id, span, base_ty, overloaded_span) + } + + pub fn try_overloaded_deref( + &self, + span: Span, + base_ty: Ty<'tcx>, + ) -> Option>> { + self.try_overloaded_place_op(span, base_ty, &[], PlaceOp::Deref) + } + + /// Returns the adjustment steps. 
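+    ///
+    /// Illustrative (hypothetical receiver): dereferencing a `&&String` down to
+    /// `str` records two built-in derefs plus one overloaded `Deref` step (for
+    /// `String -> str`), each as an `Adjust::Deref` adjustment.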
+ pub fn adjust_steps(&self, autoderef: &Autoderef<'a, 'tcx>) -> Vec> { + self.register_infer_ok_obligations(self.adjust_steps_as_infer_ok(autoderef)) + } + + pub fn adjust_steps_as_infer_ok( + &self, + autoderef: &Autoderef<'a, 'tcx>, + ) -> InferOk<'tcx, Vec>> { + let mut obligations = vec![]; + let steps = autoderef.steps(); + let targets = + steps.iter().skip(1).map(|&(ty, _)| ty).chain(iter::once(autoderef.final_ty(false))); + let steps: Vec<_> = steps + .iter() + .map(|&(source, kind)| { + if let AutoderefKind::Overloaded = kind { + self.try_overloaded_deref(autoderef.span(), source).and_then( + |InferOk { value: method, obligations: o }| { + obligations.extend(o); + if let ty::Ref(region, _, mutbl) = *method.sig.output().kind() { + Some(OverloadedDeref { + region, + mutbl, + span: autoderef.overloaded_span(), + }) + } else { + None + } + }, + ) + } else { + None + } + }) + .zip(targets) + .map(|(autoderef, target)| Adjustment { kind: Adjust::Deref(autoderef), target }) + .collect(); + + InferOk { obligations, value: steps } + } +} diff --git a/compiler/rustc_typeck/src/check/callee.rs b/compiler/rustc_typeck/src/check/callee.rs new file mode 100644 index 000000000..75f5aced8 --- /dev/null +++ b/compiler/rustc_typeck/src/check/callee.rs @@ -0,0 +1,675 @@ +use super::method::MethodCallee; +use super::{Expectation, FnCtxt, TupleArgumentsFlag}; +use crate::type_error_struct; + +use rustc_errors::{struct_span_err, Applicability, Diagnostic}; +use rustc_hir as hir; +use rustc_hir::def::{self, Namespace, Res}; +use rustc_hir::def_id::DefId; +use rustc_infer::{ + infer, + traits::{self, Obligation}, +}; +use rustc_infer::{ + infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}, + traits::ObligationCause, +}; +use rustc_middle::ty::adjustment::{ + Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, +}; +use rustc_middle::ty::subst::{Subst, SubstsRef}; +use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitable}; +use rustc_span::def_id::LocalDefId; +use rustc_span::symbol::{sym, Ident}; +use rustc_span::Span; +use rustc_target::spec::abi; +use rustc_trait_selection::autoderef::Autoderef; +use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt; + +use std::iter; + +/// Checks that it is legal to call methods of the trait corresponding +/// to `trait_id` (this only cares about the trait, not the specific +/// method that is called). +pub fn check_legal_trait_for_method_call( + tcx: TyCtxt<'_>, + span: Span, + receiver: Option, + expr_span: Span, + trait_id: DefId, +) { + if tcx.lang_items().drop_trait() == Some(trait_id) { + let mut err = struct_span_err!(tcx.sess, span, E0040, "explicit use of destructor method"); + err.span_label(span, "explicit destructor calls not allowed"); + + let (sp, suggestion) = receiver + .and_then(|s| tcx.sess.source_map().span_to_snippet(s).ok()) + .filter(|snippet| !snippet.is_empty()) + .map(|snippet| (expr_span, format!("drop({snippet})"))) + .unwrap_or_else(|| (span, "drop".to_string())); + + err.span_suggestion( + sp, + "consider using `drop` function", + suggestion, + Applicability::MaybeIncorrect, + ); + + err.emit(); + } +} + +enum CallStep<'tcx> { + Builtin(Ty<'tcx>), + DeferredClosure(LocalDefId, ty::FnSig<'tcx>), + /// E.g., enum variant constructors. 
+ Overloaded(MethodCallee<'tcx>), +} + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + pub fn check_call( + &self, + call_expr: &'tcx hir::Expr<'tcx>, + callee_expr: &'tcx hir::Expr<'tcx>, + arg_exprs: &'tcx [hir::Expr<'tcx>], + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + let original_callee_ty = match &callee_expr.kind { + hir::ExprKind::Path(hir::QPath::Resolved(..) | hir::QPath::TypeRelative(..)) => self + .check_expr_with_expectation_and_args( + callee_expr, + Expectation::NoExpectation, + arg_exprs, + ), + _ => self.check_expr(callee_expr), + }; + + let expr_ty = self.structurally_resolved_type(call_expr.span, original_callee_ty); + + let mut autoderef = self.autoderef(callee_expr.span, expr_ty); + let mut result = None; + while result.is_none() && autoderef.next().is_some() { + result = self.try_overloaded_call_step(call_expr, callee_expr, arg_exprs, &autoderef); + } + self.register_predicates(autoderef.into_obligations()); + + let output = match result { + None => { + // this will report an error since original_callee_ty is not a fn + self.confirm_builtin_call( + call_expr, + callee_expr, + original_callee_ty, + arg_exprs, + expected, + ) + } + + Some(CallStep::Builtin(callee_ty)) => { + self.confirm_builtin_call(call_expr, callee_expr, callee_ty, arg_exprs, expected) + } + + Some(CallStep::DeferredClosure(def_id, fn_sig)) => { + self.confirm_deferred_closure_call(call_expr, arg_exprs, expected, def_id, fn_sig) + } + + Some(CallStep::Overloaded(method_callee)) => { + self.confirm_overloaded_call(call_expr, arg_exprs, expected, method_callee) + } + }; + + // we must check that return type of called functions is WF: + self.register_wf_obligation(output.into(), call_expr.span, traits::WellFormed(None)); + + output + } + + fn try_overloaded_call_step( + &self, + call_expr: &'tcx hir::Expr<'tcx>, + callee_expr: &'tcx hir::Expr<'tcx>, + arg_exprs: &'tcx [hir::Expr<'tcx>], + autoderef: &Autoderef<'a, 'tcx>, + ) -> Option> { + let adjusted_ty = + self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false)); + debug!( + "try_overloaded_call_step(call_expr={:?}, adjusted_ty={:?})", + call_expr, adjusted_ty + ); + + // If the callee is a bare function or a closure, then we're all set. + match *adjusted_ty.kind() { + ty::FnDef(..) | ty::FnPtr(_) => { + let adjustments = self.adjust_steps(autoderef); + self.apply_adjustments(callee_expr, adjustments); + return Some(CallStep::Builtin(adjusted_ty)); + } + + ty::Closure(def_id, substs) => { + let def_id = def_id.expect_local(); + + // Check whether this is a call to a closure where we + // haven't yet decided on whether the closure is fn vs + // fnmut vs fnonce. If so, we have to defer further processing. + if self.closure_kind(substs).is_none() { + let closure_sig = substs.as_closure().sig(); + let closure_sig = self.replace_bound_vars_with_fresh_vars( + call_expr.span, + infer::FnCall, + closure_sig, + ); + let adjustments = self.adjust_steps(autoderef); + self.record_deferred_call_resolution( + def_id, + DeferredCallResolution { + call_expr, + callee_expr, + adjusted_ty, + adjustments, + fn_sig: closure_sig, + closure_substs: substs, + }, + ); + return Some(CallStep::DeferredClosure(def_id, closure_sig)); + } + } + + // Hack: we know that there are traits implementing Fn for &F + // where F:Fn and so forth. In the particular case of types + // like `x: &mut FnMut()`, if there is a call `x()`, we would + // normally translate to `FnMut::call_mut(&mut x, ())`, but + // that winds up requiring `mut x: &mut FnMut()`. 
A little + // over the top. The simplest fix by far is to just ignore + // this case and deref again, so we wind up with + // `FnMut::call_mut(&mut *x, ())`. + ty::Ref(..) if autoderef.step_count() == 0 => { + return None; + } + + _ => {} + } + + // Now, we look for the implementation of a Fn trait on the object's type. + // We first do it with the explicit instruction to look for an impl of + // `Fn`, with the tuple `Tuple` having an arity corresponding + // to the number of call parameters. + // If that fails (or_else branch), we try again without specifying the + // shape of the tuple (hence the None). This allows to detect an Fn trait + // is implemented, and use this information for diagnostic. + self.try_overloaded_call_traits(call_expr, adjusted_ty, Some(arg_exprs)) + .or_else(|| self.try_overloaded_call_traits(call_expr, adjusted_ty, None)) + .map(|(autoref, method)| { + let mut adjustments = self.adjust_steps(autoderef); + adjustments.extend(autoref); + self.apply_adjustments(callee_expr, adjustments); + CallStep::Overloaded(method) + }) + } + + fn try_overloaded_call_traits( + &self, + call_expr: &hir::Expr<'_>, + adjusted_ty: Ty<'tcx>, + opt_arg_exprs: Option<&'tcx [hir::Expr<'tcx>]>, + ) -> Option<(Option>, MethodCallee<'tcx>)> { + // Try the options that are least restrictive on the caller first. + for (opt_trait_def_id, method_name, borrow) in [ + (self.tcx.lang_items().fn_trait(), Ident::with_dummy_span(sym::call), true), + (self.tcx.lang_items().fn_mut_trait(), Ident::with_dummy_span(sym::call_mut), true), + (self.tcx.lang_items().fn_once_trait(), Ident::with_dummy_span(sym::call_once), false), + ] { + let Some(trait_def_id) = opt_trait_def_id else { continue }; + + let opt_input_types = opt_arg_exprs.map(|arg_exprs| { + [self.tcx.mk_tup(arg_exprs.iter().map(|e| { + self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::TypeInference, + span: e.span, + }) + }))] + }); + let opt_input_types = opt_input_types.as_ref().map(AsRef::as_ref); + + if let Some(ok) = self.lookup_method_in_trait( + call_expr.span, + method_name, + trait_def_id, + adjusted_ty, + opt_input_types, + ) { + let method = self.register_infer_ok_obligations(ok); + let mut autoref = None; + if borrow { + // Check for &self vs &mut self in the method signature. Since this is either + // the Fn or FnMut trait, it should be one of those. + let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].kind() else { + // The `fn`/`fn_mut` lang item is ill-formed, which should have + // caused an error elsewhere. + self.tcx + .sess + .delay_span_bug(call_expr.span, "input to call/call_mut is not a ref?"); + return None; + }; + + let mutbl = match mutbl { + hir::Mutability::Not => AutoBorrowMutability::Not, + hir::Mutability::Mut => AutoBorrowMutability::Mut { + // For initial two-phase borrow + // deployment, conservatively omit + // overloaded function call ops. + allow_two_phase_borrow: AllowTwoPhase::No, + }, + }; + autoref = Some(Adjustment { + kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)), + target: method.sig.inputs()[0], + }); + } + return Some((autoref, method)); + } + } + + None + } + + /// Give appropriate suggestion when encountering `||{/* not callable */}()`, where the + /// likely intention is to call the closure, suggest `(||{})()`. 
(#55851) + fn identify_bad_closure_def_and_call( + &self, + err: &mut Diagnostic, + hir_id: hir::HirId, + callee_node: &hir::ExprKind<'_>, + callee_span: Span, + ) { + let hir = self.tcx.hir(); + let parent_hir_id = hir.get_parent_node(hir_id); + let parent_node = hir.get(parent_hir_id); + if let ( + hir::Node::Expr(hir::Expr { + kind: hir::ExprKind::Closure(&hir::Closure { fn_decl_span, body, .. }), + .. + }), + hir::ExprKind::Block(..), + ) = (parent_node, callee_node) + { + let fn_decl_span = if hir.body(body).generator_kind + == Some(hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Closure)) + { + // Actually need to unwrap a few more layers of HIR to get to + // the _real_ closure... + let async_closure = hir.get_parent_node(hir.get_parent_node(parent_hir_id)); + if let hir::Node::Expr(hir::Expr { + kind: hir::ExprKind::Closure(&hir::Closure { fn_decl_span, .. }), + .. + }) = hir.get(async_closure) + { + fn_decl_span + } else { + return; + } + } else { + fn_decl_span + }; + + let start = fn_decl_span.shrink_to_lo(); + let end = callee_span.shrink_to_hi(); + err.multipart_suggestion( + "if you meant to create this closure and immediately call it, surround the \ + closure with parentheses", + vec![(start, "(".to_string()), (end, ")".to_string())], + Applicability::MaybeIncorrect, + ); + } + } + + /// Give appropriate suggestion when encountering `[("a", 0) ("b", 1)]`, where the + /// likely intention is to create an array containing tuples. + fn maybe_suggest_bad_array_definition( + &self, + err: &mut Diagnostic, + call_expr: &'tcx hir::Expr<'tcx>, + callee_expr: &'tcx hir::Expr<'tcx>, + ) -> bool { + let hir_id = self.tcx.hir().get_parent_node(call_expr.hir_id); + let parent_node = self.tcx.hir().get(hir_id); + if let ( + hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Array(_), .. }), + hir::ExprKind::Tup(exp), + hir::ExprKind::Call(_, args), + ) = (parent_node, &callee_expr.kind, &call_expr.kind) + && args.len() == exp.len() + { + let start = callee_expr.span.shrink_to_hi(); + err.span_suggestion( + start, + "consider separating array elements with a comma", + ",", + Applicability::MaybeIncorrect, + ); + return true; + } + false + } + + fn confirm_builtin_call( + &self, + call_expr: &'tcx hir::Expr<'tcx>, + callee_expr: &'tcx hir::Expr<'tcx>, + callee_ty: Ty<'tcx>, + arg_exprs: &'tcx [hir::Expr<'tcx>], + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + let (fn_sig, def_id) = match *callee_ty.kind() { + ty::FnDef(def_id, subst) => { + let fn_sig = self.tcx.bound_fn_sig(def_id).subst(self.tcx, subst); + + // Unit testing: function items annotated with + // `#[rustc_evaluate_where_clauses]` trigger special output + // to let us test the trait evaluation system. 
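+                // Illustrative (hypothetical test function):
+                //
+                //     #[rustc_evaluate_where_clauses]
+                //     fn assert_send<T: Send>() {}
+                //
+                // Every call to `assert_send` then reports, for each of its
+                // where-clauses, how that predicate evaluated at the call site.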
+ if self.tcx.has_attr(def_id, sym::rustc_evaluate_where_clauses) { + let predicates = self.tcx.predicates_of(def_id); + let predicates = predicates.instantiate(self.tcx, subst); + for (predicate, predicate_span) in + predicates.predicates.iter().zip(&predicates.spans) + { + let obligation = Obligation::new( + ObligationCause::dummy_with_span(callee_expr.span), + self.param_env, + *predicate, + ); + let result = self.evaluate_obligation(&obligation); + self.tcx + .sess + .struct_span_err( + callee_expr.span, + &format!("evaluate({:?}) = {:?}", predicate, result), + ) + .span_label(*predicate_span, "predicate") + .emit(); + } + } + (fn_sig, Some(def_id)) + } + ty::FnPtr(sig) => (sig, None), + _ => { + let mut unit_variant = None; + if let hir::ExprKind::Path(qpath) = &callee_expr.kind + && let Res::Def(def::DefKind::Ctor(kind, def::CtorKind::Const), _) + = self.typeck_results.borrow().qpath_res(qpath, callee_expr.hir_id) + // Only suggest removing parens if there are no arguments + && arg_exprs.is_empty() + { + let descr = match kind { + def::CtorOf::Struct => "struct", + def::CtorOf::Variant => "enum variant", + }; + let removal_span = + callee_expr.span.shrink_to_hi().to(call_expr.span.shrink_to_hi()); + unit_variant = + Some((removal_span, descr, rustc_hir_pretty::qpath_to_string(qpath))); + } + + let callee_ty = self.resolve_vars_if_possible(callee_ty); + let mut err = type_error_struct!( + self.tcx.sess, + callee_expr.span, + callee_ty, + E0618, + "expected function, found {}", + match &unit_variant { + Some((_, kind, path)) => format!("{kind} `{path}`"), + None => format!("`{callee_ty}`"), + } + ); + + self.identify_bad_closure_def_and_call( + &mut err, + call_expr.hir_id, + &callee_expr.kind, + callee_expr.span, + ); + + if let Some((removal_span, kind, path)) = &unit_variant { + err.span_suggestion_verbose( + *removal_span, + &format!( + "`{path}` is a unit {kind}, and does not take parentheses to be constructed", + ), + "", + Applicability::MachineApplicable, + ); + } + + let mut inner_callee_path = None; + let def = match callee_expr.kind { + hir::ExprKind::Path(ref qpath) => { + self.typeck_results.borrow().qpath_res(qpath, callee_expr.hir_id) + } + hir::ExprKind::Call(ref inner_callee, _) => { + // If the call spans more than one line and the callee kind is + // itself another `ExprCall`, that's a clue that we might just be + // missing a semicolon (Issue #51055) + let call_is_multiline = + self.tcx.sess.source_map().is_multiline(call_expr.span); + if call_is_multiline { + err.span_suggestion( + callee_expr.span.shrink_to_hi(), + "consider using a semicolon here", + ";", + Applicability::MaybeIncorrect, + ); + } + if let hir::ExprKind::Path(ref inner_qpath) = inner_callee.kind { + inner_callee_path = Some(inner_qpath); + self.typeck_results.borrow().qpath_res(inner_qpath, inner_callee.hir_id) + } else { + Res::Err + } + } + _ => Res::Err, + }; + + if !self.maybe_suggest_bad_array_definition(&mut err, call_expr, callee_expr) { + err.span_label(call_expr.span, "call expression requires function"); + } + + if let Some(span) = self.tcx.hir().res_span(def) { + let callee_ty = callee_ty.to_string(); + let label = match (unit_variant, inner_callee_path) { + (Some((_, kind, path)), _) => Some(format!("{kind} `{path}` defined here")), + (_, Some(hir::QPath::Resolved(_, path))) => self + .tcx + .sess + .source_map() + .span_to_snippet(path.span) + .ok() + .map(|p| format!("`{p}` defined here returns `{callee_ty}`")), + _ => { + match def { + // Emit a different diagnostic for local 
variables, as they are not + // type definitions themselves, but rather variables *of* that type. + Res::Local(hir_id) => Some(format!( + "`{}` has type `{}`", + self.tcx.hir().name(hir_id), + callee_ty + )), + Res::Def(kind, def_id) if kind.ns() == Some(Namespace::ValueNS) => { + Some(format!( + "`{}` defined here", + self.tcx.def_path_str(def_id), + )) + } + _ => Some(format!("`{callee_ty}` defined here")), + } + } + }; + if let Some(label) = label { + err.span_label(span, label); + } + } + err.emit(); + + // This is the "default" function signature, used in case of error. + // In that case, we check each argument against "error" in order to + // set up all the node type bindings. + ( + ty::Binder::dummy(self.tcx.mk_fn_sig( + self.err_args(arg_exprs.len()).into_iter(), + self.tcx.ty_error(), + false, + hir::Unsafety::Normal, + abi::Abi::Rust, + )), + None, + ) + } + }; + + // Replace any late-bound regions that appear in the function + // signature with region variables. We also have to + // renormalize the associated types at this point, since they + // previously appeared within a `Binder<>` and hence would not + // have been normalized before. + let fn_sig = self.replace_bound_vars_with_fresh_vars(call_expr.span, infer::FnCall, fn_sig); + let fn_sig = self.normalize_associated_types_in(call_expr.span, fn_sig); + + // Call the generic checker. + let expected_arg_tys = self.expected_inputs_for_expected_output( + call_expr.span, + expected, + fn_sig.output(), + fn_sig.inputs(), + ); + self.check_argument_types( + call_expr.span, + call_expr, + fn_sig.inputs(), + expected_arg_tys, + arg_exprs, + fn_sig.c_variadic, + TupleArgumentsFlag::DontTupleArguments, + def_id, + ); + + fn_sig.output() + } + + fn confirm_deferred_closure_call( + &self, + call_expr: &'tcx hir::Expr<'tcx>, + arg_exprs: &'tcx [hir::Expr<'tcx>], + expected: Expectation<'tcx>, + closure_def_id: LocalDefId, + fn_sig: ty::FnSig<'tcx>, + ) -> Ty<'tcx> { + // `fn_sig` is the *signature* of the closure being called. We + // don't know the full details yet (`Fn` vs `FnMut` etc), but we + // do know the types expected for each argument and the return + // type. 
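+        //
+        // Illustrative sketch (assumption, not from the upstream comment): for
+        //
+        //     let f = |x: u32| x + 1;
+        //     f(2);
+        //
+        // the call is checked below against the closure signature
+        // `fn(u32) -> u32` even though upvar inference has not yet decided
+        // whether `f` is `Fn`, `FnMut` or `FnOnce`; that choice is reconciled
+        // later in `DeferredCallResolution::resolve`.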
+ + let expected_arg_tys = self.expected_inputs_for_expected_output( + call_expr.span, + expected, + fn_sig.output(), + fn_sig.inputs(), + ); + + self.check_argument_types( + call_expr.span, + call_expr, + fn_sig.inputs(), + expected_arg_tys, + arg_exprs, + fn_sig.c_variadic, + TupleArgumentsFlag::TupleArguments, + Some(closure_def_id.to_def_id()), + ); + + fn_sig.output() + } + + fn confirm_overloaded_call( + &self, + call_expr: &'tcx hir::Expr<'tcx>, + arg_exprs: &'tcx [hir::Expr<'tcx>], + expected: Expectation<'tcx>, + method_callee: MethodCallee<'tcx>, + ) -> Ty<'tcx> { + let output_type = self.check_method_argument_types( + call_expr.span, + call_expr, + Ok(method_callee), + arg_exprs, + TupleArgumentsFlag::TupleArguments, + expected, + ); + + self.write_method_call(call_expr.hir_id, method_callee); + output_type + } +} + +#[derive(Debug)] +pub struct DeferredCallResolution<'tcx> { + call_expr: &'tcx hir::Expr<'tcx>, + callee_expr: &'tcx hir::Expr<'tcx>, + adjusted_ty: Ty<'tcx>, + adjustments: Vec>, + fn_sig: ty::FnSig<'tcx>, + closure_substs: SubstsRef<'tcx>, +} + +impl<'a, 'tcx> DeferredCallResolution<'tcx> { + pub fn resolve(self, fcx: &FnCtxt<'a, 'tcx>) { + debug!("DeferredCallResolution::resolve() {:?}", self); + + // we should not be invoked until the closure kind has been + // determined by upvar inference + assert!(fcx.closure_kind(self.closure_substs).is_some()); + + // We may now know enough to figure out fn vs fnmut etc. + match fcx.try_overloaded_call_traits(self.call_expr, self.adjusted_ty, None) { + Some((autoref, method_callee)) => { + // One problem is that when we get here, we are going + // to have a newly instantiated function signature + // from the call trait. This has to be reconciled with + // the older function signature we had before. In + // principle we *should* be able to fn_sigs(), but we + // can't because of the annoying need for a TypeTrace. + // (This always bites me, should find a way to + // refactor it.) + let method_sig = method_callee.sig; + + debug!("attempt_resolution: method_callee={:?}", method_callee); + + for (method_arg_ty, self_arg_ty) in + iter::zip(method_sig.inputs().iter().skip(1), self.fn_sig.inputs()) + { + fcx.demand_eqtype(self.call_expr.span, *self_arg_ty, *method_arg_ty); + } + + fcx.demand_eqtype(self.call_expr.span, method_sig.output(), self.fn_sig.output()); + + let mut adjustments = self.adjustments; + adjustments.extend(autoref); + fcx.apply_adjustments(self.callee_expr, adjustments); + + fcx.write_method_call(self.call_expr.hir_id, method_callee); + } + None => { + // This can happen if `#![no_core]` is used and the `fn/fn_mut/fn_once` + // lang items are not defined (issue #86238). + let mut err = fcx.inh.tcx.sess.struct_span_err( + self.call_expr.span, + "failed to find an overloaded call trait for closure call", + ); + err.help( + "make sure the `fn`/`fn_mut`/`fn_once` lang items are defined \ + and have associated `call`/`call_mut`/`call_once` functions", + ); + err.emit(); + } + } + } +} diff --git a/compiler/rustc_typeck/src/check/cast.rs b/compiler/rustc_typeck/src/check/cast.rs new file mode 100644 index 000000000..7aaddc2bd --- /dev/null +++ b/compiler/rustc_typeck/src/check/cast.rs @@ -0,0 +1,1072 @@ +//! Code for type-checking cast expressions. +//! +//! A cast `e as U` is valid if one of the following holds: +//! * `e` has type `T` and `T` coerces to `U`; *coercion-cast* +//! * `e` has type `*T`, `U` is `*U_0`, and either `U_0: Sized` or +//! pointer_kind(`T`) = pointer_kind(`U_0`); *ptr-ptr-cast* +//! 
* `e` has type `*T` and `U` is a numeric type, while `T: Sized`; *ptr-addr-cast* +//! * `e` is an integer and `U` is `*U_0`, while `U_0: Sized`; *addr-ptr-cast* +//! * `e` has type `T` and `T` and `U` are any numeric types; *numeric-cast* +//! * `e` is a C-like enum and `U` is an integer type; *enum-cast* +//! * `e` has type `bool` or `char` and `U` is an integer; *prim-int-cast* +//! * `e` has type `u8` and `U` is `char`; *u8-char-cast* +//! * `e` has type `&[T; n]` and `U` is `*const T`; *array-ptr-cast* +//! * `e` is a function pointer type and `U` has type `*T`, +//! while `T: Sized`; *fptr-ptr-cast* +//! * `e` is a function pointer type and `U` is an integer; *fptr-addr-cast* +//! +//! where `&.T` and `*T` are references of either mutability, +//! and where pointer_kind(`T`) is the kind of the unsize info +//! in `T` - the vtable for a trait definition (e.g., `fmt::Display` or +//! `Iterator`, not `Iterator`) or a length (or `()` if `T: Sized`). +//! +//! Note that lengths are not adjusted when casting raw slices - +//! `T: *const [u16] as *const [u8]` creates a slice that only includes +//! half of the original memory. +//! +//! Casting is not transitive, that is, even if `e as U1 as U2` is a valid +//! expression, `e as U2` is not necessarily so (in fact it will only be valid if +//! `U1` coerces to `U2`). + +use super::FnCtxt; + +use crate::hir::def_id::DefId; +use crate::type_error_struct; +use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed}; +use rustc_hir as hir; +use rustc_hir::lang_items::LangItem; +use rustc_middle::mir::Mutability; +use rustc_middle::ty::adjustment::AllowTwoPhase; +use rustc_middle::ty::cast::{CastKind, CastTy}; +use rustc_middle::ty::error::TypeError; +use rustc_middle::ty::subst::SubstsRef; +use rustc_middle::ty::{self, Ty, TypeAndMut, TypeVisitable}; +use rustc_session::lint; +use rustc_session::Session; +use rustc_span::symbol::sym; +use rustc_span::Span; +use rustc_trait_selection::infer::InferCtxtExt; +use rustc_trait_selection::traits; +use rustc_trait_selection::traits::error_reporting::report_object_safety_error; + +/// Reifies a cast check to be checked once we have full type information for +/// a function context. +#[derive(Debug)] +pub struct CastCheck<'tcx> { + expr: &'tcx hir::Expr<'tcx>, + expr_ty: Ty<'tcx>, + expr_span: Span, + cast_ty: Ty<'tcx>, + cast_span: Span, + span: Span, +} + +/// The kind of pointer and associated metadata (thin, length or vtable) - we +/// only allow casts between fat pointers if their metadata have the same +/// kind. +#[derive(Copy, Clone, PartialEq, Eq)] +enum PointerKind<'tcx> { + /// No metadata attached, ie pointer to sized type or foreign type + Thin, + /// A trait object + VTable(Option), + /// Slice + Length, + /// The unsize info of this projection + OfProjection(&'tcx ty::ProjectionTy<'tcx>), + /// The unsize info of this opaque ty + OfOpaque(DefId, SubstsRef<'tcx>), + /// The unsize info of this parameter + OfParam(&'tcx ty::ParamTy), +} + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + /// Returns the kind of unsize information of t, or None + /// if t is unknown. 
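+    ///
+    /// Illustrative examples (assumption, not part of the upstream doc):
+    /// a sized type such as `u32` or `*const u8` is `Thin`; `[u8]` and `str`
+    /// are `Length`; `dyn fmt::Display` is `VTable(_)`; and a bare
+    /// `T: ?Sized` type parameter is `OfParam(_)`.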
+ fn pointer_kind( + &self, + t: Ty<'tcx>, + span: Span, + ) -> Result>, ErrorGuaranteed> { + debug!("pointer_kind({:?}, {:?})", t, span); + + let t = self.resolve_vars_if_possible(t); + + if let Some(reported) = t.error_reported() { + return Err(reported); + } + + if self.type_is_known_to_be_sized_modulo_regions(t, span) { + return Ok(Some(PointerKind::Thin)); + } + + Ok(match *t.kind() { + ty::Slice(_) | ty::Str => Some(PointerKind::Length), + ty::Dynamic(ref tty, ..) => Some(PointerKind::VTable(tty.principal_def_id())), + ty::Adt(def, substs) if def.is_struct() => match def.non_enum_variant().fields.last() { + None => Some(PointerKind::Thin), + Some(f) => { + let field_ty = self.field_ty(span, f, substs); + self.pointer_kind(field_ty, span)? + } + }, + ty::Tuple(fields) => match fields.last() { + None => Some(PointerKind::Thin), + Some(&f) => self.pointer_kind(f, span)?, + }, + + // Pointers to foreign types are thin, despite being unsized + ty::Foreign(..) => Some(PointerKind::Thin), + // We should really try to normalize here. + ty::Projection(ref pi) => Some(PointerKind::OfProjection(pi)), + ty::Opaque(def_id, substs) => Some(PointerKind::OfOpaque(def_id, substs)), + ty::Param(ref p) => Some(PointerKind::OfParam(p)), + // Insufficient type information. + ty::Placeholder(..) | ty::Bound(..) | ty::Infer(_) => None, + + ty::Bool + | ty::Char + | ty::Int(..) + | ty::Uint(..) + | ty::Float(_) + | ty::Array(..) + | ty::GeneratorWitness(..) + | ty::RawPtr(_) + | ty::Ref(..) + | ty::FnDef(..) + | ty::FnPtr(..) + | ty::Closure(..) + | ty::Generator(..) + | ty::Adt(..) + | ty::Never + | ty::Error(_) => { + let reported = self + .tcx + .sess + .delay_span_bug(span, &format!("`{:?}` should be sized but is not?", t)); + return Err(reported); + } + }) + } +} + +#[derive(Copy, Clone)] +pub enum CastError { + ErrorGuaranteed, + + CastToBool, + CastToChar, + DifferingKinds, + /// Cast of thin to fat raw ptr (e.g., `*const () as *const [u8]`). + SizedUnsizedCast, + IllegalCast, + NeedDeref, + NeedViaPtr, + NeedViaThinPtr, + NeedViaInt, + NonScalar, + UnknownExprPtrKind, + UnknownCastPtrKind, + /// Cast of int to (possibly) fat raw pointer. + /// + /// Argument is the specific name of the metadata in plain words, such as "a vtable" + /// or "a length". If this argument is None, then the metadata is unknown, for example, + /// when we're typechecking a type parameter with a ?Sized bound. + IntToFatCast(Option<&'static str>), +} + +impl From for CastError { + fn from(_: ErrorGuaranteed) -> Self { + CastError::ErrorGuaranteed + } +} + +fn make_invalid_casting_error<'a, 'tcx>( + sess: &'a Session, + span: Span, + expr_ty: Ty<'tcx>, + cast_ty: Ty<'tcx>, + fcx: &FnCtxt<'a, 'tcx>, +) -> DiagnosticBuilder<'a, ErrorGuaranteed> { + type_error_struct!( + sess, + span, + expr_ty, + E0606, + "casting `{}` as `{}` is invalid", + fcx.ty_to_string(expr_ty), + fcx.ty_to_string(cast_ty) + ) +} + +impl<'a, 'tcx> CastCheck<'tcx> { + pub fn new( + fcx: &FnCtxt<'a, 'tcx>, + expr: &'tcx hir::Expr<'tcx>, + expr_ty: Ty<'tcx>, + cast_ty: Ty<'tcx>, + cast_span: Span, + span: Span, + ) -> Result, ErrorGuaranteed> { + let expr_span = expr.span.find_ancestor_inside(span).unwrap_or(expr.span); + let check = CastCheck { expr, expr_ty, expr_span, cast_ty, cast_span, span }; + + // For better error messages, check for some obviously unsized + // cases now. We do a more thorough check at the end, once + // inference is more completely known. + match cast_ty.kind() { + ty::Dynamic(..) | ty::Slice(..) 
=> { + let reported = check.report_cast_to_unsized_type(fcx); + Err(reported) + } + _ => Ok(check), + } + } + + fn report_cast_error(&self, fcx: &FnCtxt<'a, 'tcx>, e: CastError) { + match e { + CastError::ErrorGuaranteed => { + // an error has already been reported + } + CastError::NeedDeref => { + let error_span = self.span; + let mut err = make_invalid_casting_error( + fcx.tcx.sess, + self.span, + self.expr_ty, + self.cast_ty, + fcx, + ); + let cast_ty = fcx.ty_to_string(self.cast_ty); + err.span_label( + error_span, + format!("cannot cast `{}` as `{}`", fcx.ty_to_string(self.expr_ty), cast_ty), + ); + if let Ok(snippet) = fcx.sess().source_map().span_to_snippet(self.expr_span) { + err.span_suggestion( + self.expr_span, + "dereference the expression", + format!("*{}", snippet), + Applicability::MaybeIncorrect, + ); + } else { + err.span_help(self.expr_span, "dereference the expression with `*`"); + } + err.emit(); + } + CastError::NeedViaThinPtr | CastError::NeedViaPtr => { + let mut err = make_invalid_casting_error( + fcx.tcx.sess, + self.span, + self.expr_ty, + self.cast_ty, + fcx, + ); + if self.cast_ty.is_integral() { + err.help(&format!( + "cast through {} first", + match e { + CastError::NeedViaPtr => "a raw pointer", + CastError::NeedViaThinPtr => "a thin pointer", + _ => bug!(), + } + )); + } + err.emit(); + } + CastError::NeedViaInt => { + make_invalid_casting_error( + fcx.tcx.sess, + self.span, + self.expr_ty, + self.cast_ty, + fcx, + ) + .help(&format!( + "cast through {} first", + match e { + CastError::NeedViaInt => "an integer", + _ => bug!(), + } + )) + .emit(); + } + CastError::IllegalCast => { + make_invalid_casting_error( + fcx.tcx.sess, + self.span, + self.expr_ty, + self.cast_ty, + fcx, + ) + .emit(); + } + CastError::DifferingKinds => { + make_invalid_casting_error( + fcx.tcx.sess, + self.span, + self.expr_ty, + self.cast_ty, + fcx, + ) + .note("vtable kinds may not match") + .emit(); + } + CastError::CastToBool => { + let mut err = + struct_span_err!(fcx.tcx.sess, self.span, E0054, "cannot cast as `bool`"); + + if self.expr_ty.is_numeric() { + match fcx.tcx.sess.source_map().span_to_snippet(self.expr_span) { + Ok(snippet) => { + err.span_suggestion( + self.span, + "compare with zero instead", + format!("{snippet} != 0"), + Applicability::MachineApplicable, + ); + } + Err(_) => { + err.span_help(self.span, "compare with zero instead"); + } + } + } else { + err.span_label(self.span, "unsupported cast"); + } + + err.emit(); + } + CastError::CastToChar => { + let mut err = type_error_struct!( + fcx.tcx.sess, + self.span, + self.expr_ty, + E0604, + "only `u8` can be cast as `char`, not `{}`", + self.expr_ty + ); + err.span_label(self.span, "invalid cast"); + if self.expr_ty.is_numeric() { + if self.expr_ty == fcx.tcx.types.u32 { + match fcx.tcx.sess.source_map().span_to_snippet(self.expr.span) { + Ok(snippet) => err.span_suggestion( + self.span, + "try `char::from_u32` instead", + format!("char::from_u32({snippet})"), + Applicability::MachineApplicable, + ), + + Err(_) => err.span_help(self.span, "try `char::from_u32` instead"), + }; + } else if self.expr_ty == fcx.tcx.types.i8 { + err.span_help(self.span, "try casting from `u8` instead"); + } else { + err.span_help(self.span, "try `char::from_u32` instead (via a `u32`)"); + }; + } + err.emit(); + } + CastError::NonScalar => { + let mut err = type_error_struct!( + fcx.tcx.sess, + self.span, + self.expr_ty, + E0605, + "non-primitive cast: `{}` as `{}`", + self.expr_ty, + fcx.ty_to_string(self.cast_ty) + ); + let mut sugg 
= None; + let mut sugg_mutref = false; + if let ty::Ref(reg, cast_ty, mutbl) = *self.cast_ty.kind() { + if let ty::RawPtr(TypeAndMut { ty: expr_ty, .. }) = *self.expr_ty.kind() + && fcx + .try_coerce( + self.expr, + fcx.tcx.mk_ref( + fcx.tcx.lifetimes.re_erased, + TypeAndMut { ty: expr_ty, mutbl }, + ), + self.cast_ty, + AllowTwoPhase::No, + None, + ) + .is_ok() + { + sugg = Some((format!("&{}*", mutbl.prefix_str()), cast_ty == expr_ty)); + } else if let ty::Ref(expr_reg, expr_ty, expr_mutbl) = *self.expr_ty.kind() + && expr_mutbl == Mutability::Not + && mutbl == Mutability::Mut + && fcx + .try_coerce( + self.expr, + fcx.tcx.mk_ref( + expr_reg, + TypeAndMut { ty: expr_ty, mutbl: Mutability::Mut }, + ), + self.cast_ty, + AllowTwoPhase::No, + None, + ) + .is_ok() + { + sugg_mutref = true; + } + + if !sugg_mutref + && sugg == None + && fcx + .try_coerce( + self.expr, + fcx.tcx.mk_ref(reg, TypeAndMut { ty: self.expr_ty, mutbl }), + self.cast_ty, + AllowTwoPhase::No, + None, + ) + .is_ok() + { + sugg = Some((format!("&{}", mutbl.prefix_str()), false)); + } + } else if let ty::RawPtr(TypeAndMut { mutbl, .. }) = *self.cast_ty.kind() + && fcx + .try_coerce( + self.expr, + fcx.tcx.mk_ref( + fcx.tcx.lifetimes.re_erased, + TypeAndMut { ty: self.expr_ty, mutbl }, + ), + self.cast_ty, + AllowTwoPhase::No, + None, + ) + .is_ok() + { + sugg = Some((format!("&{}", mutbl.prefix_str()), false)); + } + if sugg_mutref { + err.span_label(self.span, "invalid cast"); + err.span_note(self.expr_span, "this reference is immutable"); + err.span_note(self.cast_span, "trying to cast to a mutable reference type"); + } else if let Some((sugg, remove_cast)) = sugg { + err.span_label(self.span, "invalid cast"); + + let has_parens = fcx + .tcx + .sess + .source_map() + .span_to_snippet(self.expr_span) + .map_or(false, |snip| snip.starts_with('(')); + + // Very crude check to see whether the expression must be wrapped + // in parentheses for the suggestion to work (issue #89497). + // Can/should be extended in the future. + let needs_parens = + !has_parens && matches!(self.expr.kind, hir::ExprKind::Cast(..)); + + let mut suggestion = vec![(self.expr_span.shrink_to_lo(), sugg)]; + if needs_parens { + suggestion[0].1 += "("; + suggestion.push((self.expr_span.shrink_to_hi(), ")".to_string())); + } + if remove_cast { + suggestion.push(( + self.expr_span.shrink_to_hi().to(self.cast_span), + String::new(), + )); + } + + err.multipart_suggestion_verbose( + "consider borrowing the value", + suggestion, + Applicability::MachineApplicable, + ); + } else if !matches!( + self.cast_ty.kind(), + ty::FnDef(..) | ty::FnPtr(..) | ty::Closure(..) + ) { + let mut label = true; + // Check `impl From for self.cast_ty {}` for accurate suggestion: + if let Ok(snippet) = fcx.tcx.sess.source_map().span_to_snippet(self.expr_span) + && let Some(from_trait) = fcx.tcx.get_diagnostic_item(sym::From) + { + let ty = fcx.resolve_vars_if_possible(self.cast_ty); + // Erase regions to avoid panic in `prove_value` when calling + // `type_implements_trait`. 
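+                        // Illustrative example (assumption, not from the
+                        // upstream comment): with `struct Meters(u32)` and
+                        // `impl From<u32> for Meters`, the non-primitive cast
+                        // `5u32 as Meters` gets the suggestion
+                        // `Meters::from(5u32)` from the check below.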
+ let ty = fcx.tcx.erase_regions(ty); + let expr_ty = fcx.resolve_vars_if_possible(self.expr_ty); + let expr_ty = fcx.tcx.erase_regions(expr_ty); + let ty_params = fcx.tcx.mk_substs_trait(expr_ty, &[]); + if fcx + .infcx + .type_implements_trait(from_trait, ty, ty_params, fcx.param_env) + .must_apply_modulo_regions() + { + label = false; + err.span_suggestion( + self.span, + "consider using the `From` trait instead", + format!("{}::from({})", self.cast_ty, snippet), + Applicability::MaybeIncorrect, + ); + } + } + let msg = "an `as` expression can only be used to convert between primitive \ + types or to coerce to a specific trait object"; + if label { + err.span_label(self.span, msg); + } else { + err.note(msg); + } + } else { + err.span_label(self.span, "invalid cast"); + } + err.emit(); + } + CastError::SizedUnsizedCast => { + use crate::structured_errors::{SizedUnsizedCast, StructuredDiagnostic}; + + SizedUnsizedCast { + sess: &fcx.tcx.sess, + span: self.span, + expr_ty: self.expr_ty, + cast_ty: fcx.ty_to_string(self.cast_ty), + } + .diagnostic() + .emit(); + } + CastError::IntToFatCast(known_metadata) => { + let mut err = struct_span_err!( + fcx.tcx.sess, + self.cast_span, + E0606, + "cannot cast `{}` to a pointer that {} wide", + fcx.ty_to_string(self.expr_ty), + if known_metadata.is_some() { "is" } else { "may be" } + ); + + err.span_label( + self.cast_span, + format!( + "creating a `{}` requires both an address and {}", + self.cast_ty, + known_metadata.unwrap_or("type-specific metadata"), + ), + ); + + if fcx.tcx.sess.is_nightly_build() { + err.span_label( + self.expr_span, + "consider casting this expression to `*const ()`, \ + then using `core::ptr::from_raw_parts`", + ); + } + + err.emit(); + } + CastError::UnknownCastPtrKind | CastError::UnknownExprPtrKind => { + let unknown_cast_to = match e { + CastError::UnknownCastPtrKind => true, + CastError::UnknownExprPtrKind => false, + _ => bug!(), + }; + let mut err = struct_span_err!( + fcx.tcx.sess, + if unknown_cast_to { self.cast_span } else { self.span }, + E0641, + "cannot cast {} a pointer of an unknown kind", + if unknown_cast_to { "to" } else { "from" } + ); + if unknown_cast_to { + err.span_label(self.cast_span, "needs more type information"); + err.note( + "the type information given here is insufficient to check whether \ + the pointer cast is valid", + ); + } else { + err.span_label( + self.span, + "the type information given here is insufficient to check whether \ + the pointer cast is valid", + ); + } + err.emit(); + } + } + } + + fn report_cast_to_unsized_type(&self, fcx: &FnCtxt<'a, 'tcx>) -> ErrorGuaranteed { + if let Some(reported) = + self.cast_ty.error_reported().or_else(|| self.expr_ty.error_reported()) + { + return reported; + } + + let tstr = fcx.ty_to_string(self.cast_ty); + let mut err = type_error_struct!( + fcx.tcx.sess, + self.span, + self.expr_ty, + E0620, + "cast to unsized type: `{}` as `{}`", + fcx.resolve_vars_if_possible(self.expr_ty), + tstr + ); + match self.expr_ty.kind() { + ty::Ref(_, _, mt) => { + let mtstr = mt.prefix_str(); + if self.cast_ty.is_trait() { + match fcx.tcx.sess.source_map().span_to_snippet(self.cast_span) { + Ok(s) => { + err.span_suggestion( + self.cast_span, + "try casting to a reference instead", + format!("&{}{}", mtstr, s), + Applicability::MachineApplicable, + ); + } + Err(_) => { + let msg = &format!("did you mean `&{}{}`?", mtstr, tstr); + err.span_help(self.cast_span, msg); + } + } + } else { + let msg = + &format!("consider using an implicit coercion to `&{mtstr}{tstr}` 
instead"); + err.span_help(self.span, msg); + } + } + ty::Adt(def, ..) if def.is_box() => { + match fcx.tcx.sess.source_map().span_to_snippet(self.cast_span) { + Ok(s) => { + err.span_suggestion( + self.cast_span, + "you can cast to a `Box` instead", + format!("Box<{s}>"), + Applicability::MachineApplicable, + ); + } + Err(_) => { + err.span_help( + self.cast_span, + &format!("you might have meant `Box<{tstr}>`"), + ); + } + } + } + _ => { + err.span_help(self.expr_span, "consider using a box or reference as appropriate"); + } + } + err.emit() + } + + fn trivial_cast_lint(&self, fcx: &FnCtxt<'a, 'tcx>) { + let t_cast = self.cast_ty; + let t_expr = self.expr_ty; + let type_asc_or = + if fcx.tcx.features().type_ascription { "type ascription or " } else { "" }; + let (adjective, lint) = if t_cast.is_numeric() && t_expr.is_numeric() { + ("numeric ", lint::builtin::TRIVIAL_NUMERIC_CASTS) + } else { + ("", lint::builtin::TRIVIAL_CASTS) + }; + fcx.tcx.struct_span_lint_hir(lint, self.expr.hir_id, self.span, |err| { + err.build(&format!( + "trivial {}cast: `{}` as `{}`", + adjective, + fcx.ty_to_string(t_expr), + fcx.ty_to_string(t_cast) + )) + .help(&format!( + "cast can be replaced by coercion; this might \ + require {type_asc_or}a temporary variable" + )) + .emit(); + }); + } + + #[instrument(skip(fcx), level = "debug")] + pub fn check(mut self, fcx: &FnCtxt<'a, 'tcx>) { + self.expr_ty = fcx.structurally_resolved_type(self.expr_span, self.expr_ty); + self.cast_ty = fcx.structurally_resolved_type(self.cast_span, self.cast_ty); + + debug!("check_cast({}, {:?} as {:?})", self.expr.hir_id, self.expr_ty, self.cast_ty); + + if !fcx.type_is_known_to_be_sized_modulo_regions(self.cast_ty, self.span) + && !self.cast_ty.has_infer_types() + { + self.report_cast_to_unsized_type(fcx); + } else if self.expr_ty.references_error() || self.cast_ty.references_error() { + // No sense in giving duplicate error messages + } else { + match self.try_coercion_cast(fcx) { + Ok(()) => { + self.trivial_cast_lint(fcx); + debug!(" -> CoercionCast"); + fcx.typeck_results.borrow_mut().set_coercion_cast(self.expr.hir_id.local_id); + } + Err(ty::error::TypeError::ObjectUnsafeCoercion(did)) => { + self.report_object_unsafe_cast(&fcx, did); + } + Err(_) => { + match self.do_check(fcx) { + Ok(k) => { + debug!(" -> {:?}", k); + } + Err(e) => self.report_cast_error(fcx, e), + }; + } + }; + } + } + + fn report_object_unsafe_cast(&self, fcx: &FnCtxt<'a, 'tcx>, did: DefId) { + let violations = fcx.tcx.object_safety_violations(did); + let mut err = report_object_safety_error(fcx.tcx, self.cast_span, did, violations); + err.note(&format!("required by cast to type '{}'", fcx.ty_to_string(self.cast_ty))); + err.emit(); + } + + /// Checks a cast, and report an error if one exists. In some cases, this + /// can return Ok and create type errors in the fcx rather than returning + /// directly. coercion-cast is handled in check instead of here. + pub fn do_check(&self, fcx: &FnCtxt<'a, 'tcx>) -> Result { + use rustc_middle::ty::cast::CastTy::*; + use rustc_middle::ty::cast::IntTy::*; + + let (t_from, t_cast) = match (CastTy::from_ty(self.expr_ty), CastTy::from_ty(self.cast_ty)) + { + (Some(t_from), Some(t_cast)) => (t_from, t_cast), + // Function item types may need to be reified before casts. + (None, Some(t_cast)) => { + match *self.expr_ty.kind() { + ty::FnDef(..) => { + // Attempt a coercion to a fn pointer type. 
+ let f = fcx.normalize_associated_types_in( + self.expr_span, + self.expr_ty.fn_sig(fcx.tcx), + ); + let res = fcx.try_coerce( + self.expr, + self.expr_ty, + fcx.tcx.mk_fn_ptr(f), + AllowTwoPhase::No, + None, + ); + if let Err(TypeError::IntrinsicCast) = res { + return Err(CastError::IllegalCast); + } + if res.is_err() { + return Err(CastError::NonScalar); + } + (FnPtr, t_cast) + } + // Special case some errors for references, and check for + // array-ptr-casts. `Ref` is not a CastTy because the cast + // is split into a coercion to a pointer type, followed by + // a cast. + ty::Ref(_, inner_ty, mutbl) => { + return match t_cast { + Int(_) | Float => match *inner_ty.kind() { + ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(_)) => { + Err(CastError::NeedDeref) + } + _ => Err(CastError::NeedViaPtr), + }, + // array-ptr-cast + Ptr(mt) => { + self.check_ref_cast(fcx, TypeAndMut { mutbl, ty: inner_ty }, mt) + } + _ => Err(CastError::NonScalar), + }; + } + _ => return Err(CastError::NonScalar), + } + } + _ => return Err(CastError::NonScalar), + }; + + match (t_from, t_cast) { + // These types have invariants! can't cast into them. + (_, Int(CEnum) | FnPtr) => Err(CastError::NonScalar), + + // * -> Bool + (_, Int(Bool)) => Err(CastError::CastToBool), + + // * -> Char + (Int(U(ty::UintTy::U8)), Int(Char)) => Ok(CastKind::U8CharCast), // u8-char-cast + (_, Int(Char)) => Err(CastError::CastToChar), + + // prim -> float,ptr + (Int(Bool) | Int(CEnum) | Int(Char), Float) => Err(CastError::NeedViaInt), + + (Int(Bool) | Int(CEnum) | Int(Char) | Float, Ptr(_)) | (Ptr(_) | FnPtr, Float) => { + Err(CastError::IllegalCast) + } + + // ptr -> * + (Ptr(m_e), Ptr(m_c)) => self.check_ptr_ptr_cast(fcx, m_e, m_c), // ptr-ptr-cast + + // ptr-addr-cast + (Ptr(m_expr), Int(t_c)) => { + self.lossy_provenance_ptr2int_lint(fcx, t_c); + self.check_ptr_addr_cast(fcx, m_expr) + } + (FnPtr, Int(_)) => { + // FIXME(#95489): there should eventually be a lint for these casts + Ok(CastKind::FnPtrAddrCast) + } + // addr-ptr-cast + (Int(_), Ptr(mt)) => { + self.fuzzy_provenance_int2ptr_lint(fcx); + self.check_addr_ptr_cast(fcx, mt) + } + // fn-ptr-cast + (FnPtr, Ptr(mt)) => self.check_fptr_ptr_cast(fcx, mt), + + // prim -> prim + (Int(CEnum), Int(_)) => { + self.cenum_impl_drop_lint(fcx); + Ok(CastKind::EnumCast) + } + (Int(Char) | Int(Bool), Int(_)) => Ok(CastKind::PrimIntCast), + + (Int(_) | Float, Int(_) | Float) => Ok(CastKind::NumericCast), + } + } + + fn check_ptr_ptr_cast( + &self, + fcx: &FnCtxt<'a, 'tcx>, + m_expr: ty::TypeAndMut<'tcx>, + m_cast: ty::TypeAndMut<'tcx>, + ) -> Result { + debug!("check_ptr_ptr_cast m_expr={:?} m_cast={:?}", m_expr, m_cast); + // ptr-ptr cast. vtables must match. + + let expr_kind = fcx.pointer_kind(m_expr.ty, self.span)?; + let cast_kind = fcx.pointer_kind(m_cast.ty, self.span)?; + + let Some(cast_kind) = cast_kind else { + // We can't cast if target pointer kind is unknown + return Err(CastError::UnknownCastPtrKind); + }; + + // Cast to thin pointer is OK + if cast_kind == PointerKind::Thin { + return Ok(CastKind::PtrPtrCast); + } + + let Some(expr_kind) = expr_kind else { + // We can't cast to fat pointer if source pointer kind is unknown + return Err(CastError::UnknownExprPtrKind); + }; + + // thin -> fat? 
report invalid cast (don't complain about vtable kinds) + if expr_kind == PointerKind::Thin { + return Err(CastError::SizedUnsizedCast); + } + + // vtable kinds must match + if cast_kind == expr_kind { + Ok(CastKind::PtrPtrCast) + } else { + Err(CastError::DifferingKinds) + } + } + + fn check_fptr_ptr_cast( + &self, + fcx: &FnCtxt<'a, 'tcx>, + m_cast: ty::TypeAndMut<'tcx>, + ) -> Result { + // fptr-ptr cast. must be to thin ptr + + match fcx.pointer_kind(m_cast.ty, self.span)? { + None => Err(CastError::UnknownCastPtrKind), + Some(PointerKind::Thin) => Ok(CastKind::FnPtrPtrCast), + _ => Err(CastError::IllegalCast), + } + } + + fn check_ptr_addr_cast( + &self, + fcx: &FnCtxt<'a, 'tcx>, + m_expr: ty::TypeAndMut<'tcx>, + ) -> Result { + // ptr-addr cast. must be from thin ptr + + match fcx.pointer_kind(m_expr.ty, self.span)? { + None => Err(CastError::UnknownExprPtrKind), + Some(PointerKind::Thin) => Ok(CastKind::PtrAddrCast), + _ => Err(CastError::NeedViaThinPtr), + } + } + + fn check_ref_cast( + &self, + fcx: &FnCtxt<'a, 'tcx>, + m_expr: ty::TypeAndMut<'tcx>, + m_cast: ty::TypeAndMut<'tcx>, + ) -> Result { + // array-ptr-cast: allow mut-to-mut, mut-to-const, const-to-const + if m_expr.mutbl == hir::Mutability::Mut || m_cast.mutbl == hir::Mutability::Not { + if let ty::Array(ety, _) = m_expr.ty.kind() { + // Due to the limitations of LLVM global constants, + // region pointers end up pointing at copies of + // vector elements instead of the original values. + // To allow raw pointers to work correctly, we + // need to special-case obtaining a raw pointer + // from a region pointer to a vector. + + // Coerce to a raw pointer so that we generate AddressOf in MIR. + let array_ptr_type = fcx.tcx.mk_ptr(m_expr); + fcx.try_coerce(self.expr, self.expr_ty, array_ptr_type, AllowTwoPhase::No, None) + .unwrap_or_else(|_| { + bug!( + "could not cast from reference to array to pointer to array ({:?} to {:?})", + self.expr_ty, + array_ptr_type, + ) + }); + + // this will report a type mismatch if needed + fcx.demand_eqtype(self.span, *ety, m_cast.ty); + return Ok(CastKind::ArrayPtrCast); + } + } + + Err(CastError::IllegalCast) + } + + fn check_addr_ptr_cast( + &self, + fcx: &FnCtxt<'a, 'tcx>, + m_cast: TypeAndMut<'tcx>, + ) -> Result { + // ptr-addr cast. pointer must be thin. + match fcx.pointer_kind(m_cast.ty, self.span)? 
{ + None => Err(CastError::UnknownCastPtrKind), + Some(PointerKind::Thin) => Ok(CastKind::AddrPtrCast), + Some(PointerKind::VTable(_)) => Err(CastError::IntToFatCast(Some("a vtable"))), + Some(PointerKind::Length) => Err(CastError::IntToFatCast(Some("a length"))), + Some( + PointerKind::OfProjection(_) + | PointerKind::OfOpaque(_, _) + | PointerKind::OfParam(_), + ) => Err(CastError::IntToFatCast(None)), + } + } + + fn try_coercion_cast(&self, fcx: &FnCtxt<'a, 'tcx>) -> Result<(), ty::error::TypeError<'tcx>> { + match fcx.try_coerce(self.expr, self.expr_ty, self.cast_ty, AllowTwoPhase::No, None) { + Ok(_) => Ok(()), + Err(err) => Err(err), + } + } + + fn cenum_impl_drop_lint(&self, fcx: &FnCtxt<'a, 'tcx>) { + if let ty::Adt(d, _) = self.expr_ty.kind() + && d.has_dtor(fcx.tcx) + { + fcx.tcx.struct_span_lint_hir( + lint::builtin::CENUM_IMPL_DROP_CAST, + self.expr.hir_id, + self.span, + |err| { + err.build(&format!( + "cannot cast enum `{}` into integer `{}` because it implements `Drop`", + self.expr_ty, self.cast_ty + )) + .emit(); + }, + ); + } + } + + fn lossy_provenance_ptr2int_lint(&self, fcx: &FnCtxt<'a, 'tcx>, t_c: ty::cast::IntTy) { + fcx.tcx.struct_span_lint_hir( + lint::builtin::LOSSY_PROVENANCE_CASTS, + self.expr.hir_id, + self.span, + |err| { + let mut err = err.build(&format!( + "under strict provenance it is considered bad style to cast pointer `{}` to integer `{}`", + self.expr_ty, self.cast_ty + )); + + let msg = "use `.addr()` to obtain the address of a pointer"; + + let expr_prec = self.expr.precedence().order(); + let needs_parens = expr_prec < rustc_ast::util::parser::PREC_POSTFIX; + + let scalar_cast = match t_c { + ty::cast::IntTy::U(ty::UintTy::Usize) => String::new(), + _ => format!(" as {}", self.cast_ty), + }; + + let cast_span = self.expr_span.shrink_to_hi().to(self.cast_span); + + if needs_parens { + let suggestions = vec![ + (self.expr_span.shrink_to_lo(), String::from("(")), + (cast_span, format!(").addr(){scalar_cast}")), + ]; + + err.multipart_suggestion(msg, suggestions, Applicability::MaybeIncorrect); + } else { + err.span_suggestion( + cast_span, + msg, + format!(".addr(){scalar_cast}"), + Applicability::MaybeIncorrect, + ); + } + + err.help( + "if you can't comply with strict provenance and need to expose the pointer \ + provenance you can use `.expose_addr()` instead" + ); + + err.emit(); + }, + ); + } + + fn fuzzy_provenance_int2ptr_lint(&self, fcx: &FnCtxt<'a, 'tcx>) { + fcx.tcx.struct_span_lint_hir( + lint::builtin::FUZZY_PROVENANCE_CASTS, + self.expr.hir_id, + self.span, + |err| { + let mut err = err.build(&format!( + "strict provenance disallows casting integer `{}` to pointer `{}`", + self.expr_ty, self.cast_ty + )); + let msg = "use `.with_addr()` to adjust a valid pointer in the same allocation, to this address"; + let suggestions = vec![ + (self.expr_span.shrink_to_lo(), String::from("(...).with_addr(")), + (self.expr_span.shrink_to_hi().to(self.cast_span), String::from(")")), + ]; + + err.multipart_suggestion(msg, suggestions, Applicability::MaybeIncorrect); + err.help( + "if you can't comply with strict provenance and don't have a pointer with \ + the correct provenance you can use `std::ptr::from_exposed_addr()` instead" + ); + + err.emit(); + }, + ); + } +} + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + fn type_is_known_to_be_sized_modulo_regions(&self, ty: Ty<'tcx>, span: Span) -> bool { + let lang_item = self.tcx.require_lang_item(LangItem::Sized, None); + traits::type_known_to_meet_bound_modulo_regions(self, self.param_env, ty, lang_item, span) + } 
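+
+    // Note (illustrative, assumption; not part of the upstream sources): this
+    // helper backs the thin-pointer requirements above, e.g. casting
+    // `*const u8 as usize` is accepted because `u8: Sized`, while
+    // `*const str as usize` is reported as needing a cast through a thin
+    // pointer first.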
+} diff --git a/compiler/rustc_typeck/src/check/check.rs b/compiler/rustc_typeck/src/check/check.rs new file mode 100644 index 000000000..9c1fd9b30 --- /dev/null +++ b/compiler/rustc_typeck/src/check/check.rs @@ -0,0 +1,1712 @@ +use crate::check::intrinsicck::InlineAsmCtxt; + +use super::coercion::CoerceMany; +use super::compare_method::check_type_bounds; +use super::compare_method::{compare_const_impl, compare_impl_method, compare_ty_impl}; +use super::*; +use rustc_attr as attr; +use rustc_errors::{Applicability, ErrorGuaranteed, MultiSpan}; +use rustc_hir as hir; +use rustc_hir::def::{DefKind, Res}; +use rustc_hir::def_id::{DefId, LocalDefId}; +use rustc_hir::intravisit::Visitor; +use rustc_hir::lang_items::LangItem; +use rustc_hir::{ItemKind, Node, PathSegment}; +use rustc_infer::infer::outlives::env::OutlivesEnvironment; +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_infer::infer::{DefiningAnchor, RegionVariableOrigin, TyCtxtInferExt}; +use rustc_infer::traits::Obligation; +use rustc_lint::builtin::REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS; +use rustc_middle::hir::nested_filter; +use rustc_middle::ty::layout::{LayoutError, MAX_SIMD_LANES}; +use rustc_middle::ty::subst::GenericArgKind; +use rustc_middle::ty::util::{Discr, IntTypeExt}; +use rustc_middle::ty::{ + self, ParamEnv, ToPredicate, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, +}; +use rustc_session::lint::builtin::{UNINHABITED_STATIC, UNSUPPORTED_CALLING_CONVENTIONS}; +use rustc_span::symbol::sym; +use rustc_span::{self, Span}; +use rustc_target::spec::abi::Abi; +use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _; +use rustc_trait_selection::traits::{self, ObligationCtxt}; +use rustc_ty_utils::representability::{self, Representability}; + +use std::iter; +use std::ops::ControlFlow; + +pub(super) fn check_abi(tcx: TyCtxt<'_>, hir_id: hir::HirId, span: Span, abi: Abi) { + match tcx.sess.target.is_abi_supported(abi) { + Some(true) => (), + Some(false) => { + struct_span_err!( + tcx.sess, + span, + E0570, + "`{abi}` is not a supported ABI for the current target", + ) + .emit(); + } + None => { + tcx.struct_span_lint_hir(UNSUPPORTED_CALLING_CONVENTIONS, hir_id, span, |lint| { + lint.build("use of calling convention not supported on this target").emit(); + }); + } + } + + // This ABI is only allowed on function pointers + if abi == Abi::CCmseNonSecureCall { + struct_span_err!( + tcx.sess, + span, + E0781, + "the `\"C-cmse-nonsecure-call\"` ABI is only allowed on function pointers" + ) + .emit(); + } +} + +/// Helper used for fns and closures. Does the grungy work of checking a function +/// body and returns the function context used for that purpose, since in the case of a fn item +/// there is still a bit more to do. +/// +/// * ... +/// * inherited: other fields inherited from the enclosing fn (if any) +#[instrument(skip(inherited, body), level = "debug")] +pub(super) fn check_fn<'a, 'tcx>( + inherited: &'a Inherited<'a, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + fn_sig: ty::FnSig<'tcx>, + decl: &'tcx hir::FnDecl<'tcx>, + fn_id: hir::HirId, + body: &'tcx hir::Body<'tcx>, + can_be_generator: Option, + return_type_pre_known: bool, +) -> (FnCtxt<'a, 'tcx>, Option>) { + // Create the function context. This is either derived from scratch or, + // in the case of closures, based on the outer context. 
+ let mut fcx = FnCtxt::new(inherited, param_env, body.value.hir_id); + fcx.ps.set(UnsafetyState::function(fn_sig.unsafety, fn_id)); + fcx.return_type_pre_known = return_type_pre_known; + + let tcx = fcx.tcx; + let hir = tcx.hir(); + + let declared_ret_ty = fn_sig.output(); + + let ret_ty = + fcx.register_infer_ok_obligations(fcx.infcx.replace_opaque_types_with_inference_vars( + declared_ret_ty, + body.value.hir_id, + decl.output.span(), + param_env, + )); + // If we replaced declared_ret_ty with infer vars, then we must be infering + // an opaque type, so set a flag so we can improve diagnostics. + fcx.return_type_has_opaque = ret_ty != declared_ret_ty; + + fcx.ret_coercion = Some(RefCell::new(CoerceMany::new(ret_ty))); + fcx.ret_type_span = Some(decl.output.span()); + + let span = body.value.span; + + fn_maybe_err(tcx, span, fn_sig.abi); + + if fn_sig.abi == Abi::RustCall { + let expected_args = if let ImplicitSelfKind::None = decl.implicit_self { 1 } else { 2 }; + + let err = || { + let item = match tcx.hir().get(fn_id) { + Node::Item(hir::Item { kind: ItemKind::Fn(header, ..), .. }) => Some(header), + Node::ImplItem(hir::ImplItem { + kind: hir::ImplItemKind::Fn(header, ..), .. + }) => Some(header), + Node::TraitItem(hir::TraitItem { + kind: hir::TraitItemKind::Fn(header, ..), + .. + }) => Some(header), + // Closures are RustCall, but they tuple their arguments, so shouldn't be checked + Node::Expr(hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => None, + node => bug!("Item being checked wasn't a function/closure: {:?}", node), + }; + + if let Some(header) = item { + tcx.sess.span_err(header.span, "functions with the \"rust-call\" ABI must take a single non-self argument that is a tuple"); + } + }; + + if fn_sig.inputs().len() != expected_args { + err() + } else { + // FIXME(CraftSpider) Add a check on parameter expansion, so we don't just make the ICE happen later on + // This will probably require wide-scale changes to support a TupleKind obligation + // We can't resolve this without knowing the type of the param + if !matches!(fn_sig.inputs()[expected_args - 1].kind(), ty::Tuple(_) | ty::Param(_)) { + err() + } + } + } + + if body.generator_kind.is_some() && can_be_generator.is_some() { + let yield_ty = fcx + .next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span }); + fcx.require_type_is_sized(yield_ty, span, traits::SizedYieldType); + + // Resume type defaults to `()` if the generator has no argument. + let resume_ty = fn_sig.inputs().get(0).copied().unwrap_or_else(|| tcx.mk_unit()); + + fcx.resume_yield_tys = Some((resume_ty, yield_ty)); + } + + GatherLocalsVisitor::new(&fcx).visit_body(body); + + // C-variadic fns also have a `VaList` input that's not listed in `fn_sig` + // (as it's created inside the body itself, not passed in from outside). + let maybe_va_list = if fn_sig.c_variadic { + let span = body.params.last().unwrap().span; + let va_list_did = tcx.require_lang_item(LangItem::VaList, Some(span)); + let region = fcx.next_region_var(RegionVariableOrigin::MiscVariable(span)); + + Some(tcx.bound_type_of(va_list_did).subst(tcx, &[region.into()])) + } else { + None + }; + + // Add formal parameters. + let inputs_hir = hir.fn_decl_by_hir_id(fn_id).map(|decl| &decl.inputs); + let inputs_fn = fn_sig.inputs().iter().copied(); + for (idx, (param_ty, param)) in inputs_fn.chain(maybe_va_list).zip(body.params).enumerate() { + // Check the pattern. 
+ let ty_span = try { inputs_hir?.get(idx)?.span }; + fcx.check_pat_top(¶m.pat, param_ty, ty_span, false); + + // Check that argument is Sized. + // The check for a non-trivial pattern is a hack to avoid duplicate warnings + // for simple cases like `fn foo(x: Trait)`, + // where we would error once on the parameter as a whole, and once on the binding `x`. + if param.pat.simple_ident().is_none() && !tcx.features().unsized_fn_params { + fcx.require_type_is_sized(param_ty, param.pat.span, traits::SizedArgumentType(ty_span)); + } + + fcx.write_ty(param.hir_id, param_ty); + } + + inherited.typeck_results.borrow_mut().liberated_fn_sigs_mut().insert(fn_id, fn_sig); + + fcx.in_tail_expr = true; + if let ty::Dynamic(..) = declared_ret_ty.kind() { + // FIXME: We need to verify that the return type is `Sized` after the return expression has + // been evaluated so that we have types available for all the nodes being returned, but that + // requires the coerced evaluated type to be stored. Moving `check_return_expr` before this + // causes unsized errors caused by the `declared_ret_ty` to point at the return expression, + // while keeping the current ordering we will ignore the tail expression's type because we + // don't know it yet. We can't do `check_expr_kind` while keeping `check_return_expr` + // because we will trigger "unreachable expression" lints unconditionally. + // Because of all of this, we perform a crude check to know whether the simplest `!Sized` + // case that a newcomer might make, returning a bare trait, and in that case we populate + // the tail expression's type so that the suggestion will be correct, but ignore all other + // possible cases. + fcx.check_expr(&body.value); + fcx.require_type_is_sized(declared_ret_ty, decl.output.span(), traits::SizedReturnType); + } else { + fcx.require_type_is_sized(declared_ret_ty, decl.output.span(), traits::SizedReturnType); + fcx.check_return_expr(&body.value, false); + } + fcx.in_tail_expr = false; + + // We insert the deferred_generator_interiors entry after visiting the body. + // This ensures that all nested generators appear before the entry of this generator. + // resolve_generator_interiors relies on this property. + let gen_ty = if let (Some(_), Some(gen_kind)) = (can_be_generator, body.generator_kind) { + let interior = fcx + .next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span }); + fcx.deferred_generator_interiors.borrow_mut().push((body.id(), interior, gen_kind)); + + let (resume_ty, yield_ty) = fcx.resume_yield_tys.unwrap(); + Some(GeneratorTypes { + resume_ty, + yield_ty, + interior, + movability: can_be_generator.unwrap(), + }) + } else { + None + }; + + // Finalize the return check by taking the LUB of the return types + // we saw and assigning it to the expected return type. This isn't + // really expected to fail, since the coercions would have failed + // earlier when trying to find a LUB. + let coercion = fcx.ret_coercion.take().unwrap().into_inner(); + let mut actual_return_ty = coercion.complete(&fcx); + debug!("actual_return_ty = {:?}", actual_return_ty); + if let ty::Dynamic(..) = declared_ret_ty.kind() { + // We have special-cased the case where the function is declared + // `-> dyn Foo` and we don't actually relate it to the + // `fcx.ret_coercion`, so just substitute a type variable. 
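+        //
+        // Illustrative example (assumption, not from the upstream comment):
+        // a newcomer writing `fn f() -> dyn std::fmt::Debug { 0u8 }` takes
+        // this path; the tail expression was already type-checked above, so
+        // the unsized-return-type error can point at `0u8`, and the actual
+        // return type is replaced by a fresh inference variable below.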
+ actual_return_ty = + fcx.next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::DynReturnFn, span }); + debug!("actual_return_ty replaced with {:?}", actual_return_ty); + } + + // HACK(oli-obk, compiler-errors): We should be comparing this against + // `declared_ret_ty`, but then anything uninferred would be inferred to + // the opaque type itself. That again would cause writeback to assume + // we have a recursive call site and do the sadly stabilized fallback to `()`. + fcx.demand_suptype(span, ret_ty, actual_return_ty); + + // Check that a function marked as `#[panic_handler]` has signature `fn(&PanicInfo) -> !` + if let Some(panic_impl_did) = tcx.lang_items().panic_impl() + && panic_impl_did == hir.local_def_id(fn_id).to_def_id() + { + check_panic_info_fn(tcx, panic_impl_did.expect_local(), fn_sig, decl, declared_ret_ty); + } + + // Check that a function marked as `#[alloc_error_handler]` has signature `fn(Layout) -> !` + if let Some(alloc_error_handler_did) = tcx.lang_items().oom() + && alloc_error_handler_did == hir.local_def_id(fn_id).to_def_id() + { + check_alloc_error_fn(tcx, alloc_error_handler_did.expect_local(), fn_sig, decl, declared_ret_ty); + } + + (fcx, gen_ty) +} + +fn check_panic_info_fn( + tcx: TyCtxt<'_>, + fn_id: LocalDefId, + fn_sig: ty::FnSig<'_>, + decl: &hir::FnDecl<'_>, + declared_ret_ty: Ty<'_>, +) { + let Some(panic_info_did) = tcx.lang_items().panic_info() else { + tcx.sess.err("language item required, but not found: `panic_info`"); + return; + }; + + if *declared_ret_ty.kind() != ty::Never { + tcx.sess.span_err(decl.output.span(), "return type should be `!`"); + } + + let inputs = fn_sig.inputs(); + if inputs.len() != 1 { + tcx.sess.span_err(tcx.def_span(fn_id), "function should have one argument"); + return; + } + + let arg_is_panic_info = match *inputs[0].kind() { + ty::Ref(region, ty, mutbl) => match *ty.kind() { + ty::Adt(ref adt, _) => { + adt.did() == panic_info_did && mutbl == hir::Mutability::Not && !region.is_static() + } + _ => false, + }, + _ => false, + }; + + if !arg_is_panic_info { + tcx.sess.span_err(decl.inputs[0].span, "argument should be `&PanicInfo`"); + } + + let DefKind::Fn = tcx.def_kind(fn_id) else { + let span = tcx.def_span(fn_id); + tcx.sess.span_err(span, "should be a function"); + return; + }; + + let generic_counts = tcx.generics_of(fn_id).own_counts(); + if generic_counts.types != 0 { + let span = tcx.def_span(fn_id); + tcx.sess.span_err(span, "should have no type parameters"); + } + if generic_counts.consts != 0 { + let span = tcx.def_span(fn_id); + tcx.sess.span_err(span, "should have no const parameters"); + } +} + +fn check_alloc_error_fn( + tcx: TyCtxt<'_>, + fn_id: LocalDefId, + fn_sig: ty::FnSig<'_>, + decl: &hir::FnDecl<'_>, + declared_ret_ty: Ty<'_>, +) { + let Some(alloc_layout_did) = tcx.lang_items().alloc_layout() else { + tcx.sess.err("language item required, but not found: `alloc_layout`"); + return; + }; + + if *declared_ret_ty.kind() != ty::Never { + tcx.sess.span_err(decl.output.span(), "return type should be `!`"); + } + + let inputs = fn_sig.inputs(); + if inputs.len() != 1 { + tcx.sess.span_err(tcx.def_span(fn_id), "function should have one argument"); + return; + } + + let arg_is_alloc_layout = match inputs[0].kind() { + ty::Adt(ref adt, _) => adt.did() == alloc_layout_did, + _ => false, + }; + + if !arg_is_alloc_layout { + tcx.sess.span_err(decl.inputs[0].span, "argument should be `Layout`"); + } + + let DefKind::Fn = tcx.def_kind(fn_id) else { + let span = tcx.def_span(fn_id); + 
tcx.sess.span_err(span, "`#[alloc_error_handler]` should be a function"); + return; + }; + + let generic_counts = tcx.generics_of(fn_id).own_counts(); + if generic_counts.types != 0 { + let span = tcx.def_span(fn_id); + tcx.sess.span_err(span, "`#[alloc_error_handler]` function should have no type parameters"); + } + if generic_counts.consts != 0 { + let span = tcx.def_span(fn_id); + tcx.sess + .span_err(span, "`#[alloc_error_handler]` function should have no const parameters"); + } +} + +fn check_struct(tcx: TyCtxt<'_>, def_id: LocalDefId) { + let def = tcx.adt_def(def_id); + let span = tcx.def_span(def_id); + def.destructor(tcx); // force the destructor to be evaluated + check_representable(tcx, span, def_id); + + if def.repr().simd() { + check_simd(tcx, span, def_id); + } + + check_transparent(tcx, span, def); + check_packed(tcx, span, def); +} + +fn check_union(tcx: TyCtxt<'_>, def_id: LocalDefId) { + let def = tcx.adt_def(def_id); + let span = tcx.def_span(def_id); + def.destructor(tcx); // force the destructor to be evaluated + check_representable(tcx, span, def_id); + check_transparent(tcx, span, def); + check_union_fields(tcx, span, def_id); + check_packed(tcx, span, def); +} + +/// Check that the fields of the `union` do not need dropping. +fn check_union_fields(tcx: TyCtxt<'_>, span: Span, item_def_id: LocalDefId) -> bool { + let item_type = tcx.type_of(item_def_id); + if let ty::Adt(def, substs) = item_type.kind() { + assert!(def.is_union()); + + fn allowed_union_field<'tcx>( + ty: Ty<'tcx>, + tcx: TyCtxt<'tcx>, + param_env: ty::ParamEnv<'tcx>, + span: Span, + ) -> bool { + // We don't just accept all !needs_drop fields, due to semver concerns. + match ty.kind() { + ty::Ref(..) => true, // references never drop (even mutable refs, which are non-Copy and hence fail the later check) + ty::Tuple(tys) => { + // allow tuples of allowed types + tys.iter().all(|ty| allowed_union_field(ty, tcx, param_env, span)) + } + ty::Array(elem, _len) => { + // Like `Copy`, we do *not* special-case length 0. + allowed_union_field(*elem, tcx, param_env, span) + } + _ => { + // Fallback case: allow `ManuallyDrop` and things that are `Copy`. + ty.ty_adt_def().is_some_and(|adt_def| adt_def.is_manually_drop()) + || ty.is_copy_modulo_regions(tcx.at(span), param_env) + } + } + } + + let param_env = tcx.param_env(item_def_id); + for field in &def.non_enum_variant().fields { + let field_ty = field.ty(tcx, substs); + + if !allowed_union_field(field_ty, tcx, param_env, span) { + let (field_span, ty_span) = match tcx.hir().get_if_local(field.did) { + // We are currently checking the type this field came from, so it must be local. + Some(Node::Field(field)) => (field.span, field.ty.span), + _ => unreachable!("mir field has to correspond to hir field"), + }; + struct_span_err!( + tcx.sess, + field_span, + E0740, + "unions cannot contain fields that may need dropping" + ) + .note( + "a type is guaranteed not to need dropping \ + when it implements `Copy`, or when it is the special `ManuallyDrop<_>` type", + ) + .multipart_suggestion_verbose( + "when the type does not implement `Copy`, \ + wrap it inside a `ManuallyDrop<_>` and ensure it is manually dropped", + vec![ + (ty_span.shrink_to_lo(), "std::mem::ManuallyDrop<".into()), + (ty_span.shrink_to_hi(), ">".into()), + ], + Applicability::MaybeIncorrect, + ) + .emit(); + return false; + } else if field_ty.needs_drop(tcx, param_env) { + // This should never happen. But we can get here e.g. in case of name resolution errors. 
+ tcx.sess.delay_span_bug(span, "we should never accept maybe-dropping union fields"); + } + } + } else { + span_bug!(span, "unions must be ty::Adt, but got {:?}", item_type.kind()); + } + true +} + +/// Check that a `static` is inhabited. +fn check_static_inhabited<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) { + // Make sure statics are inhabited. + // Other parts of the compiler assume that there are no uninhabited places. In principle it + // would be enough to check this for `extern` statics, as statics with an initializer will + // have UB during initialization if they are uninhabited, but there also seems to be no good + // reason to allow any statics to be uninhabited. + let ty = tcx.type_of(def_id); + let span = tcx.def_span(def_id); + let layout = match tcx.layout_of(ParamEnv::reveal_all().and(ty)) { + Ok(l) => l, + // Foreign statics that overflow their allowed size should emit an error + Err(LayoutError::SizeOverflow(_)) + if { + let node = tcx.hir().get_by_def_id(def_id); + matches!( + node, + hir::Node::ForeignItem(hir::ForeignItem { + kind: hir::ForeignItemKind::Static(..), + .. + }) + ) + } => + { + tcx.sess + .struct_span_err(span, "extern static is too large for the current architecture") + .emit(); + return; + } + // Generic statics are rejected, but we still reach this case. + Err(e) => { + tcx.sess.delay_span_bug(span, &e.to_string()); + return; + } + }; + if layout.abi.is_uninhabited() { + tcx.struct_span_lint_hir( + UNINHABITED_STATIC, + tcx.hir().local_def_id_to_hir_id(def_id), + span, + |lint| { + lint.build("static of uninhabited type") + .note("uninhabited statics cannot be initialized, and any access would be an immediate error") + .emit(); + }, + ); + } +} + +/// Checks that an opaque type does not contain cycles and does not use `Self` or `T::Foo` +/// projections that would result in "inheriting lifetimes". +pub(super) fn check_opaque<'tcx>( + tcx: TyCtxt<'tcx>, + def_id: LocalDefId, + substs: SubstsRef<'tcx>, + origin: &hir::OpaqueTyOrigin, +) { + let span = tcx.def_span(def_id); + check_opaque_for_inheriting_lifetimes(tcx, def_id, span); + if tcx.type_of(def_id).references_error() { + return; + } + if check_opaque_for_cycles(tcx, def_id, substs, span, origin).is_err() { + return; + } + check_opaque_meets_bounds(tcx, def_id, substs, span, origin); +} + +/// Checks that an opaque type does not use `Self` or `T::Foo` projections that would result +/// in "inheriting lifetimes". +#[instrument(level = "debug", skip(tcx, span))] +pub(super) fn check_opaque_for_inheriting_lifetimes<'tcx>( + tcx: TyCtxt<'tcx>, + def_id: LocalDefId, + span: Span, +) { + let item = tcx.hir().expect_item(def_id); + debug!(?item, ?span); + + struct FoundParentLifetime; + struct FindParentLifetimeVisitor<'tcx>(&'tcx ty::Generics); + impl<'tcx> ty::visit::TypeVisitor<'tcx> for FindParentLifetimeVisitor<'tcx> { + type BreakTy = FoundParentLifetime; + + fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow { + debug!("FindParentLifetimeVisitor: r={:?}", r); + if let ty::ReEarlyBound(ty::EarlyBoundRegion { index, .. }) = *r { + if index < self.0.parent_count as u32 { + return ControlFlow::Break(FoundParentLifetime); + } else { + return ControlFlow::CONTINUE; + } + } + + r.super_visit_with(self) + } + + fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow { + if let ty::ConstKind::Unevaluated(..) = c.kind() { + // FIXME(#72219) We currently don't detect lifetimes within substs + // which would violate this check. 
Even though the particular substitution is not used + // within the const, this should still be fixed. + return ControlFlow::CONTINUE; + } + c.super_visit_with(self) + } + } + + struct ProhibitOpaqueVisitor<'tcx> { + tcx: TyCtxt<'tcx>, + opaque_identity_ty: Ty<'tcx>, + generics: &'tcx ty::Generics, + selftys: Vec<(Span, Option)>, + } + + impl<'tcx> ty::visit::TypeVisitor<'tcx> for ProhibitOpaqueVisitor<'tcx> { + type BreakTy = Ty<'tcx>; + + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { + debug!("check_opaque_for_inheriting_lifetimes: (visit_ty) t={:?}", t); + if t == self.opaque_identity_ty { + ControlFlow::CONTINUE + } else { + t.super_visit_with(&mut FindParentLifetimeVisitor(self.generics)) + .map_break(|FoundParentLifetime| t) + } + } + } + + impl<'tcx> Visitor<'tcx> for ProhibitOpaqueVisitor<'tcx> { + type NestedFilter = nested_filter::OnlyBodies; + + fn nested_visit_map(&mut self) -> Self::Map { + self.tcx.hir() + } + + fn visit_ty(&mut self, arg: &'tcx hir::Ty<'tcx>) { + match arg.kind { + hir::TyKind::Path(hir::QPath::Resolved(None, path)) => match &path.segments { + [ + PathSegment { + res: Some(Res::SelfTy { trait_: _, alias_to: impl_ref }), + .. + }, + ] => { + let impl_ty_name = + impl_ref.map(|(def_id, _)| self.tcx.def_path_str(def_id)); + self.selftys.push((path.span, impl_ty_name)); + } + _ => {} + }, + _ => {} + } + hir::intravisit::walk_ty(self, arg); + } + } + + if let ItemKind::OpaqueTy(hir::OpaqueTy { + origin: hir::OpaqueTyOrigin::AsyncFn(..) | hir::OpaqueTyOrigin::FnReturn(..), + .. + }) = item.kind + { + let mut visitor = ProhibitOpaqueVisitor { + opaque_identity_ty: tcx.mk_opaque( + def_id.to_def_id(), + InternalSubsts::identity_for_item(tcx, def_id.to_def_id()), + ), + generics: tcx.generics_of(def_id), + tcx, + selftys: vec![], + }; + let prohibit_opaque = tcx + .explicit_item_bounds(def_id) + .iter() + .try_for_each(|(predicate, _)| predicate.visit_with(&mut visitor)); + debug!( + "check_opaque_for_inheriting_lifetimes: prohibit_opaque={:?}, visitor.opaque_identity_ty={:?}, visitor.generics={:?}", + prohibit_opaque, visitor.opaque_identity_ty, visitor.generics + ); + + if let Some(ty) = prohibit_opaque.break_value() { + visitor.visit_item(&item); + let is_async = match item.kind { + ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => { + matches!(origin, hir::OpaqueTyOrigin::AsyncFn(..)) + } + _ => unreachable!(), + }; + + let mut err = struct_span_err!( + tcx.sess, + span, + E0760, + "`{}` return type cannot contain a projection or `Self` that references lifetimes from \ + a parent scope", + if is_async { "async fn" } else { "impl Trait" }, + ); + + for (span, name) in visitor.selftys { + err.span_suggestion( + span, + "consider spelling out the type instead", + name.unwrap_or_else(|| format!("{:?}", ty)), + Applicability::MaybeIncorrect, + ); + } + err.emit(); + } + } +} + +/// Checks that an opaque type does not contain cycles. +pub(super) fn check_opaque_for_cycles<'tcx>( + tcx: TyCtxt<'tcx>, + def_id: LocalDefId, + substs: SubstsRef<'tcx>, + span: Span, + origin: &hir::OpaqueTyOrigin, +) -> Result<(), ErrorGuaranteed> { + if tcx.try_expand_impl_trait_type(def_id.to_def_id(), substs).is_err() { + let reported = match origin { + hir::OpaqueTyOrigin::AsyncFn(..) => async_opaque_type_cycle_error(tcx, span), + _ => opaque_type_cycle_error(tcx, def_id, span), + }; + Err(reported) + } else { + Ok(()) + } +} + +/// Check that the concrete type behind `impl Trait` actually implements `Trait`. 
+///
+/// This is mostly checked at the places that specify the opaque type, but we
+/// check those cases in the `param_env` of that function, which may have
+/// bounds not on this opaque type:
+///
+/// ```ignore (illustrative)
+/// type X<T> = impl Clone;
+/// fn f<T: Clone>(t: T) -> X<T> {
+///     t
+/// }
+/// ```
+///
+/// Without this check the above code is incorrectly accepted: we would ICE if
+/// someone tried, for example, to clone an `Option<X<NonClone>>`.
+#[instrument(level = "debug", skip(tcx))]
+fn check_opaque_meets_bounds<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def_id: LocalDefId,
+    substs: SubstsRef<'tcx>,
+    span: Span,
+    origin: &hir::OpaqueTyOrigin,
+) {
+    let hidden_type = tcx.bound_type_of(def_id.to_def_id()).subst(tcx, substs);
+
+    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+    let defining_use_anchor = match *origin {
+        hir::OpaqueTyOrigin::FnReturn(did) | hir::OpaqueTyOrigin::AsyncFn(did) => did,
+        hir::OpaqueTyOrigin::TyAlias => def_id,
+    };
+    let param_env = tcx.param_env(defining_use_anchor);
+
+    tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bind(defining_use_anchor)).enter(
+        move |infcx| {
+            let ocx = ObligationCtxt::new(&infcx);
+            let opaque_ty = tcx.mk_opaque(def_id.to_def_id(), substs);
+
+            let misc_cause = traits::ObligationCause::misc(span, hir_id);
+
+            match infcx.at(&misc_cause, param_env).eq(opaque_ty, hidden_type) {
+                Ok(infer_ok) => ocx.register_infer_ok_obligations(infer_ok),
+                Err(ty_err) => {
+                    tcx.sess.delay_span_bug(
+                        span,
+                        &format!("could not unify `{hidden_type}` with revealed type:\n{ty_err}"),
+                    );
+                }
+            }
+
+            // Additionally require the hidden type to be well-formed with only the generics of the opaque type.
+            // Defining use functions may have more bounds than the opaque type, which is ok, as long as the
+            // hidden type is well formed even without those bounds.
+            let predicate = ty::Binder::dummy(ty::PredicateKind::WellFormed(hidden_type.into()))
+                .to_predicate(tcx);
+            ocx.register_obligation(Obligation::new(misc_cause, param_env, predicate));
+
+            // Check that all obligations are satisfied by the implementation's
+            // version.
+            let errors = ocx.select_all_or_error();
+            if !errors.is_empty() {
+                infcx.report_fulfillment_errors(&errors, None, false);
+            }
+            match origin {
+                // Checked when type checking the function containing them.
+                hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..) => {}
+                // Can have different predicates to their defining use
+                hir::OpaqueTyOrigin::TyAlias => {
+                    let outlives_environment = OutlivesEnvironment::new(param_env);
+                    infcx.check_region_obligations_and_report_errors(
+                        defining_use_anchor,
+                        &outlives_environment,
+                    );
+                }
+            }
+            // Clean up after ourselves
+            let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+        },
+    );
+}
+
+fn check_item_type<'tcx>(tcx: TyCtxt<'tcx>, id: hir::ItemId) {
+    debug!(
+        "check_item_type(it.def_id={:?}, it.name={})",
+        id.def_id,
+        tcx.def_path_str(id.def_id.to_def_id())
+    );
+    let _indenter = indenter();
+    match tcx.def_kind(id.def_id) {
+        DefKind::Static(..)
=> { + tcx.ensure().typeck(id.def_id); + maybe_check_static_with_link_section(tcx, id.def_id); + check_static_inhabited(tcx, id.def_id); + } + DefKind::Const => { + tcx.ensure().typeck(id.def_id); + } + DefKind::Enum => { + let item = tcx.hir().item(id); + let hir::ItemKind::Enum(ref enum_definition, _) = item.kind else { + return; + }; + check_enum(tcx, &enum_definition.variants, item.def_id); + } + DefKind::Fn => {} // entirely within check_item_body + DefKind::Impl => { + let it = tcx.hir().item(id); + let hir::ItemKind::Impl(ref impl_) = it.kind else { + return; + }; + debug!("ItemKind::Impl {} with id {:?}", it.ident, it.def_id); + if let Some(impl_trait_ref) = tcx.impl_trait_ref(it.def_id) { + check_impl_items_against_trait( + tcx, + it.span, + it.def_id, + impl_trait_ref, + &impl_.items, + ); + check_on_unimplemented(tcx, it); + } + } + DefKind::Trait => { + let it = tcx.hir().item(id); + let hir::ItemKind::Trait(_, _, _, _, ref items) = it.kind else { + return; + }; + check_on_unimplemented(tcx, it); + + for item in items.iter() { + let item = tcx.hir().trait_item(item.id); + match item.kind { + hir::TraitItemKind::Fn(ref sig, _) => { + let abi = sig.header.abi; + fn_maybe_err(tcx, item.ident.span, abi); + } + hir::TraitItemKind::Type(.., Some(default)) => { + let assoc_item = tcx.associated_item(item.def_id); + let trait_substs = + InternalSubsts::identity_for_item(tcx, it.def_id.to_def_id()); + let _: Result<_, rustc_errors::ErrorGuaranteed> = check_type_bounds( + tcx, + assoc_item, + assoc_item, + default.span, + ty::TraitRef { def_id: it.def_id.to_def_id(), substs: trait_substs }, + ); + } + _ => {} + } + } + } + DefKind::Struct => { + check_struct(tcx, id.def_id); + } + DefKind::Union => { + check_union(tcx, id.def_id); + } + DefKind::OpaqueTy => { + let item = tcx.hir().item(id); + let hir::ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) = item.kind else { + return; + }; + // HACK(jynelson): trying to infer the type of `impl trait` breaks documenting + // `async-std` (and `pub async fn` in general). + // Since rustdoc doesn't care about the concrete type behind `impl Trait`, just don't look at it! + // See https://github.com/rust-lang/rust/issues/75100 + if !tcx.sess.opts.actually_rustdoc { + let substs = InternalSubsts::identity_for_item(tcx, item.def_id.to_def_id()); + check_opaque(tcx, item.def_id, substs, &origin); + } + } + DefKind::TyAlias => { + let pty_ty = tcx.type_of(id.def_id); + let generics = tcx.generics_of(id.def_id); + check_type_params_are_used(tcx, &generics, pty_ty); + } + DefKind::ForeignMod => { + let it = tcx.hir().item(id); + let hir::ItemKind::ForeignMod { abi, items } = it.kind else { + return; + }; + check_abi(tcx, it.hir_id(), it.span, abi); + + if abi == Abi::RustIntrinsic { + for item in items { + let item = tcx.hir().foreign_item(item.id); + intrinsic::check_intrinsic_type(tcx, item); + } + } else if abi == Abi::PlatformIntrinsic { + for item in items { + let item = tcx.hir().foreign_item(item.id); + intrinsic::check_platform_intrinsic_type(tcx, item); + } + } else { + for item in items { + let def_id = item.id.def_id; + let generics = tcx.generics_of(def_id); + let own_counts = generics.own_counts(); + if generics.params.len() - own_counts.lifetimes != 0 { + let (kinds, kinds_pl, egs) = match (own_counts.types, own_counts.consts) { + (_, 0) => ("type", "types", Some("u32")), + // We don't specify an example value, because we can't generate + // a valid value for any type. 
+ (0, _) => ("const", "consts", None), + _ => ("type or const", "types or consts", None), + }; + struct_span_err!( + tcx.sess, + item.span, + E0044, + "foreign items may not have {kinds} parameters", + ) + .span_label(item.span, &format!("can't have {kinds} parameters")) + .help( + // FIXME: once we start storing spans for type arguments, turn this + // into a suggestion. + &format!( + "replace the {} parameters with concrete {}{}", + kinds, + kinds_pl, + egs.map(|egs| format!(" like `{}`", egs)).unwrap_or_default(), + ), + ) + .emit(); + } + + let item = tcx.hir().foreign_item(item.id); + match item.kind { + hir::ForeignItemKind::Fn(ref fn_decl, _, _) => { + require_c_abi_if_c_variadic(tcx, fn_decl, abi, item.span); + } + hir::ForeignItemKind::Static(..) => { + check_static_inhabited(tcx, def_id); + } + _ => {} + } + } + } + } + DefKind::GlobalAsm => { + let it = tcx.hir().item(id); + let hir::ItemKind::GlobalAsm(asm) = it.kind else { span_bug!(it.span, "DefKind::GlobalAsm but got {:#?}", it) }; + InlineAsmCtxt::new_global_asm(tcx).check_asm(asm, id.hir_id()); + } + _ => {} + } +} + +pub(super) fn check_on_unimplemented(tcx: TyCtxt<'_>, item: &hir::Item<'_>) { + // an error would be reported if this fails. + let _ = traits::OnUnimplementedDirective::of_item(tcx, item.def_id.to_def_id()); +} + +pub(super) fn check_specialization_validity<'tcx>( + tcx: TyCtxt<'tcx>, + trait_def: &ty::TraitDef, + trait_item: &ty::AssocItem, + impl_id: DefId, + impl_item: &hir::ImplItemRef, +) { + let Ok(ancestors) = trait_def.ancestors(tcx, impl_id) else { return }; + let mut ancestor_impls = ancestors.skip(1).filter_map(|parent| { + if parent.is_from_trait() { + None + } else { + Some((parent, parent.item(tcx, trait_item.def_id))) + } + }); + + let opt_result = ancestor_impls.find_map(|(parent_impl, parent_item)| { + match parent_item { + // Parent impl exists, and contains the parent item we're trying to specialize, but + // doesn't mark it `default`. + Some(parent_item) if traits::impl_item_is_final(tcx, &parent_item) => { + Some(Err(parent_impl.def_id())) + } + + // Parent impl contains item and makes it specializable. + Some(_) => Some(Ok(())), + + // Parent impl doesn't mention the item. This means it's inherited from the + // grandparent. In that case, if parent is a `default impl`, inherited items use the + // "defaultness" from the grandparent, else they are final. + None => { + if tcx.impl_defaultness(parent_impl.def_id()).is_default() { + None + } else { + Some(Err(parent_impl.def_id())) + } + } + } + }); + + // If `opt_result` is `None`, we have only encountered `default impl`s that don't contain the + // item. This is allowed, the item isn't actually getting specialized here. + let result = opt_result.unwrap_or(Ok(())); + + if let Err(parent_impl) = result { + report_forbidden_specialization(tcx, impl_item, parent_impl); + } +} + +fn check_impl_items_against_trait<'tcx>( + tcx: TyCtxt<'tcx>, + full_impl_span: Span, + impl_id: LocalDefId, + impl_trait_ref: ty::TraitRef<'tcx>, + impl_item_refs: &[hir::ImplItemRef], +) { + // If the trait reference itself is erroneous (so the compilation is going + // to fail), skip checking the items here -- the `impl_item` table in `tcx` + // isn't populated for such impls. + if impl_trait_ref.references_error() { + return; + } + + // Negative impls are not expected to have any items + match tcx.impl_polarity(impl_id) { + ty::ImplPolarity::Reservation | ty::ImplPolarity::Positive => {} + ty::ImplPolarity::Negative => { + if let [first_item_ref, ..] 
= impl_item_refs { + let first_item_span = tcx.hir().impl_item(first_item_ref.id).span; + struct_span_err!( + tcx.sess, + first_item_span, + E0749, + "negative impls cannot have any items" + ) + .emit(); + } + return; + } + } + + let trait_def = tcx.trait_def(impl_trait_ref.def_id); + + for impl_item in impl_item_refs { + let ty_impl_item = tcx.associated_item(impl_item.id.def_id); + let ty_trait_item = if let Some(trait_item_id) = ty_impl_item.trait_item_def_id { + tcx.associated_item(trait_item_id) + } else { + // Checked in `associated_item`. + tcx.sess.delay_span_bug(impl_item.span, "missing associated item in trait"); + continue; + }; + let impl_item_full = tcx.hir().impl_item(impl_item.id); + match impl_item_full.kind { + hir::ImplItemKind::Const(..) => { + // Find associated const definition. + compare_const_impl( + tcx, + &ty_impl_item, + impl_item.span, + &ty_trait_item, + impl_trait_ref, + ); + } + hir::ImplItemKind::Fn(..) => { + let opt_trait_span = tcx.hir().span_if_local(ty_trait_item.def_id); + compare_impl_method( + tcx, + &ty_impl_item, + &ty_trait_item, + impl_trait_ref, + opt_trait_span, + ); + } + hir::ImplItemKind::TyAlias(impl_ty) => { + let opt_trait_span = tcx.hir().span_if_local(ty_trait_item.def_id); + compare_ty_impl( + tcx, + &ty_impl_item, + impl_ty.span, + &ty_trait_item, + impl_trait_ref, + opt_trait_span, + ); + } + } + + check_specialization_validity( + tcx, + trait_def, + &ty_trait_item, + impl_id.to_def_id(), + impl_item, + ); + } + + if let Ok(ancestors) = trait_def.ancestors(tcx, impl_id.to_def_id()) { + // Check for missing items from trait + let mut missing_items = Vec::new(); + + let mut must_implement_one_of: Option<&[Ident]> = + trait_def.must_implement_one_of.as_deref(); + + for &trait_item_id in tcx.associated_item_def_ids(impl_trait_ref.def_id) { + let is_implemented = ancestors + .leaf_def(tcx, trait_item_id) + .map_or(false, |node_item| node_item.item.defaultness(tcx).has_value()); + + if !is_implemented && tcx.impl_defaultness(impl_id).is_final() { + missing_items.push(tcx.associated_item(trait_item_id)); + } + + if let Some(required_items) = &must_implement_one_of { + // true if this item is specifically implemented in this impl + let is_implemented_here = ancestors + .leaf_def(tcx, trait_item_id) + .map_or(false, |node_item| !node_item.defining_node.is_from_trait()); + + if is_implemented_here { + let trait_item = tcx.associated_item(trait_item_id); + if required_items.contains(&trait_item.ident(tcx)) { + must_implement_one_of = None; + } + } + } + } + + if !missing_items.is_empty() { + missing_items_err(tcx, tcx.def_span(impl_id), &missing_items, full_impl_span); + } + + if let Some(missing_items) = must_implement_one_of { + let attr_span = tcx + .get_attr(impl_trait_ref.def_id, sym::rustc_must_implement_one_of) + .map(|attr| attr.span); + + missing_items_must_implement_one_of_err( + tcx, + tcx.def_span(impl_id), + missing_items, + attr_span, + ); + } + } +} + +/// Checks whether a type can be represented in memory. In particular, it +/// identifies types that contain themselves without indirection through a +/// pointer, which would mean their size is unbounded. +pub(super) fn check_representable(tcx: TyCtxt<'_>, sp: Span, item_def_id: LocalDefId) -> bool { + let rty = tcx.type_of(item_def_id); + + // Check that it is possible to represent this type. This call identifies + // (1) types that contain themselves and (2) types that contain a different + // recursive type. 
It is only necessary to throw an error on those that + // contain themselves. For case 2, there must be an inner type that will be + // caught by case 1. + match representability::ty_is_representable(tcx, rty, sp, None) { + Representability::SelfRecursive(spans) => { + recursive_type_with_infinite_size_error(tcx, item_def_id.to_def_id(), spans); + return false; + } + Representability::Representable | Representability::ContainsRecursive => (), + } + true +} + +pub fn check_simd(tcx: TyCtxt<'_>, sp: Span, def_id: LocalDefId) { + let t = tcx.type_of(def_id); + if let ty::Adt(def, substs) = t.kind() + && def.is_struct() + { + let fields = &def.non_enum_variant().fields; + if fields.is_empty() { + struct_span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty").emit(); + return; + } + let e = fields[0].ty(tcx, substs); + if !fields.iter().all(|f| f.ty(tcx, substs) == e) { + struct_span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous") + .span_label(sp, "SIMD elements must have the same type") + .emit(); + return; + } + + let len = if let ty::Array(_ty, c) = e.kind() { + c.try_eval_usize(tcx, tcx.param_env(def.did())) + } else { + Some(fields.len() as u64) + }; + if let Some(len) = len { + if len == 0 { + struct_span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty").emit(); + return; + } else if len > MAX_SIMD_LANES { + struct_span_err!( + tcx.sess, + sp, + E0075, + "SIMD vector cannot have more than {MAX_SIMD_LANES} elements", + ) + .emit(); + return; + } + } + + // Check that we use types valid for use in the lanes of a SIMD "vector register" + // These are scalar types which directly match a "machine" type + // Yes: Integers, floats, "thin" pointers + // No: char, "fat" pointers, compound types + match e.kind() { + ty::Param(_) => (), // pass struct(T, T, T, T) through, let monomorphization catch errors + ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::RawPtr(_) => (), // struct(u8, u8, u8, u8) is ok + ty::Array(t, _) if matches!(t.kind(), ty::Param(_)) => (), // pass struct([T; N]) through, let monomorphization catch errors + ty::Array(t, _clen) + if matches!( + t.kind(), + ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::RawPtr(_) + ) => + { /* struct([f32; 4]) is ok */ } + _ => { + struct_span_err!( + tcx.sess, + sp, + E0077, + "SIMD vector element type should be a \ + primitive scalar (integer/float/pointer) type" + ) + .emit(); + return; + } + } + } +} + +pub(super) fn check_packed(tcx: TyCtxt<'_>, sp: Span, def: ty::AdtDef<'_>) { + let repr = def.repr(); + if repr.packed() { + for attr in tcx.get_attrs(def.did(), sym::repr) { + for r in attr::parse_repr_attr(&tcx.sess, attr) { + if let attr::ReprPacked(pack) = r + && let Some(repr_pack) = repr.pack + && pack as u64 != repr_pack.bytes() + { + struct_span_err!( + tcx.sess, + sp, + E0634, + "type has conflicting packed representation hints" + ) + .emit(); + } + } + } + if repr.align.is_some() { + struct_span_err!( + tcx.sess, + sp, + E0587, + "type has conflicting packed and align representation hints" + ) + .emit(); + } else { + if let Some(def_spans) = check_packed_inner(tcx, def.did(), &mut vec![]) { + let mut err = struct_span_err!( + tcx.sess, + sp, + E0588, + "packed type cannot transitively contain a `#[repr(align)]` type" + ); + + err.span_note( + tcx.def_span(def_spans[0].0), + &format!( + "`{}` has a `#[repr(align)]` attribute", + tcx.item_name(def_spans[0].0) + ), + ); + + if def_spans.len() > 2 { + let mut first = true; + for (adt_def, span) in def_spans.iter().skip(1).rev() { + let ident = 
tcx.item_name(*adt_def); + err.span_note( + *span, + &if first { + format!( + "`{}` contains a field of type `{}`", + tcx.type_of(def.did()), + ident + ) + } else { + format!("...which contains a field of type `{ident}`") + }, + ); + first = false; + } + } + + err.emit(); + } + } + } +} + +pub(super) fn check_packed_inner( + tcx: TyCtxt<'_>, + def_id: DefId, + stack: &mut Vec, +) -> Option> { + if let ty::Adt(def, substs) = tcx.type_of(def_id).kind() { + if def.is_struct() || def.is_union() { + if def.repr().align.is_some() { + return Some(vec![(def.did(), DUMMY_SP)]); + } + + stack.push(def_id); + for field in &def.non_enum_variant().fields { + if let ty::Adt(def, _) = field.ty(tcx, substs).kind() + && !stack.contains(&def.did()) + && let Some(mut defs) = check_packed_inner(tcx, def.did(), stack) + { + defs.push((def.did(), field.ident(tcx).span)); + return Some(defs); + } + } + stack.pop(); + } + } + + None +} + +pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, sp: Span, adt: ty::AdtDef<'tcx>) { + if !adt.repr().transparent() { + return; + } + + if adt.is_union() && !tcx.features().transparent_unions { + feature_err( + &tcx.sess.parse_sess, + sym::transparent_unions, + sp, + "transparent unions are unstable", + ) + .emit(); + } + + if adt.variants().len() != 1 { + bad_variant_count(tcx, adt, sp, adt.did()); + if adt.variants().is_empty() { + // Don't bother checking the fields. No variants (and thus no fields) exist. + return; + } + } + + // For each field, figure out if it's known to be a ZST and align(1), with "known" + // respecting #[non_exhaustive] attributes. + let field_infos = adt.all_fields().map(|field| { + let ty = field.ty(tcx, InternalSubsts::identity_for_item(tcx, field.did)); + let param_env = tcx.param_env(field.did); + let layout = tcx.layout_of(param_env.and(ty)); + // We are currently checking the type this field came from, so it must be local + let span = tcx.hir().span_if_local(field.did).unwrap(); + let zst = layout.map_or(false, |layout| layout.is_zst()); + let align1 = layout.map_or(false, |layout| layout.align.abi.bytes() == 1); + if !zst { + return (span, zst, align1, None); + } + + fn check_non_exhaustive<'tcx>( + tcx: TyCtxt<'tcx>, + t: Ty<'tcx>, + ) -> ControlFlow<(&'static str, DefId, SubstsRef<'tcx>, bool)> { + match t.kind() { + ty::Tuple(list) => list.iter().try_for_each(|t| check_non_exhaustive(tcx, t)), + ty::Array(ty, _) => check_non_exhaustive(tcx, *ty), + ty::Adt(def, subst) => { + if !def.did().is_local() { + let non_exhaustive = def.is_variant_list_non_exhaustive() + || def + .variants() + .iter() + .any(ty::VariantDef::is_field_list_non_exhaustive); + let has_priv = def.all_fields().any(|f| !f.vis.is_public()); + if non_exhaustive || has_priv { + return ControlFlow::Break(( + def.descr(), + def.did(), + subst, + non_exhaustive, + )); + } + } + def.all_fields() + .map(|field| field.ty(tcx, subst)) + .try_for_each(|t| check_non_exhaustive(tcx, t)) + } + _ => ControlFlow::Continue(()), + } + } + + (span, zst, align1, check_non_exhaustive(tcx, ty).break_value()) + }); + + let non_zst_fields = field_infos + .clone() + .filter_map(|(span, zst, _align1, _non_exhaustive)| if !zst { Some(span) } else { None }); + let non_zst_count = non_zst_fields.clone().count(); + if non_zst_count >= 2 { + bad_non_zero_sized_fields(tcx, adt, non_zst_count, non_zst_fields, sp); + } + let incompatible_zst_fields = + field_infos.clone().filter(|(_, _, _, opt)| opt.is_some()).count(); + let incompat = incompatible_zst_fields + non_zst_count >= 2 && non_zst_count < 2; 
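+    // Illustrative examples (assumed, not taken from this crate or its test suite) of the
+    // rule the loop below enforces for `#[repr(transparent)]` types:
+    //
+    //     #[repr(transparent)]
+    //     struct Fine(u64, std::marker::PhantomData<u8>); // ok: `PhantomData` is a 1-aligned ZST
+    //
+    //     #[repr(align(4))]
+    //     struct Align4; // a zero-sized type with alignment 4
+    //     #[repr(transparent)]
+    //     struct Bad(u64, Align4); // E0691: zero-sized field with alignment larger than 1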
+ for (span, zst, align1, non_exhaustive) in field_infos { + if zst && !align1 { + struct_span_err!( + tcx.sess, + span, + E0691, + "zero-sized field in transparent {} has alignment larger than 1", + adt.descr(), + ) + .span_label(span, "has alignment larger than 1") + .emit(); + } + if incompat && let Some((descr, def_id, substs, non_exhaustive)) = non_exhaustive { + tcx.struct_span_lint_hir( + REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS, + tcx.hir().local_def_id_to_hir_id(adt.did().expect_local()), + span, + |lint| { + let note = if non_exhaustive { + "is marked with `#[non_exhaustive]`" + } else { + "contains private fields" + }; + let field_ty = tcx.def_path_str_with_substs(def_id, substs); + lint.build("zero-sized fields in repr(transparent) cannot contain external non-exhaustive types") + .note(format!("this {descr} contains `{field_ty}`, which {note}, \ + and makes it not a breaking change to become non-zero-sized in the future.")) + .emit(); + }, + ) + } + } +} + +#[allow(trivial_numeric_casts)] +fn check_enum<'tcx>(tcx: TyCtxt<'tcx>, vs: &'tcx [hir::Variant<'tcx>], def_id: LocalDefId) { + let def = tcx.adt_def(def_id); + let sp = tcx.def_span(def_id); + def.destructor(tcx); // force the destructor to be evaluated + + if vs.is_empty() { + if let Some(attr) = tcx.get_attr(def_id.to_def_id(), sym::repr) { + struct_span_err!( + tcx.sess, + attr.span, + E0084, + "unsupported representation for zero-variant enum" + ) + .span_label(sp, "zero-variant enum") + .emit(); + } + } + + let repr_type_ty = def.repr().discr_type().to_ty(tcx); + if repr_type_ty == tcx.types.i128 || repr_type_ty == tcx.types.u128 { + if !tcx.features().repr128 { + feature_err( + &tcx.sess.parse_sess, + sym::repr128, + sp, + "repr with 128-bit type is unstable", + ) + .emit(); + } + } + + for v in vs { + if let Some(ref e) = v.disr_expr { + tcx.ensure().typeck(tcx.hir().local_def_id(e.hir_id)); + } + } + + if tcx.adt_def(def_id).repr().int.is_none() && tcx.features().arbitrary_enum_discriminant { + let is_unit = |var: &hir::Variant<'_>| matches!(var.data, hir::VariantData::Unit(..)); + + let has_disr = |var: &hir::Variant<'_>| var.disr_expr.is_some(); + let has_non_units = vs.iter().any(|var| !is_unit(var)); + let disr_units = vs.iter().any(|var| is_unit(&var) && has_disr(&var)); + let disr_non_unit = vs.iter().any(|var| !is_unit(&var) && has_disr(&var)); + + if disr_non_unit || (disr_units && has_non_units) { + let mut err = + struct_span_err!(tcx.sess, sp, E0732, "`#[repr(inttype)]` must be specified"); + err.emit(); + } + } + + let mut disr_vals: Vec> = Vec::with_capacity(vs.len()); + // This tracks the previous variant span (in the loop) incase we need it for diagnostics + let mut prev_variant_span: Span = DUMMY_SP; + for ((_, discr), v) in iter::zip(def.discriminants(tcx), vs) { + // Check for duplicate discriminant values + if let Some(i) = disr_vals.iter().position(|&x| x.val == discr.val) { + let variant_did = def.variant(VariantIdx::new(i)).def_id; + let variant_i_hir_id = tcx.hir().local_def_id_to_hir_id(variant_did.expect_local()); + let variant_i = tcx.hir().expect_variant(variant_i_hir_id); + let i_span = match variant_i.disr_expr { + Some(ref expr) => tcx.hir().span(expr.hir_id), + None => tcx.def_span(variant_did), + }; + let span = match v.disr_expr { + Some(ref expr) => tcx.hir().span(expr.hir_id), + None => v.span, + }; + let display_discr = format_discriminant_overflow(tcx, v, discr); + let display_discr_i = format_discriminant_overflow(tcx, variant_i, disr_vals[i]); + let no_disr = 
v.disr_expr.is_none(); + let mut err = struct_span_err!( + tcx.sess, + sp, + E0081, + "discriminant value `{}` assigned more than once", + discr, + ); + + err.span_label(i_span, format!("first assignment of {display_discr_i}")); + err.span_label(span, format!("second assignment of {display_discr}")); + + if no_disr { + err.span_label( + prev_variant_span, + format!( + "assigned discriminant for `{}` was incremented from this discriminant", + v.ident + ), + ); + } + err.emit(); + } + + disr_vals.push(discr); + prev_variant_span = v.span; + } + + check_representable(tcx, sp, def_id); + check_transparent(tcx, sp, def); +} + +/// In the case that a discriminant is both a duplicate and an overflowing literal, +/// we insert both the assigned discriminant and the literal it overflowed from into the formatted +/// output. Otherwise we format the discriminant normally. +fn format_discriminant_overflow<'tcx>( + tcx: TyCtxt<'tcx>, + variant: &hir::Variant<'_>, + dis: Discr<'tcx>, +) -> String { + if let Some(expr) = &variant.disr_expr { + let body = &tcx.hir().body(expr.body).value; + if let hir::ExprKind::Lit(lit) = &body.kind + && let rustc_ast::LitKind::Int(lit_value, _int_kind) = &lit.node + && dis.val != *lit_value + { + return format!("`{dis}` (overflowed from `{lit_value}`)"); + } + } + + format!("`{dis}`") +} + +pub(super) fn check_type_params_are_used<'tcx>( + tcx: TyCtxt<'tcx>, + generics: &ty::Generics, + ty: Ty<'tcx>, +) { + debug!("check_type_params_are_used(generics={:?}, ty={:?})", generics, ty); + + assert_eq!(generics.parent, None); + + if generics.own_counts().types == 0 { + return; + } + + let mut params_used = BitSet::new_empty(generics.params.len()); + + if ty.references_error() { + // If there is already another error, do not emit + // an error for not using a type parameter. + assert!(tcx.sess.has_errors().is_some()); + return; + } + + for leaf in ty.walk() { + if let GenericArgKind::Type(leaf_ty) = leaf.unpack() + && let ty::Param(param) = leaf_ty.kind() + { + debug!("found use of ty param {:?}", param); + params_used.insert(param.index); + } + } + + for param in &generics.params { + if !params_used.contains(param.index) + && let ty::GenericParamDefKind::Type { .. } = param.kind + { + let span = tcx.def_span(param.def_id); + struct_span_err!( + tcx.sess, + span, + E0091, + "type parameter `{}` is unused", + param.name, + ) + .span_label(span, "unused type parameter") + .emit(); + } + } +} + +pub(super) fn check_mod_item_types(tcx: TyCtxt<'_>, module_def_id: LocalDefId) { + let module = tcx.hir_module_items(module_def_id); + for id in module.items() { + check_item_type(tcx, id); + } +} + +fn async_opaque_type_cycle_error(tcx: TyCtxt<'_>, span: Span) -> ErrorGuaranteed { + struct_span_err!(tcx.sess, span, E0733, "recursion in an `async fn` requires boxing") + .span_label(span, "recursive `async fn`") + .note("a recursive `async fn` must be rewritten to return a boxed `dyn Future`") + .note( + "consider using the `async_recursion` crate: https://crates.io/crates/async_recursion", + ) + .emit() +} + +/// Emit an error for recursive opaque types. +/// +/// If this is a return `impl Trait`, find the item's return expressions and point at them. For +/// direct recursion this is enough, but for indirect recursion also point at the last intermediary +/// `impl Trait`. +/// +/// If all the return expressions evaluate to `!`, then we explain that the error will go away +/// after changing it. This can happen when a user uses `panic!()` or similar as a placeholder. 
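+///
+/// A minimal illustrative example (assumed, not taken from the test suite) of the direct
+/// recursion case:
+///
+/// ```ignore (illustrative)
+/// fn recur() -> impl Sized {
+///     recur() // error[E0720]: cannot resolve opaque type
+/// }
+/// ```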
+fn opaque_type_cycle_error(tcx: TyCtxt<'_>, def_id: LocalDefId, span: Span) -> ErrorGuaranteed { + let mut err = struct_span_err!(tcx.sess, span, E0720, "cannot resolve opaque type"); + + let mut label = false; + if let Some((def_id, visitor)) = get_owner_return_paths(tcx, def_id) { + let typeck_results = tcx.typeck(def_id); + if visitor + .returns + .iter() + .filter_map(|expr| typeck_results.node_type_opt(expr.hir_id)) + .all(|ty| matches!(ty.kind(), ty::Never)) + { + let spans = visitor + .returns + .iter() + .filter(|expr| typeck_results.node_type_opt(expr.hir_id).is_some()) + .map(|expr| expr.span) + .collect::>(); + let span_len = spans.len(); + if span_len == 1 { + err.span_label(spans[0], "this returned value is of `!` type"); + } else { + let mut multispan: MultiSpan = spans.clone().into(); + for span in spans { + multispan.push_span_label(span, "this returned value is of `!` type"); + } + err.span_note(multispan, "these returned values have a concrete \"never\" type"); + } + err.help("this error will resolve once the item's body returns a concrete type"); + } else { + let mut seen = FxHashSet::default(); + seen.insert(span); + err.span_label(span, "recursive opaque type"); + label = true; + for (sp, ty) in visitor + .returns + .iter() + .filter_map(|e| typeck_results.node_type_opt(e.hir_id).map(|t| (e.span, t))) + .filter(|(_, ty)| !matches!(ty.kind(), ty::Never)) + { + struct OpaqueTypeCollector(Vec); + impl<'tcx> ty::visit::TypeVisitor<'tcx> for OpaqueTypeCollector { + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { + match *t.kind() { + ty::Opaque(def, _) => { + self.0.push(def); + ControlFlow::CONTINUE + } + _ => t.super_visit_with(self), + } + } + } + let mut visitor = OpaqueTypeCollector(vec![]); + ty.visit_with(&mut visitor); + for def_id in visitor.0 { + let ty_span = tcx.def_span(def_id); + if !seen.contains(&ty_span) { + err.span_label(ty_span, &format!("returning this opaque type `{ty}`")); + seen.insert(ty_span); + } + err.span_label(sp, &format!("returning here with type `{ty}`")); + } + } + } + } + if !label { + err.span_label(span, "cannot resolve opaque type"); + } + err.emit() +} diff --git a/compiler/rustc_typeck/src/check/closure.rs b/compiler/rustc_typeck/src/check/closure.rs new file mode 100644 index 000000000..fee872155 --- /dev/null +++ b/compiler/rustc_typeck/src/check/closure.rs @@ -0,0 +1,805 @@ +//! Code for type-checking closure expressions. + +use super::{check_fn, Expectation, FnCtxt, GeneratorTypes}; + +use crate::astconv::AstConv; +use crate::rustc_middle::ty::subst::Subst; +use rustc_hir as hir; +use rustc_hir::def_id::DefId; +use rustc_hir::lang_items::LangItem; +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_infer::infer::LateBoundRegionConversionTime; +use rustc_infer::infer::{InferOk, InferResult}; +use rustc_middle::ty::subst::InternalSubsts; +use rustc_middle::ty::visit::TypeVisitable; +use rustc_middle::ty::{self, Ty}; +use rustc_span::source_map::Span; +use rustc_target::spec::abi::Abi; +use rustc_trait_selection::traits::error_reporting::ArgKind; +use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _; +use std::cmp; +use std::iter; + +/// What signature do we *expect* the closure to have from context? +#[derive(Debug)] +struct ExpectedSig<'tcx> { + /// Span that gave us this expectation, if we know that. 
+ cause_span: Option, + sig: ty::PolyFnSig<'tcx>, +} + +struct ClosureSignatures<'tcx> { + bound_sig: ty::PolyFnSig<'tcx>, + liberated_sig: ty::FnSig<'tcx>, +} + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + #[instrument(skip(self, expr, _capture, decl, body_id), level = "debug")] + pub fn check_expr_closure( + &self, + expr: &hir::Expr<'_>, + _capture: hir::CaptureBy, + decl: &'tcx hir::FnDecl<'tcx>, + body_id: hir::BodyId, + gen: Option, + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + trace!("decl = {:#?}", decl); + trace!("expr = {:#?}", expr); + + // It's always helpful for inference if we know the kind of + // closure sooner rather than later, so first examine the expected + // type, and see if can glean a closure kind from there. + let (expected_sig, expected_kind) = match expected.to_option(self) { + Some(ty) => self.deduce_expectations_from_expected_type(ty), + None => (None, None), + }; + let body = self.tcx.hir().body(body_id); + self.check_closure(expr, expected_kind, decl, body, gen, expected_sig) + } + + #[instrument(skip(self, expr, body, decl), level = "debug")] + fn check_closure( + &self, + expr: &hir::Expr<'_>, + opt_kind: Option, + decl: &'tcx hir::FnDecl<'tcx>, + body: &'tcx hir::Body<'tcx>, + gen: Option, + expected_sig: Option>, + ) -> Ty<'tcx> { + trace!("decl = {:#?}", decl); + let expr_def_id = self.tcx.hir().local_def_id(expr.hir_id); + debug!(?expr_def_id); + + let ClosureSignatures { bound_sig, liberated_sig } = + self.sig_of_closure(expr.hir_id, expr_def_id.to_def_id(), decl, body, expected_sig); + + debug!(?bound_sig, ?liberated_sig); + + let return_type_pre_known = !liberated_sig.output().is_ty_infer(); + + let generator_types = check_fn( + self, + self.param_env.without_const(), + liberated_sig, + decl, + expr.hir_id, + body, + gen, + return_type_pre_known, + ) + .1; + + let parent_substs = InternalSubsts::identity_for_item( + self.tcx, + self.tcx.typeck_root_def_id(expr_def_id.to_def_id()), + ); + + let tupled_upvars_ty = self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::ClosureSynthetic, + span: self.tcx.hir().span(expr.hir_id), + }); + + if let Some(GeneratorTypes { resume_ty, yield_ty, interior, movability }) = generator_types + { + let generator_substs = ty::GeneratorSubsts::new( + self.tcx, + ty::GeneratorSubstsParts { + parent_substs, + resume_ty, + yield_ty, + return_ty: liberated_sig.output(), + witness: interior, + tupled_upvars_ty, + }, + ); + + return self.tcx.mk_generator( + expr_def_id.to_def_id(), + generator_substs.substs, + movability, + ); + } + + // Tuple up the arguments and insert the resulting function type into + // the `closures` table. + let sig = bound_sig.map_bound(|sig| { + self.tcx.mk_fn_sig( + iter::once(self.tcx.intern_tup(sig.inputs())), + sig.output(), + sig.c_variadic, + sig.unsafety, + sig.abi, + ) + }); + + debug!(?sig, ?opt_kind); + + let closure_kind_ty = match opt_kind { + Some(kind) => kind.to_ty(self.tcx), + + // Create a type variable (for now) to represent the closure kind. + // It will be unified during the upvar inference phase (`upvar.rs`) + None => self.next_ty_var(TypeVariableOrigin { + // FIXME(eddyb) distinguish closure kind inference variables from the rest. 
+ kind: TypeVariableOriginKind::ClosureSynthetic, + span: expr.span, + }), + }; + + let closure_substs = ty::ClosureSubsts::new( + self.tcx, + ty::ClosureSubstsParts { + parent_substs, + closure_kind_ty, + closure_sig_as_fn_ptr_ty: self.tcx.mk_fn_ptr(sig), + tupled_upvars_ty, + }, + ); + + let closure_type = self.tcx.mk_closure(expr_def_id.to_def_id(), closure_substs.substs); + + debug!(?expr.hir_id, ?closure_type); + + closure_type + } + + /// Given the expected type, figures out what it can about this closure we + /// are about to type check: + #[instrument(skip(self), level = "debug")] + fn deduce_expectations_from_expected_type( + &self, + expected_ty: Ty<'tcx>, + ) -> (Option>, Option) { + match *expected_ty.kind() { + ty::Opaque(def_id, substs) => { + let bounds = self.tcx.bound_explicit_item_bounds(def_id); + let sig = bounds + .transpose_iter() + .map(|e| e.map_bound(|e| *e).transpose_tuple2()) + .find_map(|(pred, span)| match pred.0.kind().skip_binder() { + ty::PredicateKind::Projection(proj_predicate) => self + .deduce_sig_from_projection( + Some(span.0), + pred.0 + .kind() + .rebind(pred.rebind(proj_predicate).subst(self.tcx, substs)), + ), + _ => None, + }); + + let kind = bounds + .transpose_iter() + .map(|e| e.map_bound(|e| *e).transpose_tuple2()) + .filter_map(|(pred, _)| match pred.0.kind().skip_binder() { + ty::PredicateKind::Trait(tp) => { + self.tcx.fn_trait_kind_from_lang_item(tp.def_id()) + } + _ => None, + }) + .fold(None, |best, cur| Some(best.map_or(cur, |best| cmp::min(best, cur)))); + trace!(?sig, ?kind); + (sig, kind) + } + ty::Dynamic(ref object_type, ..) => { + let sig = object_type.projection_bounds().find_map(|pb| { + let pb = pb.with_self_ty(self.tcx, self.tcx.types.trait_object_dummy_self); + self.deduce_sig_from_projection(None, pb) + }); + let kind = object_type + .principal_def_id() + .and_then(|did| self.tcx.fn_trait_kind_from_lang_item(did)); + (sig, kind) + } + ty::Infer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid), + ty::FnPtr(sig) => { + let expected_sig = ExpectedSig { cause_span: None, sig }; + (Some(expected_sig), Some(ty::ClosureKind::Fn)) + } + _ => (None, None), + } + } + + fn deduce_expectations_from_obligations( + &self, + expected_vid: ty::TyVid, + ) -> (Option>, Option) { + let expected_sig = + self.obligations_for_self_ty(expected_vid).find_map(|(_, obligation)| { + debug!(?obligation.predicate); + + let bound_predicate = obligation.predicate.kind(); + if let ty::PredicateKind::Projection(proj_predicate) = + obligation.predicate.kind().skip_binder() + { + // Given a Projection predicate, we can potentially infer + // the complete signature. + self.deduce_sig_from_projection( + Some(obligation.cause.span), + bound_predicate.rebind(proj_predicate), + ) + } else { + None + } + }); + + // Even if we can't infer the full signature, we may be able to + // infer the kind. This can occur when we elaborate a predicate + // like `F : Fn`. Note that due to subtyping we could encounter + // many viable options, so pick the most restrictive. + let expected_kind = self + .obligations_for_self_ty(expected_vid) + .filter_map(|(tr, _)| self.tcx.fn_trait_kind_from_lang_item(tr.def_id())) + .fold(None, |best, cur| Some(best.map_or(cur, |best| cmp::min(best, cur)))); + + (expected_sig, expected_kind) + } + + /// Given a projection like "::Result == Y", we can deduce + /// everything we need to know about a closure or generator. 
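+    ///
+    /// For example (an illustrative sketch, with an assumed argument tuple), a projection
+    /// predicate like `<F as FnOnce<(u32,)>>::Output == bool` lets us conclude that the
+    /// closure has the signature `fn(u32) -> bool`.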
+ /// + /// The `cause_span` should be the span that caused us to + /// have this expected signature, or `None` if we can't readily + /// know that. + #[instrument(level = "debug", skip(self, cause_span))] + fn deduce_sig_from_projection( + &self, + cause_span: Option, + projection: ty::PolyProjectionPredicate<'tcx>, + ) -> Option> { + let tcx = self.tcx; + + let trait_def_id = projection.trait_def_id(tcx); + + let is_fn = tcx.fn_trait_kind_from_lang_item(trait_def_id).is_some(); + let gen_trait = tcx.require_lang_item(LangItem::Generator, cause_span); + let is_gen = gen_trait == trait_def_id; + if !is_fn && !is_gen { + debug!("not fn or generator"); + return None; + } + + if is_gen { + // Check that we deduce the signature from the `<_ as std::ops::Generator>::Return` + // associated item and not yield. + let return_assoc_item = self.tcx.associated_item_def_ids(gen_trait)[1]; + if return_assoc_item != projection.projection_def_id() { + debug!("not return assoc item of generator"); + return None; + } + } + + let input_tys = if is_fn { + let arg_param_ty = projection.skip_binder().projection_ty.substs.type_at(1); + let arg_param_ty = self.resolve_vars_if_possible(arg_param_ty); + debug!(?arg_param_ty); + + match arg_param_ty.kind() { + &ty::Tuple(tys) => tys, + _ => return None, + } + } else { + // Generators with a `()` resume type may be defined with 0 or 1 explicit arguments, + // else they must have exactly 1 argument. For now though, just give up in this case. + return None; + }; + + // Since this is a return parameter type it is safe to unwrap. + let ret_param_ty = projection.skip_binder().term.ty().unwrap(); + let ret_param_ty = self.resolve_vars_if_possible(ret_param_ty); + debug!(?ret_param_ty); + + let sig = projection.rebind(self.tcx.mk_fn_sig( + input_tys.iter(), + ret_param_ty, + false, + hir::Unsafety::Normal, + Abi::Rust, + )); + debug!(?sig); + + Some(ExpectedSig { cause_span, sig }) + } + + fn sig_of_closure( + &self, + hir_id: hir::HirId, + expr_def_id: DefId, + decl: &hir::FnDecl<'_>, + body: &hir::Body<'_>, + expected_sig: Option>, + ) -> ClosureSignatures<'tcx> { + if let Some(e) = expected_sig { + self.sig_of_closure_with_expectation(hir_id, expr_def_id, decl, body, e) + } else { + self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body) + } + } + + /// If there is no expected signature, then we will convert the + /// types that the user gave into a signature. + #[instrument(skip(self, hir_id, expr_def_id, decl, body), level = "debug")] + fn sig_of_closure_no_expectation( + &self, + hir_id: hir::HirId, + expr_def_id: DefId, + decl: &hir::FnDecl<'_>, + body: &hir::Body<'_>, + ) -> ClosureSignatures<'tcx> { + let bound_sig = self.supplied_sig_of_closure(hir_id, expr_def_id, decl, body); + + self.closure_sigs(expr_def_id, body, bound_sig) + } + + /// Invoked to compute the signature of a closure expression. This + /// combines any user-provided type annotations (e.g., `|x: u32| + /// -> u32 { .. }`) with the expected signature. + /// + /// The approach is as follows: + /// + /// - Let `S` be the (higher-ranked) signature that we derive from the user's annotations. + /// - Let `E` be the (higher-ranked) signature that we derive from the expectations, if any. + /// - If we have no expectation `E`, then the signature of the closure is `S`. + /// - Otherwise, the signature of the closure is E. Moreover: + /// - Skolemize the late-bound regions in `E`, yielding `E'`. 
+    ///   - Instantiate all the late-bound regions bound in the closure within `S`
+    ///     with fresh (existential) variables, yielding `S'`
+    ///   - Require that `E' = S'`
+    ///     - We could use some kind of subtyping relationship here,
+    ///       I imagine, but equality is easier and works fine for
+    ///       our purposes.
+    ///
+    /// The key intuition here is that the user's types must be valid
+    /// from "the inside" of the closure, but the expectation
+    /// ultimately drives the overall signature.
+    ///
+    /// # Examples
+    ///
+    /// ```ignore (illustrative)
+    /// fn with_closure<F>(_: F)
+    ///   where F: Fn(&u32) -> &u32 { .. }
+    ///
+    /// with_closure(|x: &u32| { ... })
+    /// ```
+    ///
+    /// Here:
+    /// - E would be `fn(&u32) -> &u32`.
+    /// - S would be `fn(&u32) -> ?T`
+    /// - E' is `&'!0 u32 -> &'!0 u32`
+    /// - S' is `&'?0 u32 -> ?T`
+    ///
+    /// S' can be unified with E' with `['?0 = '!0, ?T = &'!0 u32]`.
+    ///
+    /// # Arguments
+    ///
+    /// - `expr_def_id`: the `DefId` of the closure expression
+    /// - `decl`: the HIR declaration of the closure
+    /// - `body`: the body of the closure
+    /// - `expected_sig`: the expected signature (if any). Note that
+    ///   this is missing a binder: that is, there may be late-bound
+    ///   regions with depth 1, which are then bound by the closure.
+    #[instrument(skip(self, hir_id, expr_def_id, decl, body), level = "debug")]
+    fn sig_of_closure_with_expectation(
+        &self,
+        hir_id: hir::HirId,
+        expr_def_id: DefId,
+        decl: &hir::FnDecl<'_>,
+        body: &hir::Body<'_>,
+        expected_sig: ExpectedSig<'tcx>,
+    ) -> ClosureSignatures<'tcx> {
+        // Watch out for some surprises and just ignore the
+        // expectation if things don't seem to match up with what we
+        // expect.
+        if expected_sig.sig.c_variadic() != decl.c_variadic {
+            return self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body);
+        } else if expected_sig.sig.skip_binder().inputs_and_output.len() != decl.inputs.len() + 1 {
+            return self.sig_of_closure_with_mismatched_number_of_arguments(
+                expr_def_id,
+                decl,
+                body,
+                expected_sig,
+            );
+        }
+
+        // Create a `PolyFnSig`. Note the oddity that late bound
+        // regions appearing free in `expected_sig` are now bound up
+        // in this binder we are creating.
+        assert!(!expected_sig.sig.skip_binder().has_vars_bound_above(ty::INNERMOST));
+        let bound_sig = expected_sig.sig.map_bound(|sig| {
+            self.tcx.mk_fn_sig(
+                sig.inputs().iter().cloned(),
+                sig.output(),
+                sig.c_variadic,
+                hir::Unsafety::Normal,
+                Abi::RustCall,
+            )
+        });
+
+        // `deduce_expectations_from_expected_type` introduces
+        // late-bound lifetimes defined elsewhere, which we now
+        // anonymize away, so as not to confuse the user.
+        let bound_sig = self.tcx.anonymize_late_bound_regions(bound_sig);
+
+        let closure_sigs = self.closure_sigs(expr_def_id, body, bound_sig);
+
+        // Up till this point, we have ignored the annotations that the user
+        // gave. This function will check that they unify successfully.
+        // Along the way, it also writes out entries for types that the user
+        // wrote into our typeck results, which are then later used by the privacy
+        // check.
+ match self.check_supplied_sig_against_expectation( + hir_id, + expr_def_id, + decl, + body, + &closure_sigs, + ) { + Ok(infer_ok) => self.register_infer_ok_obligations(infer_ok), + Err(_) => return self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body), + } + + closure_sigs + } + + fn sig_of_closure_with_mismatched_number_of_arguments( + &self, + expr_def_id: DefId, + decl: &hir::FnDecl<'_>, + body: &hir::Body<'_>, + expected_sig: ExpectedSig<'tcx>, + ) -> ClosureSignatures<'tcx> { + let hir = self.tcx.hir(); + let expr_map_node = hir.get_if_local(expr_def_id).unwrap(); + let expected_args: Vec<_> = expected_sig + .sig + .skip_binder() + .inputs() + .iter() + .map(|ty| ArgKind::from_expected_ty(*ty, None)) + .collect(); + let (closure_span, found_args) = match self.get_fn_like_arguments(expr_map_node) { + Some((sp, args)) => (Some(sp), args), + None => (None, Vec::new()), + }; + let expected_span = + expected_sig.cause_span.unwrap_or_else(|| hir.span_if_local(expr_def_id).unwrap()); + self.report_arg_count_mismatch( + expected_span, + closure_span, + expected_args, + found_args, + true, + ) + .emit(); + + let error_sig = self.error_sig_of_closure(decl); + + self.closure_sigs(expr_def_id, body, error_sig) + } + + /// Enforce the user's types against the expectation. See + /// `sig_of_closure_with_expectation` for details on the overall + /// strategy. + fn check_supplied_sig_against_expectation( + &self, + hir_id: hir::HirId, + expr_def_id: DefId, + decl: &hir::FnDecl<'_>, + body: &hir::Body<'_>, + expected_sigs: &ClosureSignatures<'tcx>, + ) -> InferResult<'tcx, ()> { + // Get the signature S that the user gave. + // + // (See comment on `sig_of_closure_with_expectation` for the + // meaning of these letters.) + let supplied_sig = self.supplied_sig_of_closure(hir_id, expr_def_id, decl, body); + + debug!("check_supplied_sig_against_expectation: supplied_sig={:?}", supplied_sig); + + // FIXME(#45727): As discussed in [this comment][c1], naively + // forcing equality here actually results in suboptimal error + // messages in some cases. For now, if there would have been + // an obvious error, we fallback to declaring the type of the + // closure to be the one the user gave, which allows other + // error message code to trigger. + // + // However, I think [there is potential to do even better + // here][c2], since in *this* code we have the precise span of + // the type parameter in question in hand when we report the + // error. + // + // [c1]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341089706 + // [c2]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341096796 + self.commit_if_ok(|_| { + let mut all_obligations = vec![]; + + // The liberated version of this signature should be a subtype + // of the liberated form of the expectation. + for ((hir_ty, &supplied_ty), expected_ty) in iter::zip( + iter::zip( + decl.inputs, + supplied_sig.inputs().skip_binder(), // binder moved to (*) below + ), + expected_sigs.liberated_sig.inputs(), // `liberated_sig` is E'. + ) { + // Instantiate (this part of..) S to S', i.e., with fresh variables. + let supplied_ty = self.replace_bound_vars_with_fresh_vars( + hir_ty.span, + LateBoundRegionConversionTime::FnCall, + supplied_sig.inputs().rebind(supplied_ty), + ); // recreated from (*) above + + // Check that E' = S'. 
+ let cause = self.misc(hir_ty.span); + let InferOk { value: (), obligations } = + self.at(&cause, self.param_env).eq(*expected_ty, supplied_ty)?; + all_obligations.extend(obligations); + } + + let supplied_output_ty = self.replace_bound_vars_with_fresh_vars( + decl.output.span(), + LateBoundRegionConversionTime::FnCall, + supplied_sig.output(), + ); + let cause = &self.misc(decl.output.span()); + let InferOk { value: (), obligations } = self + .at(cause, self.param_env) + .eq(expected_sigs.liberated_sig.output(), supplied_output_ty)?; + all_obligations.extend(obligations); + + Ok(InferOk { value: (), obligations: all_obligations }) + }) + } + + /// If there is no expected signature, then we will convert the + /// types that the user gave into a signature. + /// + /// Also, record this closure signature for later. + #[instrument(skip(self, decl, body), level = "debug")] + fn supplied_sig_of_closure( + &self, + hir_id: hir::HirId, + expr_def_id: DefId, + decl: &hir::FnDecl<'_>, + body: &hir::Body<'_>, + ) -> ty::PolyFnSig<'tcx> { + let astconv: &dyn AstConv<'_> = self; + + trace!("decl = {:#?}", decl); + debug!(?body.generator_kind); + + let bound_vars = self.tcx.late_bound_vars(hir_id); + + // First, convert the types that the user supplied (if any). + let supplied_arguments = decl.inputs.iter().map(|a| astconv.ast_ty_to_ty(a)); + let supplied_return = match decl.output { + hir::FnRetTy::Return(ref output) => astconv.ast_ty_to_ty(&output), + hir::FnRetTy::DefaultReturn(_) => match body.generator_kind { + // In the case of the async block that we create for a function body, + // we expect the return type of the block to match that of the enclosing + // function. + Some(hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Fn)) => { + debug!("closure is async fn body"); + self.deduce_future_output_from_obligations(expr_def_id, body.id().hir_id) + .unwrap_or_else(|| { + // AFAIK, deducing the future output + // always succeeds *except* in error cases + // like #65159. I'd like to return Error + // here, but I can't because I can't + // easily (and locally) prove that we + // *have* reported an + // error. --nikomatsakis + astconv.ty_infer(None, decl.output.span()) + }) + } + + _ => astconv.ty_infer(None, decl.output.span()), + }, + }; + + let result = ty::Binder::bind_with_vars( + self.tcx.mk_fn_sig( + supplied_arguments, + supplied_return, + decl.c_variadic, + hir::Unsafety::Normal, + Abi::RustCall, + ), + bound_vars, + ); + + debug!(?result); + + let c_result = self.inh.infcx.canonicalize_response(result); + self.typeck_results.borrow_mut().user_provided_sigs.insert(expr_def_id, c_result); + + result + } + + /// Invoked when we are translating the generator that results + /// from desugaring an `async fn`. Returns the "sugared" return + /// type of the `async fn` -- that is, the return type that the + /// user specified. The "desugared" return type is an `impl + /// Future`, so we do this by searching through the + /// obligations to extract the `T`. 
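+    ///
+    /// A rough, illustrative sketch of the desugaring this relies on:
+    ///
+    /// ```ignore (illustrative)
+    /// async fn foo() -> u32 { 0 }
+    /// // is treated, for our purposes here, roughly like:
+    /// fn foo() -> impl Future<Output = u32> { /* generator body */ }
+    /// ```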
+ #[instrument(skip(self), level = "debug")] + fn deduce_future_output_from_obligations( + &self, + expr_def_id: DefId, + body_id: hir::HirId, + ) -> Option> { + let ret_coercion = self.ret_coercion.as_ref().unwrap_or_else(|| { + span_bug!(self.tcx.def_span(expr_def_id), "async fn generator outside of a fn") + }); + + let ret_ty = ret_coercion.borrow().expected_ty(); + let ret_ty = self.inh.infcx.shallow_resolve(ret_ty); + + let get_future_output = |predicate: ty::Predicate<'tcx>, span| { + // Search for a pending obligation like + // + // `::Output = T` + // + // where R is the return type we are expecting. This type `T` + // will be our output. + let bound_predicate = predicate.kind(); + if let ty::PredicateKind::Projection(proj_predicate) = bound_predicate.skip_binder() { + self.deduce_future_output_from_projection( + span, + bound_predicate.rebind(proj_predicate), + ) + } else { + None + } + }; + + let output_ty = match *ret_ty.kind() { + ty::Infer(ty::TyVar(ret_vid)) => { + self.obligations_for_self_ty(ret_vid).find_map(|(_, obligation)| { + get_future_output(obligation.predicate, obligation.cause.span) + })? + } + ty::Opaque(def_id, substs) => self + .tcx + .bound_explicit_item_bounds(def_id) + .transpose_iter() + .map(|e| e.map_bound(|e| *e).transpose_tuple2()) + .find_map(|(p, s)| get_future_output(p.subst(self.tcx, substs), s.0))?, + ty::Error(_) => return None, + _ => span_bug!( + self.tcx.def_span(expr_def_id), + "async fn generator return type not an inference variable" + ), + }; + + // async fn that have opaque types in their return type need to redo the conversion to inference variables + // as they fetch the still opaque version from the signature. + let InferOk { value: output_ty, obligations } = self + .replace_opaque_types_with_inference_vars( + output_ty, + body_id, + self.tcx.def_span(expr_def_id), + self.param_env, + ); + self.register_predicates(obligations); + + debug!("deduce_future_output_from_obligations: output_ty={:?}", output_ty); + Some(output_ty) + } + + /// Given a projection like + /// + /// `::Output = T` + /// + /// where `X` is some type that has no late-bound regions, returns + /// `Some(T)`. If the projection is for some other trait, returns + /// `None`. + fn deduce_future_output_from_projection( + &self, + cause_span: Span, + predicate: ty::PolyProjectionPredicate<'tcx>, + ) -> Option> { + debug!("deduce_future_output_from_projection(predicate={:?})", predicate); + + // We do not expect any bound regions in our predicate, so + // skip past the bound vars. + let Some(predicate) = predicate.no_bound_vars() else { + debug!("deduce_future_output_from_projection: has late-bound regions"); + return None; + }; + + // Check that this is a projection from the `Future` trait. + let trait_def_id = predicate.projection_ty.trait_def_id(self.tcx); + let future_trait = self.tcx.require_lang_item(LangItem::Future, Some(cause_span)); + if trait_def_id != future_trait { + debug!("deduce_future_output_from_projection: not a future"); + return None; + } + + // The `Future` trait has only one associated item, `Output`, + // so check that this is what we see. + let output_assoc_item = self.tcx.associated_item_def_ids(future_trait)[0]; + if output_assoc_item != predicate.projection_ty.item_def_id { + span_bug!( + cause_span, + "projecting associated item `{:?}` from future, which is not Output `{:?}`", + predicate.projection_ty.item_def_id, + output_assoc_item, + ); + } + + // Extract the type from the projection. 
Note that there can + // be no bound variables in this type because the "self type" + // does not have any regions in it. + let output_ty = self.resolve_vars_if_possible(predicate.term); + debug!("deduce_future_output_from_projection: output_ty={:?}", output_ty); + // This is a projection on a Fn trait so will always be a type. + Some(output_ty.ty().unwrap()) + } + + /// Converts the types that the user supplied, in case that doing + /// so should yield an error, but returns back a signature where + /// all parameters are of type `TyErr`. + fn error_sig_of_closure(&self, decl: &hir::FnDecl<'_>) -> ty::PolyFnSig<'tcx> { + let astconv: &dyn AstConv<'_> = self; + + let supplied_arguments = decl.inputs.iter().map(|a| { + // Convert the types that the user supplied (if any), but ignore them. + astconv.ast_ty_to_ty(a); + self.tcx.ty_error() + }); + + if let hir::FnRetTy::Return(ref output) = decl.output { + astconv.ast_ty_to_ty(&output); + } + + let result = ty::Binder::dummy(self.tcx.mk_fn_sig( + supplied_arguments, + self.tcx.ty_error(), + decl.c_variadic, + hir::Unsafety::Normal, + Abi::RustCall, + )); + + debug!("supplied_sig_of_closure: result={:?}", result); + + result + } + + fn closure_sigs( + &self, + expr_def_id: DefId, + body: &hir::Body<'_>, + bound_sig: ty::PolyFnSig<'tcx>, + ) -> ClosureSignatures<'tcx> { + let liberated_sig = self.tcx().liberate_late_bound_regions(expr_def_id, bound_sig); + let liberated_sig = self.inh.normalize_associated_types_in( + body.value.span, + body.value.hir_id, + self.param_env, + liberated_sig, + ); + ClosureSignatures { bound_sig, liberated_sig } + } +} diff --git a/compiler/rustc_typeck/src/check/coercion.rs b/compiler/rustc_typeck/src/check/coercion.rs new file mode 100644 index 000000000..2ed5f569b --- /dev/null +++ b/compiler/rustc_typeck/src/check/coercion.rs @@ -0,0 +1,1804 @@ +//! # Type Coercion +//! +//! Under certain circumstances we will coerce from one type to another, +//! for example by auto-borrowing. This occurs in situations where the +//! compiler has a firm 'expected type' that was supplied from the user, +//! and where the actual type is similar to that expected type in purpose +//! but not in representation (so actual subtyping is inappropriate). +//! +//! ## Reborrowing +//! +//! Note that if we are expecting a reference, we will *reborrow* +//! even if the argument provided was already a reference. This is +//! useful for freezing mut things (that is, when the expected type is &T +//! but you have &mut T) and also for avoiding the linearity +//! of mut things (when the expected is &mut T and you have &mut T). See +//! the various `src/test/ui/coerce/*.rs` tests for +//! examples of where this is useful. +//! +//! ## Subtle note +//! +//! When inferring the generic arguments of functions, the argument +//! order is relevant, which can lead to the following edge case: +//! +//! ```ignore (illustrative) +//! fn foo(a: T, b: T) { +//! // ... +//! } +//! +//! foo(&7i32, &mut 7i32); +//! // This compiles, as we first infer `T` to be `&i32`, +//! // and then coerce `&mut 7i32` to `&7i32`. +//! +//! foo(&mut 7i32, &7i32); +//! // This does not compile, as we first infer `T` to be `&mut i32` +//! // and are then unable to coerce `&7i32` to `&mut i32`. +//! 
``` + +use crate::astconv::AstConv; +use crate::check::FnCtxt; +use rustc_errors::{ + struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, +}; +use rustc_hir as hir; +use rustc_hir::def_id::DefId; +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_infer::infer::{Coercion, InferOk, InferResult}; +use rustc_infer::traits::{Obligation, TraitEngine, TraitEngineExt}; +use rustc_middle::lint::in_external_macro; +use rustc_middle::ty::adjustment::{ + Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, PointerCast, +}; +use rustc_middle::ty::error::TypeError; +use rustc_middle::ty::relate::RelateResult; +use rustc_middle::ty::subst::SubstsRef; +use rustc_middle::ty::visit::TypeVisitable; +use rustc_middle::ty::{self, ToPredicate, Ty, TypeAndMut}; +use rustc_session::parse::feature_err; +use rustc_span::symbol::sym; +use rustc_span::{self, BytePos, DesugaringKind, Span}; +use rustc_target::spec::abi::Abi; +use rustc_trait_selection::infer::InferCtxtExt as _; +use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _; +use rustc_trait_selection::traits::{self, ObligationCause, ObligationCauseCode}; + +use smallvec::{smallvec, SmallVec}; +use std::ops::Deref; + +struct Coerce<'a, 'tcx> { + fcx: &'a FnCtxt<'a, 'tcx>, + cause: ObligationCause<'tcx>, + use_lub: bool, + /// Determines whether or not allow_two_phase_borrow is set on any + /// autoref adjustments we create while coercing. We don't want to + /// allow deref coercions to create two-phase borrows, at least initially, + /// but we do need two-phase borrows for function argument reborrows. + /// See #47489 and #48598 + /// See docs on the "AllowTwoPhase" type for a more detailed discussion + allow_two_phase: AllowTwoPhase, +} + +impl<'a, 'tcx> Deref for Coerce<'a, 'tcx> { + type Target = FnCtxt<'a, 'tcx>; + fn deref(&self) -> &Self::Target { + &self.fcx + } +} + +type CoerceResult<'tcx> = InferResult<'tcx, (Vec>, Ty<'tcx>)>; + +/// Coercing a mutable reference to an immutable works, while +/// coercing `&T` to `&mut T` should be forbidden. +fn coerce_mutbls<'tcx>( + from_mutbl: hir::Mutability, + to_mutbl: hir::Mutability, +) -> RelateResult<'tcx, ()> { + match (from_mutbl, to_mutbl) { + (hir::Mutability::Mut, hir::Mutability::Mut | hir::Mutability::Not) + | (hir::Mutability::Not, hir::Mutability::Not) => Ok(()), + (hir::Mutability::Not, hir::Mutability::Mut) => Err(TypeError::Mutability), + } +} + +/// Do not require any adjustments, i.e. coerce `x -> x`. +fn identity(_: Ty<'_>) -> Vec> { + vec![] +} + +fn simple<'tcx>(kind: Adjust<'tcx>) -> impl FnOnce(Ty<'tcx>) -> Vec> { + move |target| vec![Adjustment { kind, target }] +} + +/// This always returns `Ok(...)`. 
+fn success<'tcx>( + adj: Vec>, + target: Ty<'tcx>, + obligations: traits::PredicateObligations<'tcx>, +) -> CoerceResult<'tcx> { + Ok(InferOk { value: (adj, target), obligations }) +} + +impl<'f, 'tcx> Coerce<'f, 'tcx> { + fn new( + fcx: &'f FnCtxt<'f, 'tcx>, + cause: ObligationCause<'tcx>, + allow_two_phase: AllowTwoPhase, + ) -> Self { + Coerce { fcx, cause, allow_two_phase, use_lub: false } + } + + fn unify(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> { + debug!("unify(a: {:?}, b: {:?}, use_lub: {})", a, b, self.use_lub); + self.commit_if_ok(|_| { + if self.use_lub { + self.at(&self.cause, self.fcx.param_env).lub(b, a) + } else { + self.at(&self.cause, self.fcx.param_env) + .sup(b, a) + .map(|InferOk { value: (), obligations }| InferOk { value: a, obligations }) + } + }) + } + + /// Unify two types (using sub or lub) and produce a specific coercion. + fn unify_and(&self, a: Ty<'tcx>, b: Ty<'tcx>, f: F) -> CoerceResult<'tcx> + where + F: FnOnce(Ty<'tcx>) -> Vec>, + { + self.unify(a, b) + .and_then(|InferOk { value: ty, obligations }| success(f(ty), ty, obligations)) + } + + #[instrument(skip(self))] + fn coerce(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> { + // First, remove any resolved type variables (at the top level, at least): + let a = self.shallow_resolve(a); + let b = self.shallow_resolve(b); + debug!("Coerce.tys({:?} => {:?})", a, b); + + // Just ignore error types. + if a.references_error() || b.references_error() { + return success(vec![], self.fcx.tcx.ty_error(), vec![]); + } + + // Coercing from `!` to any type is allowed: + if a.is_never() { + return success(simple(Adjust::NeverToAny)(b), b, vec![]); + } + + // Coercing *from* an unresolved inference variable means that + // we have no information about the source type. This will always + // ultimately fall back to some form of subtyping. + if a.is_ty_var() { + return self.coerce_from_inference_variable(a, b, identity); + } + + // Consider coercing the subtype to a DST + // + // NOTE: this is wrapped in a `commit_if_ok` because it creates + // a "spurious" type variable, and we don't want to have that + // type variable in memory if the coercion fails. + let unsize = self.commit_if_ok(|_| self.coerce_unsized(a, b)); + match unsize { + Ok(_) => { + debug!("coerce: unsize successful"); + return unsize; + } + Err(TypeError::ObjectUnsafeCoercion(did)) => { + debug!("coerce: unsize not object safe"); + return Err(TypeError::ObjectUnsafeCoercion(did)); + } + Err(error) => { + debug!(?error, "coerce: unsize failed"); + } + } + + // Examine the supertype and consider auto-borrowing. + match *b.kind() { + ty::RawPtr(mt_b) => { + return self.coerce_unsafe_ptr(a, b, mt_b.mutbl); + } + ty::Ref(r_b, _, mutbl_b) => { + return self.coerce_borrowed_pointer(a, b, r_b, mutbl_b); + } + _ => {} + } + + match *a.kind() { + ty::FnDef(..) => { + // Function items are coercible to any closure + // type; function pointers are not (that would + // require double indirection). + // Additionally, we permit coercion of function + // items to drop the unsafe qualifier. + self.coerce_from_fn_item(a, b) + } + ty::FnPtr(a_f) => { + // We permit coercion of fn pointers to drop the + // unsafe qualifier. + self.coerce_from_fn_pointer(a, a_f, b) + } + ty::Closure(closure_def_id_a, substs_a) => { + // Non-capturing closures are coercible to + // function pointers or unsafe function pointers. + // It cannot convert closures that require unsafe. 
+ self.coerce_closure_to_fn(a, closure_def_id_a, substs_a, b) + } + _ => { + // Otherwise, just use unification rules. + self.unify_and(a, b, identity) + } + } + } + + /// Coercing *from* an inference variable. In this case, we have no information + /// about the source type, so we can't really do a true coercion and we always + /// fall back to subtyping (`unify_and`). + fn coerce_from_inference_variable( + &self, + a: Ty<'tcx>, + b: Ty<'tcx>, + make_adjustments: impl FnOnce(Ty<'tcx>) -> Vec>, + ) -> CoerceResult<'tcx> { + debug!("coerce_from_inference_variable(a={:?}, b={:?})", a, b); + assert!(a.is_ty_var() && self.shallow_resolve(a) == a); + assert!(self.shallow_resolve(b) == b); + + if b.is_ty_var() { + // Two unresolved type variables: create a `Coerce` predicate. + let target_ty = if self.use_lub { + self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::LatticeVariable, + span: self.cause.span, + }) + } else { + b + }; + + let mut obligations = Vec::with_capacity(2); + for &source_ty in &[a, b] { + if source_ty != target_ty { + obligations.push(Obligation::new( + self.cause.clone(), + self.param_env, + ty::Binder::dummy(ty::PredicateKind::Coerce(ty::CoercePredicate { + a: source_ty, + b: target_ty, + })) + .to_predicate(self.tcx()), + )); + } + } + + debug!( + "coerce_from_inference_variable: two inference variables, target_ty={:?}, obligations={:?}", + target_ty, obligations + ); + let adjustments = make_adjustments(target_ty); + InferResult::Ok(InferOk { value: (adjustments, target_ty), obligations }) + } else { + // One unresolved type variable: just apply subtyping, we may be able + // to do something useful. + self.unify_and(a, b, make_adjustments) + } + } + + /// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`. + /// To match `A` with `B`, autoderef will be performed, + /// calling `deref`/`deref_mut` where necessary. + fn coerce_borrowed_pointer( + &self, + a: Ty<'tcx>, + b: Ty<'tcx>, + r_b: ty::Region<'tcx>, + mutbl_b: hir::Mutability, + ) -> CoerceResult<'tcx> { + debug!("coerce_borrowed_pointer(a={:?}, b={:?})", a, b); + + // If we have a parameter of type `&M T_a` and the value + // provided is `expr`, we will be adding an implicit borrow, + // meaning that we convert `f(expr)` to `f(&M *expr)`. Therefore, + // to type check, we will construct the type that `&M*expr` would + // yield. + + let (r_a, mt_a) = match *a.kind() { + ty::Ref(r_a, ty, mutbl) => { + let mt_a = ty::TypeAndMut { ty, mutbl }; + coerce_mutbls(mt_a.mutbl, mutbl_b)?; + (r_a, mt_a) + } + _ => return self.unify_and(a, b, identity), + }; + + let span = self.cause.span; + + let mut first_error = None; + let mut r_borrow_var = None; + let mut autoderef = self.autoderef(span, a); + let mut found = None; + + for (referent_ty, autoderefs) in autoderef.by_ref() { + if autoderefs == 0 { + // Don't let this pass, otherwise it would cause + // &T to autoref to &&T. + continue; + } + + // At this point, we have deref'd `a` to `referent_ty`. So + // imagine we are coercing from `&'a mut Vec` to `&'b mut [T]`. + // In the autoderef loop for `&'a mut Vec`, we would get + // three callbacks: + // + // - `&'a mut Vec` -- 0 derefs, just ignore it + // - `Vec` -- 1 deref + // - `[T]` -- 2 deref + // + // At each point after the first callback, we want to + // check to see whether this would match out target type + // (`&'b mut [T]`) if we autoref'd it. We can't just + // compare the referent types, though, because we still + // have to consider the mutability. 
E.g., in the case + // we've been considering, we have an `&mut` reference, so + // the `T` in `[T]` needs to be unified with equality. + // + // Therefore, we construct reference types reflecting what + // the types will be after we do the final auto-ref and + // compare those. Note that this means we use the target + // mutability [1], since it may be that we are coercing + // from `&mut T` to `&U`. + // + // One fine point concerns the region that we use. We + // choose the region such that the region of the final + // type that results from `unify` will be the region we + // want for the autoref: + // + // - if in sub mode, that means we want to use `'b` (the + // region from the target reference) for both + // pointers [2]. This is because sub mode (somewhat + // arbitrarily) returns the subtype region. In the case + // where we are coercing to a target type, we know we + // want to use that target type region (`'b`) because -- + // for the program to type-check -- it must be the + // smaller of the two. + // - One fine point. It may be surprising that we can + // use `'b` without relating `'a` and `'b`. The reason + // that this is ok is that what we produce is + // effectively a `&'b *x` expression (if you could + // annotate the region of a borrow), and regionck has + // code that adds edges from the region of a borrow + // (`'b`, here) into the regions in the borrowed + // expression (`*x`, here). (Search for "link".) + // - if in lub mode, things can get fairly complicated. The + // easiest thing is just to make a fresh + // region variable [4], which effectively means we defer + // the decision to region inference (and regionck, which will add + // some more edges to this variable). However, this can wind up + // creating a crippling number of variables in some cases -- + // e.g., #32278 -- so we optimize one particular case [3]. + // Let me try to explain with some examples: + // - The "running example" above represents the simple case, + // where we have one `&` reference at the outer level and + // ownership all the rest of the way down. In this case, + // we want `LUB('a, 'b)` as the resulting region. + // - However, if there are nested borrows, that region is + // too strong. Consider a coercion from `&'a &'x Rc` to + // `&'b T`. In this case, `'a` is actually irrelevant. + // The pointer we want is `LUB('x, 'b`). If we choose `LUB('a,'b)` + // we get spurious errors (`ui/regions-lub-ref-ref-rc.rs`). + // (The errors actually show up in borrowck, typically, because + // this extra edge causes the region `'a` to be inferred to something + // too big, which then results in borrowck errors.) + // - We could track the innermost shared reference, but there is already + // code in regionck that has the job of creating links between + // the region of a borrow and the regions in the thing being + // borrowed (here, `'a` and `'x`), and it knows how to handle + // all the various cases. So instead we just make a region variable + // and let regionck figure it out. 
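+            //
+            // For intuition, this loop is what makes user code like the
+            // following typecheck (an illustrative sketch; the function
+            // names are made up):
+            //
+            // ```ignore (illustrative)
+            // fn sum(xs: &[i32]) -> i32 { xs.iter().sum() }
+            // fn push_zero(xs: &mut Vec<i32>) { xs.push(0); }
+            //
+            // let mut v = vec![1, 2, 3];
+            // push_zero(&mut v);    // reborrow of `&mut Vec<i32>`
+            // let s = sum(&v);      // `&Vec<i32>` derefs and reborrows to `&[i32]`
+            // let r: &i32 = &mut 0; // `&mut i32` "freezes" to `&i32`
+            // ```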
+ let r = if !self.use_lub { + r_b // [2] above + } else if autoderefs == 1 { + r_a // [3] above + } else { + if r_borrow_var.is_none() { + // create var lazily, at most once + let coercion = Coercion(span); + let r = self.next_region_var(coercion); + r_borrow_var = Some(r); // [4] above + } + r_borrow_var.unwrap() + }; + let derefd_ty_a = self.tcx.mk_ref( + r, + TypeAndMut { + ty: referent_ty, + mutbl: mutbl_b, // [1] above + }, + ); + match self.unify(derefd_ty_a, b) { + Ok(ok) => { + found = Some(ok); + break; + } + Err(err) => { + if first_error.is_none() { + first_error = Some(err); + } + } + } + } + + // Extract type or return an error. We return the first error + // we got, which should be from relating the "base" type + // (e.g., in example above, the failure from relating `Vec` + // to the target type), since that should be the least + // confusing. + let Some(InferOk { value: ty, mut obligations }) = found else { + let err = first_error.expect("coerce_borrowed_pointer had no error"); + debug!("coerce_borrowed_pointer: failed with err = {:?}", err); + return Err(err); + }; + + if ty == a && mt_a.mutbl == hir::Mutability::Not && autoderef.step_count() == 1 { + // As a special case, if we would produce `&'a *x`, that's + // a total no-op. We end up with the type `&'a T` just as + // we started with. In that case, just skip it + // altogether. This is just an optimization. + // + // Note that for `&mut`, we DO want to reborrow -- + // otherwise, this would be a move, which might be an + // error. For example `foo(self.x)` where `self` and + // `self.x` both have `&mut `type would be a move of + // `self.x`, but we auto-coerce it to `foo(&mut *self.x)`, + // which is a borrow. + assert_eq!(mutbl_b, hir::Mutability::Not); // can only coerce &T -> &U + return success(vec![], ty, obligations); + } + + let InferOk { value: mut adjustments, obligations: o } = + self.adjust_steps_as_infer_ok(&autoderef); + obligations.extend(o); + obligations.extend(autoderef.into_obligations()); + + // Now apply the autoref. We have to extract the region out of + // the final ref type we got. + let ty::Ref(r_borrow, _, _) = ty.kind() else { + span_bug!(span, "expected a ref type, got {:?}", ty); + }; + let mutbl = match mutbl_b { + hir::Mutability::Not => AutoBorrowMutability::Not, + hir::Mutability::Mut => { + AutoBorrowMutability::Mut { allow_two_phase_borrow: self.allow_two_phase } + } + }; + adjustments.push(Adjustment { + kind: Adjust::Borrow(AutoBorrow::Ref(*r_borrow, mutbl)), + target: ty, + }); + + debug!("coerce_borrowed_pointer: succeeded ty={:?} adjustments={:?}", ty, adjustments); + + success(adjustments, ty, obligations) + } + + // &[T; n] or &mut [T; n] -> &[T] + // or &mut [T; n] -> &mut [T] + // or &Concrete -> &Trait, etc. + #[instrument(skip(self), level = "debug")] + fn coerce_unsized(&self, mut source: Ty<'tcx>, mut target: Ty<'tcx>) -> CoerceResult<'tcx> { + source = self.shallow_resolve(source); + target = self.shallow_resolve(target); + debug!(?source, ?target); + + // These 'if' statements require some explanation. + // The `CoerceUnsized` trait is special - it is only + // possible to write `impl CoerceUnsized for A` where + // A and B have 'matching' fields. This rules out the following + // two types of blanket impls: + // + // `impl CoerceUnsized for SomeType` + // `impl CoerceUnsized for T` + // + // Both of these trigger a special `CoerceUnsized`-related error (E0376) + // + // We can take advantage of this fact to avoid performing unnecessary work. 
+ // If either `source` or `target` is a type variable, then any applicable impl + // would need to be generic over the self-type (`impl CoerceUnsized for T`) + // or generic over the `CoerceUnsized` type parameter (`impl CoerceUnsized for + // SomeType`). + // + // However, these are exactly the kinds of impls which are forbidden by + // the compiler! Therefore, we can be sure that coercion will always fail + // when either the source or target type is a type variable. This allows us + // to skip performing any trait selection, and immediately bail out. + if source.is_ty_var() { + debug!("coerce_unsized: source is a TyVar, bailing out"); + return Err(TypeError::Mismatch); + } + if target.is_ty_var() { + debug!("coerce_unsized: target is a TyVar, bailing out"); + return Err(TypeError::Mismatch); + } + + let traits = + (self.tcx.lang_items().unsize_trait(), self.tcx.lang_items().coerce_unsized_trait()); + let (Some(unsize_did), Some(coerce_unsized_did)) = traits else { + debug!("missing Unsize or CoerceUnsized traits"); + return Err(TypeError::Mismatch); + }; + + // Note, we want to avoid unnecessary unsizing. We don't want to coerce to + // a DST unless we have to. This currently comes out in the wash since + // we can't unify [T] with U. But to properly support DST, we need to allow + // that, at which point we will need extra checks on the target here. + + // Handle reborrows before selecting `Source: CoerceUnsized`. + let reborrow = match (source.kind(), target.kind()) { + (&ty::Ref(_, ty_a, mutbl_a), &ty::Ref(_, _, mutbl_b)) => { + coerce_mutbls(mutbl_a, mutbl_b)?; + + let coercion = Coercion(self.cause.span); + let r_borrow = self.next_region_var(coercion); + let mutbl = match mutbl_b { + hir::Mutability::Not => AutoBorrowMutability::Not, + hir::Mutability::Mut => AutoBorrowMutability::Mut { + // We don't allow two-phase borrows here, at least for initial + // implementation. If it happens that this coercion is a function argument, + // the reborrow in coerce_borrowed_ptr will pick it up. + allow_two_phase_borrow: AllowTwoPhase::No, + }, + }; + Some(( + Adjustment { kind: Adjust::Deref(None), target: ty_a }, + Adjustment { + kind: Adjust::Borrow(AutoBorrow::Ref(r_borrow, mutbl)), + target: self + .tcx + .mk_ref(r_borrow, ty::TypeAndMut { mutbl: mutbl_b, ty: ty_a }), + }, + )) + } + (&ty::Ref(_, ty_a, mt_a), &ty::RawPtr(ty::TypeAndMut { mutbl: mt_b, .. })) => { + coerce_mutbls(mt_a, mt_b)?; + + Some(( + Adjustment { kind: Adjust::Deref(None), target: ty_a }, + Adjustment { + kind: Adjust::Borrow(AutoBorrow::RawPtr(mt_b)), + target: self.tcx.mk_ptr(ty::TypeAndMut { mutbl: mt_b, ty: ty_a }), + }, + )) + } + _ => None, + }; + let coerce_source = reborrow.as_ref().map_or(source, |&(_, ref r)| r.target); + + // Setup either a subtyping or a LUB relationship between + // the `CoerceUnsized` target type and the expected type. + // We only have the latter, so we use an inference variable + // for the former and let type inference do the rest. + let origin = TypeVariableOrigin { + kind: TypeVariableOriginKind::MiscVariable, + span: self.cause.span, + }; + let coerce_target = self.next_ty_var(origin); + let mut coercion = self.unify_and(coerce_target, target, |target| { + let unsize = Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), target }; + match reborrow { + None => vec![unsize], + Some((ref deref, ref autoref)) => vec![deref.clone(), autoref.clone(), unsize], + } + })?; + + let mut selcx = traits::SelectionContext::new(self); + + // Create an obligation for `Source: CoerceUnsized`. 
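+        // For intuition, the obligation built below is an instance like
+        // `&[i32; 3]: CoerceUnsized<&[i32]>` or
+        // `Box<String>: CoerceUnsized<Box<dyn Display>>`, i.e. what makes
+        // source-level coercions such as these admissible (an illustrative
+        // sketch):
+        //
+        // ```ignore (illustrative)
+        // let xs: &[i32] = &[1, 2, 3];
+        // let d: Box<dyn std::fmt::Display> = Box::new(String::from("x"));
+        // ```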
+ let cause = ObligationCause::new( + self.cause.span, + self.body_id, + ObligationCauseCode::Coercion { source, target }, + ); + + // Use a FIFO queue for this custom fulfillment procedure. + // + // A Vec (or SmallVec) is not a natural choice for a queue. However, + // this code path is hot, and this queue usually has a max length of 1 + // and almost never more than 3. By using a SmallVec we avoid an + // allocation, at the (very small) cost of (occasionally) having to + // shift subsequent elements down when removing the front element. + let mut queue: SmallVec<[_; 4]> = smallvec![traits::predicate_for_trait_def( + self.tcx, + self.fcx.param_env, + cause, + coerce_unsized_did, + 0, + coerce_source, + &[coerce_target.into()] + )]; + + let mut has_unsized_tuple_coercion = false; + let mut has_trait_upcasting_coercion = None; + + // Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid + // emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where + // inference might unify those two inner type variables later. + let traits = [coerce_unsized_did, unsize_did]; + while !queue.is_empty() { + let obligation = queue.remove(0); + debug!("coerce_unsized resolve step: {:?}", obligation); + let bound_predicate = obligation.predicate.kind(); + let trait_pred = match bound_predicate.skip_binder() { + ty::PredicateKind::Trait(trait_pred) if traits.contains(&trait_pred.def_id()) => { + if unsize_did == trait_pred.def_id() { + let self_ty = trait_pred.self_ty(); + let unsize_ty = trait_pred.trait_ref.substs[1].expect_ty(); + if let (ty::Dynamic(ref data_a, ..), ty::Dynamic(ref data_b, ..)) = + (self_ty.kind(), unsize_ty.kind()) + && data_a.principal_def_id() != data_b.principal_def_id() + { + debug!("coerce_unsized: found trait upcasting coercion"); + has_trait_upcasting_coercion = Some((self_ty, unsize_ty)); + } + if let ty::Tuple(..) = unsize_ty.kind() { + debug!("coerce_unsized: found unsized tuple coercion"); + has_unsized_tuple_coercion = true; + } + } + bound_predicate.rebind(trait_pred) + } + _ => { + coercion.obligations.push(obligation); + continue; + } + }; + match selcx.select(&obligation.with(trait_pred)) { + // Uncertain or unimplemented. + Ok(None) => { + if trait_pred.def_id() == unsize_did { + let trait_pred = self.resolve_vars_if_possible(trait_pred); + let self_ty = trait_pred.skip_binder().self_ty(); + let unsize_ty = trait_pred.skip_binder().trait_ref.substs[1].expect_ty(); + debug!("coerce_unsized: ambiguous unsize case for {:?}", trait_pred); + match (&self_ty.kind(), &unsize_ty.kind()) { + (ty::Infer(ty::TyVar(v)), ty::Dynamic(..)) + if self.type_var_is_sized(*v) => + { + debug!("coerce_unsized: have sized infer {:?}", v); + coercion.obligations.push(obligation); + // `$0: Unsize` where we know that `$0: Sized`, try going + // for unsizing. + } + _ => { + // Some other case for `$0: Unsize`. Note that we + // hit this case even if `Something` is a sized type, so just + // don't do the coercion. + debug!("coerce_unsized: ambiguous unsize"); + return Err(TypeError::Mismatch); + } + } + } else { + debug!("coerce_unsized: early return - ambiguous"); + return Err(TypeError::Mismatch); + } + } + Err(traits::Unimplemented) => { + debug!("coerce_unsized: early return - can't prove obligation"); + return Err(TypeError::Mismatch); + } + + // Object safety violations or miscellaneous. 
+ Err(err) => { + self.report_selection_error(obligation.clone(), &obligation, &err, false); + // Treat this like an obligation and follow through + // with the unsizing - the lack of a coercion should + // be silent, as it causes a type mismatch later. + } + + Ok(Some(impl_source)) => queue.extend(impl_source.nested_obligations()), + } + } + + if has_unsized_tuple_coercion && !self.tcx.features().unsized_tuple_coercion { + feature_err( + &self.tcx.sess.parse_sess, + sym::unsized_tuple_coercion, + self.cause.span, + "unsized tuple coercion is not stable enough for use and is subject to change", + ) + .emit(); + } + + if let Some((sub, sup)) = has_trait_upcasting_coercion + && !self.tcx().features().trait_upcasting + { + // Renders better when we erase regions, since they're not really the point here. + let (sub, sup) = self.tcx.erase_regions((sub, sup)); + let mut err = feature_err( + &self.tcx.sess.parse_sess, + sym::trait_upcasting, + self.cause.span, + &format!("cannot cast `{sub}` to `{sup}`, trait upcasting coercion is experimental"), + ); + err.note(&format!("required when coercing `{source}` into `{target}`")); + err.emit(); + } + + Ok(coercion) + } + + fn coerce_from_safe_fn( + &self, + a: Ty<'tcx>, + fn_ty_a: ty::PolyFnSig<'tcx>, + b: Ty<'tcx>, + to_unsafe: F, + normal: G, + ) -> CoerceResult<'tcx> + where + F: FnOnce(Ty<'tcx>) -> Vec>, + G: FnOnce(Ty<'tcx>) -> Vec>, + { + self.commit_if_ok(|snapshot| { + let result = if let ty::FnPtr(fn_ty_b) = b.kind() + && let (hir::Unsafety::Normal, hir::Unsafety::Unsafe) = + (fn_ty_a.unsafety(), fn_ty_b.unsafety()) + { + let unsafe_a = self.tcx.safe_to_unsafe_fn_ty(fn_ty_a); + self.unify_and(unsafe_a, b, to_unsafe) + } else { + self.unify_and(a, b, normal) + }; + + // FIXME(#73154): This is a hack. Currently LUB can generate + // unsolvable constraints. Additionally, it returns `a` + // unconditionally, even when the "LUB" is `b`. In the future, we + // want the coerced type to be the actual supertype of these two, + // but for now, we want to just error to ensure we don't lock + // ourselves into a specific behavior with NLL. + self.leak_check(false, snapshot)?; + + result + }) + } + + fn coerce_from_fn_pointer( + &self, + a: Ty<'tcx>, + fn_ty_a: ty::PolyFnSig<'tcx>, + b: Ty<'tcx>, + ) -> CoerceResult<'tcx> { + //! Attempts to coerce from the type of a Rust function item + //! into a closure or a `proc`. + //! + + let b = self.shallow_resolve(b); + debug!("coerce_from_fn_pointer(a={:?}, b={:?})", a, b); + + self.coerce_from_safe_fn( + a, + fn_ty_a, + b, + simple(Adjust::Pointer(PointerCast::UnsafeFnPointer)), + identity, + ) + } + + fn coerce_from_fn_item(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> { + //! Attempts to coerce from the type of a Rust function item + //! into a closure or a `proc`. + + let b = self.shallow_resolve(b); + let InferOk { value: b, mut obligations } = + self.normalize_associated_types_in_as_infer_ok(self.cause.span, b); + debug!("coerce_from_fn_item(a={:?}, b={:?})", a, b); + + match b.kind() { + ty::FnPtr(b_sig) => { + let a_sig = a.fn_sig(self.tcx); + if let ty::FnDef(def_id, _) = *a.kind() { + // Intrinsics are not coercible to function pointers + if self.tcx.is_intrinsic(def_id) { + return Err(TypeError::IntrinsicCast); + } + + // Safe `#[target_feature]` functions are not assignable to safe fn pointers (RFC 2396). 
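+                    //
+                    // A sketch of the user-facing effect (assuming the
+                    // still-gated `target_feature_11` form of a safe
+                    // `#[target_feature]` fn):
+                    //
+                    // ```ignore (illustrative)
+                    // #[target_feature(enable = "avx2")]
+                    // fn fast() {}
+                    //
+                    // let f: fn() = fast; // rejected below: through `f`, safe code
+                    //                     // could call `fast` without any check
+                    //                     // that AVX2 is actually available
+                    // ```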
+ + if b_sig.unsafety() == hir::Unsafety::Normal + && !self.tcx.codegen_fn_attrs(def_id).target_features.is_empty() + { + return Err(TypeError::TargetFeatureCast(def_id)); + } + } + + let InferOk { value: a_sig, obligations: o1 } = + self.normalize_associated_types_in_as_infer_ok(self.cause.span, a_sig); + obligations.extend(o1); + + let a_fn_pointer = self.tcx.mk_fn_ptr(a_sig); + let InferOk { value, obligations: o2 } = self.coerce_from_safe_fn( + a_fn_pointer, + a_sig, + b, + |unsafe_ty| { + vec![ + Adjustment { + kind: Adjust::Pointer(PointerCast::ReifyFnPointer), + target: a_fn_pointer, + }, + Adjustment { + kind: Adjust::Pointer(PointerCast::UnsafeFnPointer), + target: unsafe_ty, + }, + ] + }, + simple(Adjust::Pointer(PointerCast::ReifyFnPointer)), + )?; + + obligations.extend(o2); + Ok(InferOk { value, obligations }) + } + _ => self.unify_and(a, b, identity), + } + } + + fn coerce_closure_to_fn( + &self, + a: Ty<'tcx>, + closure_def_id_a: DefId, + substs_a: SubstsRef<'tcx>, + b: Ty<'tcx>, + ) -> CoerceResult<'tcx> { + //! Attempts to coerce from the type of a non-capturing closure + //! into a function pointer. + //! + + let b = self.shallow_resolve(b); + + match b.kind() { + // At this point we haven't done capture analysis, which means + // that the ClosureSubsts just contains an inference variable instead + // of tuple of captured types. + // + // All we care here is if any variable is being captured and not the exact paths, + // so we check `upvars_mentioned` for root variables being captured. + ty::FnPtr(fn_ty) + if self + .tcx + .upvars_mentioned(closure_def_id_a.expect_local()) + .map_or(true, |u| u.is_empty()) => + { + // We coerce the closure, which has fn type + // `extern "rust-call" fn((arg0,arg1,...)) -> _` + // to + // `fn(arg0,arg1,...) -> _` + // or + // `unsafe fn(arg0,arg1,...) -> _` + let closure_sig = substs_a.as_closure().sig(); + let unsafety = fn_ty.unsafety(); + let pointer_ty = + self.tcx.mk_fn_ptr(self.tcx.signature_unclosure(closure_sig, unsafety)); + debug!("coerce_closure_to_fn(a={:?}, b={:?}, pty={:?})", a, b, pointer_ty); + self.unify_and( + pointer_ty, + b, + simple(Adjust::Pointer(PointerCast::ClosureFnPointer(unsafety))), + ) + } + _ => self.unify_and(a, b, identity), + } + } + + fn coerce_unsafe_ptr( + &self, + a: Ty<'tcx>, + b: Ty<'tcx>, + mutbl_b: hir::Mutability, + ) -> CoerceResult<'tcx> { + debug!("coerce_unsafe_ptr(a={:?}, b={:?})", a, b); + + let (is_ref, mt_a) = match *a.kind() { + ty::Ref(_, ty, mutbl) => (true, ty::TypeAndMut { ty, mutbl }), + ty::RawPtr(mt) => (false, mt), + _ => return self.unify_and(a, b, identity), + }; + coerce_mutbls(mt_a.mutbl, mutbl_b)?; + + // Check that the types which they point at are compatible. + let a_unsafe = self.tcx.mk_ptr(ty::TypeAndMut { mutbl: mutbl_b, ty: mt_a.ty }); + // Although references and unsafe ptrs have the same + // representation, we still register an Adjust::DerefRef so that + // regionck knows that the region for `a` must be valid here. + if is_ref { + self.unify_and(a_unsafe, b, |target| { + vec![ + Adjustment { kind: Adjust::Deref(None), target: mt_a.ty }, + Adjustment { kind: Adjust::Borrow(AutoBorrow::RawPtr(mutbl_b)), target }, + ] + }) + } else if mt_a.mutbl != mutbl_b { + self.unify_and(a_unsafe, b, simple(Adjust::Pointer(PointerCast::MutToConstPointer))) + } else { + self.unify_and(a_unsafe, b, identity) + } + } +} + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + /// Attempt to coerce an expression to a type, and return the + /// adjusted type of the expression, if successful. 
+ /// Adjustments are only recorded if the coercion succeeded. + /// The expressions *must not* have any pre-existing adjustments. + pub fn try_coerce( + &self, + expr: &hir::Expr<'_>, + expr_ty: Ty<'tcx>, + target: Ty<'tcx>, + allow_two_phase: AllowTwoPhase, + cause: Option>, + ) -> RelateResult<'tcx, Ty<'tcx>> { + let source = self.resolve_vars_with_obligations(expr_ty); + debug!("coercion::try({:?}: {:?} -> {:?})", expr, source, target); + + let cause = + cause.unwrap_or_else(|| self.cause(expr.span, ObligationCauseCode::ExprAssignable)); + let coerce = Coerce::new(self, cause, allow_two_phase); + let ok = self.commit_if_ok(|_| coerce.coerce(source, target))?; + + let (adjustments, _) = self.register_infer_ok_obligations(ok); + self.apply_adjustments(expr, adjustments); + Ok(if expr_ty.references_error() { self.tcx.ty_error() } else { target }) + } + + /// Same as `try_coerce()`, but without side-effects. + /// + /// Returns false if the coercion creates any obligations that result in + /// errors. + pub fn can_coerce(&self, expr_ty: Ty<'tcx>, target: Ty<'tcx>) -> bool { + let source = self.resolve_vars_with_obligations(expr_ty); + debug!("coercion::can_with_predicates({:?} -> {:?})", source, target); + + let cause = self.cause(rustc_span::DUMMY_SP, ObligationCauseCode::ExprAssignable); + // We don't ever need two-phase here since we throw out the result of the coercion + let coerce = Coerce::new(self, cause, AllowTwoPhase::No); + self.probe(|_| { + let Ok(ok) = coerce.coerce(source, target) else { + return false; + }; + let mut fcx = traits::FulfillmentContext::new_in_snapshot(); + fcx.register_predicate_obligations(self, ok.obligations); + fcx.select_where_possible(&self).is_empty() + }) + } + + /// Given a type and a target type, this function will calculate and return + /// how many dereference steps needed to achieve `expr_ty <: target`. If + /// it's not possible, return `None`. + pub fn deref_steps(&self, expr_ty: Ty<'tcx>, target: Ty<'tcx>) -> Option { + let cause = self.cause(rustc_span::DUMMY_SP, ObligationCauseCode::ExprAssignable); + // We don't ever need two-phase here since we throw out the result of the coercion + let coerce = Coerce::new(self, cause, AllowTwoPhase::No); + coerce + .autoderef(rustc_span::DUMMY_SP, expr_ty) + .find_map(|(ty, steps)| self.probe(|_| coerce.unify(ty, target)).ok().map(|_| steps)) + } + + /// Given a type, this function will calculate and return the type given + /// for `::Target` only if `Ty` also implements `DerefMut`. + /// + /// This function is for diagnostics only, since it does not register + /// trait or region sub-obligations. (presumably we could, but it's not + /// particularly important for diagnostics...) + pub fn deref_once_mutably_for_diagnostic(&self, expr_ty: Ty<'tcx>) -> Option> { + self.autoderef(rustc_span::DUMMY_SP, expr_ty).nth(1).and_then(|(deref_ty, _)| { + self.infcx + .type_implements_trait( + self.tcx.lang_items().deref_mut_trait()?, + expr_ty, + ty::List::empty(), + self.param_env, + ) + .may_apply() + .then(|| deref_ty) + }) + } + + /// Given some expressions, their known unified type and another expression, + /// tries to unify the types, potentially inserting coercions on any of the + /// provided expressions and returns their LUB (aka "common supertype"). + /// + /// This is really an internal helper. From outside the coercion + /// module, you should instantiate a `CoerceMany` instance. 
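+    ///
+    /// As a user-level illustration of what the LUB buys us (a sketch;
+    /// `negate` is an assumed local):
+    ///
+    /// ```ignore (illustrative)
+    /// // The two arms have two distinct closure types; their LUB is the
+    /// // function pointer type `fn(i32) -> i32`.
+    /// let f = if negate { |n: i32| -n } else { |n: i32| n };
+    /// let _: fn(i32) -> i32 = f;
+    /// ```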
+ fn try_find_coercion_lub( + &self, + cause: &ObligationCause<'tcx>, + exprs: &[E], + prev_ty: Ty<'tcx>, + new: &hir::Expr<'_>, + new_ty: Ty<'tcx>, + ) -> RelateResult<'tcx, Ty<'tcx>> + where + E: AsCoercionSite, + { + let prev_ty = self.resolve_vars_with_obligations(prev_ty); + let new_ty = self.resolve_vars_with_obligations(new_ty); + debug!( + "coercion::try_find_coercion_lub({:?}, {:?}, exprs={:?} exprs)", + prev_ty, + new_ty, + exprs.len() + ); + + // The following check fixes #88097, where the compiler erroneously + // attempted to coerce a closure type to itself via a function pointer. + if prev_ty == new_ty { + return Ok(prev_ty); + } + + // Special-case that coercion alone cannot handle: + // Function items or non-capturing closures of differing IDs or InternalSubsts. + let (a_sig, b_sig) = { + #[allow(rustc::usage_of_ty_tykind)] + let is_capturing_closure = |ty: &ty::TyKind<'tcx>| { + if let &ty::Closure(closure_def_id, _substs) = ty { + self.tcx.upvars_mentioned(closure_def_id.expect_local()).is_some() + } else { + false + } + }; + if is_capturing_closure(prev_ty.kind()) || is_capturing_closure(new_ty.kind()) { + (None, None) + } else { + match (prev_ty.kind(), new_ty.kind()) { + (ty::FnDef(..), ty::FnDef(..)) => { + // Don't reify if the function types have a LUB, i.e., they + // are the same function and their parameters have a LUB. + match self + .commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty)) + { + // We have a LUB of prev_ty and new_ty, just return it. + Ok(ok) => return Ok(self.register_infer_ok_obligations(ok)), + Err(_) => { + (Some(prev_ty.fn_sig(self.tcx)), Some(new_ty.fn_sig(self.tcx))) + } + } + } + (ty::Closure(_, substs), ty::FnDef(..)) => { + let b_sig = new_ty.fn_sig(self.tcx); + let a_sig = self + .tcx + .signature_unclosure(substs.as_closure().sig(), b_sig.unsafety()); + (Some(a_sig), Some(b_sig)) + } + (ty::FnDef(..), ty::Closure(_, substs)) => { + let a_sig = prev_ty.fn_sig(self.tcx); + let b_sig = self + .tcx + .signature_unclosure(substs.as_closure().sig(), a_sig.unsafety()); + (Some(a_sig), Some(b_sig)) + } + (ty::Closure(_, substs_a), ty::Closure(_, substs_b)) => ( + Some(self.tcx.signature_unclosure( + substs_a.as_closure().sig(), + hir::Unsafety::Normal, + )), + Some(self.tcx.signature_unclosure( + substs_b.as_closure().sig(), + hir::Unsafety::Normal, + )), + ), + _ => (None, None), + } + } + }; + if let (Some(a_sig), Some(b_sig)) = (a_sig, b_sig) { + // Intrinsics are not coercible to function pointers. + if a_sig.abi() == Abi::RustIntrinsic + || a_sig.abi() == Abi::PlatformIntrinsic + || b_sig.abi() == Abi::RustIntrinsic + || b_sig.abi() == Abi::PlatformIntrinsic + { + return Err(TypeError::IntrinsicCast); + } + // The signature must match. + let a_sig = self.normalize_associated_types_in(new.span, a_sig); + let b_sig = self.normalize_associated_types_in(new.span, b_sig); + let sig = self + .at(cause, self.param_env) + .trace(prev_ty, new_ty) + .lub(a_sig, b_sig) + .map(|ok| self.register_infer_ok_obligations(ok))?; + + // Reify both sides and return the reified fn pointer type. + let fn_ptr = self.tcx.mk_fn_ptr(sig); + let prev_adjustment = match prev_ty.kind() { + ty::Closure(..) => Adjust::Pointer(PointerCast::ClosureFnPointer(a_sig.unsafety())), + ty::FnDef(..) => Adjust::Pointer(PointerCast::ReifyFnPointer), + _ => unreachable!(), + }; + let next_adjustment = match new_ty.kind() { + ty::Closure(..) => Adjust::Pointer(PointerCast::ClosureFnPointer(b_sig.unsafety())), + ty::FnDef(..) 
=> Adjust::Pointer(PointerCast::ReifyFnPointer), + _ => unreachable!(), + }; + for expr in exprs.iter().map(|e| e.as_coercion_site()) { + self.apply_adjustments( + expr, + vec![Adjustment { kind: prev_adjustment.clone(), target: fn_ptr }], + ); + } + self.apply_adjustments(new, vec![Adjustment { kind: next_adjustment, target: fn_ptr }]); + return Ok(fn_ptr); + } + + // Configure a Coerce instance to compute the LUB. + // We don't allow two-phase borrows on any autorefs this creates since we + // probably aren't processing function arguments here and even if we were, + // they're going to get autorefed again anyway and we can apply 2-phase borrows + // at that time. + let mut coerce = Coerce::new(self, cause.clone(), AllowTwoPhase::No); + coerce.use_lub = true; + + // First try to coerce the new expression to the type of the previous ones, + // but only if the new expression has no coercion already applied to it. + let mut first_error = None; + if !self.typeck_results.borrow().adjustments().contains_key(new.hir_id) { + let result = self.commit_if_ok(|_| coerce.coerce(new_ty, prev_ty)); + match result { + Ok(ok) => { + let (adjustments, target) = self.register_infer_ok_obligations(ok); + self.apply_adjustments(new, adjustments); + debug!( + "coercion::try_find_coercion_lub: was able to coerce from new type {:?} to previous type {:?} ({:?})", + new_ty, prev_ty, target + ); + return Ok(target); + } + Err(e) => first_error = Some(e), + } + } + + // Then try to coerce the previous expressions to the type of the new one. + // This requires ensuring there are no coercions applied to *any* of the + // previous expressions, other than noop reborrows (ignoring lifetimes). + for expr in exprs { + let expr = expr.as_coercion_site(); + let noop = match self.typeck_results.borrow().expr_adjustments(expr) { + &[ + Adjustment { kind: Adjust::Deref(_), .. }, + Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(_, mutbl_adj)), .. }, + ] => { + match *self.node_ty(expr.hir_id).kind() { + ty::Ref(_, _, mt_orig) => { + let mutbl_adj: hir::Mutability = mutbl_adj.into(); + // Reborrow that we can safely ignore, because + // the next adjustment can only be a Deref + // which will be merged into it. + mutbl_adj == mt_orig + } + _ => false, + } + } + &[Adjustment { kind: Adjust::NeverToAny, .. }] | &[] => true, + _ => false, + }; + + if !noop { + debug!( + "coercion::try_find_coercion_lub: older expression {:?} had adjustments, requiring LUB", + expr, + ); + + return self + .commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty)) + .map(|ok| self.register_infer_ok_obligations(ok)); + } + } + + match self.commit_if_ok(|_| coerce.coerce(prev_ty, new_ty)) { + Err(_) => { + // Avoid giving strange errors on failed attempts. + if let Some(e) = first_error { + Err(e) + } else { + self.commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty)) + .map(|ok| self.register_infer_ok_obligations(ok)) + } + } + Ok(ok) => { + let (adjustments, target) = self.register_infer_ok_obligations(ok); + for expr in exprs { + let expr = expr.as_coercion_site(); + self.apply_adjustments(expr, adjustments.clone()); + } + debug!( + "coercion::try_find_coercion_lub: was able to coerce previous type {:?} to new type {:?} ({:?})", + prev_ty, new_ty, target + ); + Ok(target) + } + } + } +} + +/// CoerceMany encapsulates the pattern you should use when you have +/// many expressions that are all getting coerced to a common +/// type. 
This arises, for example, when you have a match (the result +/// of each arm is coerced to a common type). It also arises in less +/// obvious places, such as when you have many `break foo` expressions +/// that target the same loop, or the various `return` expressions in +/// a function. +/// +/// The basic protocol is as follows: +/// +/// - Instantiate the `CoerceMany` with an initial `expected_ty`. +/// This will also serve as the "starting LUB". The expectation is +/// that this type is something which all of the expressions *must* +/// be coercible to. Use a fresh type variable if needed. +/// - For each expression whose result is to be coerced, invoke `coerce()` with. +/// - In some cases we wish to coerce "non-expressions" whose types are implicitly +/// unit. This happens for example if you have a `break` with no expression, +/// or an `if` with no `else`. In that case, invoke `coerce_forced_unit()`. +/// - `coerce()` and `coerce_forced_unit()` may report errors. They hide this +/// from you so that you don't have to worry your pretty head about it. +/// But if an error is reported, the final type will be `err`. +/// - Invoking `coerce()` may cause us to go and adjust the "adjustments" on +/// previously coerced expressions. +/// - When all done, invoke `complete()`. This will return the LUB of +/// all your expressions. +/// - WARNING: I don't believe this final type is guaranteed to be +/// related to your initial `expected_ty` in any particular way, +/// although it will typically be a subtype, so you should check it. +/// - Invoking `complete()` may cause us to go and adjust the "adjustments" on +/// previously coerced expressions. +/// +/// Example: +/// +/// ```ignore (illustrative) +/// let mut coerce = CoerceMany::new(expected_ty); +/// for expr in exprs { +/// let expr_ty = fcx.check_expr_with_expectation(expr, expected); +/// coerce.coerce(fcx, &cause, expr, expr_ty); +/// } +/// let final_ty = coerce.complete(fcx); +/// ``` +pub struct CoerceMany<'tcx, 'exprs, E: AsCoercionSite> { + expected_ty: Ty<'tcx>, + final_ty: Option>, + expressions: Expressions<'tcx, 'exprs, E>, + pushed: usize, +} + +/// The type of a `CoerceMany` that is storing up the expressions into +/// a buffer. We use this in `check/mod.rs` for things like `break`. +pub type DynamicCoerceMany<'tcx> = CoerceMany<'tcx, 'tcx, &'tcx hir::Expr<'tcx>>; + +enum Expressions<'tcx, 'exprs, E: AsCoercionSite> { + Dynamic(Vec<&'tcx hir::Expr<'tcx>>), + UpFront(&'exprs [E]), +} + +impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> { + /// The usual case; collect the set of expressions dynamically. + /// If the full set of coercion sites is known before hand, + /// consider `with_coercion_sites()` instead to avoid allocation. + pub fn new(expected_ty: Ty<'tcx>) -> Self { + Self::make(expected_ty, Expressions::Dynamic(vec![])) + } + + /// As an optimization, you can create a `CoerceMany` with a + /// pre-existing slice of expressions. In this case, you are + /// expected to pass each element in the slice to `coerce(...)` in + /// order. This is used with arrays in particular to avoid + /// needlessly cloning the slice. 
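+    ///
+    /// A sketch of this up-front form, mirroring the example above
+    /// (`arms`, `cause`, and `expected` are assumed to be in scope):
+    ///
+    /// ```ignore (illustrative)
+    /// let mut coerce = CoerceMany::with_coercion_sites(expected_ty, arms);
+    /// for arm in arms {
+    ///     let arm_ty = fcx.check_expr_with_expectation(&arm.body, expected);
+    ///     coerce.coerce(fcx, &cause, &arm.body, arm_ty);
+    /// }
+    /// let final_ty = coerce.complete(fcx);
+    /// ```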
+ pub fn with_coercion_sites(expected_ty: Ty<'tcx>, coercion_sites: &'exprs [E]) -> Self { + Self::make(expected_ty, Expressions::UpFront(coercion_sites)) + } + + fn make(expected_ty: Ty<'tcx>, expressions: Expressions<'tcx, 'exprs, E>) -> Self { + CoerceMany { expected_ty, final_ty: None, expressions, pushed: 0 } + } + + /// Returns the "expected type" with which this coercion was + /// constructed. This represents the "downward propagated" type + /// that was given to us at the start of typing whatever construct + /// we are typing (e.g., the match expression). + /// + /// Typically, this is used as the expected type when + /// type-checking each of the alternative expressions whose types + /// we are trying to merge. + pub fn expected_ty(&self) -> Ty<'tcx> { + self.expected_ty + } + + /// Returns the current "merged type", representing our best-guess + /// at the LUB of the expressions we've seen so far (if any). This + /// isn't *final* until you call `self.complete()`, which will return + /// the merged type. + pub fn merged_ty(&self) -> Ty<'tcx> { + self.final_ty.unwrap_or(self.expected_ty) + } + + /// Indicates that the value generated by `expression`, which is + /// of type `expression_ty`, is one of the possibilities that we + /// could coerce from. This will record `expression`, and later + /// calls to `coerce` may come back and add adjustments and things + /// if necessary. + pub fn coerce<'a>( + &mut self, + fcx: &FnCtxt<'a, 'tcx>, + cause: &ObligationCause<'tcx>, + expression: &'tcx hir::Expr<'tcx>, + expression_ty: Ty<'tcx>, + ) { + self.coerce_inner(fcx, cause, Some(expression), expression_ty, None, false) + } + + /// Indicates that one of the inputs is a "forced unit". This + /// occurs in a case like `if foo { ... };`, where the missing else + /// generates a "forced unit". Another example is a `loop { break; + /// }`, where the `break` has no argument expression. We treat + /// these cases slightly differently for error-reporting + /// purposes. Note that these tend to correspond to cases where + /// the `()` expression is implicit in the source, and hence we do + /// not take an expression argument. + /// + /// The `augment_error` gives you a chance to extend the error + /// message, in case any results (e.g., we use this to suggest + /// removing a `;`). + pub fn coerce_forced_unit<'a>( + &mut self, + fcx: &FnCtxt<'a, 'tcx>, + cause: &ObligationCause<'tcx>, + augment_error: &mut dyn FnMut(&mut Diagnostic), + label_unit_as_expected: bool, + ) { + self.coerce_inner( + fcx, + cause, + None, + fcx.tcx.mk_unit(), + Some(augment_error), + label_unit_as_expected, + ) + } + + /// The inner coercion "engine". If `expression` is `None`, this + /// is a forced-unit case, and hence `expression_ty` must be + /// `Nil`. + #[instrument(skip(self, fcx, augment_error, label_expression_as_expected), level = "debug")] + pub(crate) fn coerce_inner<'a>( + &mut self, + fcx: &FnCtxt<'a, 'tcx>, + cause: &ObligationCause<'tcx>, + expression: Option<&'tcx hir::Expr<'tcx>>, + mut expression_ty: Ty<'tcx>, + augment_error: Option<&mut dyn FnMut(&mut Diagnostic)>, + label_expression_as_expected: bool, + ) { + // Incorporate whatever type inference information we have + // until now; in principle we might also want to process + // pending obligations, but doing so should only improve + // compatibility (hopefully that is true) by helping us + // uncover never types better. 
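+        //
+        // The classic case where a never type shows up here is a diverging
+        // arm feeding the `CoerceMany` (an illustrative sketch, assuming
+        // `byte: u8`):
+        //
+        // ```ignore (illustrative)
+        // let n: u32 = match byte {
+        //     b'0'..=b'9' => u32::from(byte - b'0'), // the `u32` arm
+        //     _ => panic!("not a digit"),            // the `!` arm, coerced to `u32`
+        // };
+        // ```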
+ if expression_ty.is_ty_var() { + expression_ty = fcx.infcx.shallow_resolve(expression_ty); + } + + // If we see any error types, just propagate that error + // upwards. + if expression_ty.references_error() || self.merged_ty().references_error() { + self.final_ty = Some(fcx.tcx.ty_error()); + return; + } + + // Handle the actual type unification etc. + let result = if let Some(expression) = expression { + if self.pushed == 0 { + // Special-case the first expression we are coercing. + // To be honest, I'm not entirely sure why we do this. + // We don't allow two-phase borrows, see comment in try_find_coercion_lub for why + fcx.try_coerce( + expression, + expression_ty, + self.expected_ty, + AllowTwoPhase::No, + Some(cause.clone()), + ) + } else { + match self.expressions { + Expressions::Dynamic(ref exprs) => fcx.try_find_coercion_lub( + cause, + exprs, + self.merged_ty(), + expression, + expression_ty, + ), + Expressions::UpFront(ref coercion_sites) => fcx.try_find_coercion_lub( + cause, + &coercion_sites[0..self.pushed], + self.merged_ty(), + expression, + expression_ty, + ), + } + } + } else { + // this is a hack for cases where we default to `()` because + // the expression etc has been omitted from the source. An + // example is an `if let` without an else: + // + // if let Some(x) = ... { } + // + // we wind up with a second match arm that is like `_ => + // ()`. That is the case we are considering here. We take + // a different path to get the right "expected, found" + // message and so forth (and because we know that + // `expression_ty` will be unit). + // + // Another example is `break` with no argument expression. + assert!(expression_ty.is_unit(), "if let hack without unit type"); + fcx.at(cause, fcx.param_env) + .eq_exp(label_expression_as_expected, expression_ty, self.merged_ty()) + .map(|infer_ok| { + fcx.register_infer_ok_obligations(infer_ok); + expression_ty + }) + }; + + debug!(?result); + match result { + Ok(v) => { + self.final_ty = Some(v); + if let Some(e) = expression { + match self.expressions { + Expressions::Dynamic(ref mut buffer) => buffer.push(e), + Expressions::UpFront(coercion_sites) => { + // if the user gave us an array to validate, check that we got + // the next expression in the list, as expected + assert_eq!( + coercion_sites[self.pushed].as_coercion_site().hir_id, + e.hir_id + ); + } + } + self.pushed += 1; + } + } + Err(coercion_error) => { + let (expected, found) = if label_expression_as_expected { + // In the case where this is a "forced unit", like + // `break`, we want to call the `()` "expected" + // since it is implied by the syntax. 
+ // (Note: not all force-units work this way.)" + (expression_ty, self.final_ty.unwrap_or(self.expected_ty)) + } else { + // Otherwise, the "expected" type for error + // reporting is the current unification type, + // which is basically the LUB of the expressions + // we've seen so far (combined with the expected + // type) + (self.final_ty.unwrap_or(self.expected_ty), expression_ty) + }; + + let mut err; + let mut unsized_return = false; + match *cause.code() { + ObligationCauseCode::ReturnNoExpression => { + err = struct_span_err!( + fcx.tcx.sess, + cause.span, + E0069, + "`return;` in a function whose return type is not `()`" + ); + err.span_label(cause.span, "return type is not `()`"); + } + ObligationCauseCode::BlockTailExpression(blk_id) => { + let parent_id = fcx.tcx.hir().get_parent_node(blk_id); + err = self.report_return_mismatched_types( + cause, + expected, + found, + coercion_error.clone(), + fcx, + parent_id, + expression, + Some(blk_id), + ); + if !fcx.tcx.features().unsized_locals { + unsized_return = self.is_return_ty_unsized(fcx, blk_id); + } + } + ObligationCauseCode::ReturnValue(id) => { + err = self.report_return_mismatched_types( + cause, + expected, + found, + coercion_error.clone(), + fcx, + id, + expression, + None, + ); + if !fcx.tcx.features().unsized_locals { + let id = fcx.tcx.hir().get_parent_node(id); + unsized_return = self.is_return_ty_unsized(fcx, id); + } + } + _ => { + err = fcx.report_mismatched_types( + cause, + expected, + found, + coercion_error.clone(), + ); + } + } + + if let Some(augment_error) = augment_error { + augment_error(&mut err); + } + + let is_insufficiently_polymorphic = + matches!(coercion_error, TypeError::RegionsInsufficientlyPolymorphic(..)); + + if !is_insufficiently_polymorphic && let Some(expr) = expression { + fcx.emit_coerce_suggestions( + &mut err, + expr, + found, + expected, + None, + Some(coercion_error), + ); + } + + err.emit_unless(unsized_return); + + self.final_ty = Some(fcx.tcx.ty_error()); + } + } + } + + fn report_return_mismatched_types<'a>( + &self, + cause: &ObligationCause<'tcx>, + expected: Ty<'tcx>, + found: Ty<'tcx>, + ty_err: TypeError<'tcx>, + fcx: &FnCtxt<'a, 'tcx>, + id: hir::HirId, + expression: Option<&'tcx hir::Expr<'tcx>>, + blk_id: Option, + ) -> DiagnosticBuilder<'a, ErrorGuaranteed> { + let mut err = fcx.report_mismatched_types(cause, expected, found, ty_err); + + let mut pointing_at_return_type = false; + let mut fn_output = None; + + let parent_id = fcx.tcx.hir().get_parent_node(id); + let parent = fcx.tcx.hir().get(parent_id); + if let Some(expr) = expression + && let hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Closure(&hir::Closure { body, .. }), .. }) = parent + && !matches!(fcx.tcx.hir().body(body).value.kind, hir::ExprKind::Block(..)) + { + fcx.suggest_missing_semicolon(&mut err, expr, expected, true); + } + // Verify that this is a tail expression of a function, otherwise the + // label pointing out the cause for the type coercion will be wrong + // as prior return coercions would not be relevant (#57664). + let fn_decl = if let (Some(expr), Some(blk_id)) = (expression, blk_id) { + pointing_at_return_type = + fcx.suggest_mismatched_types_on_tail(&mut err, expr, expected, found, blk_id); + if let (Some(cond_expr), true, false) = ( + fcx.tcx.hir().get_if_cause(expr.hir_id), + expected.is_unit(), + pointing_at_return_type, + ) + // If the block is from an external macro or try (`?`) desugaring, then + // do not suggest adding a semicolon, because there's nowhere to put it. 
+ // See issues #81943 and #87051. + && matches!( + cond_expr.span.desugaring_kind(), + None | Some(DesugaringKind::WhileLoop) + ) && !in_external_macro(fcx.tcx.sess, cond_expr.span) + && !matches!( + cond_expr.kind, + hir::ExprKind::Match(.., hir::MatchSource::TryDesugar) + ) + { + err.span_label(cond_expr.span, "expected this to be `()`"); + if expr.can_have_side_effects() { + fcx.suggest_semicolon_at_end(cond_expr.span, &mut err); + } + } + fcx.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main)) + } else { + fcx.get_fn_decl(parent_id) + }; + + if let Some((fn_decl, can_suggest)) = fn_decl { + if blk_id.is_none() { + pointing_at_return_type |= fcx.suggest_missing_return_type( + &mut err, + &fn_decl, + expected, + found, + can_suggest, + fcx.tcx.hir().local_def_id_to_hir_id(fcx.tcx.hir().get_parent_item(id)), + ); + } + if !pointing_at_return_type { + fn_output = Some(&fn_decl.output); // `impl Trait` return type + } + } + + let parent_id = fcx.tcx.hir().get_parent_item(id); + let parent_item = fcx.tcx.hir().get_by_def_id(parent_id); + + if let (Some(expr), Some(_), Some((fn_decl, _, _))) = + (expression, blk_id, fcx.get_node_fn_decl(parent_item)) + { + fcx.suggest_missing_break_or_return_expr( + &mut err, + expr, + fn_decl, + expected, + found, + id, + fcx.tcx.hir().local_def_id_to_hir_id(parent_id), + ); + } + + if let (Some(sp), Some(fn_output)) = (fcx.ret_coercion_span.get(), fn_output) { + self.add_impl_trait_explanation(&mut err, cause, fcx, expected, sp, fn_output); + } + err + } + + fn add_impl_trait_explanation<'a>( + &self, + err: &mut Diagnostic, + cause: &ObligationCause<'tcx>, + fcx: &FnCtxt<'a, 'tcx>, + expected: Ty<'tcx>, + sp: Span, + fn_output: &hir::FnRetTy<'_>, + ) { + let return_sp = fn_output.span(); + err.span_label(return_sp, "expected because this return type..."); + err.span_label( + sp, + format!("...is found to be `{}` here", fcx.resolve_vars_with_obligations(expected)), + ); + let impl_trait_msg = "for information on `impl Trait`, see \ + "; + let trait_obj_msg = "for information on trait objects, see \ + "; + err.note("to return `impl Trait`, all returned values must be of the same type"); + err.note(impl_trait_msg); + let snippet = fcx + .tcx + .sess + .source_map() + .span_to_snippet(return_sp) + .unwrap_or_else(|_| "dyn Trait".to_string()); + let mut snippet_iter = snippet.split_whitespace(); + let has_impl = snippet_iter.next().map_or(false, |s| s == "impl"); + // Only suggest `Box` if `Trait` in `impl Trait` is object safe. + let mut is_object_safe = false; + if let hir::FnRetTy::Return(ty) = fn_output + // Get the return type. + && let hir::TyKind::OpaqueDef(..) = ty.kind + { + let ty = >::ast_ty_to_ty(fcx, ty); + // Get the `impl Trait`'s `DefId`. + if let ty::Opaque(def_id, _) = ty.kind() + // Get the `impl Trait`'s `Item` so that we can get its trait bounds and + // get the `Trait`'s `DefId`. + && let hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds, .. }) = + fcx.tcx.hir().expect_item(def_id.expect_local()).kind + { + // Are of this `impl Trait`'s traits object safe? 
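+            //
+            // The rewrite this suggestion machinery is driving at looks
+            // roughly like this (an illustrative sketch; `make` is an
+            // assumed function name):
+            //
+            // ```ignore (illustrative)
+            // // before: the returned values have different concrete types
+            // fn make(upper: bool) -> impl std::fmt::Display {
+            //     if upper { "ONE" } else { 1 } // error: mismatched types
+            // }
+            // // suggested instead: box both values as a trait object
+            // fn make(upper: bool) -> Box<dyn std::fmt::Display> {
+            //     if upper { Box::new("ONE") } else { Box::new(1) }
+            // }
+            // ```
+            //
+            // Boxing is only worth suggesting when the traits involved are
+            // object safe, hence the check below.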
+ is_object_safe = bounds.iter().all(|bound| { + bound + .trait_ref() + .and_then(|t| t.trait_def_id()) + .map_or(false, |def_id| { + fcx.tcx.object_safety_violations(def_id).is_empty() + }) + }) + } + }; + if has_impl { + if is_object_safe { + err.multipart_suggestion( + "you could change the return type to be a boxed trait object", + vec![ + (return_sp.with_hi(return_sp.lo() + BytePos(4)), "Box".to_string()), + ], + Applicability::MachineApplicable, + ); + let sugg = [sp, cause.span] + .into_iter() + .flat_map(|sp| { + [ + (sp.shrink_to_lo(), "Box::new(".to_string()), + (sp.shrink_to_hi(), ")".to_string()), + ] + .into_iter() + }) + .collect::>(); + err.multipart_suggestion( + "if you change the return type to expect trait objects, box the returned \ + expressions", + sugg, + Applicability::MaybeIncorrect, + ); + } else { + err.help(&format!( + "if the trait `{}` were object safe, you could return a boxed trait object", + &snippet[5..] + )); + } + err.note(trait_obj_msg); + } + err.help("you could instead create a new `enum` with a variant for each returned type"); + } + + fn is_return_ty_unsized<'a>(&self, fcx: &FnCtxt<'a, 'tcx>, blk_id: hir::HirId) -> bool { + if let Some((fn_decl, _)) = fcx.get_fn_decl(blk_id) + && let hir::FnRetTy::Return(ty) = fn_decl.output + && let ty = >::ast_ty_to_ty(fcx, ty) + && let ty::Dynamic(..) = ty.kind() + { + return true; + } + false + } + + pub fn complete<'a>(self, fcx: &FnCtxt<'a, 'tcx>) -> Ty<'tcx> { + if let Some(final_ty) = self.final_ty { + final_ty + } else { + // If we only had inputs that were of type `!` (or no + // inputs at all), then the final type is `!`. + assert_eq!(self.pushed, 0); + fcx.tcx.types.never + } + } +} + +/// Something that can be converted into an expression to which we can +/// apply a coercion. +pub trait AsCoercionSite { + fn as_coercion_site(&self) -> &hir::Expr<'_>; +} + +impl AsCoercionSite for hir::Expr<'_> { + fn as_coercion_site(&self) -> &hir::Expr<'_> { + self + } +} + +impl<'a, T> AsCoercionSite for &'a T +where + T: AsCoercionSite, +{ + fn as_coercion_site(&self) -> &hir::Expr<'_> { + (**self).as_coercion_site() + } +} + +impl AsCoercionSite for ! 
{ + fn as_coercion_site(&self) -> &hir::Expr<'_> { + unreachable!() + } +} + +impl AsCoercionSite for hir::Arm<'_> { + fn as_coercion_site(&self) -> &hir::Expr<'_> { + &self.body + } +} diff --git a/compiler/rustc_typeck/src/check/compare_method.rs b/compiler/rustc_typeck/src/check/compare_method.rs new file mode 100644 index 000000000..666498403 --- /dev/null +++ b/compiler/rustc_typeck/src/check/compare_method.rs @@ -0,0 +1,1547 @@ +use super::potentially_plural_count; +use crate::check::regionck::OutlivesEnvironmentExt; +use crate::check::wfcheck; +use crate::errors::LifetimesOrBoundsMismatchOnTrait; +use rustc_data_structures::fx::FxHashSet; +use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticId, ErrorGuaranteed}; +use rustc_hir as hir; +use rustc_hir::def::{DefKind, Res}; +use rustc_hir::intravisit; +use rustc_hir::{GenericParamKind, ImplItemKind, TraitItemKind}; +use rustc_infer::infer::outlives::env::OutlivesEnvironment; +use rustc_infer::infer::{self, TyCtxtInferExt}; +use rustc_infer::traits::util; +use rustc_middle::ty::error::{ExpectedFound, TypeError}; +use rustc_middle::ty::subst::{InternalSubsts, Subst}; +use rustc_middle::ty::util::ExplicitSelf; +use rustc_middle::ty::{self, DefIdTree}; +use rustc_middle::ty::{GenericParamDefKind, ToPredicate, TyCtxt}; +use rustc_span::Span; +use rustc_trait_selection::traits::error_reporting::InferCtxtExt; +use rustc_trait_selection::traits::{ + self, ObligationCause, ObligationCauseCode, ObligationCtxt, Reveal, +}; +use std::iter; + +/// Checks that a method from an impl conforms to the signature of +/// the same method as declared in the trait. +/// +/// # Parameters +/// +/// - `impl_m`: type of the method we are checking +/// - `impl_m_span`: span to use for reporting errors +/// - `trait_m`: the method in the trait +/// - `impl_trait_ref`: the TraitRef corresponding to the trait implementation +pub(crate) fn compare_impl_method<'tcx>( + tcx: TyCtxt<'tcx>, + impl_m: &ty::AssocItem, + trait_m: &ty::AssocItem, + impl_trait_ref: ty::TraitRef<'tcx>, + trait_item_span: Option, +) { + debug!("compare_impl_method(impl_trait_ref={:?})", impl_trait_ref); + + let impl_m_span = tcx.def_span(impl_m.def_id); + + if let Err(_) = compare_self_type(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref) { + return; + } + + if let Err(_) = compare_number_of_generics(tcx, impl_m, impl_m_span, trait_m, trait_item_span) { + return; + } + + if let Err(_) = compare_generic_param_kinds(tcx, impl_m, trait_m) { + return; + } + + if let Err(_) = + compare_number_of_method_arguments(tcx, impl_m, impl_m_span, trait_m, trait_item_span) + { + return; + } + + if let Err(_) = compare_synthetic_generics(tcx, impl_m, trait_m) { + return; + } + + if let Err(_) = compare_predicate_entailment(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref) + { + return; + } +} + +fn compare_predicate_entailment<'tcx>( + tcx: TyCtxt<'tcx>, + impl_m: &ty::AssocItem, + impl_m_span: Span, + trait_m: &ty::AssocItem, + impl_trait_ref: ty::TraitRef<'tcx>, +) -> Result<(), ErrorGuaranteed> { + let trait_to_impl_substs = impl_trait_ref.substs; + + // This node-id should be used for the `body_id` field on each + // `ObligationCause` (and the `FnCtxt`). + // + // FIXME(@lcnr): remove that after removing `cause.body_id` from + // obligations. + let impl_m_hir_id = tcx.hir().local_def_id_to_hir_id(impl_m.def_id.expect_local()); + // We sometimes modify the span further down. 
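// Illustration (invented `Log`/`Console` names, hedged): among the checks
// sequenced in `compare_impl_method` above, `compare_synthetic_generics`
// rejects trading an `impl Trait` argument for a named type parameter:
//
//     trait Log { fn log(&self, msg: impl std::fmt::Display); }
//     struct Console;
//     impl Log for Console {
//         // error[E0643]: method `log` has incompatible signature for trait
//         fn log<D: std::fmt::Display>(&self, msg: D) { println!("{msg}"); }
//     }
//
// The conforming impl simply repeats the `impl Trait` argument:
trait Log {
    fn log(&self, msg: impl std::fmt::Display);
}
struct Console;
impl Log for Console {
    fn log(&self, msg: impl std::fmt::Display) {
        println!("{msg}");
    }
}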
+ let mut cause = ObligationCause::new( + impl_m_span, + impl_m_hir_id, + ObligationCauseCode::CompareImplItemObligation { + impl_item_def_id: impl_m.def_id.expect_local(), + trait_item_def_id: trait_m.def_id, + kind: impl_m.kind, + }, + ); + + // This code is best explained by example. Consider a trait: + // + // trait Trait<'t, T> { + // fn method<'a, M>(t: &'t T, m: &'a M) -> Self; + // } + // + // And an impl: + // + // impl<'i, 'j, U> Trait<'j, &'i U> for Foo { + // fn method<'b, N>(t: &'j &'i U, m: &'b N) -> Foo; + // } + // + // We wish to decide if those two method types are compatible. + // + // We start out with trait_to_impl_substs, that maps the trait + // type parameters to impl type parameters. This is taken from the + // impl trait reference: + // + // trait_to_impl_substs = {'t => 'j, T => &'i U, Self => Foo} + // + // We create a mapping `dummy_substs` that maps from the impl type + // parameters to fresh types and regions. For type parameters, + // this is the identity transform, but we could as well use any + // placeholder types. For regions, we convert from bound to free + // regions (Note: but only early-bound regions, i.e., those + // declared on the impl or used in type parameter bounds). + // + // impl_to_placeholder_substs = {'i => 'i0, U => U0, N => N0 } + // + // Now we can apply placeholder_substs to the type of the impl method + // to yield a new function type in terms of our fresh, placeholder + // types: + // + // <'b> fn(t: &'i0 U0, m: &'b) -> Foo + // + // We now want to extract and substitute the type of the *trait* + // method and compare it. To do so, we must create a compound + // substitution by combining trait_to_impl_substs and + // impl_to_placeholder_substs, and also adding a mapping for the method + // type parameters. We extend the mapping to also include + // the method parameters. + // + // trait_to_placeholder_substs = { T => &'i0 U0, Self => Foo, M => N0 } + // + // Applying this to the trait method type yields: + // + // <'a> fn(t: &'i0 U0, m: &'a) -> Foo + // + // This type is also the same but the name of the bound region ('a + // vs 'b). However, the normal subtyping rules on fn types handle + // this kind of equivalency just fine. + // + // We now use these substitutions to ensure that all declared bounds are + // satisfied by the implementation's method. + // + // We do this by creating a parameter environment which contains a + // substitution corresponding to impl_to_placeholder_substs. We then build + // trait_to_placeholder_substs and use it to convert the predicates contained + // in the trait_m.generics to the placeholder form. + // + // Finally we register each of these predicates as an obligation in + // a fresh FulfillmentCtxt, and invoke select_all_or_error. + + // Create mapping from impl to placeholder. + let impl_to_placeholder_substs = InternalSubsts::identity_for_item(tcx, impl_m.def_id); + + // Create mapping from trait to placeholder. + let trait_to_placeholder_substs = + impl_to_placeholder_substs.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_substs); + debug!("compare_impl_method: trait_to_placeholder_substs={:?}", trait_to_placeholder_substs); + + let impl_m_generics = tcx.generics_of(impl_m.def_id); + let trait_m_generics = tcx.generics_of(trait_m.def_id); + let impl_m_predicates = tcx.predicates_of(impl_m.def_id); + let trait_m_predicates = tcx.predicates_of(trait_m.def_id); + + // Check region bounds. 
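// Illustration: the trait/impl pair from the walkthrough above, written out so
// that `trait_to_impl_substs = {'t => 'j, T => &'i U, Self => Foo}` can be read
// off directly (a sketch; `Foo` is the placeholder type the comment already uses).
struct Foo;
trait Trait<'t, T> {
    fn method<'a, M>(t: &'t T, m: &'a M) -> Self;
}
impl<'i, 'j, U> Trait<'j, &'i U> for Foo {
    fn method<'b, N>(t: &'j &'i U, m: &'b N) -> Foo {
        // The bound regions 'a (trait) and 'b (impl) only differ in name; the
        // normal subtyping rules on fn types treat them as equivalent.
        let _ = (t, m);
        Foo
    }
}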
+ check_region_bounds_on_impl_item(tcx, impl_m, trait_m, &trait_m_generics, &impl_m_generics)?; + + // Create obligations for each predicate declared by the impl + // definition in the context of the trait's parameter + // environment. We can't just use `impl_env.caller_bounds`, + // however, because we want to replace all late-bound regions with + // region variables. + let impl_predicates = tcx.predicates_of(impl_m_predicates.parent.unwrap()); + let mut hybrid_preds = impl_predicates.instantiate_identity(tcx); + + debug!("compare_impl_method: impl_bounds={:?}", hybrid_preds); + + // This is the only tricky bit of the new way we check implementation methods + // We need to build a set of predicates where only the method-level bounds + // are from the trait and we assume all other bounds from the implementation + // to be previously satisfied. + // + // We then register the obligations from the impl_m and check to see + // if all constraints hold. + hybrid_preds + .predicates + .extend(trait_m_predicates.instantiate_own(tcx, trait_to_placeholder_substs).predicates); + + // Construct trait parameter environment and then shift it into the placeholder viewpoint. + // The key step here is to update the caller_bounds's predicates to be + // the new hybrid bounds we computed. + let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_hir_id); + let param_env = ty::ParamEnv::new( + tcx.intern_predicates(&hybrid_preds.predicates), + Reveal::UserFacing, + hir::Constness::NotConst, + ); + let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause); + + tcx.infer_ctxt().enter(|ref infcx| { + let ocx = ObligationCtxt::new(infcx); + + debug!("compare_impl_method: caller_bounds={:?}", param_env.caller_bounds()); + + let mut selcx = traits::SelectionContext::new(&infcx); + let impl_m_own_bounds = impl_m_predicates.instantiate_own(tcx, impl_to_placeholder_substs); + for (predicate, span) in iter::zip(impl_m_own_bounds.predicates, impl_m_own_bounds.spans) { + let normalize_cause = traits::ObligationCause::misc(span, impl_m_hir_id); + let traits::Normalized { value: predicate, obligations } = + traits::normalize(&mut selcx, param_env, normalize_cause, predicate); + + ocx.register_obligations(obligations); + let cause = ObligationCause::new( + span, + impl_m_hir_id, + ObligationCauseCode::CompareImplItemObligation { + impl_item_def_id: impl_m.def_id.expect_local(), + trait_item_def_id: trait_m.def_id, + kind: impl_m.kind, + }, + ); + ocx.register_obligation(traits::Obligation::new(cause, param_env, predicate)); + } + + // We now need to check that the signature of the impl method is + // compatible with that of the trait method. We do this by + // checking that `impl_fty <: trait_fty`. + // + // FIXME. Unfortunately, this doesn't quite work right now because + // associated type normalization is not integrated into subtype + // checks. For the comparison to be valid, we need to + // normalize the associated types in the impl/trait methods + // first. However, because function types bind regions, just + // calling `normalize_associated_types_in` would have no effect on + // any associated types appearing in the fn arguments or return + // type. + + // Compute placeholder form of impl and trait method tys. 
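// Illustration (invented `Summarize`/`Report`; error wording approximate): the
// hybrid parameter environment assembled above is what rejects an impl whose
// method assumes more than the trait promised:
//
//     trait Summarize { fn summarize<T>(value: T) -> String; }
//     struct Report;
//     impl Summarize for Report {
//         // error[E0276]: impl has stricter requirements than trait
//         fn summarize<T: std::fmt::Debug>(value: T) -> String { format!("{value:?}") }
//     }
//
// A conforming impl may only rely on bounds the trait itself declares:
trait Summarize {
    fn summarize<T: std::fmt::Debug>(value: T) -> String;
}
struct Report;
impl Summarize for Report {
    fn summarize<T: std::fmt::Debug>(value: T) -> String {
        format!("{value:?}")
    }
}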
+ let tcx = infcx.tcx; + + let mut wf_tys = FxHashSet::default(); + + let impl_sig = infcx.replace_bound_vars_with_fresh_vars( + impl_m_span, + infer::HigherRankedType, + tcx.fn_sig(impl_m.def_id), + ); + + let norm_cause = ObligationCause::misc(impl_m_span, impl_m_hir_id); + let impl_sig = ocx.normalize(norm_cause.clone(), param_env, impl_sig); + let impl_fty = tcx.mk_fn_ptr(ty::Binder::dummy(impl_sig)); + debug!("compare_impl_method: impl_fty={:?}", impl_fty); + + let trait_sig = tcx.bound_fn_sig(trait_m.def_id).subst(tcx, trait_to_placeholder_substs); + let trait_sig = tcx.liberate_late_bound_regions(impl_m.def_id, trait_sig); + let trait_sig = ocx.normalize(norm_cause, param_env, trait_sig); + // Add the resulting inputs and output as well-formed. + wf_tys.extend(trait_sig.inputs_and_output.iter()); + let trait_fty = tcx.mk_fn_ptr(ty::Binder::dummy(trait_sig)); + + debug!("compare_impl_method: trait_fty={:?}", trait_fty); + + // FIXME: We'd want to keep more accurate spans than "the method signature" when + // processing the comparison between the trait and impl fn, but we sadly lose them + // and point at the whole signature when a trait bound or specific input or output + // type would be more appropriate. In other places we have a `Vec` + // corresponding to their `Vec`, but we don't have that here. + // Fixing this would improve the output of test `issue-83765.rs`. + let sub_result = infcx + .at(&cause, param_env) + .sup(trait_fty, impl_fty) + .map(|infer_ok| ocx.register_infer_ok_obligations(infer_ok)); + + if let Err(terr) = sub_result { + debug!("sub_types failed: impl ty {:?}, trait ty {:?}", impl_fty, trait_fty); + + let (impl_err_span, trait_err_span) = + extract_spans_for_error_reporting(&infcx, &terr, &cause, impl_m, trait_m); + + cause.span = impl_err_span; + + let mut diag = struct_span_err!( + tcx.sess, + cause.span(), + E0053, + "method `{}` has an incompatible type for trait", + trait_m.name + ); + match &terr { + TypeError::ArgumentMutability(0) | TypeError::ArgumentSorts(_, 0) + if trait_m.fn_has_self_parameter => + { + let ty = trait_sig.inputs()[0]; + let sugg = match ExplicitSelf::determine(ty, |_| ty == impl_trait_ref.self_ty()) + { + ExplicitSelf::ByValue => "self".to_owned(), + ExplicitSelf::ByReference(_, hir::Mutability::Not) => "&self".to_owned(), + ExplicitSelf::ByReference(_, hir::Mutability::Mut) => { + "&mut self".to_owned() + } + _ => format!("self: {ty}"), + }; + + // When the `impl` receiver is an arbitrary self type, like `self: Box`, the + // span points only at the type `Box, but we want to cover the whole + // argument pattern and type. + let span = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind { + ImplItemKind::Fn(ref sig, body) => tcx + .hir() + .body_param_names(body) + .zip(sig.decl.inputs.iter()) + .map(|(param, ty)| param.span.to(ty.span)) + .next() + .unwrap_or(impl_err_span), + _ => bug!("{:?} is not a method", impl_m), + }; + + diag.span_suggestion( + span, + "change the self-receiver type to match the trait", + sugg, + Applicability::MachineApplicable, + ); + } + TypeError::ArgumentMutability(i) | TypeError::ArgumentSorts(_, i) => { + if trait_sig.inputs().len() == *i { + // Suggestion to change output type. We do not suggest in `async` functions + // to avoid complex logic or incorrect output. 
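// Illustration (invented `Named`/`Widget`): the output-type suggestion emitted
// just below targets mismatches of the shape
//
//     trait Named { fn name(&self) -> String; }
//     struct Widget;
//     impl Named for Widget {
//         // error[E0053]: method `name` has an incompatible type for trait
//         fn name(&self) -> u32 { 0 }
//     }
//
// where the fix is simply to restate the trait's return type:
trait Named {
    fn name(&self) -> String;
}
struct Widget;
impl Named for Widget {
    fn name(&self) -> String {
        String::from("widget")
    }
}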
+ match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind { + ImplItemKind::Fn(ref sig, _) + if sig.header.asyncness == hir::IsAsync::NotAsync => + { + let msg = "change the output type to match the trait"; + let ap = Applicability::MachineApplicable; + match sig.decl.output { + hir::FnRetTy::DefaultReturn(sp) => { + let sugg = format!("-> {} ", trait_sig.output()); + diag.span_suggestion_verbose(sp, msg, sugg, ap); + } + hir::FnRetTy::Return(hir_ty) => { + let sugg = trait_sig.output(); + diag.span_suggestion(hir_ty.span, msg, sugg, ap); + } + }; + } + _ => {} + }; + } else if let Some(trait_ty) = trait_sig.inputs().get(*i) { + diag.span_suggestion( + impl_err_span, + "change the parameter type to match the trait", + trait_ty, + Applicability::MachineApplicable, + ); + } + } + _ => {} + } + + infcx.note_type_err( + &mut diag, + &cause, + trait_err_span.map(|sp| (sp, "type in trait".to_owned())), + Some(infer::ValuePairs::Terms(ExpectedFound { + expected: trait_fty.into(), + found: impl_fty.into(), + })), + &terr, + false, + false, + ); + + return Err(diag.emit()); + } + + // Check that all obligations are satisfied by the implementation's + // version. + let errors = ocx.select_all_or_error(); + if !errors.is_empty() { + let reported = infcx.report_fulfillment_errors(&errors, None, false); + return Err(reported); + } + + // Finally, resolve all regions. This catches wily misuses of + // lifetime parameters. + let mut outlives_environment = OutlivesEnvironment::new(param_env); + outlives_environment.add_implied_bounds(infcx, wf_tys, impl_m_hir_id); + infcx.check_region_obligations_and_report_errors( + impl_m.def_id.expect_local(), + &outlives_environment, + ); + + Ok(()) + }) +} + +fn check_region_bounds_on_impl_item<'tcx>( + tcx: TyCtxt<'tcx>, + impl_m: &ty::AssocItem, + trait_m: &ty::AssocItem, + trait_generics: &ty::Generics, + impl_generics: &ty::Generics, +) -> Result<(), ErrorGuaranteed> { + let trait_params = trait_generics.own_counts().lifetimes; + let impl_params = impl_generics.own_counts().lifetimes; + + debug!( + "check_region_bounds_on_impl_item: \ + trait_generics={:?} \ + impl_generics={:?}", + trait_generics, impl_generics + ); + + // Must have same number of early-bound lifetime parameters. + // Unfortunately, if the user screws up the bounds, then this + // will change classification between early and late. E.g., + // if in trait we have `<'a,'b:'a>`, and in impl we just have + // `<'a,'b>`, then we have 2 early-bound lifetime parameters + // in trait but 0 in the impl. But if we report "expected 2 + // but found 0" it's confusing, because it looks like there + // are zero. Since I don't quite know how to phrase things at + // the moment, give a kind of vague error message. 
+ if trait_params != impl_params { + let span = tcx + .hir() + .get_generics(impl_m.def_id.expect_local()) + .expect("expected impl item to have generics or else we can't compare them") + .span; + let generics_span = if let Some(local_def_id) = trait_m.def_id.as_local() { + Some( + tcx.hir() + .get_generics(local_def_id) + .expect("expected trait item to have generics or else we can't compare them") + .span, + ) + } else { + None + }; + + let reported = tcx.sess.emit_err(LifetimesOrBoundsMismatchOnTrait { + span, + item_kind: assoc_item_kind_str(impl_m), + ident: impl_m.ident(tcx), + generics_span, + }); + return Err(reported); + } + + Ok(()) +} + +#[instrument(level = "debug", skip(infcx))] +fn extract_spans_for_error_reporting<'a, 'tcx>( + infcx: &infer::InferCtxt<'a, 'tcx>, + terr: &TypeError<'_>, + cause: &ObligationCause<'tcx>, + impl_m: &ty::AssocItem, + trait_m: &ty::AssocItem, +) -> (Span, Option) { + let tcx = infcx.tcx; + let mut impl_args = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind { + ImplItemKind::Fn(ref sig, _) => { + sig.decl.inputs.iter().map(|t| t.span).chain(iter::once(sig.decl.output.span())) + } + _ => bug!("{:?} is not a method", impl_m), + }; + let trait_args = + trait_m.def_id.as_local().map(|def_id| match tcx.hir().expect_trait_item(def_id).kind { + TraitItemKind::Fn(ref sig, _) => { + sig.decl.inputs.iter().map(|t| t.span).chain(iter::once(sig.decl.output.span())) + } + _ => bug!("{:?} is not a TraitItemKind::Fn", trait_m), + }); + + match *terr { + TypeError::ArgumentMutability(i) => { + (impl_args.nth(i).unwrap(), trait_args.and_then(|mut args| args.nth(i))) + } + TypeError::ArgumentSorts(ExpectedFound { .. }, i) => { + (impl_args.nth(i).unwrap(), trait_args.and_then(|mut args| args.nth(i))) + } + _ => (cause.span(), tcx.hir().span_if_local(trait_m.def_id)), + } +} + +fn compare_self_type<'tcx>( + tcx: TyCtxt<'tcx>, + impl_m: &ty::AssocItem, + impl_m_span: Span, + trait_m: &ty::AssocItem, + impl_trait_ref: ty::TraitRef<'tcx>, +) -> Result<(), ErrorGuaranteed> { + // Try to give more informative error messages about self typing + // mismatches. Note that any mismatch will also be detected + // below, where we construct a canonical function type that + // includes the self parameter as a normal parameter. It's just + // that the error messages you get out of this code are a bit more + // inscrutable, particularly for cases where one method has no + // self. 
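// Illustration (invented `Reset`/`Counter`): the receiver check in this
// function reports E0186 when the trait declares a `self` the impl drops, and
// E0185 for the reverse direction:
//
//     trait Reset { fn reset(&mut self); }
//     struct Counter(u32);
//     impl Reset for Counter {
//         // error[E0186]: method `reset` has a `&mut self` declaration in the
//         // trait, but not in the impl
//         fn reset() {}
//     }
//
// Conforming impl:
trait Reset {
    fn reset(&mut self);
}
struct Counter(u32);
impl Reset for Counter {
    fn reset(&mut self) {
        self.0 = 0;
    }
}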
+ + let self_string = |method: &ty::AssocItem| { + let untransformed_self_ty = match method.container { + ty::ImplContainer => impl_trait_ref.self_ty(), + ty::TraitContainer => tcx.types.self_param, + }; + let self_arg_ty = tcx.fn_sig(method.def_id).input(0); + let param_env = ty::ParamEnv::reveal_all(); + + tcx.infer_ctxt().enter(|infcx| { + let self_arg_ty = tcx.liberate_late_bound_regions(method.def_id, self_arg_ty); + let can_eq_self = |ty| infcx.can_eq(param_env, untransformed_self_ty, ty).is_ok(); + match ExplicitSelf::determine(self_arg_ty, can_eq_self) { + ExplicitSelf::ByValue => "self".to_owned(), + ExplicitSelf::ByReference(_, hir::Mutability::Not) => "&self".to_owned(), + ExplicitSelf::ByReference(_, hir::Mutability::Mut) => "&mut self".to_owned(), + _ => format!("self: {self_arg_ty}"), + } + }) + }; + + match (trait_m.fn_has_self_parameter, impl_m.fn_has_self_parameter) { + (false, false) | (true, true) => {} + + (false, true) => { + let self_descr = self_string(impl_m); + let mut err = struct_span_err!( + tcx.sess, + impl_m_span, + E0185, + "method `{}` has a `{}` declaration in the impl, but not in the trait", + trait_m.name, + self_descr + ); + err.span_label(impl_m_span, format!("`{self_descr}` used in impl")); + if let Some(span) = tcx.hir().span_if_local(trait_m.def_id) { + err.span_label(span, format!("trait method declared without `{self_descr}`")); + } else { + err.note_trait_signature(trait_m.name, trait_m.signature(tcx)); + } + let reported = err.emit(); + return Err(reported); + } + + (true, false) => { + let self_descr = self_string(trait_m); + let mut err = struct_span_err!( + tcx.sess, + impl_m_span, + E0186, + "method `{}` has a `{}` declaration in the trait, but not in the impl", + trait_m.name, + self_descr + ); + err.span_label(impl_m_span, format!("expected `{self_descr}` in impl")); + if let Some(span) = tcx.hir().span_if_local(trait_m.def_id) { + err.span_label(span, format!("`{self_descr}` used in trait")); + } else { + err.note_trait_signature(trait_m.name, trait_m.signature(tcx)); + } + let reported = err.emit(); + return Err(reported); + } + } + + Ok(()) +} + +/// Checks that the number of generics on a given assoc item in a trait impl is the same +/// as the number of generics on the respective assoc item in the trait definition. +/// +/// For example this code emits the errors in the following code: +/// ``` +/// trait Trait { +/// fn foo(); +/// type Assoc; +/// } +/// +/// impl Trait for () { +/// fn foo() {} +/// //~^ error +/// type Assoc = u32; +/// //~^ error +/// } +/// ``` +/// +/// Notably this does not error on `foo` implemented as `foo` or +/// `foo` implemented as `foo`. This is handled in +/// [`compare_generic_param_kinds`]. 
This function also does not handle lifetime parameters +fn compare_number_of_generics<'tcx>( + tcx: TyCtxt<'tcx>, + impl_: &ty::AssocItem, + _impl_span: Span, + trait_: &ty::AssocItem, + trait_span: Option, +) -> Result<(), ErrorGuaranteed> { + let trait_own_counts = tcx.generics_of(trait_.def_id).own_counts(); + let impl_own_counts = tcx.generics_of(impl_.def_id).own_counts(); + + // This avoids us erroring on `foo` implemented as `foo` as this is implemented + // in `compare_generic_param_kinds` which will give a nicer error message than something like: + // "expected 1 type parameter, found 0 type parameters" + if (trait_own_counts.types + trait_own_counts.consts) + == (impl_own_counts.types + impl_own_counts.consts) + { + return Ok(()); + } + + let matchings = [ + ("type", trait_own_counts.types, impl_own_counts.types), + ("const", trait_own_counts.consts, impl_own_counts.consts), + ]; + + let item_kind = assoc_item_kind_str(impl_); + + let mut err_occurred = None; + for (kind, trait_count, impl_count) in matchings { + if impl_count != trait_count { + let arg_spans = |kind: ty::AssocKind, generics: &hir::Generics<'_>| { + let mut spans = generics + .params + .iter() + .filter(|p| match p.kind { + hir::GenericParamKind::Lifetime { + kind: hir::LifetimeParamKind::Elided, + } => { + // A fn can have an arbitrary number of extra elided lifetimes for the + // same signature. + !matches!(kind, ty::AssocKind::Fn) + } + _ => true, + }) + .map(|p| p.span) + .collect::>(); + if spans.is_empty() { + spans = vec![generics.span] + } + spans + }; + let (trait_spans, impl_trait_spans) = if let Some(def_id) = trait_.def_id.as_local() { + let trait_item = tcx.hir().expect_trait_item(def_id); + let arg_spans: Vec = arg_spans(trait_.kind, trait_item.generics); + let impl_trait_spans: Vec = trait_item + .generics + .params + .iter() + .filter_map(|p| match p.kind { + GenericParamKind::Type { synthetic: true, .. } => Some(p.span), + _ => None, + }) + .collect(); + (Some(arg_spans), impl_trait_spans) + } else { + (trait_span.map(|s| vec![s]), vec![]) + }; + + let impl_item = tcx.hir().expect_impl_item(impl_.def_id.expect_local()); + let impl_item_impl_trait_spans: Vec = impl_item + .generics + .params + .iter() + .filter_map(|p| match p.kind { + GenericParamKind::Type { synthetic: true, .. 
} => Some(p.span), + _ => None, + }) + .collect(); + let spans = arg_spans(impl_.kind, impl_item.generics); + let span = spans.first().copied(); + + let mut err = tcx.sess.struct_span_err_with_code( + spans, + &format!( + "{} `{}` has {} {kind} parameter{} but its trait \ + declaration has {} {kind} parameter{}", + item_kind, + trait_.name, + impl_count, + pluralize!(impl_count), + trait_count, + pluralize!(trait_count), + kind = kind, + ), + DiagnosticId::Error("E0049".into()), + ); + + let mut suffix = None; + + if let Some(spans) = trait_spans { + let mut spans = spans.iter(); + if let Some(span) = spans.next() { + err.span_label( + *span, + format!( + "expected {} {} parameter{}", + trait_count, + kind, + pluralize!(trait_count), + ), + ); + } + for span in spans { + err.span_label(*span, ""); + } + } else { + suffix = Some(format!(", expected {trait_count}")); + } + + if let Some(span) = span { + err.span_label( + span, + format!( + "found {} {} parameter{}{}", + impl_count, + kind, + pluralize!(impl_count), + suffix.unwrap_or_else(String::new), + ), + ); + } + + for span in impl_trait_spans.iter().chain(impl_item_impl_trait_spans.iter()) { + err.span_label(*span, "`impl Trait` introduces an implicit type parameter"); + } + + let reported = err.emit(); + err_occurred = Some(reported); + } + } + + if let Some(reported) = err_occurred { Err(reported) } else { Ok(()) } +} + +fn compare_number_of_method_arguments<'tcx>( + tcx: TyCtxt<'tcx>, + impl_m: &ty::AssocItem, + impl_m_span: Span, + trait_m: &ty::AssocItem, + trait_item_span: Option, +) -> Result<(), ErrorGuaranteed> { + let impl_m_fty = tcx.fn_sig(impl_m.def_id); + let trait_m_fty = tcx.fn_sig(trait_m.def_id); + let trait_number_args = trait_m_fty.inputs().skip_binder().len(); + let impl_number_args = impl_m_fty.inputs().skip_binder().len(); + if trait_number_args != impl_number_args { + let trait_span = if let Some(def_id) = trait_m.def_id.as_local() { + match tcx.hir().expect_trait_item(def_id).kind { + TraitItemKind::Fn(ref trait_m_sig, _) => { + let pos = if trait_number_args > 0 { trait_number_args - 1 } else { 0 }; + if let Some(arg) = trait_m_sig.decl.inputs.get(pos) { + Some(if pos == 0 { + arg.span + } else { + arg.span.with_lo(trait_m_sig.decl.inputs[0].span.lo()) + }) + } else { + trait_item_span + } + } + _ => bug!("{:?} is not a method", impl_m), + } + } else { + trait_item_span + }; + let impl_span = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind { + ImplItemKind::Fn(ref impl_m_sig, _) => { + let pos = if impl_number_args > 0 { impl_number_args - 1 } else { 0 }; + if let Some(arg) = impl_m_sig.decl.inputs.get(pos) { + if pos == 0 { + arg.span + } else { + arg.span.with_lo(impl_m_sig.decl.inputs[0].span.lo()) + } + } else { + impl_m_span + } + } + _ => bug!("{:?} is not a method", impl_m), + }; + let mut err = struct_span_err!( + tcx.sess, + impl_span, + E0050, + "method `{}` has {} but the declaration in trait `{}` has {}", + trait_m.name, + potentially_plural_count(impl_number_args, "parameter"), + tcx.def_path_str(trait_m.def_id), + trait_number_args + ); + if let Some(trait_span) = trait_span { + err.span_label( + trait_span, + format!( + "trait requires {}", + potentially_plural_count(trait_number_args, "parameter") + ), + ); + } else { + err.note_trait_signature(trait_m.name, trait_m.signature(tcx)); + } + err.span_label( + impl_span, + format!( + "expected {}, found {}", + potentially_plural_count(trait_number_args, "parameter"), + impl_number_args + ), + ); + let reported = err.emit(); + 
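// Illustration (invented `Draw`/`Circle`; note the count includes the `self`
// receiver, and the wording is approximate): the E0050 report assembled above
// fires on arity mismatches like
//
//     trait Draw { fn draw(&self, scale: f32); }
//     struct Circle;
//     impl Draw for Circle {
//         // error[E0050]: method `draw` has 1 parameter but the declaration
//         // in trait `Draw::draw` has 2
//         fn draw(&self) {}
//     }
//
// Conforming impl:
trait Draw {
    fn draw(&self, scale: f32);
}
struct Circle;
impl Draw for Circle {
    fn draw(&self, scale: f32) {
        let _ = scale;
    }
}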
return Err(reported); + } + + Ok(()) +} + +fn compare_synthetic_generics<'tcx>( + tcx: TyCtxt<'tcx>, + impl_m: &ty::AssocItem, + trait_m: &ty::AssocItem, +) -> Result<(), ErrorGuaranteed> { + // FIXME(chrisvittal) Clean up this function, list of FIXME items: + // 1. Better messages for the span labels + // 2. Explanation as to what is going on + // If we get here, we already have the same number of generics, so the zip will + // be okay. + let mut error_found = None; + let impl_m_generics = tcx.generics_of(impl_m.def_id); + let trait_m_generics = tcx.generics_of(trait_m.def_id); + let impl_m_type_params = impl_m_generics.params.iter().filter_map(|param| match param.kind { + GenericParamDefKind::Type { synthetic, .. } => Some((param.def_id, synthetic)), + GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => None, + }); + let trait_m_type_params = trait_m_generics.params.iter().filter_map(|param| match param.kind { + GenericParamDefKind::Type { synthetic, .. } => Some((param.def_id, synthetic)), + GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => None, + }); + for ((impl_def_id, impl_synthetic), (trait_def_id, trait_synthetic)) in + iter::zip(impl_m_type_params, trait_m_type_params) + { + if impl_synthetic != trait_synthetic { + let impl_def_id = impl_def_id.expect_local(); + let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_def_id); + let impl_span = tcx.hir().span(impl_hir_id); + let trait_span = tcx.def_span(trait_def_id); + let mut err = struct_span_err!( + tcx.sess, + impl_span, + E0643, + "method `{}` has incompatible signature for trait", + trait_m.name + ); + err.span_label(trait_span, "declaration in trait here"); + match (impl_synthetic, trait_synthetic) { + // The case where the impl method uses `impl Trait` but the trait method uses + // explicit generics + (true, false) => { + err.span_label(impl_span, "expected generic parameter, found `impl Trait`"); + (|| { + // try taking the name from the trait impl + // FIXME: this is obviously suboptimal since the name can already be used + // as another generic argument + let new_name = tcx.sess.source_map().span_to_snippet(trait_span).ok()?; + let trait_m = trait_m.def_id.as_local()?; + let trait_m = tcx.hir().trait_item(hir::TraitItemId { def_id: trait_m }); + + let impl_m = impl_m.def_id.as_local()?; + let impl_m = tcx.hir().impl_item(hir::ImplItemId { def_id: impl_m }); + + // in case there are no generics, take the spot between the function name + // and the opening paren of the argument list + let new_generics_span = + tcx.sess.source_map().generate_fn_name_span(impl_span)?.shrink_to_hi(); + // in case there are generics, just replace them + let generics_span = + impl_m.generics.span.substitute_dummy(new_generics_span); + // replace with the generics from the trait + let new_generics = + tcx.sess.source_map().span_to_snippet(trait_m.generics.span).ok()?; + + err.multipart_suggestion( + "try changing the `impl Trait` argument to a generic parameter", + vec![ + // replace `impl Trait` with `T` + (impl_span, new_name), + // replace impl method generics with trait method generics + // This isn't quite right, as users might have changed the names + // of the generics, but it works for the common case + (generics_span, new_generics), + ], + Applicability::MaybeIncorrect, + ); + Some(()) + })(); + } + // The case where the trait method uses `impl Trait`, but the impl method uses + // explicit generics. 
+ (false, true) => { + err.span_label(impl_span, "expected `impl Trait`, found generic parameter"); + (|| { + let impl_m = impl_m.def_id.as_local()?; + let impl_m = tcx.hir().impl_item(hir::ImplItemId { def_id: impl_m }); + let input_tys = match impl_m.kind { + hir::ImplItemKind::Fn(ref sig, _) => sig.decl.inputs, + _ => unreachable!(), + }; + struct Visitor(Option, hir::def_id::LocalDefId); + impl<'v> intravisit::Visitor<'v> for Visitor { + fn visit_ty(&mut self, ty: &'v hir::Ty<'v>) { + intravisit::walk_ty(self, ty); + if let hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) = + ty.kind + && let Res::Def(DefKind::TyParam, def_id) = path.res + && def_id == self.1.to_def_id() + { + self.0 = Some(ty.span); + } + } + } + let mut visitor = Visitor(None, impl_def_id); + for ty in input_tys { + intravisit::Visitor::visit_ty(&mut visitor, ty); + } + let span = visitor.0?; + + let bounds = impl_m.generics.bounds_for_param(impl_def_id).next()?.bounds; + let bounds = bounds.first()?.span().to(bounds.last()?.span()); + let bounds = tcx.sess.source_map().span_to_snippet(bounds).ok()?; + + err.multipart_suggestion( + "try removing the generic parameter and using `impl Trait` instead", + vec![ + // delete generic parameters + (impl_m.generics.span, String::new()), + // replace param usage with `impl Trait` + (span, format!("impl {bounds}")), + ], + Applicability::MaybeIncorrect, + ); + Some(()) + })(); + } + _ => unreachable!(), + } + let reported = err.emit(); + error_found = Some(reported); + } + } + if let Some(reported) = error_found { Err(reported) } else { Ok(()) } +} + +/// Checks that all parameters in the generics of a given assoc item in a trait impl have +/// the same kind as the respective generic parameter in the trait def. +/// +/// For example all 4 errors in the following code are emitted here: +/// ``` +/// trait Foo { +/// fn foo(); +/// type bar; +/// fn baz(); +/// type blah; +/// } +/// +/// impl Foo for () { +/// fn foo() {} +/// //~^ error +/// type bar {} +/// //~^ error +/// fn baz() {} +/// //~^ error +/// type blah = u32; +/// //~^ error +/// } +/// ``` +/// +/// This function does not handle lifetime parameters +fn compare_generic_param_kinds<'tcx>( + tcx: TyCtxt<'tcx>, + impl_item: &ty::AssocItem, + trait_item: &ty::AssocItem, +) -> Result<(), ErrorGuaranteed> { + assert_eq!(impl_item.kind, trait_item.kind); + + let ty_const_params_of = |def_id| { + tcx.generics_of(def_id).params.iter().filter(|param| { + matches!( + param.kind, + GenericParamDefKind::Const { .. } | GenericParamDefKind::Type { .. } + ) + }) + }; + + for (param_impl, param_trait) in + iter::zip(ty_const_params_of(impl_item.def_id), ty_const_params_of(trait_item.def_id)) + { + use GenericParamDefKind::*; + if match (¶m_impl.kind, ¶m_trait.kind) { + (Const { .. }, Const { .. }) + if tcx.type_of(param_impl.def_id) != tcx.type_of(param_trait.def_id) => + { + true + } + (Const { .. }, Type { .. }) | (Type { .. }, Const { .. }) => true, + // this is exhaustive so that anyone adding new generic param kinds knows + // to make sure this error is reported for them. + (Const { .. }, Const { .. }) | (Type { .. }, Type { .. }) => false, + (Lifetime { .. }, _) | (_, Lifetime { .. 
}) => unreachable!(), + } { + let param_impl_span = tcx.def_span(param_impl.def_id); + let param_trait_span = tcx.def_span(param_trait.def_id); + + let mut err = struct_span_err!( + tcx.sess, + param_impl_span, + E0053, + "{} `{}` has an incompatible generic parameter for trait `{}`", + assoc_item_kind_str(&impl_item), + trait_item.name, + &tcx.def_path_str(tcx.parent(trait_item.def_id)) + ); + + let make_param_message = |prefix: &str, param: &ty::GenericParamDef| match param.kind { + Const { .. } => { + format!("{} const parameter of type `{}`", prefix, tcx.type_of(param.def_id)) + } + Type { .. } => format!("{} type parameter", prefix), + Lifetime { .. } => unreachable!(), + }; + + let trait_header_span = tcx.def_ident_span(tcx.parent(trait_item.def_id)).unwrap(); + err.span_label(trait_header_span, ""); + err.span_label(param_trait_span, make_param_message("expected", param_trait)); + + let impl_header_span = tcx.def_span(tcx.parent(impl_item.def_id)); + err.span_label(impl_header_span, ""); + err.span_label(param_impl_span, make_param_message("found", param_impl)); + + let reported = err.emit(); + return Err(reported); + } + } + + Ok(()) +} + +pub(crate) fn compare_const_impl<'tcx>( + tcx: TyCtxt<'tcx>, + impl_c: &ty::AssocItem, + impl_c_span: Span, + trait_c: &ty::AssocItem, + impl_trait_ref: ty::TraitRef<'tcx>, +) { + debug!("compare_const_impl(impl_trait_ref={:?})", impl_trait_ref); + + tcx.infer_ctxt().enter(|infcx| { + let param_env = tcx.param_env(impl_c.def_id); + let ocx = ObligationCtxt::new(&infcx); + + // The below is for the most part highly similar to the procedure + // for methods above. It is simpler in many respects, especially + // because we shouldn't really have to deal with lifetimes or + // predicates. In fact some of this should probably be put into + // shared functions because of DRY violations... + let trait_to_impl_substs = impl_trait_ref.substs; + + // Create a parameter environment that represents the implementation's + // method. + let impl_c_hir_id = tcx.hir().local_def_id_to_hir_id(impl_c.def_id.expect_local()); + + // Compute placeholder form of impl and trait const tys. + let impl_ty = tcx.type_of(impl_c.def_id); + let trait_ty = tcx.bound_type_of(trait_c.def_id).subst(tcx, trait_to_impl_substs); + let mut cause = ObligationCause::new( + impl_c_span, + impl_c_hir_id, + ObligationCauseCode::CompareImplItemObligation { + impl_item_def_id: impl_c.def_id.expect_local(), + trait_item_def_id: trait_c.def_id, + kind: impl_c.kind, + }, + ); + + // There is no "body" here, so just pass dummy id. 
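// Illustration (invented `Limits`/`Small`): the normalization and subtype
// check performed below is what produces E0326 for mismatched associated
// consts:
//
//     trait Limits { const MAX: u32; }
//     struct Small;
//     impl Limits for Small {
//         // error[E0326]: implemented const `MAX` has an incompatible type
//         // for trait
//         const MAX: i64 = 10;
//     }
//
// Conforming impl:
trait Limits {
    const MAX: u32;
}
struct Small;
impl Limits for Small {
    const MAX: u32 = 10;
}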
+ let impl_ty = ocx.normalize(cause.clone(), param_env, impl_ty); + + debug!("compare_const_impl: impl_ty={:?}", impl_ty); + + let trait_ty = ocx.normalize(cause.clone(), param_env, trait_ty); + + debug!("compare_const_impl: trait_ty={:?}", trait_ty); + + let err = infcx + .at(&cause, param_env) + .sup(trait_ty, impl_ty) + .map(|ok| ocx.register_infer_ok_obligations(ok)); + + if let Err(terr) = err { + debug!( + "checking associated const for compatibility: impl ty {:?}, trait ty {:?}", + impl_ty, trait_ty + ); + + // Locate the Span containing just the type of the offending impl + match tcx.hir().expect_impl_item(impl_c.def_id.expect_local()).kind { + ImplItemKind::Const(ref ty, _) => cause.span = ty.span, + _ => bug!("{:?} is not a impl const", impl_c), + } + + let mut diag = struct_span_err!( + tcx.sess, + cause.span, + E0326, + "implemented const `{}` has an incompatible type for trait", + trait_c.name + ); + + let trait_c_span = trait_c.def_id.as_local().map(|trait_c_def_id| { + // Add a label to the Span containing just the type of the const + match tcx.hir().expect_trait_item(trait_c_def_id).kind { + TraitItemKind::Const(ref ty, _) => ty.span, + _ => bug!("{:?} is not a trait const", trait_c), + } + }); + + infcx.note_type_err( + &mut diag, + &cause, + trait_c_span.map(|span| (span, "type in trait".to_owned())), + Some(infer::ValuePairs::Terms(ExpectedFound { + expected: trait_ty.into(), + found: impl_ty.into(), + })), + &terr, + false, + false, + ); + diag.emit(); + } + + // Check that all obligations are satisfied by the implementation's + // version. + let errors = ocx.select_all_or_error(); + if !errors.is_empty() { + infcx.report_fulfillment_errors(&errors, None, false); + return; + } + + let outlives_environment = OutlivesEnvironment::new(param_env); + infcx.check_region_obligations_and_report_errors( + impl_c.def_id.expect_local(), + &outlives_environment, + ); + }); +} + +pub(crate) fn compare_ty_impl<'tcx>( + tcx: TyCtxt<'tcx>, + impl_ty: &ty::AssocItem, + impl_ty_span: Span, + trait_ty: &ty::AssocItem, + impl_trait_ref: ty::TraitRef<'tcx>, + trait_item_span: Option, +) { + debug!("compare_impl_type(impl_trait_ref={:?})", impl_trait_ref); + + let _: Result<(), ErrorGuaranteed> = (|| { + compare_number_of_generics(tcx, impl_ty, impl_ty_span, trait_ty, trait_item_span)?; + + compare_generic_param_kinds(tcx, impl_ty, trait_ty)?; + + let sp = tcx.def_span(impl_ty.def_id); + compare_type_predicate_entailment(tcx, impl_ty, sp, trait_ty, impl_trait_ref)?; + + check_type_bounds(tcx, trait_ty, impl_ty, impl_ty_span, impl_trait_ref) + })(); +} + +/// The equivalent of [compare_predicate_entailment], but for associated types +/// instead of associated functions. 
+fn compare_type_predicate_entailment<'tcx>( + tcx: TyCtxt<'tcx>, + impl_ty: &ty::AssocItem, + impl_ty_span: Span, + trait_ty: &ty::AssocItem, + impl_trait_ref: ty::TraitRef<'tcx>, +) -> Result<(), ErrorGuaranteed> { + let impl_substs = InternalSubsts::identity_for_item(tcx, impl_ty.def_id); + let trait_to_impl_substs = + impl_substs.rebase_onto(tcx, impl_ty.container_id(tcx), impl_trait_ref.substs); + + let impl_ty_generics = tcx.generics_of(impl_ty.def_id); + let trait_ty_generics = tcx.generics_of(trait_ty.def_id); + let impl_ty_predicates = tcx.predicates_of(impl_ty.def_id); + let trait_ty_predicates = tcx.predicates_of(trait_ty.def_id); + + check_region_bounds_on_impl_item( + tcx, + impl_ty, + trait_ty, + &trait_ty_generics, + &impl_ty_generics, + )?; + + let impl_ty_own_bounds = impl_ty_predicates.instantiate_own(tcx, impl_substs); + + if impl_ty_own_bounds.is_empty() { + // Nothing to check. + return Ok(()); + } + + // This `HirId` should be used for the `body_id` field on each + // `ObligationCause` (and the `FnCtxt`). This is what + // `regionck_item` expects. + let impl_ty_hir_id = tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local()); + debug!("compare_type_predicate_entailment: trait_to_impl_substs={:?}", trait_to_impl_substs); + + // The predicates declared by the impl definition, the trait and the + // associated type in the trait are assumed. + let impl_predicates = tcx.predicates_of(impl_ty_predicates.parent.unwrap()); + let mut hybrid_preds = impl_predicates.instantiate_identity(tcx); + hybrid_preds + .predicates + .extend(trait_ty_predicates.instantiate_own(tcx, trait_to_impl_substs).predicates); + + debug!("compare_type_predicate_entailment: bounds={:?}", hybrid_preds); + + let normalize_cause = traits::ObligationCause::misc(impl_ty_span, impl_ty_hir_id); + let param_env = ty::ParamEnv::new( + tcx.intern_predicates(&hybrid_preds.predicates), + Reveal::UserFacing, + hir::Constness::NotConst, + ); + let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause); + tcx.infer_ctxt().enter(|infcx| { + let ocx = ObligationCtxt::new(&infcx); + + debug!("compare_type_predicate_entailment: caller_bounds={:?}", param_env.caller_bounds()); + + let mut selcx = traits::SelectionContext::new(&infcx); + + assert_eq!(impl_ty_own_bounds.predicates.len(), impl_ty_own_bounds.spans.len()); + for (span, predicate) in + std::iter::zip(impl_ty_own_bounds.spans, impl_ty_own_bounds.predicates) + { + let cause = ObligationCause::misc(span, impl_ty_hir_id); + let traits::Normalized { value: predicate, obligations } = + traits::normalize(&mut selcx, param_env, cause, predicate); + + let cause = ObligationCause::new( + span, + impl_ty_hir_id, + ObligationCauseCode::CompareImplItemObligation { + impl_item_def_id: impl_ty.def_id.expect_local(), + trait_item_def_id: trait_ty.def_id, + kind: impl_ty.kind, + }, + ); + ocx.register_obligations(obligations); + ocx.register_obligation(traits::Obligation::new(cause, param_env, predicate)); + } + + // Check that all obligations are satisfied by the implementation's + // version. + let errors = ocx.select_all_or_error(); + if !errors.is_empty() { + let reported = infcx.report_fulfillment_errors(&errors, None, false); + return Err(reported); + } + + // Finally, resolve all regions. This catches wily misuses of + // lifetime parameters. 
+ let outlives_environment = OutlivesEnvironment::new(param_env); + infcx.check_region_obligations_and_report_errors( + impl_ty.def_id.expect_local(), + &outlives_environment, + ); + + Ok(()) + }) +} + +/// Validate that `ProjectionCandidate`s created for this associated type will +/// be valid. +/// +/// Usually given +/// +/// trait X { type Y: Copy } impl X for T { type Y = S; } +/// +/// We are able to normalize `::U` to `S`, and so when we check the +/// impl is well-formed we have to prove `S: Copy`. +/// +/// For default associated types the normalization is not possible (the value +/// from the impl could be overridden). We also can't normalize generic +/// associated types (yet) because they contain bound parameters. +#[tracing::instrument(level = "debug", skip(tcx))] +pub fn check_type_bounds<'tcx>( + tcx: TyCtxt<'tcx>, + trait_ty: &ty::AssocItem, + impl_ty: &ty::AssocItem, + impl_ty_span: Span, + impl_trait_ref: ty::TraitRef<'tcx>, +) -> Result<(), ErrorGuaranteed> { + // Given + // + // impl Foo for (A, B) { + // type Bar =... + // } + // + // - `impl_trait_ref` would be `<(A, B) as Foo> + // - `impl_ty_substs` would be `[A, B, ^0.0]` (`^0.0` here is the bound var with db 0 and index 0) + // - `rebased_substs` would be `[(A, B), u32, ^0.0]`, combining the substs from + // the *trait* with the generic associated type parameters (as bound vars). + // + // A note regarding the use of bound vars here: + // Imagine as an example + // ``` + // trait Family { + // type Member; + // } + // + // impl Family for VecFamily { + // type Member = i32; + // } + // ``` + // Here, we would generate + // ```notrust + // forall { Normalize(::Member => i32) } + // ``` + // when we really would like to generate + // ```notrust + // forall { Normalize(::Member => i32) :- Implemented(C: Eq) } + // ``` + // But, this is probably fine, because although the first clause can be used with types C that + // do not implement Eq, for it to cause some kind of problem, there would have to be a + // VecFamily::Member for some type X where !(X: Eq), that appears in the value of type + // Member = .... That type would fail a well-formedness check that we ought to be doing + // elsewhere, which would check that any ::Member meets the bounds declared in + // the trait (notably, that X: Eq and T: Family). + let defs: &ty::Generics = tcx.generics_of(impl_ty.def_id); + let mut substs = smallvec::SmallVec::with_capacity(defs.count()); + if let Some(def_id) = defs.parent { + let parent_defs = tcx.generics_of(def_id); + InternalSubsts::fill_item(&mut substs, tcx, parent_defs, &mut |param, _| { + tcx.mk_param_from_def(param) + }); + } + let mut bound_vars: smallvec::SmallVec<[ty::BoundVariableKind; 8]> = + smallvec::SmallVec::with_capacity(defs.count()); + InternalSubsts::fill_single(&mut substs, defs, &mut |param, _| match param.kind { + GenericParamDefKind::Type { .. } => { + let kind = ty::BoundTyKind::Param(param.name); + let bound_var = ty::BoundVariableKind::Ty(kind); + bound_vars.push(bound_var); + tcx.mk_ty(ty::Bound( + ty::INNERMOST, + ty::BoundTy { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind }, + )) + .into() + } + GenericParamDefKind::Lifetime => { + let kind = ty::BoundRegionKind::BrNamed(param.def_id, param.name); + let bound_var = ty::BoundVariableKind::Region(kind); + bound_vars.push(bound_var); + tcx.mk_region(ty::ReLateBound( + ty::INNERMOST, + ty::BoundRegion { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind }, + )) + .into() + } + GenericParamDefKind::Const { .. 
} => { + let bound_var = ty::BoundVariableKind::Const; + bound_vars.push(bound_var); + tcx.mk_const(ty::ConstS { + ty: tcx.type_of(param.def_id), + kind: ty::ConstKind::Bound( + ty::INNERMOST, + ty::BoundVar::from_usize(bound_vars.len() - 1), + ), + }) + .into() + } + }); + let bound_vars = tcx.mk_bound_variable_kinds(bound_vars.into_iter()); + let impl_ty_substs = tcx.intern_substs(&substs); + let container_id = impl_ty.container_id(tcx); + + let rebased_substs = impl_ty_substs.rebase_onto(tcx, container_id, impl_trait_ref.substs); + let impl_ty_value = tcx.type_of(impl_ty.def_id); + + let param_env = tcx.param_env(impl_ty.def_id); + + // When checking something like + // + // trait X { type Y: PartialEq<::Y> } + // impl X for T { default type Y = S; } + // + // We will have to prove the bound S: PartialEq<::Y>. In this case + // we want ::Y to normalize to S. This is valid because we are + // checking the default value specifically here. Add this equality to the + // ParamEnv for normalization specifically. + let normalize_param_env = { + let mut predicates = param_env.caller_bounds().iter().collect::>(); + match impl_ty_value.kind() { + ty::Projection(proj) + if proj.item_def_id == trait_ty.def_id && proj.substs == rebased_substs => + { + // Don't include this predicate if the projected type is + // exactly the same as the projection. This can occur in + // (somewhat dubious) code like this: + // + // impl X for T where T: X { type Y = ::Y; } + } + _ => predicates.push( + ty::Binder::bind_with_vars( + ty::ProjectionPredicate { + projection_ty: ty::ProjectionTy { + item_def_id: trait_ty.def_id, + substs: rebased_substs, + }, + term: impl_ty_value.into(), + }, + bound_vars, + ) + .to_predicate(tcx), + ), + }; + ty::ParamEnv::new( + tcx.intern_predicates(&predicates), + Reveal::UserFacing, + param_env.constness(), + ) + }; + debug!(?normalize_param_env); + + let impl_ty_substs = InternalSubsts::identity_for_item(tcx, impl_ty.def_id); + let rebased_substs = impl_ty_substs.rebase_onto(tcx, container_id, impl_trait_ref.substs); + + tcx.infer_ctxt().enter(move |infcx| { + let ocx = ObligationCtxt::new(&infcx); + + let mut selcx = traits::SelectionContext::new(&infcx); + let impl_ty_hir_id = tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local()); + let normalize_cause = ObligationCause::new( + impl_ty_span, + impl_ty_hir_id, + ObligationCauseCode::CheckAssociatedTypeBounds { + impl_item_def_id: impl_ty.def_id.expect_local(), + trait_item_def_id: trait_ty.def_id, + }, + ); + let mk_cause = |span: Span| { + let code = if span.is_dummy() { + traits::MiscObligation + } else { + traits::BindingObligation(trait_ty.def_id, span) + }; + ObligationCause::new(impl_ty_span, impl_ty_hir_id, code) + }; + + let obligations = tcx + .bound_explicit_item_bounds(trait_ty.def_id) + .transpose_iter() + .map(|e| e.map_bound(|e| *e).transpose_tuple2()) + .map(|(bound, span)| { + debug!(?bound); + // this is where opaque type is found + let concrete_ty_bound = bound.subst(tcx, rebased_substs); + debug!("check_type_bounds: concrete_ty_bound = {:?}", concrete_ty_bound); + + traits::Obligation::new(mk_cause(span.0), param_env, concrete_ty_bound) + }) + .collect(); + debug!("check_type_bounds: item_bounds={:?}", obligations); + + for mut obligation in util::elaborate_obligations(tcx, obligations) { + let traits::Normalized { value: normalized_predicate, obligations } = traits::normalize( + &mut selcx, + normalize_param_env, + normalize_cause.clone(), + obligation.predicate, + ); + 
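// Illustration (invented `Container`/`Bag`; E0277 is the usual unsatisfied
// bound error surfaced through `report_fulfillment_errors` below): the
// obligations elaborated here prove that the type chosen in the impl meets the
// bounds the trait declares on the associated type:
//
//     trait Container { type Item: Copy; }
//     struct Bag;
//     impl Container for Bag {
//         // error[E0277]: the trait bound `String: Copy` is not satisfied
//         type Item = String;
//     }
//
// Conforming impl:
trait Container {
    type Item: Copy;
}
struct Bag;
impl Container for Bag {
    type Item = u8;
}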
debug!("compare_projection_bounds: normalized predicate = {:?}", normalized_predicate); + obligation.predicate = normalized_predicate; + + ocx.register_obligations(obligations); + ocx.register_obligation(obligation); + } + // Check that all obligations are satisfied by the implementation's + // version. + let errors = ocx.select_all_or_error(); + if !errors.is_empty() { + let reported = infcx.report_fulfillment_errors(&errors, None, false); + return Err(reported); + } + + // Finally, resolve all regions. This catches wily misuses of + // lifetime parameters. + let implied_bounds = match impl_ty.container { + ty::TraitContainer => FxHashSet::default(), + ty::ImplContainer => wfcheck::impl_implied_bounds( + tcx, + param_env, + container_id.expect_local(), + impl_ty_span, + ), + }; + let mut outlives_environment = OutlivesEnvironment::new(param_env); + outlives_environment.add_implied_bounds(&infcx, implied_bounds, impl_ty_hir_id); + infcx.check_region_obligations_and_report_errors( + impl_ty.def_id.expect_local(), + &outlives_environment, + ); + + let constraints = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types(); + for (key, value) in constraints { + infcx + .report_mismatched_types( + &ObligationCause::misc( + value.hidden_type.span, + tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local()), + ), + tcx.mk_opaque(key.def_id.to_def_id(), key.substs), + value.hidden_type.ty, + TypeError::Mismatch, + ) + .emit(); + } + + Ok(()) + }) +} + +fn assoc_item_kind_str(impl_item: &ty::AssocItem) -> &'static str { + match impl_item.kind { + ty::AssocKind::Const => "const", + ty::AssocKind::Fn => "method", + ty::AssocKind::Type => "type", + } +} diff --git a/compiler/rustc_typeck/src/check/demand.rs b/compiler/rustc_typeck/src/check/demand.rs new file mode 100644 index 000000000..4de48dc5b --- /dev/null +++ b/compiler/rustc_typeck/src/check/demand.rs @@ -0,0 +1,1442 @@ +use crate::check::FnCtxt; +use rustc_infer::infer::InferOk; +use rustc_middle::middle::stability::EvalResult; +use rustc_trait_selection::infer::InferCtxtExt as _; +use rustc_trait_selection::traits::ObligationCause; + +use rustc_ast::util::parser::PREC_POSTFIX; +use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed}; +use rustc_hir as hir; +use rustc_hir::lang_items::LangItem; +use rustc_hir::{is_range_literal, Node}; +use rustc_middle::lint::in_external_macro; +use rustc_middle::ty::adjustment::AllowTwoPhase; +use rustc_middle::ty::error::{ExpectedFound, TypeError}; +use rustc_middle::ty::print::with_no_trimmed_paths; +use rustc_middle::ty::{self, Article, AssocItem, Ty, TypeAndMut}; +use rustc_span::symbol::{sym, Symbol}; +use rustc_span::{BytePos, Span}; + +use super::method::probe; + +use std::iter; + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + pub fn emit_coerce_suggestions( + &self, + err: &mut Diagnostic, + expr: &hir::Expr<'tcx>, + expr_ty: Ty<'tcx>, + expected: Ty<'tcx>, + expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>, + error: Option>, + ) { + self.annotate_expected_due_to_let_ty(err, expr, error); + self.suggest_deref_ref_or_into(err, expr, expected, expr_ty, expected_ty_expr); + self.suggest_compatible_variants(err, expr, expected, expr_ty); + self.suggest_non_zero_new_unwrap(err, expr, expected, expr_ty); + if self.suggest_calling_boxed_future_when_appropriate(err, expr, expected, expr_ty) { + return; + } + self.suggest_no_capture_closure(err, expected, expr_ty); + self.suggest_boxing_when_appropriate(err, expr, expected, expr_ty); + self.suggest_missing_parentheses(err, expr); 
+ self.suggest_block_to_brackets_peeling_refs(err, expr, expr_ty, expected); + self.note_type_is_not_clone(err, expected, expr_ty, expr); + self.note_need_for_fn_pointer(err, expected, expr_ty); + self.note_internal_mutation_in_method(err, expr, expected, expr_ty); + self.report_closure_inferred_return_type(err, expected); + } + + // Requires that the two types unify, and prints an error message if + // they don't. + pub fn demand_suptype(&self, sp: Span, expected: Ty<'tcx>, actual: Ty<'tcx>) { + if let Some(mut e) = self.demand_suptype_diag(sp, expected, actual) { + e.emit(); + } + } + + pub fn demand_suptype_diag( + &self, + sp: Span, + expected: Ty<'tcx>, + actual: Ty<'tcx>, + ) -> Option> { + self.demand_suptype_with_origin(&self.misc(sp), expected, actual) + } + + #[instrument(skip(self), level = "debug")] + pub fn demand_suptype_with_origin( + &self, + cause: &ObligationCause<'tcx>, + expected: Ty<'tcx>, + actual: Ty<'tcx>, + ) -> Option> { + match self.at(cause, self.param_env).sup(expected, actual) { + Ok(InferOk { obligations, value: () }) => { + self.register_predicates(obligations); + None + } + Err(e) => Some(self.report_mismatched_types(&cause, expected, actual, e)), + } + } + + pub fn demand_eqtype(&self, sp: Span, expected: Ty<'tcx>, actual: Ty<'tcx>) { + if let Some(mut err) = self.demand_eqtype_diag(sp, expected, actual) { + err.emit(); + } + } + + pub fn demand_eqtype_diag( + &self, + sp: Span, + expected: Ty<'tcx>, + actual: Ty<'tcx>, + ) -> Option> { + self.demand_eqtype_with_origin(&self.misc(sp), expected, actual) + } + + pub fn demand_eqtype_with_origin( + &self, + cause: &ObligationCause<'tcx>, + expected: Ty<'tcx>, + actual: Ty<'tcx>, + ) -> Option> { + match self.at(cause, self.param_env).eq(expected, actual) { + Ok(InferOk { obligations, value: () }) => { + self.register_predicates(obligations); + None + } + Err(e) => Some(self.report_mismatched_types(cause, expected, actual, e)), + } + } + + pub fn demand_coerce( + &self, + expr: &hir::Expr<'tcx>, + checked_ty: Ty<'tcx>, + expected: Ty<'tcx>, + expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>, + allow_two_phase: AllowTwoPhase, + ) -> Ty<'tcx> { + let (ty, err) = + self.demand_coerce_diag(expr, checked_ty, expected, expected_ty_expr, allow_two_phase); + if let Some(mut err) = err { + err.emit(); + } + ty + } + + /// Checks that the type of `expr` can be coerced to `expected`. + /// + /// N.B., this code relies on `self.diverges` to be accurate. In particular, assignments to `!` + /// will be permitted if the diverges flag is currently "always". 
+ #[tracing::instrument(level = "debug", skip(self, expr, expected_ty_expr, allow_two_phase))] + pub fn demand_coerce_diag( + &self, + expr: &hir::Expr<'tcx>, + checked_ty: Ty<'tcx>, + expected: Ty<'tcx>, + expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>, + allow_two_phase: AllowTwoPhase, + ) -> (Ty<'tcx>, Option>) { + let expected = self.resolve_vars_with_obligations(expected); + + let e = match self.try_coerce(expr, checked_ty, expected, allow_two_phase, None) { + Ok(ty) => return (ty, None), + Err(e) => e, + }; + + self.set_tainted_by_errors(); + let expr = expr.peel_drop_temps(); + let cause = self.misc(expr.span); + let expr_ty = self.resolve_vars_with_obligations(checked_ty); + let mut err = self.report_mismatched_types(&cause, expected, expr_ty, e.clone()); + + let is_insufficiently_polymorphic = + matches!(e, TypeError::RegionsInsufficientlyPolymorphic(..)); + + // FIXME(#73154): For now, we do leak check when coercing function + // pointers in typeck, instead of only during borrowck. This can lead + // to these `RegionsInsufficientlyPolymorphic` errors that aren't helpful. + if !is_insufficiently_polymorphic { + self.emit_coerce_suggestions( + &mut err, + expr, + expr_ty, + expected, + expected_ty_expr, + Some(e), + ); + } + + (expected, Some(err)) + } + + fn annotate_expected_due_to_let_ty( + &self, + err: &mut Diagnostic, + expr: &hir::Expr<'_>, + error: Option>, + ) { + let parent = self.tcx.hir().get_parent_node(expr.hir_id); + match (self.tcx.hir().find(parent), error) { + (Some(hir::Node::Local(hir::Local { ty: Some(ty), init: Some(init), .. })), _) + if init.hir_id == expr.hir_id => + { + // Point at `let` assignment type. + err.span_label(ty.span, "expected due to this"); + } + ( + Some(hir::Node::Expr(hir::Expr { + kind: hir::ExprKind::Assign(lhs, rhs, _), .. + })), + Some(TypeError::Sorts(ExpectedFound { expected, .. })), + ) if rhs.hir_id == expr.hir_id && !expected.is_closure() => { + // We ignore closures explicitly because we already point at them elsewhere. + // Point at the assigned-to binding. + let mut primary_span = lhs.span; + let mut secondary_span = lhs.span; + let mut post_message = ""; + match lhs.kind { + hir::ExprKind::Path(hir::QPath::Resolved( + None, + hir::Path { + res: + hir::def::Res::Def( + hir::def::DefKind::Static(_) | hir::def::DefKind::Const, + def_id, + ), + .. + }, + )) => { + if let Some(hir::Node::Item(hir::Item { + ident, + kind: hir::ItemKind::Static(ty, ..) | hir::ItemKind::Const(ty, ..), + .. + })) = self.tcx.hir().get_if_local(*def_id) + { + primary_span = ty.span; + secondary_span = ident.span; + post_message = " type"; + } + } + hir::ExprKind::Path(hir::QPath::Resolved( + None, + hir::Path { res: hir::def::Res::Local(hir_id), .. }, + )) => { + if let Some(hir::Node::Pat(pat)) = self.tcx.hir().find(*hir_id) { + let parent = self.tcx.hir().get_parent_node(pat.hir_id); + primary_span = pat.span; + secondary_span = pat.span; + match self.tcx.hir().find(parent) { + Some(hir::Node::Local(hir::Local { ty: Some(ty), .. })) => { + primary_span = ty.span; + post_message = " type"; + } + Some(hir::Node::Local(hir::Local { init: Some(init), .. })) => { + primary_span = init.span; + post_message = " value"; + } + Some(hir::Node::Param(hir::Param { ty_span, .. 
})) => { + primary_span = *ty_span; + post_message = " parameter type"; + } + _ => {} + } + } + } + _ => {} + } + + if primary_span != secondary_span + && self + .tcx + .sess + .source_map() + .is_multiline(secondary_span.shrink_to_hi().until(primary_span)) + { + // We are pointing at the binding's type or initializer value, but it's pattern + // is in a different line, so we point at both. + err.span_label(secondary_span, "expected due to the type of this binding"); + err.span_label(primary_span, &format!("expected due to this{post_message}")); + } else if post_message == "" { + // We are pointing at either the assignment lhs or the binding def pattern. + err.span_label(primary_span, "expected due to the type of this binding"); + } else { + // We are pointing at the binding's type or initializer value. + err.span_label(primary_span, &format!("expected due to this{post_message}")); + } + + if !lhs.is_syntactic_place_expr() { + // We already emitted E0070 "invalid left-hand side of assignment", so we + // silence this. + err.downgrade_to_delayed_bug(); + } + } + _ => {} + } + } + + /// If the expected type is an enum (Issue #55250) with any variants whose + /// sole field is of the found type, suggest such variants. (Issue #42764) + fn suggest_compatible_variants( + &self, + err: &mut Diagnostic, + expr: &hir::Expr<'_>, + expected: Ty<'tcx>, + expr_ty: Ty<'tcx>, + ) { + if let ty::Adt(expected_adt, substs) = expected.kind() { + if let hir::ExprKind::Field(base, ident) = expr.kind { + let base_ty = self.typeck_results.borrow().expr_ty(base); + if self.can_eq(self.param_env, base_ty, expected).is_ok() + && let Some(base_span) = base.span.find_ancestor_inside(expr.span) + { + err.span_suggestion_verbose( + expr.span.with_lo(base_span.hi()), + format!("consider removing the tuple struct field `{ident}`"), + "", + Applicability::MaybeIncorrect, + ); + return + } + } + + // If the expression is of type () and it's the return expression of a block, + // we suggest adding a separate return expression instead. + // (To avoid things like suggesting `Ok(while .. { .. })`.) + if expr_ty.is_unit() { + let mut id = expr.hir_id; + let mut parent; + + // Unroll desugaring, to make sure this works for `for` loops etc. + loop { + parent = self.tcx.hir().get_parent_node(id); + if let Some(parent_span) = self.tcx.hir().opt_span(parent) { + if parent_span.find_ancestor_inside(expr.span).is_some() { + // The parent node is part of the same span, so is the result of the + // same expansion/desugaring and not the 'real' parent node. + id = parent; + continue; + } + } + break; + } + + if let Some(hir::Node::Block(&hir::Block { + span: block_span, expr: Some(e), .. + })) = self.tcx.hir().find(parent) + { + if e.hir_id == id { + if let Some(span) = expr.span.find_ancestor_inside(block_span) { + let return_suggestions = if self + .tcx + .is_diagnostic_item(sym::Result, expected_adt.did()) + { + vec!["Ok(())"] + } else if self.tcx.is_diagnostic_item(sym::Option, expected_adt.did()) { + vec!["None", "Some(())"] + } else { + return; + }; + if let Some(indent) = + self.tcx.sess.source_map().indentation_before(span.shrink_to_lo()) + { + // Add a semicolon, except after `}`. 
+ let semicolon = + match self.tcx.sess.source_map().span_to_snippet(span) { + Ok(s) if s.ends_with('}') => "", + _ => ";", + }; + err.span_suggestions( + span.shrink_to_hi(), + "try adding an expression at the end of the block", + return_suggestions + .into_iter() + .map(|r| format!("{semicolon}\n{indent}{r}")), + Applicability::MaybeIncorrect, + ); + } + return; + } + } + } + } + + let compatible_variants: Vec<(String, _, _, Option)> = expected_adt + .variants() + .iter() + .filter(|variant| { + variant.fields.len() == 1 + }) + .filter_map(|variant| { + let sole_field = &variant.fields[0]; + + let field_is_local = sole_field.did.is_local(); + let field_is_accessible = + sole_field.vis.is_accessible_from(expr.hir_id.owner.to_def_id(), self.tcx) + // Skip suggestions for unstable public fields (for example `Pin::pointer`) + && matches!(self.tcx.eval_stability(sole_field.did, None, expr.span, None), EvalResult::Allow | EvalResult::Unmarked); + + if !field_is_local && !field_is_accessible { + return None; + } + + let note_about_variant_field_privacy = (field_is_local && !field_is_accessible) + .then(|| format!(" (its field is private, but it's local to this crate and its privacy can be changed)")); + + let sole_field_ty = sole_field.ty(self.tcx, substs); + if self.can_coerce(expr_ty, sole_field_ty) { + let variant_path = + with_no_trimmed_paths!(self.tcx.def_path_str(variant.def_id)); + // FIXME #56861: DRYer prelude filtering + if let Some(path) = variant_path.strip_prefix("std::prelude::") + && let Some((_, path)) = path.split_once("::") + { + return Some((path.to_string(), variant.ctor_kind, sole_field.name, note_about_variant_field_privacy)); + } + Some((variant_path, variant.ctor_kind, sole_field.name, note_about_variant_field_privacy)) + } else { + None + } + }) + .collect(); + + let suggestions_for = |variant: &_, ctor, field_name| { + let prefix = match self.maybe_get_struct_pattern_shorthand_field(expr) { + Some(ident) => format!("{ident}: "), + None => String::new(), + }; + + let (open, close) = match ctor { + hir::def::CtorKind::Fn => ("(".to_owned(), ")"), + hir::def::CtorKind::Fictive => (format!(" {{ {field_name}: "), " }"), + + // unit variants don't have fields + hir::def::CtorKind::Const => unreachable!(), + }; + + vec![ + (expr.span.shrink_to_lo(), format!("{prefix}{variant}{open}")), + (expr.span.shrink_to_hi(), close.to_owned()), + ] + }; + + match &compatible_variants[..] { + [] => { /* No variants to format */ } + [(variant, ctor_kind, field_name, note)] => { + // Just a single matching variant. + err.multipart_suggestion_verbose( + &format!( + "try wrapping the expression in `{variant}`{note}", + note = note.as_deref().unwrap_or("") + ), + suggestions_for(&**variant, *ctor_kind, *field_name), + Applicability::MaybeIncorrect, + ); + } + _ => { + // More than one matching variant. 
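For reference, the kind of user code that reaches the single-variant arm above; this is an illustrative snippet rather than part of the patch, and the comment paraphrases the "try wrapping the expression in `...`" message built here.

```rust
fn default_port(tls: bool) -> Option<u16> {
    let n: u16 = if tls { 443 } else { 80 };
    // Returning `n` directly is a mismatch (`u16` vs `Option<u16>`). `Some` is
    // the only variant with a single field of a coercible type, so the
    // diagnostic suggests "try wrapping the expression in `Some`", as below.
    Some(n)
}

fn main() {
    assert_eq!(default_port(true), Some(443));
}
```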
+ err.multipart_suggestions( + &format!( + "try wrapping the expression in a variant of `{}`", + self.tcx.def_path_str(expected_adt.did()) + ), + compatible_variants.into_iter().map( + |(variant, ctor_kind, field_name, _)| { + suggestions_for(&variant, ctor_kind, field_name) + }, + ), + Applicability::MaybeIncorrect, + ); + } + } + } + } + + fn suggest_non_zero_new_unwrap( + &self, + err: &mut Diagnostic, + expr: &hir::Expr<'_>, + expected: Ty<'tcx>, + expr_ty: Ty<'tcx>, + ) { + let tcx = self.tcx; + let (adt, unwrap) = match expected.kind() { + // In case Option is wanted, but * is provided, suggest calling new + ty::Adt(adt, substs) if tcx.is_diagnostic_item(sym::Option, adt.did()) => { + // Unwrap option + let ty::Adt(adt, _) = substs.type_at(0).kind() else { return }; + + (adt, "") + } + // In case NonZero* is wanted, but * is provided also add `.unwrap()` to satisfy types + ty::Adt(adt, _) => (adt, ".unwrap()"), + _ => return, + }; + + let map = [ + (sym::NonZeroU8, tcx.types.u8), + (sym::NonZeroU16, tcx.types.u16), + (sym::NonZeroU32, tcx.types.u32), + (sym::NonZeroU64, tcx.types.u64), + (sym::NonZeroU128, tcx.types.u128), + (sym::NonZeroI8, tcx.types.i8), + (sym::NonZeroI16, tcx.types.i16), + (sym::NonZeroI32, tcx.types.i32), + (sym::NonZeroI64, tcx.types.i64), + (sym::NonZeroI128, tcx.types.i128), + ]; + + let Some((s, _)) = map + .iter() + .find(|&&(s, t)| self.tcx.is_diagnostic_item(s, adt.did()) && self.can_coerce(expr_ty, t)) + else { return }; + + let path = self.tcx.def_path_str(adt.non_enum_variant().def_id); + + err.multipart_suggestion( + format!("consider calling `{s}::new`"), + vec![ + (expr.span.shrink_to_lo(), format!("{path}::new(")), + (expr.span.shrink_to_hi(), format!("){unwrap}")), + ], + Applicability::MaybeIncorrect, + ); + } + + pub fn get_conversion_methods( + &self, + span: Span, + expected: Ty<'tcx>, + checked_ty: Ty<'tcx>, + hir_id: hir::HirId, + ) -> Vec { + let mut methods = + self.probe_for_return_type(span, probe::Mode::MethodCall, expected, checked_ty, hir_id); + methods.retain(|m| { + self.has_only_self_parameter(m) + && self + .tcx + // This special internal attribute is used to permit + // "identity-like" conversion methods to be suggested here. + // + // FIXME (#46459 and #46460): ideally + // `std::convert::Into::into` and `std::borrow:ToOwned` would + // also be `#[rustc_conversion_suggestion]`, if not for + // method-probing false-positives and -negatives (respectively). + // + // FIXME? Other potential candidate methods: `as_ref` and + // `as_mut`? + .has_attr(m.def_id, sym::rustc_conversion_suggestion) + }); + + methods + } + + /// This function checks whether the method is not static and does not accept other parameters than `self`. + fn has_only_self_parameter(&self, method: &AssocItem) -> bool { + match method.kind { + ty::AssocKind::Fn => { + method.fn_has_self_parameter + && self.tcx.fn_sig(method.def_id).inputs().skip_binder().len() == 1 + } + _ => false, + } + } + + /// Identify some cases where `as_ref()` would be appropriate and suggest it. + /// + /// Given the following code: + /// ```compile_fail,E0308 + /// struct Foo; + /// fn takes_ref(_: &Foo) {} + /// let ref opt = Some(Foo); + /// + /// opt.map(|param| takes_ref(param)); + /// ``` + /// Suggest using `opt.as_ref().map(|param| takes_ref(param));` instead. 
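The `suggest_non_zero_new_unwrap` helper above targets the common case of supplying a plain integer where a `NonZero*` (or `Option<NonZero*>`) is expected. A small standalone example of the code it nudges users toward (illustrative only):

```rust
use std::num::NonZeroU32;

fn main() {
    // Originally `let opt: Option<NonZeroU32> = 5;` (a type mismatch); the
    // helper suggests wrapping the value in `NonZeroU32::new(...)`.
    let opt: Option<NonZeroU32> = NonZeroU32::new(5);
    // When the non-optional type is expected, `.unwrap()` is appended as well.
    let n: NonZeroU32 = NonZeroU32::new(5).unwrap();
    assert_eq!(n.get(), 5);
    assert!(opt.is_some());
}
```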
+ /// + /// It only checks for `Option` and `Result` and won't work with + /// ```ignore (illustrative) + /// opt.map(|param| { takes_ref(param) }); + /// ``` + fn can_use_as_ref(&self, expr: &hir::Expr<'_>) -> Option<(Span, &'static str, String)> { + let hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) = expr.kind else { + return None; + }; + + let hir::def::Res::Local(local_id) = path.res else { + return None; + }; + + let local_parent = self.tcx.hir().get_parent_node(local_id); + let Some(Node::Param(hir::Param { hir_id: param_hir_id, .. })) = self.tcx.hir().find(local_parent) else { + return None; + }; + + let param_parent = self.tcx.hir().get_parent_node(*param_hir_id); + let Some(Node::Expr(hir::Expr { + hir_id: expr_hir_id, + kind: hir::ExprKind::Closure(hir::Closure { fn_decl: closure_fn_decl, .. }), + .. + })) = self.tcx.hir().find(param_parent) else { + return None; + }; + + let expr_parent = self.tcx.hir().get_parent_node(*expr_hir_id); + let hir = self.tcx.hir().find(expr_parent); + let closure_params_len = closure_fn_decl.inputs.len(); + let ( + Some(Node::Expr(hir::Expr { + kind: hir::ExprKind::MethodCall(method_path, method_expr, _), + .. + })), + 1, + ) = (hir, closure_params_len) else { + return None; + }; + + let self_ty = self.typeck_results.borrow().expr_ty(&method_expr[0]); + let self_ty = format!("{:?}", self_ty); + let name = method_path.ident.name; + let is_as_ref_able = (self_ty.starts_with("&std::option::Option") + || self_ty.starts_with("&std::result::Result") + || self_ty.starts_with("std::option::Option") + || self_ty.starts_with("std::result::Result")) + && (name == sym::map || name == sym::and_then); + match (is_as_ref_able, self.sess().source_map().span_to_snippet(method_path.ident.span)) { + (true, Ok(src)) => { + let suggestion = format!("as_ref().{}", src); + Some((method_path.ident.span, "consider using `as_ref` instead", suggestion)) + } + _ => None, + } + } + + pub(crate) fn maybe_get_struct_pattern_shorthand_field( + &self, + expr: &hir::Expr<'_>, + ) -> Option { + let hir = self.tcx.hir(); + let local = match expr { + hir::Expr { + kind: + hir::ExprKind::Path(hir::QPath::Resolved( + None, + hir::Path { + res: hir::def::Res::Local(_), + segments: [hir::PathSegment { ident, .. }], + .. + }, + )), + .. + } => Some(ident), + _ => None, + }?; + + match hir.find(hir.get_parent_node(expr.hir_id))? { + Node::Expr(hir::Expr { kind: hir::ExprKind::Struct(_, fields, ..), .. }) => { + for field in *fields { + if field.ident.name == local.name && field.is_shorthand { + return Some(local.name); + } + } + } + _ => {} + } + + None + } + + /// If the given `HirId` corresponds to a block with a trailing expression, return that expression + pub(crate) fn maybe_get_block_expr( + &self, + expr: &hir::Expr<'tcx>, + ) -> Option<&'tcx hir::Expr<'tcx>> { + match expr { + hir::Expr { kind: hir::ExprKind::Block(block, ..), .. } => block.expr, + _ => None, + } + } + + /// Returns whether the given expression is an `else if`. + pub(crate) fn is_else_if_block(&self, expr: &hir::Expr<'_>) -> bool { + if let hir::ExprKind::If(..) = expr.kind { + let parent_id = self.tcx.hir().get_parent_node(expr.hir_id); + if let Some(Node::Expr(hir::Expr { + kind: hir::ExprKind::If(_, _, Some(else_expr)), + .. + })) = self.tcx.hir().find(parent_id) + { + return else_expr.hir_id == expr.hir_id; + } + } + false + } + + /// This function is used to determine potential "simple" improvements or users' errors and + /// provide them useful help. 
For example: + /// + /// ```compile_fail,E0308 + /// fn some_fn(s: &str) {} + /// + /// let x = "hey!".to_owned(); + /// some_fn(x); // error + /// ``` + /// + /// No need to find every potential function which could make a coercion to transform a + /// `String` into a `&str` since a `&` would do the trick! + /// + /// In addition of this check, it also checks between references mutability state. If the + /// expected is mutable but the provided isn't, maybe we could just say "Hey, try with + /// `&mut`!". + pub fn check_ref( + &self, + expr: &hir::Expr<'tcx>, + checked_ty: Ty<'tcx>, + expected: Ty<'tcx>, + ) -> Option<(Span, String, String, Applicability, bool /* verbose */)> { + let sess = self.sess(); + let sp = expr.span; + + // If the span is from an external macro, there's no suggestion we can make. + if in_external_macro(sess, sp) { + return None; + } + + let sm = sess.source_map(); + + let replace_prefix = |s: &str, old: &str, new: &str| { + s.strip_prefix(old).map(|stripped| new.to_string() + stripped) + }; + + // `ExprKind::DropTemps` is semantically irrelevant for these suggestions. + let expr = expr.peel_drop_temps(); + + match (&expr.kind, expected.kind(), checked_ty.kind()) { + (_, &ty::Ref(_, exp, _), &ty::Ref(_, check, _)) => match (exp.kind(), check.kind()) { + (&ty::Str, &ty::Array(arr, _) | &ty::Slice(arr)) if arr == self.tcx.types.u8 => { + if let hir::ExprKind::Lit(_) = expr.kind + && let Ok(src) = sm.span_to_snippet(sp) + && replace_prefix(&src, "b\"", "\"").is_some() + { + let pos = sp.lo() + BytePos(1); + return Some(( + sp.with_hi(pos), + "consider removing the leading `b`".to_string(), + String::new(), + Applicability::MachineApplicable, + true, + )); + } + } + (&ty::Array(arr, _) | &ty::Slice(arr), &ty::Str) if arr == self.tcx.types.u8 => { + if let hir::ExprKind::Lit(_) = expr.kind + && let Ok(src) = sm.span_to_snippet(sp) + && replace_prefix(&src, "\"", "b\"").is_some() + { + return Some(( + sp.shrink_to_lo(), + "consider adding a leading `b`".to_string(), + "b".to_string(), + Applicability::MachineApplicable, + true, + )); + } + } + _ => {} + }, + (_, &ty::Ref(_, _, mutability), _) => { + // Check if it can work when put into a ref. For example: + // + // ``` + // fn bar(x: &mut i32) {} + // + // let x = 0u32; + // bar(&x); // error, expected &mut + // ``` + let ref_ty = match mutability { + hir::Mutability::Mut => { + self.tcx.mk_mut_ref(self.tcx.mk_region(ty::ReStatic), checked_ty) + } + hir::Mutability::Not => { + self.tcx.mk_imm_ref(self.tcx.mk_region(ty::ReStatic), checked_ty) + } + }; + if self.can_coerce(ref_ty, expected) { + let mut sugg_sp = sp; + if let hir::ExprKind::MethodCall(ref segment, ref args, _) = expr.kind { + let clone_trait = + self.tcx.require_lang_item(LangItem::Clone, Some(segment.ident.span)); + if let ([arg], Some(true), sym::clone) = ( + &args[..], + self.typeck_results.borrow().type_dependent_def_id(expr.hir_id).map( + |did| { + let ai = self.tcx.associated_item(did); + ai.trait_container(self.tcx) == Some(clone_trait) + }, + ), + segment.ident.name, + ) { + // If this expression had a clone call when suggesting borrowing + // we want to suggest removing it because it'd now be unnecessary. 
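The borrow suggestions assembled above (including dropping a now-redundant `.clone()`) correspond to user code like the following; this is a self-contained illustration, not compiler code:

```rust
struct Foo;

fn takes_ref(_: &Foo) {}
fn takes_mut(v: &mut Vec<i32>) {
    v.push(1);
}

fn main() {
    let x = Foo;
    // `takes_ref(x)` mismatches (`Foo` vs `&Foo`); the fix-it is "consider
    // borrowing here". If the argument had been `x.clone()`, the suggestion
    // span targets just `x`, so the unnecessary clone disappears too.
    takes_ref(&x);

    let mut v = Vec::new();
    // For a `&mut` parameter the message is "consider mutably borrowing here".
    takes_mut(&mut v);
    assert_eq!(v, [1]);
}
```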
+ sugg_sp = arg.span; + } + } + if let Ok(src) = sm.span_to_snippet(sugg_sp) { + let needs_parens = match expr.kind { + // parenthesize if needed (Issue #46756) + hir::ExprKind::Cast(_, _) | hir::ExprKind::Binary(_, _, _) => true, + // parenthesize borrows of range literals (Issue #54505) + _ if is_range_literal(expr) => true, + _ => false, + }; + let sugg_expr = if needs_parens { format!("({src})") } else { src }; + + if let Some(sugg) = self.can_use_as_ref(expr) { + return Some(( + sugg.0, + sugg.1.to_string(), + sugg.2, + Applicability::MachineApplicable, + false, + )); + } + + let prefix = match self.maybe_get_struct_pattern_shorthand_field(expr) { + Some(ident) => format!("{ident}: "), + None => String::new(), + }; + + if let Some(hir::Node::Expr(hir::Expr { + kind: hir::ExprKind::Assign(..), + .. + })) = self.tcx.hir().find(self.tcx.hir().get_parent_node(expr.hir_id)) + { + if mutability == hir::Mutability::Mut { + // Suppressing this diagnostic, we'll properly print it in `check_expr_assign` + return None; + } + } + + return Some(match mutability { + hir::Mutability::Mut => ( + sp, + "consider mutably borrowing here".to_string(), + format!("{prefix}&mut {sugg_expr}"), + Applicability::MachineApplicable, + false, + ), + hir::Mutability::Not => ( + sp, + "consider borrowing here".to_string(), + format!("{prefix}&{sugg_expr}"), + Applicability::MachineApplicable, + false, + ), + }); + } + } + } + ( + hir::ExprKind::AddrOf(hir::BorrowKind::Ref, _, ref expr), + _, + &ty::Ref(_, checked, _), + ) if self.can_sub(self.param_env, checked, expected).is_ok() => { + // We have `&T`, check if what was expected was `T`. If so, + // we may want to suggest removing a `&`. + if sm.is_imported(expr.span) { + // Go through the spans from which this span was expanded, + // and find the one that's pointing inside `sp`. + // + // E.g. for `&format!("")`, where we want the span to the + // `format!()` invocation instead of its expansion. + if let Some(call_span) = + iter::successors(Some(expr.span), |s| s.parent_callsite()) + .find(|&s| sp.contains(s)) + && sm.is_span_accessible(call_span) + { + return Some(( + sp.with_hi(call_span.lo()), + "consider removing the borrow".to_string(), + String::new(), + Applicability::MachineApplicable, + true, + )); + } + return None; + } + if sp.contains(expr.span) + && sm.is_span_accessible(expr.span) + { + return Some(( + sp.with_hi(expr.span.lo()), + "consider removing the borrow".to_string(), + String::new(), + Applicability::MachineApplicable, + true, + )); + } + } + ( + _, + &ty::RawPtr(TypeAndMut { ty: ty_b, mutbl: mutbl_b }), + &ty::Ref(_, ty_a, mutbl_a), + ) => { + if let Some(steps) = self.deref_steps(ty_a, ty_b) + // Only suggest valid if dereferencing needed. + && steps > 0 + // The pointer type implements `Copy` trait so the suggestion is always valid. 
+ && let Ok(src) = sm.span_to_snippet(sp) + { + let derefs = "*".repeat(steps); + if let Some((span, src, applicability)) = match mutbl_b { + hir::Mutability::Mut => { + let new_prefix = "&mut ".to_owned() + &derefs; + match mutbl_a { + hir::Mutability::Mut => { + replace_prefix(&src, "&mut ", &new_prefix).map(|_| { + let pos = sp.lo() + BytePos(5); + let sp = sp.with_lo(pos).with_hi(pos); + (sp, derefs, Applicability::MachineApplicable) + }) + } + hir::Mutability::Not => { + replace_prefix(&src, "&", &new_prefix).map(|_| { + let pos = sp.lo() + BytePos(1); + let sp = sp.with_lo(pos).with_hi(pos); + ( + sp, + format!("mut {derefs}"), + Applicability::Unspecified, + ) + }) + } + } + } + hir::Mutability::Not => { + let new_prefix = "&".to_owned() + &derefs; + match mutbl_a { + hir::Mutability::Mut => { + replace_prefix(&src, "&mut ", &new_prefix).map(|_| { + let lo = sp.lo() + BytePos(1); + let hi = sp.lo() + BytePos(5); + let sp = sp.with_lo(lo).with_hi(hi); + (sp, derefs, Applicability::MachineApplicable) + }) + } + hir::Mutability::Not => { + replace_prefix(&src, "&", &new_prefix).map(|_| { + let pos = sp.lo() + BytePos(1); + let sp = sp.with_lo(pos).with_hi(pos); + (sp, derefs, Applicability::MachineApplicable) + }) + } + } + } + } { + return Some(( + span, + "consider dereferencing".to_string(), + src, + applicability, + true, + )); + } + } + } + _ if sp == expr.span => { + if let Some(mut steps) = self.deref_steps(checked_ty, expected) { + let mut expr = expr.peel_blocks(); + let mut prefix_span = expr.span.shrink_to_lo(); + let mut remove = String::new(); + + // Try peeling off any existing `&` and `&mut` to reach our target type + while steps > 0 { + if let hir::ExprKind::AddrOf(_, mutbl, inner) = expr.kind { + // If the expression has `&`, removing it would fix the error + prefix_span = prefix_span.with_hi(inner.span.lo()); + expr = inner; + remove += match mutbl { + hir::Mutability::Not => "&", + hir::Mutability::Mut => "&mut ", + }; + steps -= 1; + } else { + break; + } + } + // If we've reached our target type with just removing `&`, then just print now. + if steps == 0 { + return Some(( + prefix_span, + format!("consider removing the `{}`", remove.trim()), + String::new(), + // Do not remove `&&` to get to bool, because it might be something like + // { a } && b, which we have a separate fixup suggestion that is more + // likely correct... + if remove.trim() == "&&" && expected == self.tcx.types.bool { + Applicability::MaybeIncorrect + } else { + Applicability::MachineApplicable + }, + true, + )); + } + + // For this suggestion to make sense, the type would need to be `Copy`, + // or we have to be moving out of a `Box` + if self.type_is_copy_modulo_regions(self.param_env, expected, sp) + // FIXME(compiler-errors): We can actually do this if the checked_ty is + // `steps` layers of boxes, not just one, but this is easier and most likely. + || (checked_ty.is_box() && steps == 1) + { + let deref_kind = if checked_ty.is_box() { + "unboxing the value" + } else if checked_ty.is_region_ptr() { + "dereferencing the borrow" + } else { + "dereferencing the type" + }; + + // Suggest removing `&` if we have removed any, otherwise suggest just + // dereferencing the remaining number of steps. 
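Conversely, the branch above handles a found type with more references than expected and suggests peeling the `&`s off or dereferencing. A standalone illustration (not compiler code):

```rust
fn main() {
    let n = 5_i32;
    let r = &n;
    // `let m: i32 = r;` mismatches (`&i32` vs `i32`); the suggestion is
    // "consider dereferencing the borrow":
    let m: i32 = *r;
    // `let k: i32 = &5;` would instead get "consider removing the `&`":
    let k: i32 = 5;
    assert_eq!(m + k, 10);
}
```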
+ let message = if remove.is_empty() { + format!("consider {deref_kind}") + } else { + format!( + "consider removing the `{}` and {} instead", + remove.trim(), + deref_kind + ) + }; + + let prefix = match self.maybe_get_struct_pattern_shorthand_field(expr) { + Some(ident) => format!("{ident}: "), + None => String::new(), + }; + + let (span, suggestion) = if self.is_else_if_block(expr) { + // Don't suggest nonsense like `else *if` + return None; + } else if let Some(expr) = self.maybe_get_block_expr(expr) { + // prefix should be empty here.. + (expr.span.shrink_to_lo(), "*".to_string()) + } else { + (prefix_span, format!("{}{}", prefix, "*".repeat(steps))) + }; + + return Some(( + span, + message, + suggestion, + Applicability::MachineApplicable, + true, + )); + } + } + } + _ => {} + } + None + } + + pub fn check_for_cast( + &self, + err: &mut Diagnostic, + expr: &hir::Expr<'_>, + checked_ty: Ty<'tcx>, + expected_ty: Ty<'tcx>, + expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>, + ) -> bool { + if self.tcx.sess.source_map().is_imported(expr.span) { + // Ignore if span is from within a macro. + return false; + } + + let Ok(src) = self.tcx.sess.source_map().span_to_snippet(expr.span) else { + return false; + }; + + // If casting this expression to a given numeric type would be appropriate in case of a type + // mismatch. + // + // We want to minimize the amount of casting operations that are suggested, as it can be a + // lossy operation with potentially bad side effects, so we only suggest when encountering + // an expression that indicates that the original type couldn't be directly changed. + // + // For now, don't suggest casting with `as`. + let can_cast = false; + + let mut sugg = vec![]; + + if let Some(hir::Node::Expr(hir::Expr { + kind: hir::ExprKind::Struct(_, fields, _), .. + })) = self.tcx.hir().find(self.tcx.hir().get_parent_node(expr.hir_id)) + { + // `expr` is a literal field for a struct, only suggest if appropriate + match (*fields) + .iter() + .find(|field| field.expr.hir_id == expr.hir_id && field.is_shorthand) + { + // This is a field literal + Some(field) => { + sugg.push((field.ident.span.shrink_to_lo(), format!("{}: ", field.ident))); + } + // Likely a field was meant, but this field wasn't found. Do not suggest anything. + None => return false, + } + }; + + if let hir::ExprKind::Call(path, args) = &expr.kind + && let (hir::ExprKind::Path(hir::QPath::TypeRelative(base_ty, path_segment)), 1) = + (&path.kind, args.len()) + // `expr` is a conversion like `u32::from(val)`, do not suggest anything (#63697). 
+ && let (hir::TyKind::Path(hir::QPath::Resolved(None, base_ty_path)), sym::from) = + (&base_ty.kind, path_segment.ident.name) + { + if let Some(ident) = &base_ty_path.segments.iter().map(|s| s.ident).next() { + match ident.name { + sym::i128 + | sym::i64 + | sym::i32 + | sym::i16 + | sym::i8 + | sym::u128 + | sym::u64 + | sym::u32 + | sym::u16 + | sym::u8 + | sym::isize + | sym::usize + if base_ty_path.segments.len() == 1 => + { + return false; + } + _ => {} + } + } + } + + let msg = format!( + "you can convert {} `{}` to {} `{}`", + checked_ty.kind().article(), + checked_ty, + expected_ty.kind().article(), + expected_ty, + ); + let cast_msg = format!( + "you can cast {} `{}` to {} `{}`", + checked_ty.kind().article(), + checked_ty, + expected_ty.kind().article(), + expected_ty, + ); + let lit_msg = format!( + "change the type of the numeric literal from `{checked_ty}` to `{expected_ty}`", + ); + + let close_paren = if expr.precedence().order() < PREC_POSTFIX { + sugg.push((expr.span.shrink_to_lo(), "(".to_string())); + ")" + } else { + "" + }; + + let mut cast_suggestion = sugg.clone(); + cast_suggestion.push((expr.span.shrink_to_hi(), format!("{close_paren} as {expected_ty}"))); + let mut into_suggestion = sugg.clone(); + into_suggestion.push((expr.span.shrink_to_hi(), format!("{close_paren}.into()"))); + let mut suffix_suggestion = sugg.clone(); + suffix_suggestion.push(( + if matches!( + (&expected_ty.kind(), &checked_ty.kind()), + (ty::Int(_) | ty::Uint(_), ty::Float(_)) + ) { + // Remove fractional part from literal, for example `42.0f32` into `42` + let src = src.trim_end_matches(&checked_ty.to_string()); + let len = src.split('.').next().unwrap().len(); + expr.span.with_lo(expr.span.lo() + BytePos(len as u32)) + } else { + let len = src.trim_end_matches(&checked_ty.to_string()).len(); + expr.span.with_lo(expr.span.lo() + BytePos(len as u32)) + }, + if expr.precedence().order() < PREC_POSTFIX { + // Readd `)` + format!("{expected_ty})") + } else { + expected_ty.to_string() + }, + )); + let literal_is_ty_suffixed = |expr: &hir::Expr<'_>| { + if let hir::ExprKind::Lit(lit) = &expr.kind { lit.node.is_suffixed() } else { false } + }; + let is_negative_int = + |expr: &hir::Expr<'_>| matches!(expr.kind, hir::ExprKind::Unary(hir::UnOp::Neg, ..)); + let is_uint = |ty: Ty<'_>| matches!(ty.kind(), ty::Uint(..)); + + let in_const_context = self.tcx.hir().is_inside_const_context(expr.hir_id); + + let suggest_fallible_into_or_lhs_from = + |err: &mut Diagnostic, exp_to_found_is_fallible: bool| { + // If we know the expression the expected type is derived from, we might be able + // to suggest a widening conversion rather than a narrowing one (which may + // panic). For example, given x: u8 and y: u32, if we know the span of "x", + // x > y + // can be given the suggestion "u32::from(x) > y" rather than + // "x > y.try_into().unwrap()". 
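The closure above prefers an infallible widening of the other operand over a fallible narrowing of this one. A standalone example of the preferred form (illustrative; the variable names are made up):

```rust
fn main() {
    let x: u8 = 10;
    let y: u32 = 300;
    // `x > y` mismatches `u8` against `u32`. Because the other operand (`y`) is
    // known, the infallible widening below is suggested in preference to the
    // fallible `x > y.try_into().unwrap()`, which could panic at runtime.
    assert!(!(u32::from(x) > y));
}
```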
+ let lhs_expr_and_src = expected_ty_expr.and_then(|expr| { + self.tcx + .sess + .source_map() + .span_to_snippet(expr.span) + .ok() + .map(|src| (expr, src)) + }); + let (msg, suggestion) = if let (Some((lhs_expr, lhs_src)), false) = + (lhs_expr_and_src, exp_to_found_is_fallible) + { + let msg = format!( + "you can convert `{lhs_src}` from `{expected_ty}` to `{checked_ty}`, matching the type of `{src}`", + ); + let suggestion = vec![ + (lhs_expr.span.shrink_to_lo(), format!("{checked_ty}::from(")), + (lhs_expr.span.shrink_to_hi(), ")".to_string()), + ]; + (msg, suggestion) + } else { + let msg = format!("{msg} and panic if the converted value doesn't fit"); + let mut suggestion = sugg.clone(); + suggestion.push(( + expr.span.shrink_to_hi(), + format!("{close_paren}.try_into().unwrap()"), + )); + (msg, suggestion) + }; + err.multipart_suggestion_verbose( + &msg, + suggestion, + Applicability::MachineApplicable, + ); + }; + + let suggest_to_change_suffix_or_into = + |err: &mut Diagnostic, + found_to_exp_is_fallible: bool, + exp_to_found_is_fallible: bool| { + let exp_is_lhs = + expected_ty_expr.map(|e| self.tcx.hir().is_lhs(e.hir_id)).unwrap_or(false); + + if exp_is_lhs { + return; + } + + let always_fallible = found_to_exp_is_fallible + && (exp_to_found_is_fallible || expected_ty_expr.is_none()); + let msg = if literal_is_ty_suffixed(expr) { + &lit_msg + } else if always_fallible && (is_negative_int(expr) && is_uint(expected_ty)) { + // We now know that converting either the lhs or rhs is fallible. Before we + // suggest a fallible conversion, check if the value can never fit in the + // expected type. + let msg = format!("`{src}` cannot fit into type `{expected_ty}`"); + err.note(&msg); + return; + } else if in_const_context { + // Do not recommend `into` or `try_into` in const contexts. 
+ return; + } else if found_to_exp_is_fallible { + return suggest_fallible_into_or_lhs_from(err, exp_to_found_is_fallible); + } else { + &msg + }; + let suggestion = if literal_is_ty_suffixed(expr) { + suffix_suggestion.clone() + } else { + into_suggestion.clone() + }; + err.multipart_suggestion_verbose(msg, suggestion, Applicability::MachineApplicable); + }; + + match (&expected_ty.kind(), &checked_ty.kind()) { + (&ty::Int(ref exp), &ty::Int(ref found)) => { + let (f2e_is_fallible, e2f_is_fallible) = match (exp.bit_width(), found.bit_width()) + { + (Some(exp), Some(found)) if exp < found => (true, false), + (Some(exp), Some(found)) if exp > found => (false, true), + (None, Some(8 | 16)) => (false, true), + (Some(8 | 16), None) => (true, false), + (None, _) | (_, None) => (true, true), + _ => (false, false), + }; + suggest_to_change_suffix_or_into(err, f2e_is_fallible, e2f_is_fallible); + true + } + (&ty::Uint(ref exp), &ty::Uint(ref found)) => { + let (f2e_is_fallible, e2f_is_fallible) = match (exp.bit_width(), found.bit_width()) + { + (Some(exp), Some(found)) if exp < found => (true, false), + (Some(exp), Some(found)) if exp > found => (false, true), + (None, Some(8 | 16)) => (false, true), + (Some(8 | 16), None) => (true, false), + (None, _) | (_, None) => (true, true), + _ => (false, false), + }; + suggest_to_change_suffix_or_into(err, f2e_is_fallible, e2f_is_fallible); + true + } + (&ty::Int(exp), &ty::Uint(found)) => { + let (f2e_is_fallible, e2f_is_fallible) = match (exp.bit_width(), found.bit_width()) + { + (Some(exp), Some(found)) if found < exp => (false, true), + (None, Some(8)) => (false, true), + _ => (true, true), + }; + suggest_to_change_suffix_or_into(err, f2e_is_fallible, e2f_is_fallible); + true + } + (&ty::Uint(exp), &ty::Int(found)) => { + let (f2e_is_fallible, e2f_is_fallible) = match (exp.bit_width(), found.bit_width()) + { + (Some(exp), Some(found)) if found > exp => (true, false), + (Some(8), None) => (true, false), + _ => (true, true), + }; + suggest_to_change_suffix_or_into(err, f2e_is_fallible, e2f_is_fallible); + true + } + (&ty::Float(ref exp), &ty::Float(ref found)) => { + if found.bit_width() < exp.bit_width() { + suggest_to_change_suffix_or_into(err, false, true); + } else if literal_is_ty_suffixed(expr) { + err.multipart_suggestion_verbose( + &lit_msg, + suffix_suggestion, + Applicability::MachineApplicable, + ); + } else if can_cast { + // Missing try_into implementation for `f64` to `f32` + err.multipart_suggestion_verbose( + &format!("{cast_msg}, producing the closest possible value"), + cast_suggestion, + Applicability::MaybeIncorrect, // lossy conversion + ); + } + true + } + (&ty::Uint(_) | &ty::Int(_), &ty::Float(_)) => { + if literal_is_ty_suffixed(expr) { + err.multipart_suggestion_verbose( + &lit_msg, + suffix_suggestion, + Applicability::MachineApplicable, + ); + } else if can_cast { + // Missing try_into implementation for `{float}` to `{integer}` + err.multipart_suggestion_verbose( + &format!("{msg}, rounding the float towards zero"), + cast_suggestion, + Applicability::MaybeIncorrect, // lossy conversion + ); + } + true + } + (&ty::Float(ref exp), &ty::Uint(ref found)) => { + // if `found` is `None` (meaning found is `usize`), don't suggest `.into()` + if exp.bit_width() > found.bit_width().unwrap_or(256) { + err.multipart_suggestion_verbose( + &format!( + "{msg}, producing the floating point representation of the integer", + ), + into_suggestion, + Applicability::MachineApplicable, + ); + } else if literal_is_ty_suffixed(expr) { + 
err.multipart_suggestion_verbose( + &lit_msg, + suffix_suggestion, + Applicability::MachineApplicable, + ); + } else { + // Missing try_into implementation for `{integer}` to `{float}` + err.multipart_suggestion_verbose( + &format!( + "{cast_msg}, producing the floating point representation of the integer, \ + rounded if necessary", + ), + cast_suggestion, + Applicability::MaybeIncorrect, // lossy conversion + ); + } + true + } + (&ty::Float(ref exp), &ty::Int(ref found)) => { + // if `found` is `None` (meaning found is `isize`), don't suggest `.into()` + if exp.bit_width() > found.bit_width().unwrap_or(256) { + err.multipart_suggestion_verbose( + &format!( + "{}, producing the floating point representation of the integer", + &msg, + ), + into_suggestion, + Applicability::MachineApplicable, + ); + } else if literal_is_ty_suffixed(expr) { + err.multipart_suggestion_verbose( + &lit_msg, + suffix_suggestion, + Applicability::MachineApplicable, + ); + } else { + // Missing try_into implementation for `{integer}` to `{float}` + err.multipart_suggestion_verbose( + &format!( + "{}, producing the floating point representation of the integer, \ + rounded if necessary", + &msg, + ), + cast_suggestion, + Applicability::MaybeIncorrect, // lossy conversion + ); + } + true + } + ( + &ty::Uint(ty::UintTy::U32 | ty::UintTy::U64 | ty::UintTy::U128) + | &ty::Int(ty::IntTy::I32 | ty::IntTy::I64 | ty::IntTy::I128), + &ty::Char, + ) => { + err.multipart_suggestion_verbose( + &format!("{cast_msg}, since a `char` always occupies 4 bytes"), + cast_suggestion, + Applicability::MachineApplicable, + ); + true + } + _ => false, + } + } + + // Report the type inferred by the return statement. + fn report_closure_inferred_return_type(&self, err: &mut Diagnostic, expected: Ty<'tcx>) { + if let Some(sp) = self.ret_coercion_span.get() + // If the closure has an explicit return type annotation, or if + // the closure's return type has been inferred from outside + // requirements (such as an Fn* trait bound), then a type error + // may occur at the first return expression we see in the closure + // (if it conflicts with the declared return type). Skip adding a + // note in this case, since it would be incorrect. + && !self.return_type_pre_known + { + err.span_note( + sp, + &format!( + "return type inferred to be `{}` here", + self.resolve_vars_if_possible(expected) + ), + ); + } + } +} diff --git a/compiler/rustc_typeck/src/check/diverges.rs b/compiler/rustc_typeck/src/check/diverges.rs new file mode 100644 index 000000000..963a93a95 --- /dev/null +++ b/compiler/rustc_typeck/src/check/diverges.rs @@ -0,0 +1,78 @@ +use rustc_span::source_map::DUMMY_SP; +use rustc_span::{self, Span}; +use std::{cmp, ops}; + +/// Tracks whether executing a node may exit normally (versus +/// return/break/panic, which "diverge", leaving dead code in their +/// wake). Tracked semi-automatically (through type variables marked +/// as diverging), with some manual adjustments for control-flow +/// primitives (approximating a CFG). +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub enum Diverges { + /// Potentially unknown, some cases converge, + /// others require a CFG to determine them. + Maybe, + + /// Definitely known to diverge and therefore + /// not reach the next sibling or its parent. + Always { + /// The `Span` points to the expression + /// that caused us to diverge + /// (e.g. `return`, `break`, etc). + span: Span, + /// In some cases (e.g. 
a `match` expression + /// where all arms diverge), we may be + /// able to provide a more informative + /// message to the user. + /// If this is `None`, a default message + /// will be generated, which is suitable + /// for most cases. + custom_note: Option<&'static str>, + }, + + /// Same as `Always` but with a reachability + /// warning already emitted. + WarnedAlways, +} + +// Convenience impls for combining `Diverges`. + +impl ops::BitAnd for Diverges { + type Output = Self; + fn bitand(self, other: Self) -> Self { + cmp::min(self, other) + } +} + +impl ops::BitOr for Diverges { + type Output = Self; + fn bitor(self, other: Self) -> Self { + cmp::max(self, other) + } +} + +impl ops::BitAndAssign for Diverges { + fn bitand_assign(&mut self, other: Self) { + *self = *self & other; + } +} + +impl ops::BitOrAssign for Diverges { + fn bitor_assign(&mut self, other: Self) { + *self = *self | other; + } +} + +impl Diverges { + /// Creates a `Diverges::Always` with the provided `span` and the default note message. + pub(super) fn always(span: Span) -> Diverges { + Diverges::Always { span, custom_note: None } + } + + pub(super) fn is_always(self) -> bool { + // Enum comparison ignores the + // contents of fields, so we just + // fill them in with garbage here. + self >= Diverges::Always { span: DUMMY_SP, custom_note: None } + } +} diff --git a/compiler/rustc_typeck/src/check/dropck.rs b/compiler/rustc_typeck/src/check/dropck.rs new file mode 100644 index 000000000..321064ec0 --- /dev/null +++ b/compiler/rustc_typeck/src/check/dropck.rs @@ -0,0 +1,327 @@ +// FIXME(@lcnr): Move this module out of `rustc_typeck`. +// +// We don't do any drop checking during hir typeck. +use crate::hir::def_id::{DefId, LocalDefId}; +use rustc_errors::{struct_span_err, ErrorGuaranteed}; +use rustc_middle::ty::error::TypeError; +use rustc_middle::ty::relate::{Relate, RelateResult, TypeRelation}; +use rustc_middle::ty::subst::SubstsRef; +use rustc_middle::ty::util::IgnoreRegions; +use rustc_middle::ty::{self, Predicate, Ty, TyCtxt}; + +/// This function confirms that the `Drop` implementation identified by +/// `drop_impl_did` is not any more specialized than the type it is +/// attached to (Issue #8142). +/// +/// This means: +/// +/// 1. The self type must be nominal (this is already checked during +/// coherence), +/// +/// 2. The generic region/type parameters of the impl's self type must +/// all be parameters of the Drop impl itself (i.e., no +/// specialization like `impl Drop for Foo`), and, +/// +/// 3. Any bounds on the generic parameters must be reflected in the +/// struct/enum definition for the nominal type itself (i.e. +/// cannot do `struct S; impl Drop for S { ... }`). +/// +pub fn check_drop_impl(tcx: TyCtxt<'_>, drop_impl_did: DefId) -> Result<(), ErrorGuaranteed> { + let dtor_self_type = tcx.type_of(drop_impl_did); + let dtor_predicates = tcx.predicates_of(drop_impl_did); + match dtor_self_type.kind() { + ty::Adt(adt_def, self_to_impl_substs) => { + ensure_drop_params_and_item_params_correspond( + tcx, + drop_impl_did.expect_local(), + adt_def.did(), + self_to_impl_substs, + )?; + + ensure_drop_predicates_are_implied_by_item_defn( + tcx, + dtor_predicates, + adt_def.did().expect_local(), + self_to_impl_substs, + ) + } + _ => { + // Destructors only work on nominal types. This was + // already checked by coherence, but compilation may + // not have been terminated. 
+ let span = tcx.def_span(drop_impl_did); + let reported = tcx.sess.delay_span_bug( + span, + &format!("should have been rejected by coherence check: {dtor_self_type}"), + ); + Err(reported) + } + } +} + +fn ensure_drop_params_and_item_params_correspond<'tcx>( + tcx: TyCtxt<'tcx>, + drop_impl_did: LocalDefId, + self_type_did: DefId, + drop_impl_substs: SubstsRef<'tcx>, +) -> Result<(), ErrorGuaranteed> { + let Err(arg) = tcx.uses_unique_generic_params(drop_impl_substs, IgnoreRegions::No) else { + return Ok(()) + }; + + let drop_impl_span = tcx.def_span(drop_impl_did); + let item_span = tcx.def_span(self_type_did); + let self_descr = tcx.def_kind(self_type_did).descr(self_type_did); + let mut err = + struct_span_err!(tcx.sess, drop_impl_span, E0366, "`Drop` impls cannot be specialized"); + match arg { + ty::util::NotUniqueParam::DuplicateParam(arg) => { + err.note(&format!("`{arg}` is mentioned multiple times")) + } + ty::util::NotUniqueParam::NotParam(arg) => { + err.note(&format!("`{arg}` is not a generic parameter")) + } + }; + err.span_note( + item_span, + &format!( + "use the same sequence of generic lifetime, type and const parameters \ + as the {self_descr} definition", + ), + ); + Err(err.emit()) +} + +/// Confirms that every predicate imposed by dtor_predicates is +/// implied by assuming the predicates attached to self_type_did. +fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>( + tcx: TyCtxt<'tcx>, + dtor_predicates: ty::GenericPredicates<'tcx>, + self_type_did: LocalDefId, + self_to_impl_substs: SubstsRef<'tcx>, +) -> Result<(), ErrorGuaranteed> { + let mut result = Ok(()); + + // Here is an example, analogous to that from + // `compare_impl_method`. + // + // Consider a struct type: + // + // struct Type<'c, 'b:'c, 'a> { + // x: &'a Contents // (contents are irrelevant; + // y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.) + // } + // + // and a Drop impl: + // + // impl<'z, 'y:'z, 'x:'y> Drop for P<'z, 'y, 'x> { + // fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y) + // } + // + // We start out with self_to_impl_substs, that maps the generic + // parameters of Type to that of the Drop impl. + // + // self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x} + // + // Applying this to the predicates (i.e., assumptions) provided by the item + // definition yields the instantiated assumptions: + // + // ['y : 'z] + // + // We then check all of the predicates of the Drop impl: + // + // ['y:'z, 'x:'y] + // + // and ensure each is in the list of instantiated + // assumptions. Here, `'y:'z` is present, but `'x:'y` is + // absent. So we report an error that the Drop impl injected a + // predicate that is not present on the struct definition. + + // We can assume the predicates attached to struct/enum definition + // hold. + let generic_assumptions = tcx.predicates_of(self_type_did); + + let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs); + let assumptions_in_impl_context = assumptions_in_impl_context.predicates; + + let self_param_env = tcx.param_env(self_type_did); + + // An earlier version of this code attempted to do this checking + // via the traits::fulfill machinery. However, it ran into trouble + // since the fulfill machinery merely turns outlives-predicates + // 'a:'b and T:'b into region inference constraints. It is simpler + // just to look for all the predicates directly. 
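Both dropck rules enforced here can be shown with ordinary user code. The example below is not part of the patch; it satisfies the rules, and the comments spell out what would be rejected.

```rust
use std::fmt::Debug;

// E0366: the impl's self type must use the impl's own generic parameters
// verbatim; `impl Drop for Wrapper<i32>` would be rejected as a specialization.
// E0367: the impl may only assume bounds already stated on the type definition;
// adding `T: Copy` to the impl below would inject an extra predicate and error.
struct Wrapper<T: Debug>(T);

impl<T: Debug> Drop for Wrapper<T> {
    fn drop(&mut self) {
        println!("dropping {:?}", self.0);
    }
}

fn main() {
    let _w = Wrapper(42);
}
```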
+ + assert_eq!(dtor_predicates.parent, None); + for &(predicate, predicate_sp) in dtor_predicates.predicates { + // (We do not need to worry about deep analysis of type + // expressions etc because the Drop impls are already forced + // to take on a structure that is roughly an alpha-renaming of + // the generic parameters of the item definition.) + + // This path now just checks *all* predicates via an instantiation of + // the `SimpleEqRelation`, which simply forwards to the `relate` machinery + // after taking care of anonymizing late bound regions. + // + // However, it may be more efficient in the future to batch + // the analysis together via the fulfill (see comment above regarding + // the usage of the fulfill machinery), rather than the + // repeated `.iter().any(..)` calls. + + // This closure is a more robust way to check `Predicate` equality + // than simple `==` checks (which were the previous implementation). + // It relies on `ty::relate` for `TraitPredicate`, `ProjectionPredicate`, + // `ConstEvaluatable` and `TypeOutlives` (which implement the Relate trait), + // while delegating on simple equality for the other `Predicate`. + // This implementation solves (Issue #59497) and (Issue #58311). + // It is unclear to me at the moment whether the approach based on `relate` + // could be extended easily also to the other `Predicate`. + let predicate_matches_closure = |p: Predicate<'tcx>| { + let mut relator: SimpleEqRelation<'tcx> = SimpleEqRelation::new(tcx, self_param_env); + let predicate = predicate.kind(); + let p = p.kind(); + match (predicate.skip_binder(), p.skip_binder()) { + (ty::PredicateKind::Trait(a), ty::PredicateKind::Trait(b)) => { + // Since struct predicates cannot have ~const, project the impl predicate + // onto one that ignores the constness. This is equivalent to saying that + // we match a `Trait` bound on the struct with a `Trait` or `~const Trait` + // in the impl. + let non_const_a = + ty::TraitPredicate { constness: ty::BoundConstness::NotConst, ..a }; + relator.relate(predicate.rebind(non_const_a), p.rebind(b)).is_ok() + } + (ty::PredicateKind::Projection(a), ty::PredicateKind::Projection(b)) => { + relator.relate(predicate.rebind(a), p.rebind(b)).is_ok() + } + ( + ty::PredicateKind::ConstEvaluatable(a), + ty::PredicateKind::ConstEvaluatable(b), + ) => tcx.try_unify_abstract_consts(self_param_env.and((a, b))), + ( + ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_a, lt_a)), + ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_b, lt_b)), + ) => { + relator.relate(predicate.rebind(ty_a), p.rebind(ty_b)).is_ok() + && relator.relate(predicate.rebind(lt_a), p.rebind(lt_b)).is_ok() + } + (ty::PredicateKind::WellFormed(arg_a), ty::PredicateKind::WellFormed(arg_b)) => { + relator.relate(predicate.rebind(arg_a), p.rebind(arg_b)).is_ok() + } + _ => predicate == p, + } + }; + + if !assumptions_in_impl_context.iter().copied().any(predicate_matches_closure) { + let item_span = tcx.def_span(self_type_did); + let self_descr = tcx.def_kind(self_type_did).descr(self_type_did.to_def_id()); + let reported = struct_span_err!( + tcx.sess, + predicate_sp, + E0367, + "`Drop` impl requires `{predicate}` but the {self_descr} it is implemented for does not", + ) + .span_note(item_span, "the implementor must specify the same requirement") + .emit(); + result = Err(reported); + } + } + + result +} + +// This is an implementation of the TypeRelation trait with the +// aim of simply comparing for equality (without side-effects). 
+// It is not intended to be used anywhere else other than here. +pub(crate) struct SimpleEqRelation<'tcx> { + tcx: TyCtxt<'tcx>, + param_env: ty::ParamEnv<'tcx>, +} + +impl<'tcx> SimpleEqRelation<'tcx> { + fn new(tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> SimpleEqRelation<'tcx> { + SimpleEqRelation { tcx, param_env } + } +} + +impl<'tcx> TypeRelation<'tcx> for SimpleEqRelation<'tcx> { + fn tcx(&self) -> TyCtxt<'tcx> { + self.tcx + } + + fn param_env(&self) -> ty::ParamEnv<'tcx> { + self.param_env + } + + fn tag(&self) -> &'static str { + "dropck::SimpleEqRelation" + } + + fn a_is_expected(&self) -> bool { + true + } + + fn relate_with_variance>( + &mut self, + _: ty::Variance, + _info: ty::VarianceDiagInfo<'tcx>, + a: T, + b: T, + ) -> RelateResult<'tcx, T> { + // Here we ignore variance because we require drop impl's types + // to be *exactly* the same as to the ones in the struct definition. + self.relate(a, b) + } + + fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { + debug!("SimpleEqRelation::tys(a={:?}, b={:?})", a, b); + ty::relate::super_relate_tys(self, a, b) + } + + fn regions( + &mut self, + a: ty::Region<'tcx>, + b: ty::Region<'tcx>, + ) -> RelateResult<'tcx, ty::Region<'tcx>> { + debug!("SimpleEqRelation::regions(a={:?}, b={:?})", a, b); + + // We can just equate the regions because LBRs have been + // already anonymized. + if a == b { + Ok(a) + } else { + // I'm not sure is this `TypeError` is the right one, but + // it should not matter as it won't be checked (the dropck + // will emit its own, more informative and higher-level errors + // in case anything goes wrong). + Err(TypeError::RegionsPlaceholderMismatch) + } + } + + fn consts( + &mut self, + a: ty::Const<'tcx>, + b: ty::Const<'tcx>, + ) -> RelateResult<'tcx, ty::Const<'tcx>> { + debug!("SimpleEqRelation::consts(a={:?}, b={:?})", a, b); + ty::relate::super_relate_consts(self, a, b) + } + + fn binders( + &mut self, + a: ty::Binder<'tcx, T>, + b: ty::Binder<'tcx, T>, + ) -> RelateResult<'tcx, ty::Binder<'tcx, T>> + where + T: Relate<'tcx>, + { + debug!("SimpleEqRelation::binders({:?}: {:?}", a, b); + + // Anonymizing the LBRs is necessary to solve (Issue #59497). + // After we do so, it should be totally fine to skip the binders. + let anon_a = self.tcx.anonymize_bound_vars(a); + let anon_b = self.tcx.anonymize_bound_vars(b); + self.relate(anon_a.skip_binder(), anon_b.skip_binder())?; + + Ok(a) + } +} diff --git a/compiler/rustc_typeck/src/check/expectation.rs b/compiler/rustc_typeck/src/check/expectation.rs new file mode 100644 index 000000000..e9e810344 --- /dev/null +++ b/compiler/rustc_typeck/src/check/expectation.rs @@ -0,0 +1,122 @@ +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_middle::ty::{self, Ty}; +use rustc_span::{self, Span}; + +use super::Expectation::*; +use super::FnCtxt; + +/// When type-checking an expression, we propagate downward +/// whatever type hint we are able in the form of an `Expectation`. +#[derive(Copy, Clone, Debug)] +pub enum Expectation<'tcx> { + /// We know nothing about what type this expression should have. + NoExpectation, + + /// This expression should have the type given (or some subtype). + ExpectHasType(Ty<'tcx>), + + /// This expression will be cast to the `Ty`. + ExpectCastableToType(Ty<'tcx>), + + /// This rvalue expression will be wrapped in `&` or `Box` and coerced + /// to `&Ty` or `Box`, respectively. `Ty` is `[A]` or `Trait`. 
+ ExpectRvalueLikeUnsized(Ty<'tcx>), + + IsLast(Span), +} + +impl<'a, 'tcx> Expectation<'tcx> { + // Disregard "castable to" expectations because they + // can lead us astray. Consider for example `if cond + // {22} else {c} as u8` -- if we propagate the + // "castable to u8" constraint to 22, it will pick the + // type 22u8, which is overly constrained (c might not + // be a u8). In effect, the problem is that the + // "castable to" expectation is not the tightest thing + // we can say, so we want to drop it in this case. + // The tightest thing we can say is "must unify with + // else branch". Note that in the case of a "has type" + // constraint, this limitation does not hold. + + // If the expected type is just a type variable, then don't use + // an expected type. Otherwise, we might write parts of the type + // when checking the 'then' block which are incompatible with the + // 'else' branch. + pub(super) fn adjust_for_branches(&self, fcx: &FnCtxt<'a, 'tcx>) -> Expectation<'tcx> { + match *self { + ExpectHasType(ety) => { + let ety = fcx.shallow_resolve(ety); + if !ety.is_ty_var() { ExpectHasType(ety) } else { NoExpectation } + } + ExpectRvalueLikeUnsized(ety) => ExpectRvalueLikeUnsized(ety), + _ => NoExpectation, + } + } + + /// Provides an expectation for an rvalue expression given an *optional* + /// hint, which is not required for type safety (the resulting type might + /// be checked higher up, as is the case with `&expr` and `box expr`), but + /// is useful in determining the concrete type. + /// + /// The primary use case is where the expected type is a fat pointer, + /// like `&[isize]`. For example, consider the following statement: + /// + /// let x: &[isize] = &[1, 2, 3]; + /// + /// In this case, the expected type for the `&[1, 2, 3]` expression is + /// `&[isize]`. If however we were to say that `[1, 2, 3]` has the + /// expectation `ExpectHasType([isize])`, that would be too strong -- + /// `[1, 2, 3]` does not have the type `[isize]` but rather `[isize; 3]`. + /// It is only the `&[1, 2, 3]` expression as a whole that can be coerced + /// to the type `&[isize]`. Therefore, we propagate this more limited hint, + /// which still is useful, because it informs integer literals and the like. + /// See the test case `test/ui/coerce-expect-unsized.rs` and #20169 + /// for examples of where this comes up,. + pub(super) fn rvalue_hint(fcx: &FnCtxt<'a, 'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> { + match fcx.tcx.struct_tail_without_normalization(ty).kind() { + ty::Slice(_) | ty::Str | ty::Dynamic(..) => ExpectRvalueLikeUnsized(ty), + _ => ExpectHasType(ty), + } + } + + // Resolves `expected` by a single level if it is a variable. If + // there is no expected type or resolution is not possible (e.g., + // no constraints yet present), just returns `self`. 
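The branch-adjustment rule described above can be observed directly in user code. A small illustrative program (parenthesized so the cast unambiguously applies to the whole `if`):

```rust
fn main() {
    let cond = true;
    let c: u16 = 300;
    // The "castable to u8" expectation is dropped before checking the branches,
    // so `22` unifies with `c` as `u16`; only the final result is cast to `u8`.
    let x = (if cond { 22 } else { c }) as u8;
    assert_eq!(x, 22u8);
}
```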
+    fn resolve(self, fcx: &FnCtxt<'a, 'tcx>) -> Expectation<'tcx> {
+        match self {
+            NoExpectation => NoExpectation,
+            ExpectCastableToType(t) => ExpectCastableToType(fcx.resolve_vars_if_possible(t)),
+            ExpectHasType(t) => ExpectHasType(fcx.resolve_vars_if_possible(t)),
+            ExpectRvalueLikeUnsized(t) => ExpectRvalueLikeUnsized(fcx.resolve_vars_if_possible(t)),
+            IsLast(sp) => IsLast(sp),
+        }
+    }
+
+    pub(super) fn to_option(self, fcx: &FnCtxt<'a, 'tcx>) -> Option<Ty<'tcx>> {
+        match self.resolve(fcx) {
+            NoExpectation | IsLast(_) => None,
+            ExpectCastableToType(ty) | ExpectHasType(ty) | ExpectRvalueLikeUnsized(ty) => Some(ty),
+        }
+    }
+
+    /// It sometimes happens that we want to turn an expectation into
+    /// a **hard constraint** (i.e., something that must be satisfied
+    /// for the program to type-check). `only_has_type` will return
+    /// such a constraint, if it exists.
+    pub(super) fn only_has_type(self, fcx: &FnCtxt<'a, 'tcx>) -> Option<Ty<'tcx>> {
+        match self {
+            ExpectHasType(ty) => Some(fcx.resolve_vars_if_possible(ty)),
+            NoExpectation | ExpectCastableToType(_) | ExpectRvalueLikeUnsized(_) | IsLast(_) => {
+                None
+            }
+        }
+    }
+
+    /// Like `only_has_type`, but instead of returning `None` if no
+    /// hard constraint exists, creates a fresh type variable.
+    pub(super) fn coercion_target_type(self, fcx: &FnCtxt<'a, 'tcx>, span: Span) -> Ty<'tcx> {
+        self.only_has_type(fcx).unwrap_or_else(|| {
+            fcx.next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span })
+        })
+    }
+}
diff --git a/compiler/rustc_typeck/src/check/expr.rs b/compiler/rustc_typeck/src/check/expr.rs
new file mode 100644
index 000000000..6e97b0bf2
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/expr.rs
@@ -0,0 +1,2824 @@
+//! Type checking expressions.
+//!
+//! See `mod.rs` for more context on type checking in general.
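Expectations are what let type information flow downward into expressions such as literals. A quick standalone demonstration (plain Rust, not compiler internals):

```rust
fn main() {
    // With an expected type, the integer literal is typed as `u8` directly.
    let a: u8 = 200;
    // With no expectation, integer literals fall back to `i32`.
    let b = 200;
    assert_eq!(std::mem::size_of_val(&a), 1);
    assert_eq!(std::mem::size_of_val(&b), 4);
}
```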
+ +use crate::astconv::AstConv as _; +use crate::check::cast; +use crate::check::coercion::CoerceMany; +use crate::check::fatally_break_rust; +use crate::check::method::SelfSource; +use crate::check::report_unexpected_variant_res; +use crate::check::BreakableCtxt; +use crate::check::Diverges; +use crate::check::DynamicCoerceMany; +use crate::check::Expectation::{self, ExpectCastableToType, ExpectHasType, NoExpectation}; +use crate::check::FnCtxt; +use crate::check::Needs; +use crate::check::TupleArgumentsFlag::DontTupleArguments; +use crate::errors::{ + FieldMultiplySpecifiedInInitializer, FunctionalRecordUpdateOnNonStruct, + YieldExprOutsideOfGenerator, +}; +use crate::type_error_struct; + +use super::suggest_call_constructor; +use crate::errors::{AddressOfTemporaryTaken, ReturnStmtOutsideOfFnBody, StructExprNonExhaustive}; +use rustc_ast as ast; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::stack::ensure_sufficient_stack; +use rustc_errors::{ + pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, DiagnosticId, + EmissionGuarantee, ErrorGuaranteed, +}; +use rustc_hir as hir; +use rustc_hir::def::{CtorKind, DefKind, Res}; +use rustc_hir::def_id::DefId; +use rustc_hir::intravisit::Visitor; +use rustc_hir::lang_items::LangItem; +use rustc_hir::{Closure, ExprKind, HirId, QPath}; +use rustc_infer::infer; +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_infer::infer::InferOk; +use rustc_infer::traits::ObligationCause; +use rustc_middle::middle::stability; +use rustc_middle::ty::adjustment::{Adjust, Adjustment, AllowTwoPhase}; +use rustc_middle::ty::error::TypeError::FieldMisMatch; +use rustc_middle::ty::subst::SubstsRef; +use rustc_middle::ty::{self, AdtKind, DefIdTree, Ty, TypeVisitable}; +use rustc_session::parse::feature_err; +use rustc_span::hygiene::DesugaringKind; +use rustc_span::lev_distance::find_best_match_for_name; +use rustc_span::source_map::{Span, Spanned}; +use rustc_span::symbol::{kw, sym, Ident, Symbol}; +use rustc_span::{BytePos, Pos}; +use rustc_target::spec::abi::Abi::RustIntrinsic; +use rustc_trait_selection::infer::InferCtxtExt; +use rustc_trait_selection::traits::{self, ObligationCauseCode}; + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + fn check_expr_eq_type(&self, expr: &'tcx hir::Expr<'tcx>, expected: Ty<'tcx>) { + let ty = self.check_expr_with_hint(expr, expected); + self.demand_eqtype(expr.span, expected, ty); + } + + pub fn check_expr_has_type_or_error( + &self, + expr: &'tcx hir::Expr<'tcx>, + expected: Ty<'tcx>, + extend_err: impl FnMut(&mut Diagnostic), + ) -> Ty<'tcx> { + self.check_expr_meets_expectation_or_error(expr, ExpectHasType(expected), extend_err) + } + + fn check_expr_meets_expectation_or_error( + &self, + expr: &'tcx hir::Expr<'tcx>, + expected: Expectation<'tcx>, + mut extend_err: impl FnMut(&mut Diagnostic), + ) -> Ty<'tcx> { + let expected_ty = expected.to_option(&self).unwrap_or(self.tcx.types.bool); + let mut ty = self.check_expr_with_expectation(expr, expected); + + // While we don't allow *arbitrary* coercions here, we *do* allow + // coercions from ! to `expected`. + if ty.is_never() { + if let Some(adjustments) = self.typeck_results.borrow().adjustments().get(expr.hir_id) { + self.tcx().sess.delay_span_bug( + expr.span, + "expression with never type wound up being adjusted", + ); + return if let [Adjustment { kind: Adjust::NeverToAny, target }] = &adjustments[..] 
{ + target.to_owned() + } else { + self.tcx().ty_error() + }; + } + + let adj_ty = self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::AdjustmentType, + span: expr.span, + }); + self.apply_adjustments( + expr, + vec![Adjustment { kind: Adjust::NeverToAny, target: adj_ty }], + ); + ty = adj_ty; + } + + if let Some(mut err) = self.demand_suptype_diag(expr.span, expected_ty, ty) { + let expr = expr.peel_drop_temps(); + self.suggest_deref_ref_or_into(&mut err, expr, expected_ty, ty, None); + extend_err(&mut err); + err.emit(); + } + ty + } + + pub(super) fn check_expr_coercable_to_type( + &self, + expr: &'tcx hir::Expr<'tcx>, + expected: Ty<'tcx>, + expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>, + ) -> Ty<'tcx> { + let ty = self.check_expr_with_hint(expr, expected); + // checks don't need two phase + self.demand_coerce(expr, ty, expected, expected_ty_expr, AllowTwoPhase::No) + } + + pub(super) fn check_expr_with_hint( + &self, + expr: &'tcx hir::Expr<'tcx>, + expected: Ty<'tcx>, + ) -> Ty<'tcx> { + self.check_expr_with_expectation(expr, ExpectHasType(expected)) + } + + fn check_expr_with_expectation_and_needs( + &self, + expr: &'tcx hir::Expr<'tcx>, + expected: Expectation<'tcx>, + needs: Needs, + ) -> Ty<'tcx> { + let ty = self.check_expr_with_expectation(expr, expected); + + // If the expression is used in a place whether mutable place is required + // e.g. LHS of assignment, perform the conversion. + if let Needs::MutPlace = needs { + self.convert_place_derefs_to_mutable(expr); + } + + ty + } + + pub(super) fn check_expr(&self, expr: &'tcx hir::Expr<'tcx>) -> Ty<'tcx> { + self.check_expr_with_expectation(expr, NoExpectation) + } + + pub(super) fn check_expr_with_needs( + &self, + expr: &'tcx hir::Expr<'tcx>, + needs: Needs, + ) -> Ty<'tcx> { + self.check_expr_with_expectation_and_needs(expr, NoExpectation, needs) + } + + /// Invariant: + /// If an expression has any sub-expressions that result in a type error, + /// inspecting that expression's type with `ty.references_error()` will return + /// true. Likewise, if an expression is known to diverge, inspecting its + /// type with `ty::type_is_bot` will return true (n.b.: since Rust is + /// strict, _|_ can appear in the type of an expression that does not, + /// itself, diverge: for example, fn() -> _|_.) + /// Note that inspecting a type's structure *directly* may expose the fact + /// that there are actually multiple representations for `Error`, so avoid + /// that when err needs to be handled differently. + #[instrument(skip(self, expr), level = "debug")] + pub(super) fn check_expr_with_expectation( + &self, + expr: &'tcx hir::Expr<'tcx>, + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + self.check_expr_with_expectation_and_args(expr, expected, &[]) + } + + /// Same as `check_expr_with_expectation`, but allows us to pass in the arguments of a + /// `ExprKind::Call` when evaluating its callee when it is an `ExprKind::Path`. 
+ pub(super) fn check_expr_with_expectation_and_args( + &self, + expr: &'tcx hir::Expr<'tcx>, + expected: Expectation<'tcx>, + args: &'tcx [hir::Expr<'tcx>], + ) -> Ty<'tcx> { + if self.tcx().sess.verbose() { + // make this code only run with -Zverbose because it is probably slow + if let Ok(lint_str) = self.tcx.sess.source_map().span_to_snippet(expr.span) { + if !lint_str.contains('\n') { + debug!("expr text: {lint_str}"); + } else { + let mut lines = lint_str.lines(); + if let Some(line0) = lines.next() { + let remaining_lines = lines.count(); + debug!("expr text: {line0}"); + debug!("expr text: ...(and {remaining_lines} more lines)"); + } + } + } + } + + // True if `expr` is a `Try::from_ok(())` that is a result of desugaring a try block + // without the final expr (e.g. `try { return; }`). We don't want to generate an + // unreachable_code lint for it since warnings for autogenerated code are confusing. + let is_try_block_generated_unit_expr = match expr.kind { + ExprKind::Call(_, args) if expr.span.is_desugaring(DesugaringKind::TryBlock) => { + args.len() == 1 && args[0].span.is_desugaring(DesugaringKind::TryBlock) + } + + _ => false, + }; + + // Warn for expressions after diverging siblings. + if !is_try_block_generated_unit_expr { + self.warn_if_unreachable(expr.hir_id, expr.span, "expression"); + } + + // Hide the outer diverging and has_errors flags. + let old_diverges = self.diverges.replace(Diverges::Maybe); + let old_has_errors = self.has_errors.replace(false); + + let ty = ensure_sufficient_stack(|| match &expr.kind { + hir::ExprKind::Path( + qpath @ hir::QPath::Resolved(..) | qpath @ hir::QPath::TypeRelative(..), + ) => self.check_expr_path(qpath, expr, args), + _ => self.check_expr_kind(expr, expected), + }); + + // Warn for non-block expressions with diverging children. + match expr.kind { + ExprKind::Block(..) + | ExprKind::If(..) + | ExprKind::Let(..) + | ExprKind::Loop(..) + | ExprKind::Match(..) => {} + // If `expr` is a result of desugaring the try block and is an ok-wrapped + // diverging expression (e.g. it arose from desugaring of `try { return }`), + // we skip issuing a warning because it is autogenerated code. + ExprKind::Call(..) if expr.span.is_desugaring(DesugaringKind::TryBlock) => {} + ExprKind::Call(callee, _) => self.warn_if_unreachable(expr.hir_id, callee.span, "call"), + ExprKind::MethodCall(segment, ..) => { + self.warn_if_unreachable(expr.hir_id, segment.ident.span, "call") + } + _ => self.warn_if_unreachable(expr.hir_id, expr.span, "expression"), + } + + // Any expression that produces a value of type `!` must have diverged + if ty.is_never() { + self.diverges.set(self.diverges.get() | Diverges::always(expr.span)); + } + + // Record the type, which applies it effects. + // We need to do this after the warning above, so that + // we don't warn for the diverging expression itself. + self.write_ty(expr.hir_id, ty); + + // Combine the diverging and has_error flags. + self.diverges.set(self.diverges.get() | old_diverges); + self.has_errors.set(self.has_errors.get() | old_has_errors); + + debug!("type of {} is...", self.tcx.hir().node_to_string(expr.hir_id)); + debug!("... 
{:?}, expected is {:?}", ty, expected); + + ty + } + + #[instrument(skip(self, expr), level = "debug")] + fn check_expr_kind( + &self, + expr: &'tcx hir::Expr<'tcx>, + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + trace!("expr={:#?}", expr); + + let tcx = self.tcx; + match expr.kind { + ExprKind::Box(subexpr) => self.check_expr_box(subexpr, expected), + ExprKind::Lit(ref lit) => self.check_lit(&lit, expected), + ExprKind::Binary(op, lhs, rhs) => self.check_binop(expr, op, lhs, rhs, expected), + ExprKind::Assign(lhs, rhs, span) => { + self.check_expr_assign(expr, expected, lhs, rhs, span) + } + ExprKind::AssignOp(op, lhs, rhs) => { + self.check_binop_assign(expr, op, lhs, rhs, expected) + } + ExprKind::Unary(unop, oprnd) => self.check_expr_unary(unop, oprnd, expected, expr), + ExprKind::AddrOf(kind, mutbl, oprnd) => { + self.check_expr_addr_of(kind, mutbl, oprnd, expected, expr) + } + ExprKind::Path(QPath::LangItem(lang_item, _, hir_id)) => { + self.check_lang_item_path(lang_item, expr, hir_id) + } + ExprKind::Path(ref qpath) => self.check_expr_path(qpath, expr, &[]), + ExprKind::InlineAsm(asm) => { + // We defer some asm checks as we may not have resolved the input and output types yet (they may still be infer vars). + self.deferred_asm_checks.borrow_mut().push((asm, expr.hir_id)); + self.check_expr_asm(asm) + } + ExprKind::Break(destination, ref expr_opt) => { + self.check_expr_break(destination, expr_opt.as_deref(), expr) + } + ExprKind::Continue(destination) => { + if destination.target_id.is_ok() { + tcx.types.never + } else { + // There was an error; make type-check fail. + tcx.ty_error() + } + } + ExprKind::Ret(ref expr_opt) => self.check_expr_return(expr_opt.as_deref(), expr), + ExprKind::Let(let_expr) => self.check_expr_let(let_expr), + ExprKind::Loop(body, _, source, _) => { + self.check_expr_loop(body, source, expected, expr) + } + ExprKind::Match(discrim, arms, match_src) => { + self.check_match(expr, &discrim, arms, expected, match_src) + } + ExprKind::Closure(&Closure { capture_clause, fn_decl, body, movability, .. 
}) => { + self.check_expr_closure(expr, capture_clause, &fn_decl, body, movability, expected) + } + ExprKind::Block(body, _) => self.check_block_with_expected(&body, expected), + ExprKind::Call(callee, args) => self.check_call(expr, &callee, args, expected), + ExprKind::MethodCall(segment, args, _) => { + self.check_method_call(expr, segment, args, expected) + } + ExprKind::Cast(e, t) => self.check_expr_cast(e, t, expr), + ExprKind::Type(e, t) => { + let ty = self.to_ty_saving_user_provided_ty(&t); + self.check_expr_eq_type(&e, ty); + ty + } + ExprKind::If(cond, then_expr, opt_else_expr) => { + self.check_then_else(cond, then_expr, opt_else_expr, expr.span, expected) + } + ExprKind::DropTemps(e) => self.check_expr_with_expectation(e, expected), + ExprKind::Array(args) => self.check_expr_array(args, expected, expr), + ExprKind::ConstBlock(ref anon_const) => { + self.check_expr_const_block(anon_const, expected, expr) + } + ExprKind::Repeat(element, ref count) => { + self.check_expr_repeat(element, count, expected, expr) + } + ExprKind::Tup(elts) => self.check_expr_tuple(elts, expected, expr), + ExprKind::Struct(qpath, fields, ref base_expr) => { + self.check_expr_struct(expr, expected, qpath, fields, base_expr) + } + ExprKind::Field(base, field) => self.check_field(expr, &base, field), + ExprKind::Index(base, idx) => self.check_expr_index(base, idx, expr), + ExprKind::Yield(value, ref src) => self.check_expr_yield(value, expr, src), + hir::ExprKind::Err => tcx.ty_error(), + } + } + + fn check_expr_box(&self, expr: &'tcx hir::Expr<'tcx>, expected: Expectation<'tcx>) -> Ty<'tcx> { + let expected_inner = expected.to_option(self).map_or(NoExpectation, |ty| match ty.kind() { + ty::Adt(def, _) if def.is_box() => Expectation::rvalue_hint(self, ty.boxed_ty()), + _ => NoExpectation, + }); + let referent_ty = self.check_expr_with_expectation(expr, expected_inner); + self.require_type_is_sized(referent_ty, expr.span, traits::SizedBoxType); + self.tcx.mk_box(referent_ty) + } + + fn check_expr_unary( + &self, + unop: hir::UnOp, + oprnd: &'tcx hir::Expr<'tcx>, + expected: Expectation<'tcx>, + expr: &'tcx hir::Expr<'tcx>, + ) -> Ty<'tcx> { + let tcx = self.tcx; + let expected_inner = match unop { + hir::UnOp::Not | hir::UnOp::Neg => expected, + hir::UnOp::Deref => NoExpectation, + }; + let mut oprnd_t = self.check_expr_with_expectation(&oprnd, expected_inner); + + if !oprnd_t.references_error() { + oprnd_t = self.structurally_resolved_type(expr.span, oprnd_t); + match unop { + hir::UnOp::Deref => { + if let Some(ty) = self.lookup_derefing(expr, oprnd, oprnd_t) { + oprnd_t = ty; + } else { + let mut err = type_error_struct!( + tcx.sess, + expr.span, + oprnd_t, + E0614, + "type `{oprnd_t}` cannot be dereferenced", + ); + let sp = tcx.sess.source_map().start_point(expr.span); + if let Some(sp) = + tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp) + { + tcx.sess.parse_sess.expr_parentheses_needed(&mut err, *sp); + } + err.emit(); + oprnd_t = tcx.ty_error(); + } + } + hir::UnOp::Not => { + let result = self.check_user_unop(expr, oprnd_t, unop, expected_inner); + // If it's builtin, we can reuse the type, this helps inference. + if !(oprnd_t.is_integral() || *oprnd_t.kind() == ty::Bool) { + oprnd_t = result; + } + } + hir::UnOp::Neg => { + let result = self.check_user_unop(expr, oprnd_t, unop, expected_inner); + // If it's builtin, we can reuse the type, this helps inference. 
+ if !oprnd_t.is_numeric() { + oprnd_t = result; + } + } + } + } + oprnd_t + } + + fn check_expr_addr_of( + &self, + kind: hir::BorrowKind, + mutbl: hir::Mutability, + oprnd: &'tcx hir::Expr<'tcx>, + expected: Expectation<'tcx>, + expr: &'tcx hir::Expr<'tcx>, + ) -> Ty<'tcx> { + let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| { + match ty.kind() { + ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { + if oprnd.is_syntactic_place_expr() { + // Places may legitimately have unsized types. + // For example, dereferences of a fat pointer and + // the last field of a struct can be unsized. + ExpectHasType(*ty) + } else { + Expectation::rvalue_hint(self, *ty) + } + } + _ => NoExpectation, + } + }); + let ty = + self.check_expr_with_expectation_and_needs(&oprnd, hint, Needs::maybe_mut_place(mutbl)); + + let tm = ty::TypeAndMut { ty, mutbl }; + match kind { + _ if tm.ty.references_error() => self.tcx.ty_error(), + hir::BorrowKind::Raw => { + self.check_named_place_expr(oprnd); + self.tcx.mk_ptr(tm) + } + hir::BorrowKind::Ref => { + // Note: at this point, we cannot say what the best lifetime + // is to use for resulting pointer. We want to use the + // shortest lifetime possible so as to avoid spurious borrowck + // errors. Moreover, the longest lifetime will depend on the + // precise details of the value whose address is being taken + // (and how long it is valid), which we don't know yet until + // type inference is complete. + // + // Therefore, here we simply generate a region variable. The + // region inferencer will then select a suitable value. + // Finally, borrowck will infer the value of the region again, + // this time with enough precision to check that the value + // whose address was taken can actually be made to live as long + // as it needs to live. + let region = self.next_region_var(infer::AddrOfRegion(expr.span)); + self.tcx.mk_ref(region, tm) + } + } + } + + /// Does this expression refer to a place that either: + /// * Is based on a local or static. + /// * Contains a dereference + /// Note that the adjustments for the children of `expr` should already + /// have been resolved. + fn check_named_place_expr(&self, oprnd: &'tcx hir::Expr<'tcx>) { + let is_named = oprnd.is_place_expr(|base| { + // Allow raw borrows if there are any deref adjustments. + // + // const VAL: (i32,) = (0,); + // const REF: &(i32,) = &(0,); + // + // &raw const VAL.0; // ERROR + // &raw const REF.0; // OK, same as &raw const (*REF).0; + // + // This is maybe too permissive, since it allows + // `let u = &raw const Box::new((1,)).0`, which creates an + // immediately dangling raw pointer. 
+ self.typeck_results + .borrow() + .adjustments() + .get(base.hir_id) + .map_or(false, |x| x.iter().any(|adj| matches!(adj.kind, Adjust::Deref(_)))) + }); + if !is_named { + self.tcx.sess.emit_err(AddressOfTemporaryTaken { span: oprnd.span }); + } + } + + fn check_lang_item_path( + &self, + lang_item: hir::LangItem, + expr: &'tcx hir::Expr<'tcx>, + hir_id: Option, + ) -> Ty<'tcx> { + self.resolve_lang_item_path(lang_item, expr.span, expr.hir_id, hir_id).1 + } + + pub(crate) fn check_expr_path( + &self, + qpath: &'tcx hir::QPath<'tcx>, + expr: &'tcx hir::Expr<'tcx>, + args: &'tcx [hir::Expr<'tcx>], + ) -> Ty<'tcx> { + let tcx = self.tcx; + let (res, opt_ty, segs) = + self.resolve_ty_and_res_fully_qualified_call(qpath, expr.hir_id, expr.span); + let ty = match res { + Res::Err => { + self.set_tainted_by_errors(); + tcx.ty_error() + } + Res::Def(DefKind::Ctor(_, CtorKind::Fictive), _) => { + report_unexpected_variant_res(tcx, res, qpath, expr.span); + tcx.ty_error() + } + _ => self.instantiate_value_path(segs, opt_ty, res, expr.span, expr.hir_id).0, + }; + + if let ty::FnDef(did, ..) = *ty.kind() { + let fn_sig = ty.fn_sig(tcx); + if tcx.fn_sig(did).abi() == RustIntrinsic && tcx.item_name(did) == sym::transmute { + let from = fn_sig.inputs().skip_binder()[0]; + let to = fn_sig.output().skip_binder(); + // We defer the transmute to the end of typeck, once all inference vars have + // been resolved or we errored. This is important as we can only check transmute + // on concrete types, but the output type may not be known yet (it would only + // be known if explicitly specified via turbofish). + self.deferred_transmute_checks.borrow_mut().push((from, to, expr.span)); + } + if !tcx.features().unsized_fn_params { + // We want to remove some Sized bounds from std functions, + // but don't want to expose the removal to stable Rust. + // i.e., we don't want to allow + // + // ```rust + // drop as fn(str); + // ``` + // + // to work in stable even if the Sized bound on `drop` is relaxed. + for i in 0..fn_sig.inputs().skip_binder().len() { + // We just want to check sizedness, so instead of introducing + // placeholder lifetimes with probing, we just replace higher lifetimes + // with fresh vars. + let span = args.get(i).map(|a| a.span).unwrap_or(expr.span); + let input = self.replace_bound_vars_with_fresh_vars( + span, + infer::LateBoundRegionConversionTime::FnCall, + fn_sig.input(i), + ); + self.require_type_is_sized_deferred( + input, + span, + traits::SizedArgumentType(None), + ); + } + } + // Here we want to prevent struct constructors from returning unsized types. + // There were two cases this happened: fn pointer coercion in stable + // and usual function call in presence of unsized_locals. + // Also, as we just want to check sizedness, instead of introducing + // placeholder lifetimes with probing, we just replace higher lifetimes + // with fresh vars. + let output = self.replace_bound_vars_with_fresh_vars( + expr.span, + infer::LateBoundRegionConversionTime::FnCall, + fn_sig.output(), + ); + self.require_type_is_sized_deferred(output, expr.span, traits::SizedReturnType); + } + + // We always require that the type provided as the value for + // a type parameter outlives the moment of instantiation. 
+ let substs = self.typeck_results.borrow().node_substs(expr.hir_id); + self.add_wf_bounds(substs, expr); + + ty + } + + fn check_expr_break( + &self, + destination: hir::Destination, + expr_opt: Option<&'tcx hir::Expr<'tcx>>, + expr: &'tcx hir::Expr<'tcx>, + ) -> Ty<'tcx> { + let tcx = self.tcx; + if let Ok(target_id) = destination.target_id { + let (e_ty, cause); + if let Some(e) = expr_opt { + // If this is a break with a value, we need to type-check + // the expression. Get an expected type from the loop context. + let opt_coerce_to = { + // We should release `enclosing_breakables` before the `check_expr_with_hint` + // below, so can't move this block of code to the enclosing scope and share + // `ctxt` with the second `enclosing_breakables` borrow below. + let mut enclosing_breakables = self.enclosing_breakables.borrow_mut(); + match enclosing_breakables.opt_find_breakable(target_id) { + Some(ctxt) => ctxt.coerce.as_ref().map(|coerce| coerce.expected_ty()), + None => { + // Avoid ICE when `break` is inside a closure (#65383). + return tcx.ty_error_with_message( + expr.span, + "break was outside loop, but no error was emitted", + ); + } + } + }; + + // If the loop context is not a `loop { }`, then break with + // a value is illegal, and `opt_coerce_to` will be `None`. + // Just set expectation to error in that case. + let coerce_to = opt_coerce_to.unwrap_or_else(|| tcx.ty_error()); + + // Recurse without `enclosing_breakables` borrowed. + e_ty = self.check_expr_with_hint(e, coerce_to); + cause = self.misc(e.span); + } else { + // Otherwise, this is a break *without* a value. That's + // always legal, and is equivalent to `break ()`. + e_ty = tcx.mk_unit(); + cause = self.misc(expr.span); + } + + // Now that we have type-checked `expr_opt`, borrow + // the `enclosing_loops` field and let's coerce the + // type of `expr_opt` into what is expected. + let mut enclosing_breakables = self.enclosing_breakables.borrow_mut(); + let Some(ctxt) = enclosing_breakables.opt_find_breakable(target_id) else { + // Avoid ICE when `break` is inside a closure (#65383). + return tcx.ty_error_with_message( + expr.span, + "break was outside loop, but no error was emitted", + ); + }; + + if let Some(ref mut coerce) = ctxt.coerce { + if let Some(ref e) = expr_opt { + coerce.coerce(self, &cause, e, e_ty); + } else { + assert!(e_ty.is_unit()); + let ty = coerce.expected_ty(); + coerce.coerce_forced_unit( + self, + &cause, + &mut |mut err| { + self.suggest_mismatched_types_on_tail( + &mut err, expr, ty, e_ty, target_id, + ); + if let Some(val) = ty_kind_suggestion(ty) { + let label = destination + .label + .map(|l| format!(" {}", l.ident)) + .unwrap_or_else(String::new); + err.span_suggestion( + expr.span, + "give it a value of the expected type", + format!("break{label} {val}"), + Applicability::HasPlaceholders, + ); + } + }, + false, + ); + } + } else { + // If `ctxt.coerce` is `None`, we can just ignore + // the type of the expression. This is because + // either this was a break *without* a value, in + // which case it is always a legal type (`()`), or + // else an error would have been flagged by the + // `loops` pass for using break with an expression + // where you are not supposed to. + assert!(expr_opt.is_none() || self.tcx.sess.has_errors().is_some()); + } + + // If we encountered a `break`, then (no surprise) it may be possible to break from the + // loop... unless the value being returned from the loop diverges itself, e.g. + // `break return 5` or `break loop {}`. 
+ ctxt.may_break |= !self.diverges.get().is_always(); + + // the type of a `break` is always `!`, since it diverges + tcx.types.never + } else { + // Otherwise, we failed to find the enclosing loop; + // this can only happen if the `break` was not + // inside a loop at all, which is caught by the + // loop-checking pass. + let err = self.tcx.ty_error_with_message( + expr.span, + "break was outside loop, but no error was emitted", + ); + + // We still need to assign a type to the inner expression to + // prevent the ICE in #43162. + if let Some(e) = expr_opt { + self.check_expr_with_hint(e, err); + + // ... except when we try to 'break rust;'. + // ICE this expression in particular (see #43162). + if let ExprKind::Path(QPath::Resolved(_, path)) = e.kind { + if path.segments.len() == 1 && path.segments[0].ident.name == sym::rust { + fatally_break_rust(self.tcx.sess); + } + } + } + + // There was an error; make type-check fail. + err + } + } + + fn check_expr_return( + &self, + expr_opt: Option<&'tcx hir::Expr<'tcx>>, + expr: &'tcx hir::Expr<'tcx>, + ) -> Ty<'tcx> { + if self.ret_coercion.is_none() { + let mut err = ReturnStmtOutsideOfFnBody { + span: expr.span, + encl_body_span: None, + encl_fn_span: None, + }; + + let encl_item_id = self.tcx.hir().get_parent_item(expr.hir_id); + + if let Some(hir::Node::Item(hir::Item { + kind: hir::ItemKind::Fn(..), + span: encl_fn_span, + .. + })) + | Some(hir::Node::TraitItem(hir::TraitItem { + kind: hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(_)), + span: encl_fn_span, + .. + })) + | Some(hir::Node::ImplItem(hir::ImplItem { + kind: hir::ImplItemKind::Fn(..), + span: encl_fn_span, + .. + })) = self.tcx.hir().find_by_def_id(encl_item_id) + { + // We are inside a function body, so reporting "return statement + // outside of function body" needs an explanation. + + let encl_body_owner_id = self.tcx.hir().enclosing_body_owner(expr.hir_id); + + // If this didn't hold, we would not have to report an error in + // the first place. + assert_ne!(encl_item_id, encl_body_owner_id); + + let encl_body_id = self.tcx.hir().body_owned_by(encl_body_owner_id); + let encl_body = self.tcx.hir().body(encl_body_id); + + err.encl_body_span = Some(encl_body.value.span); + err.encl_fn_span = Some(*encl_fn_span); + } + + self.tcx.sess.emit_err(err); + + if let Some(e) = expr_opt { + // We still have to type-check `e` (issue #86188), but calling + // `check_return_expr` only works inside fn bodies. + self.check_expr(e); + } + } else if let Some(e) = expr_opt { + if self.ret_coercion_span.get().is_none() { + self.ret_coercion_span.set(Some(e.span)); + } + self.check_return_expr(e, true); + } else { + let mut coercion = self.ret_coercion.as_ref().unwrap().borrow_mut(); + if self.ret_coercion_span.get().is_none() { + self.ret_coercion_span.set(Some(expr.span)); + } + let cause = self.cause(expr.span, ObligationCauseCode::ReturnNoExpression); + if let Some((fn_decl, _)) = self.get_fn_decl(expr.hir_id) { + coercion.coerce_forced_unit( + self, + &cause, + &mut |db| { + let span = fn_decl.output.span(); + if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) { + db.span_label( + span, + format!("expected `{snippet}` because of this return type"), + ); + } + }, + true, + ); + } else { + coercion.coerce_forced_unit(self, &cause, &mut |_| (), true); + } + } + self.tcx.types.never + } + + /// `explicit_return` is `true` if we're checking an explicit `return expr`, + /// and `false` if we're checking a trailing expression. 
+ pub(super) fn check_return_expr( + &self, + return_expr: &'tcx hir::Expr<'tcx>, + explicit_return: bool, + ) { + let ret_coercion = self.ret_coercion.as_ref().unwrap_or_else(|| { + span_bug!(return_expr.span, "check_return_expr called outside fn body") + }); + + let ret_ty = ret_coercion.borrow().expected_ty(); + let return_expr_ty = self.check_expr_with_hint(return_expr, ret_ty); + let mut span = return_expr.span; + // Use the span of the trailing expression for our cause, + // not the span of the entire function + if !explicit_return { + if let ExprKind::Block(body, _) = return_expr.kind && let Some(last_expr) = body.expr { + span = last_expr.span; + } + } + ret_coercion.borrow_mut().coerce( + self, + &self.cause(span, ObligationCauseCode::ReturnValue(return_expr.hir_id)), + return_expr, + return_expr_ty, + ); + + if self.return_type_has_opaque { + // Point any obligations that were registered due to opaque type + // inference at the return expression. + self.select_obligations_where_possible(false, |errors| { + self.point_at_return_for_opaque_ty_error(errors, span, return_expr_ty); + }); + } + } + + fn point_at_return_for_opaque_ty_error( + &self, + errors: &mut Vec>, + span: Span, + return_expr_ty: Ty<'tcx>, + ) { + // Don't point at the whole block if it's empty + if span == self.tcx.hir().span(self.body_id) { + return; + } + for err in errors { + let cause = &mut err.obligation.cause; + if let ObligationCauseCode::OpaqueReturnType(None) = cause.code() { + let new_cause = ObligationCause::new( + cause.span, + cause.body_id, + ObligationCauseCode::OpaqueReturnType(Some((return_expr_ty, span))), + ); + *cause = new_cause; + } + } + } + + pub(crate) fn check_lhs_assignable( + &self, + lhs: &'tcx hir::Expr<'tcx>, + err_code: &'static str, + op_span: Span, + adjust_err: impl FnOnce(&mut DiagnosticBuilder<'tcx, ErrorGuaranteed>), + ) { + if lhs.is_syntactic_place_expr() { + return; + } + + // FIXME: Make this use SessionDiagnostic once error codes can be dynamically set. + let mut err = self.tcx.sess.struct_span_err_with_code( + op_span, + "invalid left-hand side of assignment", + DiagnosticId::Error(err_code.into()), + ); + err.span_label(lhs.span, "cannot assign to this expression"); + + self.comes_from_while_condition(lhs.hir_id, |expr| { + err.span_suggestion_verbose( + expr.span.shrink_to_lo(), + "you might have meant to use pattern destructuring", + "let ", + Applicability::MachineApplicable, + ); + }); + + adjust_err(&mut err); + + err.emit(); + } + + // Check if an expression `original_expr_id` comes from the condition of a while loop, + // as opposed from the body of a while loop, which we can naively check by iterating + // parents until we find a loop... + pub(super) fn comes_from_while_condition( + &self, + original_expr_id: HirId, + then: impl FnOnce(&hir::Expr<'_>), + ) { + let mut parent = self.tcx.hir().get_parent_node(original_expr_id); + while let Some(node) = self.tcx.hir().find(parent) { + match node { + hir::Node::Expr(hir::Expr { + kind: + hir::ExprKind::Loop( + hir::Block { + expr: + Some(hir::Expr { + kind: + hir::ExprKind::Match(expr, ..) | hir::ExprKind::If(expr, ..), + .. + }), + .. + }, + _, + hir::LoopSource::While, + _, + ), + .. 
+ }) => { + // Check if our original expression is a child of the condition of a while loop + let expr_is_ancestor = std::iter::successors(Some(original_expr_id), |id| { + self.tcx.hir().find_parent_node(*id) + }) + .take_while(|id| *id != parent) + .any(|id| id == expr.hir_id); + // if it is, then we have a situation like `while Some(0) = value.get(0) {`, + // where `while let` was more likely intended. + if expr_is_ancestor { + then(expr); + } + break; + } + hir::Node::Item(_) + | hir::Node::ImplItem(_) + | hir::Node::TraitItem(_) + | hir::Node::Crate(_) => break, + _ => { + parent = self.tcx.hir().get_parent_node(parent); + } + } + } + } + + // A generic function for checking the 'then' and 'else' clauses in an 'if' + // or 'if-else' expression. + fn check_then_else( + &self, + cond_expr: &'tcx hir::Expr<'tcx>, + then_expr: &'tcx hir::Expr<'tcx>, + opt_else_expr: Option<&'tcx hir::Expr<'tcx>>, + sp: Span, + orig_expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + let cond_ty = self.check_expr_has_type_or_error(cond_expr, self.tcx.types.bool, |_| {}); + + self.warn_if_unreachable( + cond_expr.hir_id, + then_expr.span, + "block in `if` or `while` expression", + ); + + let cond_diverges = self.diverges.get(); + self.diverges.set(Diverges::Maybe); + + let expected = orig_expected.adjust_for_branches(self); + let then_ty = self.check_expr_with_expectation(then_expr, expected); + let then_diverges = self.diverges.get(); + self.diverges.set(Diverges::Maybe); + + // We've already taken the expected type's preferences + // into account when typing the `then` branch. To figure + // out the initial shot at a LUB, we thus only consider + // `expected` if it represents a *hard* constraint + // (`only_has_type`); otherwise, we just go with a + // fresh type variable. + let coerce_to_ty = expected.coercion_target_type(self, sp); + let mut coerce: DynamicCoerceMany<'_> = CoerceMany::new(coerce_to_ty); + + coerce.coerce(self, &self.misc(sp), then_expr, then_ty); + + if let Some(else_expr) = opt_else_expr { + let else_ty = self.check_expr_with_expectation(else_expr, expected); + let else_diverges = self.diverges.get(); + + let opt_suggest_box_span = self.opt_suggest_box_span(else_ty, orig_expected); + let if_cause = self.if_cause( + sp, + cond_expr.span, + then_expr, + else_expr, + then_ty, + else_ty, + opt_suggest_box_span, + ); + + coerce.coerce(self, &if_cause, else_expr, else_ty); + + // We won't diverge unless both branches do (or the condition does). + self.diverges.set(cond_diverges | then_diverges & else_diverges); + } else { + self.if_fallback_coercion(sp, then_expr, &mut coerce); + + // If the condition is false we can't diverge. + self.diverges.set(cond_diverges); + } + + let result_ty = coerce.complete(self); + if cond_ty.references_error() { self.tcx.ty_error() } else { result_ty } + } + + /// Type check assignment expression `expr` of form `lhs = rhs`. + /// The expected type is `()` and is passed to the function for the purposes of diagnostics. + fn check_expr_assign( + &self, + expr: &'tcx hir::Expr<'tcx>, + expected: Expectation<'tcx>, + lhs: &'tcx hir::Expr<'tcx>, + rhs: &'tcx hir::Expr<'tcx>, + span: Span, + ) -> Ty<'tcx> { + let expected_ty = expected.coercion_target_type(self, expr.span); + if expected_ty == self.tcx.types.bool { + // The expected type is `bool` but this will result in `()` so we can reasonably + // say that the user intended to write `lhs == rhs` instead of `lhs = rhs`. + // The likely cause of this is `if foo = bar { .. }`. 
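+ // For instance (an illustrative example, not the only way to get here):
+ //
+ // ```
+ // let (foo, bar) = (1, 2);
+ // if foo = bar { /* ... */ }
+ // ```
+ //
+ // The condition then has type `()` rather than the expected `bool`, and the
+ // code below reports the mismatch together with the `==` suggestion.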
+ let actual_ty = self.tcx.mk_unit(); + let mut err = self.demand_suptype_diag(expr.span, expected_ty, actual_ty).unwrap(); + let lhs_ty = self.check_expr(&lhs); + let rhs_ty = self.check_expr(&rhs); + let (applicability, eq) = if self.can_coerce(rhs_ty, lhs_ty) { + (Applicability::MachineApplicable, true) + } else { + (Applicability::MaybeIncorrect, false) + }; + if !lhs.is_syntactic_place_expr() + && lhs.is_approximately_pattern() + && !matches!(lhs.kind, hir::ExprKind::Lit(_)) + { + // Do not suggest `if let x = y` as `==` is way more likely to be the intention. + let hir = self.tcx.hir(); + if let hir::Node::Expr(hir::Expr { kind: ExprKind::If { .. }, .. }) = + hir.get(hir.get_parent_node(hir.get_parent_node(expr.hir_id))) + { + err.span_suggestion_verbose( + expr.span.shrink_to_lo(), + "you might have meant to use pattern matching", + "let ", + applicability, + ); + }; + } + if eq { + err.span_suggestion_verbose( + span, + "you might have meant to compare for equality", + "==", + applicability, + ); + } + + // If the assignment expression itself is ill-formed, don't + // bother emitting another error + if lhs_ty.references_error() || rhs_ty.references_error() { + err.delay_as_bug() + } else { + err.emit(); + } + return self.tcx.ty_error(); + } + + let lhs_ty = self.check_expr_with_needs(&lhs, Needs::MutPlace); + + let suggest_deref_binop = |err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>, + rhs_ty: Ty<'tcx>| { + if let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty) { + // Can only assign if the type is sized, so if `DerefMut` yields a type that is + // unsized, do not suggest dereferencing it. + let lhs_deref_ty_is_sized = self + .infcx + .type_implements_trait( + self.tcx.lang_items().sized_trait().unwrap(), + lhs_deref_ty, + ty::List::empty(), + self.param_env, + ) + .may_apply(); + if lhs_deref_ty_is_sized && self.can_coerce(rhs_ty, lhs_deref_ty) { + err.span_suggestion_verbose( + lhs.span.shrink_to_lo(), + "consider dereferencing here to assign to the mutably borrowed value", + "*", + Applicability::MachineApplicable, + ); + } + } + }; + + self.check_lhs_assignable(lhs, "E0070", span, |err| { + let rhs_ty = self.check_expr(&rhs); + suggest_deref_binop(err, rhs_ty); + }); + + // This is (basically) inlined `check_expr_coercable_to_type`, but we want + // to suggest an additional fixup here in `suggest_deref_binop`. 
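+ // For instance (an illustrative example): in
+ //
+ // ```
+ // fn set(x: &mut i32) { x = 5; }
+ // ```
+ //
+ // `5` does not coerce to `&mut i32`, and `suggest_deref_binop` attaches the
+ // "consider dereferencing" help so that the fix becomes `*x = 5`.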
+ let rhs_ty = self.check_expr_with_hint(&rhs, lhs_ty); + if let (_, Some(mut diag)) = + self.demand_coerce_diag(rhs, rhs_ty, lhs_ty, Some(lhs), AllowTwoPhase::No) + { + suggest_deref_binop(&mut diag, rhs_ty); + diag.emit(); + } + + self.require_type_is_sized(lhs_ty, lhs.span, traits::AssignmentLhsSized); + + if lhs_ty.references_error() || rhs_ty.references_error() { + self.tcx.ty_error() + } else { + self.tcx.mk_unit() + } + } + + pub(super) fn check_expr_let(&self, let_expr: &'tcx hir::Let<'tcx>) -> Ty<'tcx> { + // for let statements, this is done in check_stmt + let init = let_expr.init; + self.warn_if_unreachable(init.hir_id, init.span, "block in `let` expression"); + // otherwise check exactly as a let statement + self.check_decl(let_expr.into()); + // but return a bool, for this is a boolean expression + self.tcx.types.bool + } + + fn check_expr_loop( + &self, + body: &'tcx hir::Block<'tcx>, + source: hir::LoopSource, + expected: Expectation<'tcx>, + expr: &'tcx hir::Expr<'tcx>, + ) -> Ty<'tcx> { + let coerce = match source { + // you can only use break with a value from a normal `loop { }` + hir::LoopSource::Loop => { + let coerce_to = expected.coercion_target_type(self, body.span); + Some(CoerceMany::new(coerce_to)) + } + + hir::LoopSource::While | hir::LoopSource::ForLoop => None, + }; + + let ctxt = BreakableCtxt { + coerce, + may_break: false, // Will get updated if/when we find a `break`. + }; + + let (ctxt, ()) = self.with_breakable_ctxt(expr.hir_id, ctxt, || { + self.check_block_no_value(&body); + }); + + if ctxt.may_break { + // No way to know whether it's diverging because + // of a `break` or an outer `break` or `return`. + self.diverges.set(Diverges::Maybe); + } + + // If we permit break with a value, then result type is + // the LUB of the breaks (possibly ! if none); else, it + // is nil. This makes sense because infinite loops + // (which would have type !) are only possible iff we + // permit break with a value [1]. + if ctxt.coerce.is_none() && !ctxt.may_break { + // [1] + self.tcx.sess.delay_span_bug(body.span, "no coercion, but loop may not break"); + } + ctxt.coerce.map(|c| c.complete(self)).unwrap_or_else(|| self.tcx.mk_unit()) + } + + /// Checks a method call. + fn check_method_call( + &self, + expr: &'tcx hir::Expr<'tcx>, + segment: &hir::PathSegment<'_>, + args: &'tcx [hir::Expr<'tcx>], + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + let rcvr = &args[0]; + let rcvr_t = self.check_expr(&rcvr); + // no need to check for bot/err -- callee does that + let rcvr_t = self.structurally_resolved_type(args[0].span, rcvr_t); + let span = segment.ident.span; + + let method = match self.lookup_method(rcvr_t, segment, span, expr, rcvr, args) { + Ok(method) => { + // We could add a "consider `foo::`" suggestion here, but I wasn't able to + // trigger this codepath causing `structurally_resolved_type` to emit an error. + + self.write_method_call(expr.hir_id, method); + Ok(method) + } + Err(error) => { + if segment.ident.name != kw::Empty { + if let Some(mut err) = self.report_method_error( + span, + rcvr_t, + segment.ident, + SelfSource::MethodCall(&args[0]), + error, + Some(args), + ) { + err.emit(); + } + } + Err(()) + } + }; + + // Call the generic checker. + self.check_method_argument_types( + span, + expr, + method, + &args[1..], + DontTupleArguments, + expected, + ) + } + + fn check_expr_cast( + &self, + e: &'tcx hir::Expr<'tcx>, + t: &'tcx hir::Ty<'tcx>, + expr: &'tcx hir::Expr<'tcx>, + ) -> Ty<'tcx> { + // Find the type of `e`. 
Supply hints based on the type we are casting to, + // if appropriate. + let t_cast = self.to_ty_saving_user_provided_ty(t); + let t_cast = self.resolve_vars_if_possible(t_cast); + let t_expr = self.check_expr_with_expectation(e, ExpectCastableToType(t_cast)); + let t_expr = self.resolve_vars_if_possible(t_expr); + + // Eagerly check for some obvious errors. + if t_expr.references_error() || t_cast.references_error() { + self.tcx.ty_error() + } else { + // Defer other checks until we're done type checking. + let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut(); + match cast::CastCheck::new(self, e, t_expr, t_cast, t.span, expr.span) { + Ok(cast_check) => { + debug!( + "check_expr_cast: deferring cast from {:?} to {:?}: {:?}", + t_cast, t_expr, cast_check, + ); + deferred_cast_checks.push(cast_check); + t_cast + } + Err(_) => self.tcx.ty_error(), + } + } + } + + fn check_expr_array( + &self, + args: &'tcx [hir::Expr<'tcx>], + expected: Expectation<'tcx>, + expr: &'tcx hir::Expr<'tcx>, + ) -> Ty<'tcx> { + let element_ty = if !args.is_empty() { + let coerce_to = expected + .to_option(self) + .and_then(|uty| match *uty.kind() { + ty::Array(ty, _) | ty::Slice(ty) => Some(ty), + _ => None, + }) + .unwrap_or_else(|| { + self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::TypeInference, + span: expr.span, + }) + }); + let mut coerce = CoerceMany::with_coercion_sites(coerce_to, args); + assert_eq!(self.diverges.get(), Diverges::Maybe); + for e in args { + let e_ty = self.check_expr_with_hint(e, coerce_to); + let cause = self.misc(e.span); + coerce.coerce(self, &cause, e, e_ty); + } + coerce.complete(self) + } else { + self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::TypeInference, + span: expr.span, + }) + }; + self.tcx.mk_array(element_ty, args.len() as u64) + } + + fn check_expr_const_block( + &self, + anon_const: &'tcx hir::AnonConst, + expected: Expectation<'tcx>, + _expr: &'tcx hir::Expr<'tcx>, + ) -> Ty<'tcx> { + let body = self.tcx.hir().body(anon_const.body); + + // Create a new function context. 
+ let fcx = FnCtxt::new(self, self.param_env.with_const(), body.value.hir_id); + crate::check::GatherLocalsVisitor::new(&fcx).visit_body(body); + + let ty = fcx.check_expr_with_expectation(&body.value, expected); + fcx.require_type_is_sized(ty, body.value.span, traits::ConstSized); + fcx.write_ty(anon_const.hir_id, ty); + ty + } + + fn check_expr_repeat( + &self, + element: &'tcx hir::Expr<'tcx>, + count: &'tcx hir::ArrayLen, + expected: Expectation<'tcx>, + _expr: &'tcx hir::Expr<'tcx>, + ) -> Ty<'tcx> { + let tcx = self.tcx; + let count = self.array_length_to_const(count); + + let uty = match expected { + ExpectHasType(uty) => match *uty.kind() { + ty::Array(ty, _) | ty::Slice(ty) => Some(ty), + _ => None, + }, + _ => None, + }; + + let (element_ty, t) = match uty { + Some(uty) => { + self.check_expr_coercable_to_type(&element, uty, None); + (uty, uty) + } + None => { + let ty = self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::MiscVariable, + span: element.span, + }); + let element_ty = self.check_expr_has_type_or_error(&element, ty, |_| {}); + (element_ty, ty) + } + }; + + if element_ty.references_error() { + return tcx.ty_error(); + } + + self.check_repeat_element_needs_copy_bound(element, count, element_ty); + + tcx.mk_ty(ty::Array(t, count)) + } + + fn check_repeat_element_needs_copy_bound( + &self, + element: &hir::Expr<'_>, + count: ty::Const<'tcx>, + element_ty: Ty<'tcx>, + ) { + let tcx = self.tcx; + // Actual constants as the repeat element get inserted repeatedly instead of getting copied via Copy. + match &element.kind { + hir::ExprKind::ConstBlock(..) => return, + hir::ExprKind::Path(qpath) => { + let res = self.typeck_results.borrow().qpath_res(qpath, element.hir_id); + if let Res::Def(DefKind::Const | DefKind::AssocConst | DefKind::AnonConst, _) = res + { + return; + } + } + _ => {} + } + // If someone calls a const fn, they can extract that call out into a separate constant (or a const + // block in the future), so we check that to tell them that in the diagnostic. Does not affect typeck. + let is_const_fn = match element.kind { + hir::ExprKind::Call(func, _args) => match *self.node_ty(func.hir_id).kind() { + ty::FnDef(def_id, _) => tcx.is_const_fn(def_id), + _ => false, + }, + _ => false, + }; + + // If the length is 0, we don't create any elements, so we don't copy any. If the length is 1, we + // don't copy that one element, we move it. Only check for Copy if the length is larger. 
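+ // For instance (an illustrative example): `[String::new(); 2]` is rejected here
+ // because it would need `String: Copy`, while
+ //
+ // ```
+ // const EMPTY: String = String::new();
+ // let _ = [EMPTY; 2];
+ // ```
+ //
+ // is accepted by the early return above, since the constant is instantiated
+ // anew for each element rather than copied.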
+ if count.try_eval_usize(tcx, self.param_env).map_or(true, |len| len > 1) { + let lang_item = self.tcx.require_lang_item(LangItem::Copy, None); + let code = traits::ObligationCauseCode::RepeatElementCopy { is_const_fn }; + self.require_type_meets(element_ty, element.span, code, lang_item); + } + } + + fn check_expr_tuple( + &self, + elts: &'tcx [hir::Expr<'tcx>], + expected: Expectation<'tcx>, + expr: &'tcx hir::Expr<'tcx>, + ) -> Ty<'tcx> { + let flds = expected.only_has_type(self).and_then(|ty| { + let ty = self.resolve_vars_with_obligations(ty); + match ty.kind() { + ty::Tuple(flds) => Some(&flds[..]), + _ => None, + } + }); + + let elt_ts_iter = elts.iter().enumerate().map(|(i, e)| match flds { + Some(fs) if i < fs.len() => { + let ety = fs[i]; + self.check_expr_coercable_to_type(&e, ety, None); + ety + } + _ => self.check_expr_with_expectation(&e, NoExpectation), + }); + let tuple = self.tcx.mk_tup(elt_ts_iter); + if tuple.references_error() { + self.tcx.ty_error() + } else { + self.require_type_is_sized(tuple, expr.span, traits::TupleInitializerSized); + tuple + } + } + + fn check_expr_struct( + &self, + expr: &hir::Expr<'_>, + expected: Expectation<'tcx>, + qpath: &QPath<'_>, + fields: &'tcx [hir::ExprField<'tcx>], + base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>, + ) -> Ty<'tcx> { + // Find the relevant variant + let Some((variant, adt_ty)) = self.check_struct_path(qpath, expr.hir_id) else { + self.check_struct_fields_on_error(fields, base_expr); + return self.tcx.ty_error(); + }; + + // Prohibit struct expressions when non-exhaustive flag is set. + let adt = adt_ty.ty_adt_def().expect("`check_struct_path` returned non-ADT type"); + if !adt.did().is_local() && variant.is_field_list_non_exhaustive() { + self.tcx + .sess + .emit_err(StructExprNonExhaustive { span: expr.span, what: adt.variant_descr() }); + } + + self.check_expr_struct_fields( + adt_ty, + expected, + expr.hir_id, + qpath.span(), + variant, + fields, + base_expr, + expr.span, + ); + + self.require_type_is_sized(adt_ty, expr.span, traits::StructInitializerSized); + adt_ty + } + + fn check_expr_struct_fields( + &self, + adt_ty: Ty<'tcx>, + expected: Expectation<'tcx>, + expr_id: hir::HirId, + span: Span, + variant: &'tcx ty::VariantDef, + ast_fields: &'tcx [hir::ExprField<'tcx>], + base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>, + expr_span: Span, + ) { + let tcx = self.tcx; + + let expected_inputs = + self.expected_inputs_for_expected_output(span, expected, adt_ty, &[adt_ty]); + let adt_ty_hint = if let Some(expected_inputs) = expected_inputs { + expected_inputs.get(0).cloned().unwrap_or(adt_ty) + } else { + adt_ty + }; + // re-link the regions that EIfEO can erase. + self.demand_eqtype(span, adt_ty_hint, adt_ty); + + let ty::Adt(adt, substs) = adt_ty.kind() else { + span_bug!(span, "non-ADT passed to check_expr_struct_fields"); + }; + let adt_kind = adt.adt_kind(); + + let mut remaining_fields = variant + .fields + .iter() + .enumerate() + .map(|(i, field)| (field.ident(tcx).normalize_to_macros_2_0(), (i, field))) + .collect::>(); + + let mut seen_fields = FxHashMap::default(); + + let mut error_happened = false; + + // Type-check each field. 
+ for field in ast_fields { + let ident = tcx.adjust_ident(field.ident, variant.def_id); + let field_type = if let Some((i, v_field)) = remaining_fields.remove(&ident) { + seen_fields.insert(ident, field.span); + self.write_field_index(field.hir_id, i); + + // We don't look at stability attributes on + // struct-like enums (yet...), but it's definitely not + // a bug to have constructed one. + if adt_kind != AdtKind::Enum { + tcx.check_stability(v_field.did, Some(expr_id), field.span, None); + } + + self.field_ty(field.span, v_field, substs) + } else { + error_happened = true; + if let Some(prev_span) = seen_fields.get(&ident) { + tcx.sess.emit_err(FieldMultiplySpecifiedInInitializer { + span: field.ident.span, + prev_span: *prev_span, + ident, + }); + } else { + self.report_unknown_field( + adt_ty, + variant, + field, + ast_fields, + adt.variant_descr(), + expr_span, + ); + } + + tcx.ty_error() + }; + + // Make sure to give a type to the field even if there's + // an error, so we can continue type-checking. + self.check_expr_coercable_to_type(&field.expr, field_type, None); + } + + // Make sure the programmer specified correct number of fields. + if adt_kind == AdtKind::Union { + if ast_fields.len() != 1 { + struct_span_err!( + tcx.sess, + span, + E0784, + "union expressions should have exactly one field", + ) + .emit(); + } + } + + // If check_expr_struct_fields hit an error, do not attempt to populate + // the fields with the base_expr. This could cause us to hit errors later + // when certain fields are assumed to exist that in fact do not. + if error_happened { + return; + } + + if let Some(base_expr) = base_expr { + // FIXME: We are currently creating two branches here in order to maintain + // consistency. But they should be merged as much as possible. + let fru_tys = if self.tcx.features().type_changing_struct_update { + if adt.is_struct() { + // Make some fresh substitutions for our ADT type. + let fresh_substs = self.fresh_substs_for_item(base_expr.span, adt.did()); + // We do subtyping on the FRU fields first, so we can + // learn exactly what types we expect the base expr + // needs constrained to be compatible with the struct + // type we expect from the expectation value. + let fru_tys = variant + .fields + .iter() + .map(|f| { + let fru_ty = self.normalize_associated_types_in( + expr_span, + self.field_ty(base_expr.span, f, fresh_substs), + ); + let ident = self.tcx.adjust_ident(f.ident(self.tcx), variant.def_id); + if let Some(_) = remaining_fields.remove(&ident) { + let target_ty = self.field_ty(base_expr.span, f, substs); + let cause = self.misc(base_expr.span); + match self.at(&cause, self.param_env).sup(target_ty, fru_ty) { + Ok(InferOk { obligations, value: () }) => { + self.register_predicates(obligations) + } + Err(_) => { + // This should never happen, since we're just subtyping the + // remaining_fields, but it's fine to emit this, I guess. + self.report_mismatched_types( + &cause, + target_ty, + fru_ty, + FieldMisMatch(variant.name, ident.name), + ) + .emit(); + } + } + } + self.resolve_vars_if_possible(fru_ty) + }) + .collect(); + // The use of fresh substs that we have subtyped against + // our base ADT type's fields allows us to guide inference + // along so that, e.g. 
+ // ``` + // MyStruct<'a, F1, F2, const C: usize> { + // f: F1, + // // Other fields that reference `'a`, `F2`, and `C` + // } + // + // let x = MyStruct { + // f: 1usize, + // ..other_struct + // }; + // ``` + // will have the `other_struct` expression constrained to + // `MyStruct<'a, _, F2, C>`, as opposed to just `_`... + // This is important to allow coercions to happen in + // `other_struct` itself. See `coerce-in-base-expr.rs`. + let fresh_base_ty = self.tcx.mk_adt(*adt, fresh_substs); + self.check_expr_has_type_or_error( + base_expr, + self.resolve_vars_if_possible(fresh_base_ty), + |_| {}, + ); + fru_tys + } else { + // Check the base_expr, regardless of a bad expected adt_ty, so we can get + // type errors on that expression, too. + self.check_expr(base_expr); + self.tcx + .sess + .emit_err(FunctionalRecordUpdateOnNonStruct { span: base_expr.span }); + return; + } + } else { + self.check_expr_has_type_or_error(base_expr, adt_ty, |_| { + let base_ty = self.typeck_results.borrow().expr_ty(*base_expr); + let same_adt = match (adt_ty.kind(), base_ty.kind()) { + (ty::Adt(adt, _), ty::Adt(base_adt, _)) if adt == base_adt => true, + _ => false, + }; + if self.tcx.sess.is_nightly_build() && same_adt { + feature_err( + &self.tcx.sess.parse_sess, + sym::type_changing_struct_update, + base_expr.span, + "type changing struct updating is experimental", + ) + .emit(); + } + }); + match adt_ty.kind() { + ty::Adt(adt, substs) if adt.is_struct() => variant + .fields + .iter() + .map(|f| { + self.normalize_associated_types_in(expr_span, f.ty(self.tcx, substs)) + }) + .collect(), + _ => { + self.tcx + .sess + .emit_err(FunctionalRecordUpdateOnNonStruct { span: base_expr.span }); + return; + } + } + }; + self.typeck_results.borrow_mut().fru_field_types_mut().insert(expr_id, fru_tys); + } else if adt_kind != AdtKind::Union && !remaining_fields.is_empty() { + debug!(?remaining_fields); + let private_fields: Vec<&ty::FieldDef> = variant + .fields + .iter() + .filter(|field| { + !field.vis.is_accessible_from(tcx.parent_module(expr_id).to_def_id(), tcx) + }) + .collect(); + + if !private_fields.is_empty() { + self.report_private_fields(adt_ty, span, private_fields, ast_fields); + } else { + self.report_missing_fields( + adt_ty, + span, + remaining_fields, + variant, + ast_fields, + substs, + ); + } + } + } + + fn check_struct_fields_on_error( + &self, + fields: &'tcx [hir::ExprField<'tcx>], + base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>, + ) { + for field in fields { + self.check_expr(&field.expr); + } + if let Some(base) = *base_expr { + self.check_expr(&base); + } + } + + /// Report an error for a struct field expression when there are fields which aren't provided. + /// + /// ```text + /// error: missing field `you_can_use_this_field` in initializer of `foo::Foo` + /// --> src/main.rs:8:5 + /// | + /// 8 | foo::Foo {}; + /// | ^^^^^^^^ missing `you_can_use_this_field` + /// + /// error: aborting due to previous error + /// ``` + fn report_missing_fields( + &self, + adt_ty: Ty<'tcx>, + span: Span, + remaining_fields: FxHashMap, + variant: &'tcx ty::VariantDef, + ast_fields: &'tcx [hir::ExprField<'tcx>], + substs: SubstsRef<'tcx>, + ) { + let len = remaining_fields.len(); + + let mut displayable_field_names: Vec<&str> = + remaining_fields.keys().map(|ident| ident.as_str()).collect(); + // sorting &str primitives here, sort_unstable is ok + displayable_field_names.sort_unstable(); + + let mut truncated_fields_error = String::new(); + let remaining_fields_names = match &displayable_field_names[..] 
{ + [field1] => format!("`{}`", field1), + [field1, field2] => format!("`{field1}` and `{field2}`"), + [field1, field2, field3] => format!("`{field1}`, `{field2}` and `{field3}`"), + _ => { + truncated_fields_error = + format!(" and {} other field{}", len - 3, pluralize!(len - 3)); + displayable_field_names + .iter() + .take(3) + .map(|n| format!("`{n}`")) + .collect::>() + .join(", ") + } + }; + + let mut err = struct_span_err!( + self.tcx.sess, + span, + E0063, + "missing field{} {}{} in initializer of `{}`", + pluralize!(len), + remaining_fields_names, + truncated_fields_error, + adt_ty + ); + err.span_label(span, format!("missing {remaining_fields_names}{truncated_fields_error}")); + + // If the last field is a range literal, but it isn't supposed to be, then they probably + // meant to use functional update syntax. + // + // I don't use 'is_range_literal' because only double-sided, half-open ranges count. + if let Some(( + last, + ExprKind::Struct( + QPath::LangItem(LangItem::Range, ..), + &[ref range_start, ref range_end], + _, + ), + )) = ast_fields.last().map(|last| (last, &last.expr.kind)) && + let variant_field = + variant.fields.iter().find(|field| field.ident(self.tcx) == last.ident) && + let range_def_id = self.tcx.lang_items().range_struct() && + variant_field + .and_then(|field| field.ty(self.tcx, substs).ty_adt_def()) + .map(|adt| adt.did()) + != range_def_id + { + let instead = self + .tcx + .sess + .source_map() + .span_to_snippet(range_end.expr.span) + .map(|s| format!(" from `{s}`")) + .unwrap_or_default(); + err.span_suggestion( + range_start.span.shrink_to_hi(), + &format!("to set the remaining fields{instead}, separate the last named field with a comma"), + ",", + Applicability::MaybeIncorrect, + ); + } + + err.emit(); + } + + /// Report an error for a struct field expression when there are invisible fields. + /// + /// ```text + /// error: cannot construct `Foo` with struct literal syntax due to private fields + /// --> src/main.rs:8:5 + /// | + /// 8 | foo::Foo {}; + /// | ^^^^^^^^ + /// + /// error: aborting due to previous error + /// ``` + fn report_private_fields( + &self, + adt_ty: Ty<'tcx>, + span: Span, + private_fields: Vec<&ty::FieldDef>, + used_fields: &'tcx [hir::ExprField<'tcx>], + ) { + let mut err = self.tcx.sess.struct_span_err( + span, + &format!( + "cannot construct `{adt_ty}` with struct literal syntax due to private fields", + ), + ); + let (used_private_fields, remaining_private_fields): ( + Vec<(Symbol, Span, bool)>, + Vec<(Symbol, Span, bool)>, + ) = private_fields + .iter() + .map(|field| { + match used_fields.iter().find(|used_field| field.name == used_field.ident.name) { + Some(used_field) => (field.name, used_field.span, true), + None => (field.name, self.tcx.def_span(field.did), false), + } + }) + .partition(|field| field.2); + err.span_labels(used_private_fields.iter().map(|(_, span, _)| *span), "private field"); + if !remaining_private_fields.is_empty() { + let remaining_private_fields_len = remaining_private_fields.len(); + let names = match &remaining_private_fields + .iter() + .map(|(name, _, _)| name) + .collect::>()[..] + { + _ if remaining_private_fields_len > 6 => String::new(), + [name] => format!("`{name}` "), + [names @ .., last] => { + let names = names.iter().map(|name| format!("`{name}`")).collect::>(); + format!("{} and `{last}` ", names.join(", ")) + } + [] => unreachable!(), + }; + err.note(format!( + "... 
and other private field{s} {names}that {were} not provided", + s = pluralize!(remaining_private_fields_len), + were = pluralize!("was", remaining_private_fields_len), + )); + } + err.emit(); + } + + fn report_unknown_field( + &self, + ty: Ty<'tcx>, + variant: &'tcx ty::VariantDef, + field: &hir::ExprField<'_>, + skip_fields: &[hir::ExprField<'_>], + kind_name: &str, + expr_span: Span, + ) { + if variant.is_recovered() { + self.set_tainted_by_errors(); + return; + } + let mut err = self.type_error_struct_with_diag( + field.ident.span, + |actual| match ty.kind() { + ty::Adt(adt, ..) if adt.is_enum() => struct_span_err!( + self.tcx.sess, + field.ident.span, + E0559, + "{} `{}::{}` has no field named `{}`", + kind_name, + actual, + variant.name, + field.ident + ), + _ => struct_span_err!( + self.tcx.sess, + field.ident.span, + E0560, + "{} `{}` has no field named `{}`", + kind_name, + actual, + field.ident + ), + }, + ty, + ); + + let variant_ident_span = self.tcx.def_ident_span(variant.def_id).unwrap(); + match variant.ctor_kind { + CtorKind::Fn => match ty.kind() { + ty::Adt(adt, ..) if adt.is_enum() => { + err.span_label( + variant_ident_span, + format!( + "`{adt}::{variant}` defined here", + adt = ty, + variant = variant.name, + ), + ); + err.span_label(field.ident.span, "field does not exist"); + err.span_suggestion_verbose( + expr_span, + &format!( + "`{adt}::{variant}` is a tuple {kind_name}, use the appropriate syntax", + adt = ty, + variant = variant.name, + ), + format!( + "{adt}::{variant}(/* fields */)", + adt = ty, + variant = variant.name, + ), + Applicability::HasPlaceholders, + ); + } + _ => { + err.span_label(variant_ident_span, format!("`{adt}` defined here", adt = ty)); + err.span_label(field.ident.span, "field does not exist"); + err.span_suggestion_verbose( + expr_span, + &format!( + "`{adt}` is a tuple {kind_name}, use the appropriate syntax", + adt = ty, + kind_name = kind_name, + ), + format!("{adt}(/* fields */)", adt = ty), + Applicability::HasPlaceholders, + ); + } + }, + _ => { + // prevent all specified fields from being suggested + let skip_fields = skip_fields.iter().map(|x| x.ident.name); + if let Some(field_name) = self.suggest_field_name( + variant, + field.ident.name, + skip_fields.collect(), + expr_span, + ) { + err.span_suggestion( + field.ident.span, + "a field with a similar name exists", + field_name, + Applicability::MaybeIncorrect, + ); + } else { + match ty.kind() { + ty::Adt(adt, ..) => { + if adt.is_enum() { + err.span_label( + field.ident.span, + format!("`{}::{}` does not have this field", ty, variant.name), + ); + } else { + err.span_label( + field.ident.span, + format!("`{ty}` does not have this field"), + ); + } + let available_field_names = + self.available_field_names(variant, expr_span); + if !available_field_names.is_empty() { + err.note(&format!( + "available fields are: {}", + self.name_series_display(available_field_names) + )); + } + } + _ => bug!("non-ADT passed to report_unknown_field"), + } + }; + } + } + err.emit(); + } + + // Return a hint about the closest match in field names + fn suggest_field_name( + &self, + variant: &'tcx ty::VariantDef, + field: Symbol, + skip: Vec, + // The span where stability will be checked + span: Span, + ) -> Option { + let names = variant + .fields + .iter() + .filter_map(|field| { + // ignore already set fields and private fields from non-local crates + // and unstable fields. 
+ if skip.iter().any(|&x| x == field.name) + || (!variant.def_id.is_local() && !field.vis.is_public()) + || matches!( + self.tcx.eval_stability(field.did, None, span, None), + stability::EvalResult::Deny { .. } + ) + { + None + } else { + Some(field.name) + } + }) + .collect::>(); + + find_best_match_for_name(&names, field, None) + } + + fn available_field_names( + &self, + variant: &'tcx ty::VariantDef, + access_span: Span, + ) -> Vec { + variant + .fields + .iter() + .filter(|field| { + let def_scope = self + .tcx + .adjust_ident_and_get_scope(field.ident(self.tcx), variant.def_id, self.body_id) + .1; + field.vis.is_accessible_from(def_scope, self.tcx) + && !matches!( + self.tcx.eval_stability(field.did, None, access_span, None), + stability::EvalResult::Deny { .. } + ) + }) + .filter(|field| !self.tcx.is_doc_hidden(field.did)) + .map(|field| field.name) + .collect() + } + + fn name_series_display(&self, names: Vec) -> String { + // dynamic limit, to never omit just one field + let limit = if names.len() == 6 { 6 } else { 5 }; + let mut display = + names.iter().take(limit).map(|n| format!("`{}`", n)).collect::>().join(", "); + if names.len() > limit { + display = format!("{} ... and {} others", display, names.len() - limit); + } + display + } + + // Check field access expressions + fn check_field( + &self, + expr: &'tcx hir::Expr<'tcx>, + base: &'tcx hir::Expr<'tcx>, + field: Ident, + ) -> Ty<'tcx> { + debug!("check_field(expr: {:?}, base: {:?}, field: {:?})", expr, base, field); + let expr_t = self.check_expr(base); + let expr_t = self.structurally_resolved_type(base.span, expr_t); + let mut private_candidate = None; + let mut autoderef = self.autoderef(expr.span, expr_t); + while let Some((base_t, _)) = autoderef.next() { + debug!("base_t: {:?}", base_t); + match base_t.kind() { + ty::Adt(base_def, substs) if !base_def.is_enum() => { + debug!("struct named {:?}", base_t); + let (ident, def_scope) = + self.tcx.adjust_ident_and_get_scope(field, base_def.did(), self.body_id); + let fields = &base_def.non_enum_variant().fields; + if let Some(index) = fields + .iter() + .position(|f| f.ident(self.tcx).normalize_to_macros_2_0() == ident) + { + let field = &fields[index]; + let field_ty = self.field_ty(expr.span, field, substs); + // Save the index of all fields regardless of their visibility in case + // of error recovery. + self.write_field_index(expr.hir_id, index); + let adjustments = self.adjust_steps(&autoderef); + if field.vis.is_accessible_from(def_scope, self.tcx) { + self.apply_adjustments(base, adjustments); + self.register_predicates(autoderef.into_obligations()); + + self.tcx.check_stability(field.did, Some(expr.hir_id), expr.span, None); + return field_ty; + } + private_candidate = Some((adjustments, base_def.did(), field_ty)); + } + } + ty::Tuple(tys) => { + let fstr = field.as_str(); + if let Ok(index) = fstr.parse::() { + if fstr == index.to_string() { + if let Some(&field_ty) = tys.get(index) { + let adjustments = self.adjust_steps(&autoderef); + self.apply_adjustments(base, adjustments); + self.register_predicates(autoderef.into_obligations()); + + self.write_field_index(expr.hir_id, index); + return field_ty; + } + } + } + } + _ => {} + } + } + self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false)); + + if let Some((adjustments, did, field_ty)) = private_candidate { + // (#90483) apply adjustments to avoid ExprUseVisitor from + // creating erroneous projection. 
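In check_field above, a tuple index is only accepted when the parsed usize prints back to exactly the same text, which rules out spellings such as `t.01` or `t.+1`. The same check restated as a standalone function:

// Illustrative restatement of the round-trip check on tuple indices.
fn valid_tuple_index(field: &str) -> Option<usize> {
    let index: usize = field.parse().ok()?;
    (field == index.to_string()).then_some(index)
}

fn main() {
    assert_eq!(valid_tuple_index("0"), Some(0));
    assert_eq!(valid_tuple_index("10"), Some(10));
    assert_eq!(valid_tuple_index("01"), None); // leading zero does not round-trip
    assert_eq!(valid_tuple_index("+1"), None); // parses, but prints back differently
}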
+ self.apply_adjustments(base, adjustments); + self.ban_private_field_access(expr, expr_t, field, did); + return field_ty; + } + + if field.name == kw::Empty { + } else if self.method_exists(field, expr_t, expr.hir_id, true) { + self.ban_take_value_of_method(expr, expr_t, field); + } else if !expr_t.is_primitive_ty() { + self.ban_nonexisting_field(field, base, expr, expr_t); + } else { + let field_name = field.to_string(); + let mut err = type_error_struct!( + self.tcx().sess, + field.span, + expr_t, + E0610, + "`{expr_t}` is a primitive type and therefore doesn't have fields", + ); + let is_valid_suffix = |field: String| { + if field == "f32" || field == "f64" { + return true; + } + let mut chars = field.chars().peekable(); + match chars.peek() { + Some('e') | Some('E') => { + chars.next(); + if let Some(c) = chars.peek() + && !c.is_numeric() && *c != '-' && *c != '+' + { + return false; + } + while let Some(c) = chars.peek() { + if !c.is_numeric() { + break; + } + chars.next(); + } + } + _ => (), + } + let suffix = chars.collect::(); + suffix.is_empty() || suffix == "f32" || suffix == "f64" + }; + if let ty::Infer(ty::IntVar(_)) = expr_t.kind() + && let ExprKind::Lit(Spanned { + node: ast::LitKind::Int(_, ast::LitIntType::Unsuffixed), + .. + }) = base.kind + && !base.span.from_expansion() + && is_valid_suffix(field_name) + { + err.span_suggestion_verbose( + field.span.shrink_to_lo(), + "If the number is meant to be a floating point number, consider adding a `0` after the period", + '0', + Applicability::MaybeIncorrect, + ); + } + err.emit(); + } + + self.tcx().ty_error() + } + + fn check_call_constructor( + &self, + err: &mut DiagnosticBuilder<'_, G>, + base: &'tcx hir::Expr<'tcx>, + def_id: DefId, + ) { + if let Some(local_id) = def_id.as_local() { + let hir_id = self.tcx.hir().local_def_id_to_hir_id(local_id); + let node = self.tcx.hir().get(hir_id); + + if let Some(fields) = node.tuple_fields() { + let kind = match self.tcx.opt_def_kind(local_id) { + Some(DefKind::Ctor(of, _)) => of, + _ => return, + }; + + suggest_call_constructor(base.span, kind, fields.len(), err); + } + } else { + // The logic here isn't smart but `associated_item_def_ids` + // doesn't work nicely on local. 
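The `is_valid_suffix` check above targets literals such as `1.f32` or `2.e3`: the lexer reads them as a field access on an integer literal, so E0610 fires and the suggestion inserts a `0` after the period. A before/after sketch (the failing forms are kept in comments so the example compiles):

fn main() {
    // let x = 1.f32;  // E0610: `{integer}` is a primitive type and doesn't have fields
    // let y = 2.e3;   // same error: `e3` is lexed as a field of the integer `2`
    let x = 1.0f32;    // suggested fix: add `0` after the period
    let y = 2.0e3;
    assert_eq!(x, 1.0);
    assert_eq!(y, 2000.0);
}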
+ if let DefKind::Ctor(of, _) = self.tcx.def_kind(def_id) { + let parent_def_id = self.tcx.parent(def_id); + let fields = self.tcx.associated_item_def_ids(parent_def_id); + suggest_call_constructor(base.span, of, fields.len(), err); + } + } + } + + fn suggest_await_on_field_access( + &self, + err: &mut Diagnostic, + field_ident: Ident, + base: &'tcx hir::Expr<'tcx>, + ty: Ty<'tcx>, + ) { + let output_ty = match self.get_impl_future_output_ty(ty) { + Some(output_ty) => self.resolve_vars_if_possible(output_ty), + _ => return, + }; + let mut add_label = true; + if let ty::Adt(def, _) = output_ty.skip_binder().kind() { + // no field access on enum type + if !def.is_enum() { + if def + .non_enum_variant() + .fields + .iter() + .any(|field| field.ident(self.tcx) == field_ident) + { + add_label = false; + err.span_label( + field_ident.span, + "field not available in `impl Future`, but it is available in its `Output`", + ); + err.span_suggestion_verbose( + base.span.shrink_to_hi(), + "consider `await`ing on the `Future` and access the field of its `Output`", + ".await", + Applicability::MaybeIncorrect, + ); + } + } + } + if add_label { + err.span_label(field_ident.span, &format!("field not found in `{ty}`")); + } + } + + fn ban_nonexisting_field( + &self, + field: Ident, + base: &'tcx hir::Expr<'tcx>, + expr: &'tcx hir::Expr<'tcx>, + expr_t: Ty<'tcx>, + ) { + debug!( + "ban_nonexisting_field: field={:?}, base={:?}, expr={:?}, expr_ty={:?}", + field, base, expr, expr_t + ); + let mut err = self.no_such_field_err(field, expr_t, base.hir_id); + + match *expr_t.peel_refs().kind() { + ty::Array(_, len) => { + self.maybe_suggest_array_indexing(&mut err, expr, base, field, len); + } + ty::RawPtr(..) => { + self.suggest_first_deref_field(&mut err, expr, base, field); + } + ty::Adt(def, _) if !def.is_enum() => { + self.suggest_fields_on_recordish(&mut err, def, field, expr.span); + } + ty::Param(param_ty) => { + self.point_at_param_definition(&mut err, param_ty); + } + ty::Opaque(_, _) => { + self.suggest_await_on_field_access(&mut err, field, base, expr_t.peel_refs()); + } + ty::FnDef(def_id, _) => { + self.check_call_constructor(&mut err, base, def_id); + } + _ => {} + } + + if field.name == kw::Await { + // We know by construction that `.await` is either on Rust 2015 + // or results in `ExprKind::Await`. Suggest switching the edition to 2018. + err.note("to `.await` a `Future`, switch to Rust 2018 or later"); + err.help_use_latest_edition(); + } + + err.emit(); + } + + fn ban_private_field_access( + &self, + expr: &hir::Expr<'_>, + expr_t: Ty<'tcx>, + field: Ident, + base_did: DefId, + ) { + let struct_path = self.tcx().def_path_str(base_did); + let kind_name = self.tcx().def_kind(base_did).descr(base_did); + let mut err = struct_span_err!( + self.tcx().sess, + field.span, + E0616, + "field `{field}` of {kind_name} `{struct_path}` is private", + ); + err.span_label(field.span, "private field"); + // Also check if an accessible method exists, which is often what is meant. 
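suggest_await_on_field_access above covers fields that exist only on a future's `Output`, not on the `impl Future` itself. A minimal compiling sketch of the scenario and the suggested fix (the async functions are only defined here, never executed):

struct Config { port: u16 }

async fn cfg() -> Config {
    Config { port: 8080 }
}

async fn use_it() -> u16 {
    // cfg().port        // error: field not available in `impl Future`
    cfg().await.port     // suggested: `.await` first, then access the field on `Output`
}

fn main() {} // only type-checks the async fns above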
+ if self.method_exists(field, expr_t, expr.hir_id, false) && !self.expr_in_place(expr.hir_id) + { + self.suggest_method_call( + &mut err, + &format!("a method `{field}` also exists, call it with parentheses"), + field, + expr_t, + expr, + None, + ); + } + err.emit(); + } + + fn ban_take_value_of_method(&self, expr: &hir::Expr<'_>, expr_t: Ty<'tcx>, field: Ident) { + let mut err = type_error_struct!( + self.tcx().sess, + field.span, + expr_t, + E0615, + "attempted to take value of method `{field}` on type `{expr_t}`", + ); + err.span_label(field.span, "method, not a field"); + let expr_is_call = + if let hir::Node::Expr(hir::Expr { kind: ExprKind::Call(callee, _args), .. }) = + self.tcx.hir().get(self.tcx.hir().get_parent_node(expr.hir_id)) + { + expr.hir_id == callee.hir_id + } else { + false + }; + let expr_snippet = + self.tcx.sess.source_map().span_to_snippet(expr.span).unwrap_or_default(); + let is_wrapped = expr_snippet.starts_with('(') && expr_snippet.ends_with(')'); + let after_open = expr.span.lo() + rustc_span::BytePos(1); + let before_close = expr.span.hi() - rustc_span::BytePos(1); + + if expr_is_call && is_wrapped { + err.multipart_suggestion( + "remove wrapping parentheses to call the method", + vec![ + (expr.span.with_hi(after_open), String::new()), + (expr.span.with_lo(before_close), String::new()), + ], + Applicability::MachineApplicable, + ); + } else if !self.expr_in_place(expr.hir_id) { + // Suggest call parentheses inside the wrapping parentheses + let span = if is_wrapped { + expr.span.with_lo(after_open).with_hi(before_close) + } else { + expr.span + }; + self.suggest_method_call( + &mut err, + "use parentheses to call the method", + field, + expr_t, + expr, + Some(span), + ); + } else { + let mut found = false; + + if let ty::RawPtr(ty_and_mut) = expr_t.kind() + && let ty::Adt(adt_def, _) = ty_and_mut.ty.kind() + { + if adt_def.variants().len() == 1 + && adt_def + .variants() + .iter() + .next() + .unwrap() + .fields + .iter() + .any(|f| f.ident(self.tcx) == field) + { + if let Some(dot_loc) = expr_snippet.rfind('.') { + found = true; + err.span_suggestion( + expr.span.with_hi(expr.span.lo() + BytePos::from_usize(dot_loc)), + "to access the field, dereference first", + format!("(*{})", &expr_snippet[0..dot_loc]), + Applicability::MaybeIncorrect, + ); + } + } + } + + if !found { + err.help("methods are immutable and cannot be assigned to"); + } + } + + err.emit(); + } + + fn point_at_param_definition(&self, err: &mut Diagnostic, param: ty::ParamTy) { + let generics = self.tcx.generics_of(self.body_id.owner.to_def_id()); + let generic_param = generics.type_param(¶m, self.tcx); + if let ty::GenericParamDefKind::Type { synthetic: true, .. 
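ban_take_value_of_method above is the E0615 path: a method was named as if it were a field, and the usual fix is the suggested call parentheses. Before/after sketch:

struct Buffer { data: Vec<u8> }

impl Buffer {
    fn len(&self) -> usize { self.data.len() }
}

fn main() {
    let buf = Buffer { data: vec![1, 2, 3] };
    // let n = buf.len;   // E0615: attempted to take value of method `len` on type `Buffer`
    let n = buf.len();    // suggested: use parentheses to call the method
    assert_eq!(n, 3);
}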
} = generic_param.kind { + return; + } + let param_def_id = generic_param.def_id; + let param_hir_id = match param_def_id.as_local() { + Some(x) => self.tcx.hir().local_def_id_to_hir_id(x), + None => return, + }; + let param_span = self.tcx.hir().span(param_hir_id); + let param_name = self.tcx.hir().ty_param_name(param_def_id.expect_local()); + + err.span_label(param_span, &format!("type parameter '{param_name}' declared here")); + } + + fn suggest_fields_on_recordish( + &self, + err: &mut Diagnostic, + def: ty::AdtDef<'tcx>, + field: Ident, + access_span: Span, + ) { + if let Some(suggested_field_name) = + self.suggest_field_name(def.non_enum_variant(), field.name, vec![], access_span) + { + err.span_suggestion( + field.span, + "a field with a similar name exists", + suggested_field_name, + Applicability::MaybeIncorrect, + ); + } else { + err.span_label(field.span, "unknown field"); + let struct_variant_def = def.non_enum_variant(); + let field_names = self.available_field_names(struct_variant_def, access_span); + if !field_names.is_empty() { + err.note(&format!( + "available fields are: {}", + self.name_series_display(field_names), + )); + } + } + } + + fn maybe_suggest_array_indexing( + &self, + err: &mut Diagnostic, + expr: &hir::Expr<'_>, + base: &hir::Expr<'_>, + field: Ident, + len: ty::Const<'tcx>, + ) { + if let (Some(len), Ok(user_index)) = + (len.try_eval_usize(self.tcx, self.param_env), field.as_str().parse::()) + && let Ok(base) = self.tcx.sess.source_map().span_to_snippet(base.span) + { + let help = "instead of using tuple indexing, use array indexing"; + let suggestion = format!("{base}[{field}]"); + let applicability = if len < user_index { + Applicability::MachineApplicable + } else { + Applicability::MaybeIncorrect + }; + err.span_suggestion(expr.span, help, suggestion, applicability); + } + } + + fn suggest_first_deref_field( + &self, + err: &mut Diagnostic, + expr: &hir::Expr<'_>, + base: &hir::Expr<'_>, + field: Ident, + ) { + if let Ok(base) = self.tcx.sess.source_map().span_to_snippet(base.span) { + let msg = format!("`{base}` is a raw pointer; try dereferencing it"); + let suggestion = format!("(*{base}).{field}"); + err.span_suggestion(expr.span, &msg, suggestion, Applicability::MaybeIncorrect); + } + } + + fn no_such_field_err( + &self, + field: Ident, + expr_t: Ty<'tcx>, + id: HirId, + ) -> DiagnosticBuilder<'_, ErrorGuaranteed> { + let span = field.span; + debug!("no_such_field_err(span: {:?}, field: {:?}, expr_t: {:?})", span, field, expr_t); + + let mut err = type_error_struct!( + self.tcx().sess, + field.span, + expr_t, + E0609, + "no field `{field}` on type `{expr_t}`", + ); + + // try to add a suggestion in case the field is a nested field of a field of the Adt + if let Some((fields, substs)) = self.get_field_candidates(span, expr_t) { + for candidate_field in fields.iter() { + if let Some(mut field_path) = self.check_for_nested_field_satisfying( + span, + &|candidate_field, _| candidate_field.ident(self.tcx()) == field, + candidate_field, + substs, + vec![], + self.tcx.parent_module(id).to_def_id(), + ) { + // field_path includes `field` that we're looking for, so pop it. 
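maybe_suggest_array_indexing above rewrites tuple-style access on an array into an index expression. Before/after sketch:

fn main() {
    let coords = [1.0, 2.5, 3.0];
    // let x = coords.0;   // error: no field `0` on type `[f64; 3]`
    let x = coords[0];     // suggested: use array indexing instead of tuple indexing
    assert_eq!(x, 1.0);
}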
+ field_path.pop(); + + let field_path_str = field_path + .iter() + .map(|id| id.name.to_ident_string()) + .collect::>() + .join("."); + debug!("field_path_str: {:?}", field_path_str); + + err.span_suggestion_verbose( + field.span.shrink_to_lo(), + "one of the expressions' fields has a field of the same name", + format!("{field_path_str}."), + Applicability::MaybeIncorrect, + ); + } + } + } + err + } + + pub(crate) fn get_field_candidates( + &self, + span: Span, + base_t: Ty<'tcx>, + ) -> Option<(&[ty::FieldDef], SubstsRef<'tcx>)> { + debug!("get_field_candidates(span: {:?}, base_t: {:?}", span, base_t); + + for (base_t, _) in self.autoderef(span, base_t) { + match base_t.kind() { + ty::Adt(base_def, substs) if !base_def.is_enum() => { + let fields = &base_def.non_enum_variant().fields; + // For compile-time reasons put a limit on number of fields we search + if fields.len() > 100 { + return None; + } + return Some((fields, substs)); + } + _ => {} + } + } + None + } + + /// This method is called after we have encountered a missing field error to recursively + /// search for the field + pub(crate) fn check_for_nested_field_satisfying( + &self, + span: Span, + matches: &impl Fn(&ty::FieldDef, Ty<'tcx>) -> bool, + candidate_field: &ty::FieldDef, + subst: SubstsRef<'tcx>, + mut field_path: Vec, + id: DefId, + ) -> Option> { + debug!( + "check_for_nested_field_satisfying(span: {:?}, candidate_field: {:?}, field_path: {:?}", + span, candidate_field, field_path + ); + + if field_path.len() > 3 { + // For compile-time reasons and to avoid infinite recursion we only check for fields + // up to a depth of three + None + } else { + // recursively search fields of `candidate_field` if it's a ty::Adt + field_path.push(candidate_field.ident(self.tcx).normalize_to_macros_2_0()); + let field_ty = candidate_field.ty(self.tcx, subst); + if let Some((nested_fields, subst)) = self.get_field_candidates(span, field_ty) { + for field in nested_fields.iter() { + if field.vis.is_accessible_from(id, self.tcx) { + if matches(candidate_field, field_ty) { + return Some(field_path); + } else if let Some(field_path) = self.check_for_nested_field_satisfying( + span, + matches, + field, + subst, + field_path.clone(), + id, + ) { + return Some(field_path); + } + } + } + } + None + } + } + + fn check_expr_index( + &self, + base: &'tcx hir::Expr<'tcx>, + idx: &'tcx hir::Expr<'tcx>, + expr: &'tcx hir::Expr<'tcx>, + ) -> Ty<'tcx> { + let base_t = self.check_expr(&base); + let idx_t = self.check_expr(&idx); + + if base_t.references_error() { + base_t + } else if idx_t.references_error() { + idx_t + } else { + let base_t = self.structurally_resolved_type(base.span, base_t); + match self.lookup_indexing(expr, base, base_t, idx, idx_t) { + Some((index_ty, element_ty)) => { + // two-phase not needed because index_ty is never mutable + self.demand_coerce(idx, idx_t, index_ty, None, AllowTwoPhase::No); + self.select_obligations_where_possible(false, |errors| { + self.point_at_index_if_possible(errors, idx.span) + }); + element_ty + } + None => { + let mut err = type_error_struct!( + self.tcx.sess, + expr.span, + base_t, + E0608, + "cannot index into a value of type `{base_t}`", + ); + // Try to give some advice about indexing tuples. + if let ty::Tuple(..) 
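check_for_nested_field_satisfying above is a depth-limited walk over field types: at most three levels deep, with very wide structs skipped by get_field_candidates. A toy model of the same idea over an invented field tree (the bookkeeping in rustc differs in detail, e.g. the matched field is popped from the reported path):

const MAX_DEPTH: usize = 3;

struct Field { name: &'static str, children: Vec<Field> }

// Find a path of field names leading to `target`, exploring at most MAX_DEPTH levels.
fn find_path(fields: &[Field], target: &str, mut path: Vec<&'static str>) -> Option<Vec<&'static str>> {
    if path.len() > MAX_DEPTH {
        return None;
    }
    for field in fields {
        if field.name == target {
            let mut found = path.clone();
            found.push(field.name);
            return Some(found);
        }
        path.push(field.name);
        if let Some(found) = find_path(&field.children, target, path.clone()) {
            return Some(found);
        }
        path.pop();
    }
    None
}

fn main() {
    let fields = vec![Field {
        name: "inner",
        children: vec![Field { name: "value", children: vec![] }],
    }];
    assert_eq!(find_path(&fields, "value", vec![]), Some(vec!["inner", "value"]));
    assert_eq!(find_path(&fields, "missing", vec![]), None);
}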
= base_t.kind() { + let mut needs_note = true; + // If the index is an integer, we can show the actual + // fixed expression: + if let ExprKind::Lit(ref lit) = idx.kind { + if let ast::LitKind::Int(i, ast::LitIntType::Unsuffixed) = lit.node { + let snip = self.tcx.sess.source_map().span_to_snippet(base.span); + if let Ok(snip) = snip { + err.span_suggestion( + expr.span, + "to access tuple elements, use", + format!("{snip}.{i}"), + Applicability::MachineApplicable, + ); + needs_note = false; + } + } + } + if needs_note { + err.help( + "to access tuple elements, use tuple indexing \ + syntax (e.g., `tuple.0`)", + ); + } + } + err.emit(); + self.tcx.ty_error() + } + } + } + } + + fn point_at_index_if_possible( + &self, + errors: &mut Vec>, + span: Span, + ) { + for error in errors { + match error.obligation.predicate.kind().skip_binder() { + ty::PredicateKind::Trait(predicate) + if self.tcx.is_diagnostic_item(sym::SliceIndex, predicate.trait_ref.def_id) => { + } + _ => continue, + } + error.obligation.cause.span = span; + } + } + + fn check_expr_yield( + &self, + value: &'tcx hir::Expr<'tcx>, + expr: &'tcx hir::Expr<'tcx>, + src: &'tcx hir::YieldSource, + ) -> Ty<'tcx> { + match self.resume_yield_tys { + Some((resume_ty, yield_ty)) => { + self.check_expr_coercable_to_type(&value, yield_ty, None); + + resume_ty + } + // Given that this `yield` expression was generated as a result of lowering a `.await`, + // we know that the yield type must be `()`; however, the context won't contain this + // information. Hence, we check the source of the yield expression here and check its + // value's type against `()` (this check should always hold). + None if src.is_await() => { + self.check_expr_coercable_to_type(&value, self.tcx.mk_unit(), None); + self.tcx.mk_unit() + } + _ => { + self.tcx.sess.emit_err(YieldExprOutsideOfGenerator { span: expr.span }); + // Avoid expressions without types during writeback (#78653). + self.check_expr(value); + self.tcx.mk_unit() + } + } + } + + fn check_expr_asm_operand(&self, expr: &'tcx hir::Expr<'tcx>, is_input: bool) { + let needs = if is_input { Needs::None } else { Needs::MutPlace }; + let ty = self.check_expr_with_needs(expr, needs); + self.require_type_is_sized(ty, expr.span, traits::InlineAsmSized); + + if !is_input && !expr.is_syntactic_place_expr() { + let mut err = self.tcx.sess.struct_span_err(expr.span, "invalid asm output"); + err.span_label(expr.span, "cannot assign to this expression"); + err.emit(); + } + + // If this is an input value, we require its type to be fully resolved + // at this point. This allows us to provide helpful coercions which help + // pass the type candidate list in a later pass. + // + // We don't require output types to be resolved at this point, which + // allows them to be inferred based on how they are used later in the + // function. + if is_input { + let ty = self.structurally_resolved_type(expr.span, ty); + match *ty.kind() { + ty::FnDef(..) => { + let fnptr_ty = self.tcx.mk_fn_ptr(ty.fn_sig(self.tcx)); + self.demand_coerce(expr, ty, fnptr_ty, None, AllowTwoPhase::No); + } + ty::Ref(_, base_ty, mutbl) => { + let ptr_ty = self.tcx.mk_ptr(ty::TypeAndMut { ty: base_ty, mutbl }); + self.demand_coerce(expr, ty, ptr_ty, None, AllowTwoPhase::No); + } + _ => {} + } + } + } + + fn check_expr_asm(&self, asm: &'tcx hir::InlineAsm<'tcx>) -> Ty<'tcx> { + for (op, _op_sp) in asm.operands { + match op { + hir::InlineAsmOperand::In { expr, .. 
} => { + self.check_expr_asm_operand(expr, true); + } + hir::InlineAsmOperand::Out { expr: Some(expr), .. } + | hir::InlineAsmOperand::InOut { expr, .. } => { + self.check_expr_asm_operand(expr, false); + } + hir::InlineAsmOperand::Out { expr: None, .. } => {} + hir::InlineAsmOperand::SplitInOut { in_expr, out_expr, .. } => { + self.check_expr_asm_operand(in_expr, true); + if let Some(out_expr) = out_expr { + self.check_expr_asm_operand(out_expr, false); + } + } + // `AnonConst`s have their own body and is type-checked separately. + // As they don't flow into the type system we don't need them to + // be well-formed. + hir::InlineAsmOperand::Const { .. } | hir::InlineAsmOperand::SymFn { .. } => {} + hir::InlineAsmOperand::SymStatic { .. } => {} + } + } + if asm.options.contains(ast::InlineAsmOptions::NORETURN) { + self.tcx.types.never + } else { + self.tcx.mk_unit() + } + } +} + +pub(super) fn ty_kind_suggestion(ty: Ty<'_>) -> Option<&'static str> { + Some(match ty.kind() { + ty::Bool => "true", + ty::Char => "'a'", + ty::Int(_) | ty::Uint(_) => "42", + ty::Float(_) => "3.14159", + ty::Error(_) | ty::Never => return None, + _ => "value", + }) +} diff --git a/compiler/rustc_typeck/src/check/fallback.rs b/compiler/rustc_typeck/src/check/fallback.rs new file mode 100644 index 000000000..4059b3403 --- /dev/null +++ b/compiler/rustc_typeck/src/check/fallback.rs @@ -0,0 +1,398 @@ +use crate::check::FnCtxt; +use rustc_data_structures::{ + fx::{FxHashMap, FxHashSet}, + graph::WithSuccessors, + graph::{iterate::DepthFirstSearch, vec_graph::VecGraph}, +}; +use rustc_middle::ty::{self, Ty}; + +impl<'tcx> FnCtxt<'_, 'tcx> { + /// Performs type inference fallback, returning true if any fallback + /// occurs. + pub(super) fn type_inference_fallback(&self) -> bool { + debug!( + "type-inference-fallback start obligations: {:#?}", + self.fulfillment_cx.borrow_mut().pending_obligations() + ); + + // All type checking constraints were added, try to fallback unsolved variables. + self.select_obligations_where_possible(false, |_| {}); + + debug!( + "type-inference-fallback post selection obligations: {:#?}", + self.fulfillment_cx.borrow_mut().pending_obligations() + ); + + // Check if we have any unsolved variables. If not, no need for fallback. + let unsolved_variables = self.unsolved_variables(); + if unsolved_variables.is_empty() { + return false; + } + + let diverging_fallback = self.calculate_diverging_fallback(&unsolved_variables); + + let mut fallback_has_occurred = false; + // We do fallback in two passes, to try to generate + // better error messages. + // The first time, we do *not* replace opaque types. + for ty in unsolved_variables { + debug!("unsolved_variable = {:?}", ty); + fallback_has_occurred |= self.fallback_if_possible(ty, &diverging_fallback); + } + + // We now see if we can make progress. This might cause us to + // unify inference variables for opaque types, since we may + // have unified some other type variables during the first + // phase of fallback. This means that we only replace + // inference variables with their underlying opaque types as a + // last resort. + // + // In code like this: + // + // ```rust + // type MyType = impl Copy; + // fn produce() -> MyType { true } + // fn bad_produce() -> MyType { panic!() } + // ``` + // + // we want to unify the opaque inference variable in `bad_produce` + // with the diverging fallback for `panic!` (e.g. `()` or `!`). + // This will produce a nice error message about conflicting concrete + // types for `MyType`. 
+ // + // If we had tried to fallback the opaque inference variable to `MyType`, + // we will generate a confusing type-check error that does not explicitly + // refer to opaque types. + self.select_obligations_where_possible(fallback_has_occurred, |_| {}); + + fallback_has_occurred + } + + // Tries to apply a fallback to `ty` if it is an unsolved variable. + // + // - Unconstrained ints are replaced with `i32`. + // + // - Unconstrained floats are replaced with with `f64`. + // + // - Non-numerics may get replaced with `()` or `!`, depending on + // how they were categorized by `calculate_diverging_fallback` + // (and the setting of `#![feature(never_type_fallback)]`). + // + // Fallback becomes very dubious if we have encountered + // type-checking errors. In that case, fallback to Error. + // + // The return value indicates whether fallback has occurred. + fn fallback_if_possible( + &self, + ty: Ty<'tcx>, + diverging_fallback: &FxHashMap, Ty<'tcx>>, + ) -> bool { + // Careful: we do NOT shallow-resolve `ty`. We know that `ty` + // is an unsolved variable, and we determine its fallback + // based solely on how it was created, not what other type + // variables it may have been unified with since then. + // + // The reason this matters is that other attempts at fallback + // may (in principle) conflict with this fallback, and we wish + // to generate a type error in that case. (However, this + // actually isn't true right now, because we're only using the + // builtin fallback rules. This would be true if we were using + // user-supplied fallbacks. But it's still useful to write the + // code to detect bugs.) + // + // (Note though that if we have a general type variable `?T` + // that is then unified with an integer type variable `?I` + // that ultimately never gets resolved to a special integral + // type, `?T` is not considered unsolved, but `?I` is. The + // same is true for float variables.) + let fallback = match ty.kind() { + _ if self.is_tainted_by_errors() => self.tcx.ty_error(), + ty::Infer(ty::IntVar(_)) => self.tcx.types.i32, + ty::Infer(ty::FloatVar(_)) => self.tcx.types.f64, + _ => match diverging_fallback.get(&ty) { + Some(&fallback_ty) => fallback_ty, + None => return false, + }, + }; + debug!("fallback_if_possible(ty={:?}): defaulting to `{:?}`", ty, fallback); + + let span = self + .infcx + .type_var_origin(ty) + .map(|origin| origin.span) + .unwrap_or(rustc_span::DUMMY_SP); + self.demand_eqtype(span, ty, fallback); + true + } + + /// The "diverging fallback" system is rather complicated. This is + /// a result of our need to balance 'do the right thing' with + /// backwards compatibility. + /// + /// "Diverging" type variables are variables created when we + /// coerce a `!` type into an unbound type variable `?X`. If they + /// never wind up being constrained, the "right and natural" thing + /// is that `?X` should "fallback" to `!`. This means that e.g. an + /// expression like `Some(return)` will ultimately wind up with a + /// type like `Option` (presuming it is not assigned or + /// constrained to have some other type). + /// + /// However, the fallback used to be `()` (before the `!` type was + /// added). Moreover, there are cases where the `!` type 'leaks + /// out' from dead code into type variables that affect live + /// code. 
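fallback_if_possible above applies the numeric defaults: unconstrained integer variables become `i32` and unconstrained float variables become `f64`. The effect is visible from ordinary code (std::any::type_name is used only to surface the inferred types):

fn type_name_of<T>(_: &T) -> &'static str {
    std::any::type_name::<T>()
}

fn main() {
    let unconstrained_int = 42;     // no other constraint: integer variable falls back to i32
    let unconstrained_float = 1.5;  // float variable falls back to f64
    assert_eq!(type_name_of(&unconstrained_int), "i32");
    assert_eq!(type_name_of(&unconstrained_float), "f64");
}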
The most common case is something like this: + /// + /// ```rust + /// # fn foo() -> i32 { 4 } + /// match foo() { + /// 22 => Default::default(), // call this type `?D` + /// _ => return, // return has type `!` + /// } // call the type of this match `?M` + /// ``` + /// + /// Here, coercing the type `!` into `?M` will create a diverging + /// type variable `?X` where `?X <: ?M`. We also have that `?D <: + /// ?M`. If `?M` winds up unconstrained, then `?X` will + /// fallback. If it falls back to `!`, then all the type variables + /// will wind up equal to `!` -- this includes the type `?D` + /// (since `!` doesn't implement `Default`, we wind up a "trait + /// not implemented" error in code like this). But since the + /// original fallback was `()`, this code used to compile with `?D + /// = ()`. This is somewhat surprising, since `Default::default()` + /// on its own would give an error because the types are + /// insufficiently constrained. + /// + /// Our solution to this dilemma is to modify diverging variables + /// so that they can *either* fallback to `!` (the default) or to + /// `()` (the backwards compatibility case). We decide which + /// fallback to use based on whether there is a coercion pattern + /// like this: + /// + /// ```ignore (not-rust) + /// ?Diverging -> ?V + /// ?NonDiverging -> ?V + /// ?V != ?NonDiverging + /// ``` + /// + /// Here `?Diverging` represents some diverging type variable and + /// `?NonDiverging` represents some non-diverging type + /// variable. `?V` can be any type variable (diverging or not), so + /// long as it is not equal to `?NonDiverging`. + /// + /// Intuitively, what we are looking for is a case where a + /// "non-diverging" type variable (like `?M` in our example above) + /// is coerced *into* some variable `?V` that would otherwise + /// fallback to `!`. In that case, we make `?V` fallback to `!`, + /// along with anything that would flow into `?V`. + /// + /// The algorithm we use: + /// * Identify all variables that are coerced *into* by a + /// diverging variable. Do this by iterating over each + /// diverging, unsolved variable and finding all variables + /// reachable from there. Call that set `D`. + /// * Walk over all unsolved, non-diverging variables, and find + /// any variable that has an edge into `D`. + fn calculate_diverging_fallback( + &self, + unsolved_variables: &[Ty<'tcx>], + ) -> FxHashMap, Ty<'tcx>> { + debug!("calculate_diverging_fallback({:?})", unsolved_variables); + + let relationships = self.fulfillment_cx.borrow_mut().relationships().clone(); + + // Construct a coercion graph where an edge `A -> B` indicates + // a type variable is that is coerced + let coercion_graph = self.create_coercion_graph(); + + // Extract the unsolved type inference variable vids; note that some + // unsolved variables are integer/float variables and are excluded. + let unsolved_vids = unsolved_variables.iter().filter_map(|ty| ty.ty_vid()); + + // Compute the diverging root vids D -- that is, the root vid of + // those type variables that (a) are the target of a coercion from + // a `!` type and (b) have not yet been solved. + // + // These variables are the ones that are targets for fallback to + // either `!` or `()`. 
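For reference, the documented `match` pattern as a complete program; it compiles today because the otherwise-unconstrained match type falls back to `()`, which implements `Default`:

fn foo() -> i32 { 4 }

fn main() {
    // The match type is only constrained by the `Default` bound on the first arm
    // and the `!` flowing in from `return`; the backwards-compatible fallback
    // resolves it to `()`, so `Default::default()` becomes `<() as Default>::default()`.
    let _x = match foo() {
        22 => Default::default(),
        _ => return,
    };
}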
+ let diverging_roots: FxHashSet = self + .diverging_type_vars + .borrow() + .iter() + .map(|&ty| self.shallow_resolve(ty)) + .filter_map(|ty| ty.ty_vid()) + .map(|vid| self.root_var(vid)) + .collect(); + debug!( + "calculate_diverging_fallback: diverging_type_vars={:?}", + self.diverging_type_vars.borrow() + ); + debug!("calculate_diverging_fallback: diverging_roots={:?}", diverging_roots); + + // Find all type variables that are reachable from a diverging + // type variable. These will typically default to `!`, unless + // we find later that they are *also* reachable from some + // other type variable outside this set. + let mut roots_reachable_from_diverging = DepthFirstSearch::new(&coercion_graph); + let mut diverging_vids = vec![]; + let mut non_diverging_vids = vec![]; + for unsolved_vid in unsolved_vids { + let root_vid = self.root_var(unsolved_vid); + debug!( + "calculate_diverging_fallback: unsolved_vid={:?} root_vid={:?} diverges={:?}", + unsolved_vid, + root_vid, + diverging_roots.contains(&root_vid), + ); + if diverging_roots.contains(&root_vid) { + diverging_vids.push(unsolved_vid); + roots_reachable_from_diverging.push_start_node(root_vid); + + debug!( + "calculate_diverging_fallback: root_vid={:?} reaches {:?}", + root_vid, + coercion_graph.depth_first_search(root_vid).collect::>() + ); + + // drain the iterator to visit all nodes reachable from this node + roots_reachable_from_diverging.complete_search(); + } else { + non_diverging_vids.push(unsolved_vid); + } + } + + debug!( + "calculate_diverging_fallback: roots_reachable_from_diverging={:?}", + roots_reachable_from_diverging, + ); + + // Find all type variables N0 that are not reachable from a + // diverging variable, and then compute the set reachable from + // N0, which we call N. These are the *non-diverging* type + // variables. (Note that this set consists of "root variables".) + let mut roots_reachable_from_non_diverging = DepthFirstSearch::new(&coercion_graph); + for &non_diverging_vid in &non_diverging_vids { + let root_vid = self.root_var(non_diverging_vid); + if roots_reachable_from_diverging.visited(root_vid) { + continue; + } + roots_reachable_from_non_diverging.push_start_node(root_vid); + roots_reachable_from_non_diverging.complete_search(); + } + debug!( + "calculate_diverging_fallback: roots_reachable_from_non_diverging={:?}", + roots_reachable_from_non_diverging, + ); + + debug!("inherited: {:#?}", self.inh.fulfillment_cx.borrow_mut().pending_obligations()); + debug!("obligations: {:#?}", self.fulfillment_cx.borrow_mut().pending_obligations()); + debug!("relationships: {:#?}", relationships); + + // For each diverging variable, figure out whether it can + // reach a member of N. If so, it falls back to `()`. Else + // `!`. 
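The two depth-first searches above compute which roots are reachable from diverging variables and which are reachable from the remaining ones; a diverging root that can also reach the second set will later fall back to `()`. A toy reachability sketch over a plain adjacency list (node numbers stand in for root variable ids; this is not the rustc graph type):

use std::collections::HashSet;

// Edges are coercions `A -> B` between root variables.
fn reachable(graph: &[Vec<usize>], starts: &[usize]) -> HashSet<usize> {
    let mut seen = HashSet::new();
    let mut stack: Vec<usize> = starts.to_vec();
    while let Some(node) = stack.pop() {
        if seen.insert(node) {
            stack.extend(&graph[node]);
        }
    }
    seen
}

fn main() {
    // 0 is a diverging root, 1 is non-diverging, and both coerce into 2.
    let graph = vec![vec![2], vec![2], vec![]];
    let from_diverging = reachable(&graph, &[0]);
    let from_non_diverging = reachable(&graph, &[1]);
    // The diverging root reaches a variable that live (non-diverging) code also
    // reaches, so it would fall back to `()` rather than `!`.
    assert!(from_diverging.iter().any(|n| from_non_diverging.contains(n)));
}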
+ let mut diverging_fallback = FxHashMap::default(); + diverging_fallback.reserve(diverging_vids.len()); + for &diverging_vid in &diverging_vids { + let diverging_ty = self.tcx.mk_ty_var(diverging_vid); + let root_vid = self.root_var(diverging_vid); + let can_reach_non_diverging = coercion_graph + .depth_first_search(root_vid) + .any(|n| roots_reachable_from_non_diverging.visited(n)); + + let mut relationship = ty::FoundRelationships { self_in_trait: false, output: false }; + + for (vid, rel) in relationships.iter() { + if self.root_var(*vid) == root_vid { + relationship.self_in_trait |= rel.self_in_trait; + relationship.output |= rel.output; + } + } + + if relationship.self_in_trait && relationship.output { + // This case falls back to () to ensure that the code pattern in + // src/test/ui/never_type/fallback-closure-ret.rs continues to + // compile when never_type_fallback is enabled. + // + // This rule is not readily explainable from first principles, + // but is rather intended as a patchwork fix to ensure code + // which compiles before the stabilization of never type + // fallback continues to work. + // + // Typically this pattern is encountered in a function taking a + // closure as a parameter, where the return type of that closure + // (checked by `relationship.output`) is expected to implement + // some trait (checked by `relationship.self_in_trait`). This + // can come up in non-closure cases too, so we do not limit this + // rule to specifically `FnOnce`. + // + // When the closure's body is something like `panic!()`, the + // return type would normally be inferred to `!`. However, it + // needs to fall back to `()` in order to still compile, as the + // trait is specifically implemented for `()` but not `!`. + // + // For details on the requirements for these relationships to be + // set, see the relationship finding module in + // compiler/rustc_trait_selection/src/traits/relationships.rs. + debug!("fallback to () - found trait and projection: {:?}", diverging_vid); + diverging_fallback.insert(diverging_ty, self.tcx.types.unit); + } else if can_reach_non_diverging { + debug!("fallback to () - reached non-diverging: {:?}", diverging_vid); + diverging_fallback.insert(diverging_ty, self.tcx.types.unit); + } else { + debug!("fallback to ! - all diverging: {:?}", diverging_vid); + diverging_fallback.insert(diverging_ty, self.tcx.mk_diverging_default()); + } + } + + diverging_fallback + } + + /// Returns a graph whose nodes are (unresolved) inference variables and where + /// an edge `?A -> ?B` indicates that the variable `?A` is coerced to `?B`. + fn create_coercion_graph(&self) -> VecGraph { + let pending_obligations = self.fulfillment_cx.borrow_mut().pending_obligations(); + debug!("create_coercion_graph: pending_obligations={:?}", pending_obligations); + let coercion_edges: Vec<(ty::TyVid, ty::TyVid)> = pending_obligations + .into_iter() + .filter_map(|obligation| { + // The predicates we are looking for look like `Coerce(?A -> ?B)`. + // They will have no bound variables. + obligation.predicate.kind().no_bound_vars() + }) + .filter_map(|atom| { + // We consider both subtyping and coercion to imply 'flow' from + // some position in the code `a` to a different position `b`. + // This is then used to determine which variables interact with + // live code, and as such must fall back to `()` to preserve + // soundness. + // + // In practice currently the two ways that this happens is + // coercion and subtyping. 
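The `self_in_trait && output` special case above exists for the closure-return pattern: roughly, code of the following shape, which keeps compiling because the closure's diverging return type falls back to `()` (the helper `run` is invented for illustration):

fn run<R: Default>(f: impl FnOnce() -> R) -> R {
    f()
}

#[allow(dead_code)]
fn diverging_case() {
    // The closure's return type is constrained only by `R: Default` and the
    // diverging `panic!`; with fallback to `()`, the bound is satisfied.
    let _value = run(|| panic!("diverges"));
}

fn main() {
    // A non-diverging use of the same helper, just to exercise it at runtime.
    let n: i32 = run(|| 7);
    assert_eq!(n, 7);
}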
+ let (a, b) = if let ty::PredicateKind::Coerce(ty::CoercePredicate { a, b }) = atom { + (a, b) + } else if let ty::PredicateKind::Subtype(ty::SubtypePredicate { + a_is_expected: _, + a, + b, + }) = atom + { + (a, b) + } else { + return None; + }; + + let a_vid = self.root_vid(a)?; + let b_vid = self.root_vid(b)?; + Some((a_vid, b_vid)) + }) + .collect(); + debug!("create_coercion_graph: coercion_edges={:?}", coercion_edges); + let num_ty_vars = self.num_ty_vars(); + VecGraph::new(num_ty_vars, coercion_edges) + } + + /// If `ty` is an unresolved type variable, returns its root vid. + fn root_vid(&self, ty: Ty<'tcx>) -> Option { + Some(self.root_var(self.shallow_resolve(ty).ty_vid()?)) + } +} diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs b/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs new file mode 100644 index 000000000..3a8093345 --- /dev/null +++ b/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs @@ -0,0 +1,1510 @@ +use crate::astconv::{ + AstConv, CreateSubstsForGenericArgsCtxt, ExplicitLateBound, GenericArgCountMismatch, + GenericArgCountResult, IsMethodCall, PathSeg, +}; +use crate::check::callee::{self, DeferredCallResolution}; +use crate::check::method::{self, MethodCallee, SelfSource}; +use crate::check::rvalue_scopes; +use crate::check::{BreakableCtxt, Diverges, Expectation, FnCtxt, LocalTy}; + +use rustc_data_structures::captures::Captures; +use rustc_data_structures::fx::FxHashSet; +use rustc_errors::{Applicability, Diagnostic, ErrorGuaranteed, MultiSpan}; +use rustc_hir as hir; +use rustc_hir::def::{CtorOf, DefKind, Res}; +use rustc_hir::def_id::DefId; +use rustc_hir::lang_items::LangItem; +use rustc_hir::{ExprKind, GenericArg, Node, QPath}; +use rustc_infer::infer::canonical::{Canonical, OriginalQueryValues, QueryResponse}; +use rustc_infer::infer::error_reporting::TypeAnnotationNeeded::E0282; +use rustc_infer::infer::{InferOk, InferResult}; +use rustc_middle::ty::adjustment::{Adjust, Adjustment, AutoBorrow, AutoBorrowMutability}; +use rustc_middle::ty::fold::TypeFoldable; +use rustc_middle::ty::subst::{ + self, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSelfTy, UserSubsts, +}; +use rustc_middle::ty::visit::TypeVisitable; +use rustc_middle::ty::{ + self, AdtKind, CanonicalUserType, DefIdTree, EarlyBinder, GenericParamDefKind, ToPolyTraitRef, + ToPredicate, Ty, UserType, +}; +use rustc_session::lint; +use rustc_span::def_id::LocalDefId; +use rustc_span::hygiene::DesugaringKind; +use rustc_span::symbol::{kw, sym, Ident}; +use rustc_span::{Span, DUMMY_SP}; +use rustc_trait_selection::infer::InferCtxtExt as _; +use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _; +use rustc_trait_selection::traits::{ + self, ObligationCause, ObligationCauseCode, TraitEngine, TraitEngineExt, +}; + +use std::collections::hash_map::Entry; +use std::slice; + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + /// Produces warning on the given node, if the current point in the + /// function is unreachable, and there hasn't been another warning. + pub(in super::super) fn warn_if_unreachable(&self, id: hir::HirId, span: Span, kind: &str) { + // FIXME: Combine these two 'if' expressions into one once + // let chains are implemented + if let Diverges::Always { span: orig_span, custom_note } = self.diverges.get() { + // If span arose from a desugaring of `if` or `while`, then it is the condition itself, + // which diverges, that we are about to lint on. This gives suboptimal diagnostics. 
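A minimal program that trips the `unreachable_code` lint reported through warn_if_unreachable; the lint is only a warning, so this still compiles and runs:

fn main() {
    let value = compute();
    println!("{value}");
}

fn compute() -> i32 {
    return 1;
    println!("this line is never reached"); // warning: unreachable statement
}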
+ // Instead, stop here so that the `if`- or `while`-expression's block is linted instead. + if !span.is_desugaring(DesugaringKind::CondTemporary) + && !span.is_desugaring(DesugaringKind::Async) + && !orig_span.is_desugaring(DesugaringKind::Await) + { + self.diverges.set(Diverges::WarnedAlways); + + debug!("warn_if_unreachable: id={:?} span={:?} kind={}", id, span, kind); + + self.tcx().struct_span_lint_hir(lint::builtin::UNREACHABLE_CODE, id, span, |lint| { + let msg = format!("unreachable {}", kind); + lint.build(&msg) + .span_label(span, &msg) + .span_label( + orig_span, + custom_note + .unwrap_or("any code following this expression is unreachable"), + ) + .emit(); + }) + } + } + } + + /// Resolves type and const variables in `ty` if possible. Unlike the infcx + /// version (resolve_vars_if_possible), this version will + /// also select obligations if it seems useful, in an effort + /// to get more type information. + pub(in super::super) fn resolve_vars_with_obligations(&self, ty: Ty<'tcx>) -> Ty<'tcx> { + self.resolve_vars_with_obligations_and_mutate_fulfillment(ty, |_| {}) + } + + #[instrument(skip(self, mutate_fulfillment_errors), level = "debug")] + pub(in super::super) fn resolve_vars_with_obligations_and_mutate_fulfillment( + &self, + mut ty: Ty<'tcx>, + mutate_fulfillment_errors: impl Fn(&mut Vec>), + ) -> Ty<'tcx> { + // No Infer()? Nothing needs doing. + if !ty.has_infer_types_or_consts() { + debug!("no inference var, nothing needs doing"); + return ty; + } + + // If `ty` is a type variable, see whether we already know what it is. + ty = self.resolve_vars_if_possible(ty); + if !ty.has_infer_types_or_consts() { + debug!(?ty); + return ty; + } + + // If not, try resolving pending obligations as much as + // possible. This can help substantially when there are + // indirect dependencies that don't seem worth tracking + // precisely. 
+ self.select_obligations_where_possible(false, mutate_fulfillment_errors); + ty = self.resolve_vars_if_possible(ty); + + debug!(?ty); + ty + } + + pub(in super::super) fn record_deferred_call_resolution( + &self, + closure_def_id: LocalDefId, + r: DeferredCallResolution<'tcx>, + ) { + let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut(); + deferred_call_resolutions.entry(closure_def_id).or_default().push(r); + } + + pub(in super::super) fn remove_deferred_call_resolutions( + &self, + closure_def_id: LocalDefId, + ) -> Vec> { + let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut(); + deferred_call_resolutions.remove(&closure_def_id).unwrap_or_default() + } + + pub fn tag(&self) -> String { + format!("{:p}", self) + } + + pub fn local_ty(&self, span: Span, nid: hir::HirId) -> LocalTy<'tcx> { + self.locals.borrow().get(&nid).cloned().unwrap_or_else(|| { + span_bug!(span, "no type for local variable {}", self.tcx.hir().node_to_string(nid)) + }) + } + + #[inline] + pub fn write_ty(&self, id: hir::HirId, ty: Ty<'tcx>) { + debug!("write_ty({:?}, {:?}) in fcx {}", id, self.resolve_vars_if_possible(ty), self.tag()); + self.typeck_results.borrow_mut().node_types_mut().insert(id, ty); + + if ty.references_error() { + self.has_errors.set(true); + self.set_tainted_by_errors(); + } + } + + pub fn write_field_index(&self, hir_id: hir::HirId, index: usize) { + self.typeck_results.borrow_mut().field_indices_mut().insert(hir_id, index); + } + + #[instrument(level = "debug", skip(self))] + pub(in super::super) fn write_resolution( + &self, + hir_id: hir::HirId, + r: Result<(DefKind, DefId), ErrorGuaranteed>, + ) { + self.typeck_results.borrow_mut().type_dependent_defs_mut().insert(hir_id, r); + } + + #[instrument(level = "debug", skip(self))] + pub fn write_method_call(&self, hir_id: hir::HirId, method: MethodCallee<'tcx>) { + self.write_resolution(hir_id, Ok((DefKind::AssocFn, method.def_id))); + self.write_substs(hir_id, method.substs); + + // When the method is confirmed, the `method.substs` includes + // parameters from not just the method, but also the impl of + // the method -- in particular, the `Self` type will be fully + // resolved. However, those are not something that the "user + // specified" -- i.e., those types come from the inferred type + // of the receiver, not something the user wrote. So when we + // create the user-substs, we want to replace those earlier + // types with just the types that the user actually wrote -- + // that is, those that appear on the *method itself*. + // + // As an example, if the user wrote something like + // `foo.bar::(...)` -- the `Self` type here will be the + // type of `foo` (possibly adjusted), but we don't want to + // include that. We want just the `[_, u32]` part. 
+ if !method.substs.is_empty() { + let method_generics = self.tcx.generics_of(method.def_id); + if !method_generics.params.is_empty() { + let user_type_annotation = self.probe(|_| { + let user_substs = UserSubsts { + substs: InternalSubsts::for_item(self.tcx, method.def_id, |param, _| { + let i = param.index as usize; + if i < method_generics.parent_count { + self.var_for_def(DUMMY_SP, param) + } else { + method.substs[i] + } + }), + user_self_ty: None, // not relevant here + }; + + self.canonicalize_user_type_annotation(UserType::TypeOf( + method.def_id, + user_substs, + )) + }); + + debug!("write_method_call: user_type_annotation={:?}", user_type_annotation); + self.write_user_type_annotation(hir_id, user_type_annotation); + } + } + } + + pub fn write_substs(&self, node_id: hir::HirId, substs: SubstsRef<'tcx>) { + if !substs.is_empty() { + debug!("write_substs({:?}, {:?}) in fcx {}", node_id, substs, self.tag()); + + self.typeck_results.borrow_mut().node_substs_mut().insert(node_id, substs); + } + } + + /// Given the substs that we just converted from the HIR, try to + /// canonicalize them and store them as user-given substitutions + /// (i.e., substitutions that must be respected by the NLL check). + /// + /// This should be invoked **before any unifications have + /// occurred**, so that annotations like `Vec<_>` are preserved + /// properly. + #[instrument(skip(self), level = "debug")] + pub fn write_user_type_annotation_from_substs( + &self, + hir_id: hir::HirId, + def_id: DefId, + substs: SubstsRef<'tcx>, + user_self_ty: Option>, + ) { + debug!("fcx {}", self.tag()); + + if Self::can_contain_user_lifetime_bounds((substs, user_self_ty)) { + let canonicalized = self.canonicalize_user_type_annotation(UserType::TypeOf( + def_id, + UserSubsts { substs, user_self_ty }, + )); + debug!(?canonicalized); + self.write_user_type_annotation(hir_id, canonicalized); + } + } + + #[instrument(skip(self), level = "debug")] + pub fn write_user_type_annotation( + &self, + hir_id: hir::HirId, + canonical_user_type_annotation: CanonicalUserType<'tcx>, + ) { + debug!("fcx {}", self.tag()); + + if !canonical_user_type_annotation.is_identity() { + self.typeck_results + .borrow_mut() + .user_provided_types_mut() + .insert(hir_id, canonical_user_type_annotation); + } else { + debug!("skipping identity substs"); + } + } + + #[instrument(skip(self, expr), level = "debug")] + pub fn apply_adjustments(&self, expr: &hir::Expr<'_>, adj: Vec>) { + debug!("expr = {:#?}", expr); + + if adj.is_empty() { + return; + } + + for a in &adj { + if let Adjust::NeverToAny = a.kind { + if a.target.is_ty_var() { + self.diverging_type_vars.borrow_mut().insert(a.target); + debug!("apply_adjustments: adding `{:?}` as diverging type var", a.target); + } + } + } + + let autoborrow_mut = adj.iter().any(|adj| { + matches!( + adj, + &Adjustment { + kind: Adjust::Borrow(AutoBorrow::Ref(_, AutoBorrowMutability::Mut { .. })), + .. + } + ) + }); + + match self.typeck_results.borrow_mut().adjustments_mut().entry(expr.hir_id) { + Entry::Vacant(entry) => { + entry.insert(adj); + } + Entry::Occupied(mut entry) => { + debug!(" - composing on top of {:?}", entry.get()); + match (&entry.get()[..], &adj[..]) { + // Applying any adjustment on top of a NeverToAny + // is a valid NeverToAny adjustment, because it can't + // be reached. + (&[Adjustment { kind: Adjust::NeverToAny, .. }], _) => return, + ( + &[ + Adjustment { kind: Adjust::Deref(_), .. }, + Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(..)), .. 
}, + ], + &[ + Adjustment { kind: Adjust::Deref(_), .. }, + .., // Any following adjustments are allowed. + ], + ) => { + // A reborrow has no effect before a dereference. + } + // FIXME: currently we never try to compose autoderefs + // and ReifyFnPointer/UnsafeFnPointer, but we could. + _ => { + self.tcx.sess.delay_span_bug( + expr.span, + &format!( + "while adjusting {:?}, can't compose {:?} and {:?}", + expr, + entry.get(), + adj + ), + ); + } + } + *entry.get_mut() = adj; + } + } + + // If there is an mutable auto-borrow, it is equivalent to `&mut `. + // In this case implicit use of `Deref` and `Index` within `` should + // instead be `DerefMut` and `IndexMut`, so fix those up. + if autoborrow_mut { + self.convert_place_derefs_to_mutable(expr); + } + } + + /// Basically whenever we are converting from a type scheme into + /// the fn body space, we always want to normalize associated + /// types as well. This function combines the two. + fn instantiate_type_scheme(&self, span: Span, substs: SubstsRef<'tcx>, value: T) -> T + where + T: TypeFoldable<'tcx>, + { + debug!("instantiate_type_scheme(value={:?}, substs={:?})", value, substs); + let value = EarlyBinder(value).subst(self.tcx, substs); + let result = self.normalize_associated_types_in(span, value); + debug!("instantiate_type_scheme = {:?}", result); + result + } + + /// As `instantiate_type_scheme`, but for the bounds found in a + /// generic type scheme. + pub(in super::super) fn instantiate_bounds( + &self, + span: Span, + def_id: DefId, + substs: SubstsRef<'tcx>, + ) -> (ty::InstantiatedPredicates<'tcx>, Vec) { + let bounds = self.tcx.predicates_of(def_id); + let spans: Vec = bounds.predicates.iter().map(|(_, span)| *span).collect(); + let result = bounds.instantiate(self.tcx, substs); + let result = self.normalize_associated_types_in(span, result); + debug!( + "instantiate_bounds(bounds={:?}, substs={:?}) = {:?}, {:?}", + bounds, substs, result, spans, + ); + (result, spans) + } + + pub(in super::super) fn normalize_associated_types_in(&self, span: Span, value: T) -> T + where + T: TypeFoldable<'tcx>, + { + self.inh.normalize_associated_types_in(span, self.body_id, self.param_env, value) + } + + pub(in super::super) fn normalize_associated_types_in_as_infer_ok( + &self, + span: Span, + value: T, + ) -> InferOk<'tcx, T> + where + T: TypeFoldable<'tcx>, + { + self.inh.partially_normalize_associated_types_in( + ObligationCause::misc(span, self.body_id), + self.param_env, + value, + ) + } + + pub(in super::super) fn normalize_op_associated_types_in_as_infer_ok( + &self, + span: Span, + value: T, + opt_input_expr: Option<&hir::Expr<'_>>, + ) -> InferOk<'tcx, T> + where + T: TypeFoldable<'tcx>, + { + self.inh.partially_normalize_associated_types_in( + ObligationCause::new( + span, + self.body_id, + traits::BinOp { + rhs_span: opt_input_expr.map(|expr| expr.span), + is_lit: opt_input_expr + .map_or(false, |expr| matches!(expr.kind, ExprKind::Lit(_))), + output_pred: None, + }, + ), + self.param_env, + value, + ) + } + + pub fn require_type_meets( + &self, + ty: Ty<'tcx>, + span: Span, + code: traits::ObligationCauseCode<'tcx>, + def_id: DefId, + ) { + self.register_bound(ty, def_id, traits::ObligationCause::new(span, self.body_id, code)); + } + + pub fn require_type_is_sized( + &self, + ty: Ty<'tcx>, + span: Span, + code: traits::ObligationCauseCode<'tcx>, + ) { + if !ty.references_error() { + let lang_item = self.tcx.require_lang_item(LangItem::Sized, None); + self.require_type_meets(ty, span, code, lang_item); + } + } + + pub fn 
require_type_is_sized_deferred( + &self, + ty: Ty<'tcx>, + span: Span, + code: traits::ObligationCauseCode<'tcx>, + ) { + if !ty.references_error() { + self.deferred_sized_obligations.borrow_mut().push((ty, span, code)); + } + } + + pub fn register_bound( + &self, + ty: Ty<'tcx>, + def_id: DefId, + cause: traits::ObligationCause<'tcx>, + ) { + if !ty.references_error() { + self.fulfillment_cx.borrow_mut().register_bound( + self, + self.param_env, + ty, + def_id, + cause, + ); + } + } + + pub fn to_ty(&self, ast_t: &hir::Ty<'_>) -> Ty<'tcx> { + let t = >::ast_ty_to_ty(self, ast_t); + self.register_wf_obligation(t.into(), ast_t.span, traits::WellFormed(None)); + t + } + + pub fn to_ty_saving_user_provided_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> { + let ty = self.to_ty(ast_ty); + debug!("to_ty_saving_user_provided_ty: ty={:?}", ty); + + if Self::can_contain_user_lifetime_bounds(ty) { + let c_ty = self.canonicalize_response(UserType::Ty(ty)); + debug!("to_ty_saving_user_provided_ty: c_ty={:?}", c_ty); + self.typeck_results.borrow_mut().user_provided_types_mut().insert(ast_ty.hir_id, c_ty); + } + + ty + } + + pub fn array_length_to_const(&self, length: &hir::ArrayLen) -> ty::Const<'tcx> { + match length { + &hir::ArrayLen::Infer(_, span) => self.ct_infer(self.tcx.types.usize, None, span), + hir::ArrayLen::Body(anon_const) => self.to_const(anon_const), + } + } + + pub fn to_const(&self, ast_c: &hir::AnonConst) -> ty::Const<'tcx> { + let const_def_id = self.tcx.hir().local_def_id(ast_c.hir_id); + let c = ty::Const::from_anon_const(self.tcx, const_def_id); + self.register_wf_obligation( + c.into(), + self.tcx.hir().span(ast_c.hir_id), + ObligationCauseCode::WellFormed(None), + ); + c + } + + pub fn const_arg_to_const( + &self, + ast_c: &hir::AnonConst, + param_def_id: DefId, + ) -> ty::Const<'tcx> { + let const_def = ty::WithOptConstParam { + did: self.tcx.hir().local_def_id(ast_c.hir_id), + const_param_did: Some(param_def_id), + }; + let c = ty::Const::from_opt_const_arg_anon_const(self.tcx, const_def); + self.register_wf_obligation( + c.into(), + self.tcx.hir().span(ast_c.hir_id), + ObligationCauseCode::WellFormed(None), + ); + c + } + + // If the type given by the user has free regions, save it for later, since + // NLL would like to enforce those. Also pass in types that involve + // projections, since those can resolve to `'static` bounds (modulo #54940, + // which hopefully will be fixed by the time you see this comment, dear + // reader, although I have my doubts). Also pass in types with inference + // types, because they may be repeated. Other sorts of things are already + // sufficiently enforced with erased regions. =) + fn can_contain_user_lifetime_bounds(t: T) -> bool + where + T: TypeVisitable<'tcx>, + { + t.has_free_regions() || t.has_projections() || t.has_infer_types() + } + + pub fn node_ty(&self, id: hir::HirId) -> Ty<'tcx> { + match self.typeck_results.borrow().node_types().get(id) { + Some(&t) => t, + None if self.is_tainted_by_errors() => self.tcx.ty_error(), + None => { + bug!( + "no type for node {}: {} in fcx {}", + id, + self.tcx.hir().node_to_string(id), + self.tag() + ); + } + } + } + + pub fn node_ty_opt(&self, id: hir::HirId) -> Option> { + match self.typeck_results.borrow().node_types().get(id) { + Some(&t) => Some(t), + None if self.is_tainted_by_errors() => Some(self.tcx.ty_error()), + None => None, + } + } + + /// Registers an obligation for checking later, during regionck, that `arg` is well-formed. 
+ pub fn register_wf_obligation( + &self, + arg: subst::GenericArg<'tcx>, + span: Span, + code: traits::ObligationCauseCode<'tcx>, + ) { + // WF obligations never themselves fail, so no real need to give a detailed cause: + let cause = traits::ObligationCause::new(span, self.body_id, code); + self.register_predicate(traits::Obligation::new( + cause, + self.param_env, + ty::Binder::dummy(ty::PredicateKind::WellFormed(arg)).to_predicate(self.tcx), + )); + } + + /// Registers obligations that all `substs` are well-formed. + pub fn add_wf_bounds(&self, substs: SubstsRef<'tcx>, expr: &hir::Expr<'_>) { + for arg in substs.iter().filter(|arg| { + matches!(arg.unpack(), GenericArgKind::Type(..) | GenericArgKind::Const(..)) + }) { + self.register_wf_obligation(arg, expr.span, traits::WellFormed(None)); + } + } + + // FIXME(arielb1): use this instead of field.ty everywhere + // Only for fields! Returns for methods> + // Indifferent to privacy flags + pub fn field_ty( + &self, + span: Span, + field: &'tcx ty::FieldDef, + substs: SubstsRef<'tcx>, + ) -> Ty<'tcx> { + self.normalize_associated_types_in(span, field.ty(self.tcx, substs)) + } + + pub(in super::super) fn resolve_rvalue_scopes(&self, def_id: DefId) { + let scope_tree = self.tcx.region_scope_tree(def_id); + let rvalue_scopes = { rvalue_scopes::resolve_rvalue_scopes(self, &scope_tree, def_id) }; + let mut typeck_results = self.inh.typeck_results.borrow_mut(); + typeck_results.rvalue_scopes = rvalue_scopes; + } + + pub(in super::super) fn resolve_generator_interiors(&self, def_id: DefId) { + let mut generators = self.deferred_generator_interiors.borrow_mut(); + for (body_id, interior, kind) in generators.drain(..) { + self.select_obligations_where_possible(false, |_| {}); + crate::check::generator_interior::resolve_interior( + self, def_id, body_id, interior, kind, + ); + } + } + + #[instrument(skip(self), level = "debug")] + pub(in super::super) fn select_all_obligations_or_error(&self) { + let errors = self.fulfillment_cx.borrow_mut().select_all_or_error(&self); + + if !errors.is_empty() { + self.report_fulfillment_errors(&errors, self.inh.body_id, false); + } + } + + /// Select as many obligations as we can at present. + pub(in super::super) fn select_obligations_where_possible( + &self, + fallback_has_occurred: bool, + mutate_fulfillment_errors: impl Fn(&mut Vec>), + ) { + let mut result = self.fulfillment_cx.borrow_mut().select_where_possible(self); + if !result.is_empty() { + mutate_fulfillment_errors(&mut result); + self.report_fulfillment_errors(&result, self.inh.body_id, fallback_has_occurred); + } + } + + /// For the overloaded place expressions (`*x`, `x[3]`), the trait + /// returns a type of `&T`, but the actual type we assign to the + /// *expression* is `T`. So this function just peels off the return + /// type by one layer to yield `T`. 
+ pub(in super::super) fn make_overloaded_place_return_type( + &self, + method: MethodCallee<'tcx>, + ) -> ty::TypeAndMut<'tcx> { + // extract method return type, which will be &T; + let ret_ty = method.sig.output(); + + // method returns &T, but the type as visible to user is T, so deref + ret_ty.builtin_deref(true).unwrap() + } + + #[instrument(skip(self), level = "debug")] + fn self_type_matches_expected_vid( + &self, + trait_ref: ty::PolyTraitRef<'tcx>, + expected_vid: ty::TyVid, + ) -> bool { + let self_ty = self.shallow_resolve(trait_ref.skip_binder().self_ty()); + debug!(?self_ty); + + match *self_ty.kind() { + ty::Infer(ty::TyVar(found_vid)) => { + // FIXME: consider using `sub_root_var` here so we + // can see through subtyping. + let found_vid = self.root_var(found_vid); + debug!("self_type_matches_expected_vid - found_vid={:?}", found_vid); + expected_vid == found_vid + } + _ => false, + } + } + + #[instrument(skip(self), level = "debug")] + pub(in super::super) fn obligations_for_self_ty<'b>( + &'b self, + self_ty: ty::TyVid, + ) -> impl Iterator, traits::PredicateObligation<'tcx>)> + + Captures<'tcx> + + 'b { + // FIXME: consider using `sub_root_var` here so we + // can see through subtyping. + let ty_var_root = self.root_var(self_ty); + trace!("pending_obligations = {:#?}", self.fulfillment_cx.borrow().pending_obligations()); + + self.fulfillment_cx + .borrow() + .pending_obligations() + .into_iter() + .filter_map(move |obligation| { + let bound_predicate = obligation.predicate.kind(); + match bound_predicate.skip_binder() { + ty::PredicateKind::Projection(data) => Some(( + bound_predicate.rebind(data).required_poly_trait_ref(self.tcx), + obligation, + )), + ty::PredicateKind::Trait(data) => { + Some((bound_predicate.rebind(data).to_poly_trait_ref(), obligation)) + } + ty::PredicateKind::Subtype(..) => None, + ty::PredicateKind::Coerce(..) => None, + ty::PredicateKind::RegionOutlives(..) => None, + ty::PredicateKind::TypeOutlives(..) => None, + ty::PredicateKind::WellFormed(..) => None, + ty::PredicateKind::ObjectSafe(..) => None, + ty::PredicateKind::ConstEvaluatable(..) => None, + ty::PredicateKind::ConstEquate(..) => None, + // N.B., this predicate is created by breaking down a + // `ClosureType: FnFoo()` predicate, where + // `ClosureType` represents some `Closure`. It can't + // possibly be referring to the current closure, + // because we haven't produced the `Closure` for + // this closure yet; this is exactly why the other + // code is looking for a self type of an unresolved + // inference variable. + ty::PredicateKind::ClosureKind(..) => None, + ty::PredicateKind::TypeWellFormedFromEnv(..) => None, + } + }) + .filter(move |(tr, _)| self.self_type_matches_expected_vid(*tr, ty_var_root)) + } + + pub(in super::super) fn type_var_is_sized(&self, self_ty: ty::TyVid) -> bool { + self.obligations_for_self_ty(self_ty) + .any(|(tr, _)| Some(tr.def_id()) == self.tcx.lang_items().sized_trait()) + } + + pub(in super::super) fn err_args(&self, len: usize) -> Vec> { + vec![self.tcx.ty_error(); len] + } + + /// Unifies the output type with the expected type early, for more coercions + /// and forward type information on the input expressions. 
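As a plain-Rust illustration of why it pays to unify the output type with the expectation early (a minimal sketch, not rustc internals; `pair` is an invented function): the annotation on the binding fixes the call's return type, and that expectation flows backwards into the argument.

fn pair<T: Copy>(x: T) -> (T, T) {
    (x, x)
}

fn main() {
    // The expected type of `p` fixes the call's output to `(u64, u64)`,
    // which in turn forces the literal argument `0` to be inferred as `u64`
    // rather than falling back to the `i32` default.
    let p: (u64, u64) = pair(0);
    println!("{:?}", p);
}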
+ #[instrument(skip(self, call_span), level = "debug")] + pub(in super::super) fn expected_inputs_for_expected_output( + &self, + call_span: Span, + expected_ret: Expectation<'tcx>, + formal_ret: Ty<'tcx>, + formal_args: &[Ty<'tcx>], + ) -> Option>> { + let formal_ret = self.resolve_vars_with_obligations(formal_ret); + let ret_ty = expected_ret.only_has_type(self)?; + + // HACK(oli-obk): This is a hack to keep RPIT and TAIT in sync wrt their behaviour. + // Without it, the inference + // variable will get instantiated with the opaque type. The inference variable often + // has various helpful obligations registered for it that help closures figure out their + // signature. If we infer the inference var to the opaque type, the closure won't be able + // to find those obligations anymore, and it can't necessarily find them from the opaque + // type itself. We could be more powerful with inference if we *combined* the obligations + // so that we got both the obligations from the opaque type and the ones from the inference + // variable. That will accept more code than we do right now, so we need to carefully consider + // the implications. + // Note: this check is pessimistic, as the inference type could be matched with something other + // than the opaque type, but then we need a new `TypeRelation` just for this specific case and + // can't re-use `sup` below. + // See src/test/ui/impl-trait/hidden-type-is-opaque.rs and + // src/test/ui/impl-trait/hidden-type-is-opaque-2.rs for examples that hit this path. + if formal_ret.has_infer_types() { + for ty in ret_ty.walk() { + if let ty::subst::GenericArgKind::Type(ty) = ty.unpack() + && let ty::Opaque(def_id, _) = *ty.kind() + && let Some(def_id) = def_id.as_local() + && self.opaque_type_origin(def_id, DUMMY_SP).is_some() { + return None; + } + } + } + + let expect_args = self + .fudge_inference_if_ok(|| { + // Attempt to apply a subtyping relationship between the formal + // return type (likely containing type variables if the function + // is polymorphic) and the expected return type. + // No argument expectations are produced if unification fails. + let origin = self.misc(call_span); + let ures = self.at(&origin, self.param_env).sup(ret_ty, formal_ret); + + // FIXME(#27336) can't use ? here, Try::from_error doesn't default + // to identity so the resulting type is not constrained. + match ures { + Ok(ok) => { + // Process any obligations locally as much as + // we can. We don't care if some things turn + // out unconstrained or ambiguous, as we're + // just trying to get hints here. + let errors = self.save_and_restore_in_snapshot_flag(|_| { + let mut fulfill = >::new(self.tcx); + for obligation in ok.obligations { + fulfill.register_predicate_obligation(self, obligation); + } + fulfill.select_where_possible(self) + }); + + if !errors.is_empty() { + return Err(()); + } + } + Err(_) => return Err(()), + } + + // Record all the argument types, with the substitutions + // produced from the above subtyping unification. 
+ Ok(Some(formal_args.iter().map(|&ty| self.resolve_vars_if_possible(ty)).collect())) + }) + .unwrap_or_default(); + debug!(?formal_args, ?formal_ret, ?expect_args, ?expected_ret); + expect_args + } + + pub(in super::super) fn resolve_lang_item_path( + &self, + lang_item: hir::LangItem, + span: Span, + hir_id: hir::HirId, + expr_hir_id: Option, + ) -> (Res, Ty<'tcx>) { + let def_id = self.tcx.require_lang_item(lang_item, Some(span)); + let def_kind = self.tcx.def_kind(def_id); + + let item_ty = if let DefKind::Variant = def_kind { + self.tcx.bound_type_of(self.tcx.parent(def_id)) + } else { + self.tcx.bound_type_of(def_id) + }; + let substs = self.fresh_substs_for_item(span, def_id); + let ty = item_ty.subst(self.tcx, substs); + + self.write_resolution(hir_id, Ok((def_kind, def_id))); + self.add_required_obligations_with_code( + span, + def_id, + &substs, + match lang_item { + hir::LangItem::IntoFutureIntoFuture => { + ObligationCauseCode::AwaitableExpr(expr_hir_id) + } + hir::LangItem::IteratorNext | hir::LangItem::IntoIterIntoIter => { + ObligationCauseCode::ForLoopIterator + } + hir::LangItem::TryTraitFromOutput + | hir::LangItem::TryTraitFromResidual + | hir::LangItem::TryTraitBranch => ObligationCauseCode::QuestionMark, + _ => traits::ItemObligation(def_id), + }, + ); + (Res::Def(def_kind, def_id), ty) + } + + /// Resolves an associated value path into a base type and associated constant, or method + /// resolution. The newly resolved definition is written into `type_dependent_defs`. + pub fn resolve_ty_and_res_fully_qualified_call( + &self, + qpath: &'tcx QPath<'tcx>, + hir_id: hir::HirId, + span: Span, + ) -> (Res, Option>, &'tcx [hir::PathSegment<'tcx>]) { + debug!( + "resolve_ty_and_res_fully_qualified_call: qpath={:?} hir_id={:?} span={:?}", + qpath, hir_id, span + ); + let (ty, qself, item_segment) = match *qpath { + QPath::Resolved(ref opt_qself, ref path) => { + return ( + path.res, + opt_qself.as_ref().map(|qself| self.to_ty(qself)), + path.segments, + ); + } + QPath::TypeRelative(ref qself, ref segment) => { + // Don't use `self.to_ty`, since this will register a WF obligation. + // If we're trying to call a non-existent method on a trait + // (e.g. `MyTrait::missing_method`), then resolution will + // give us a `QPath::TypeRelative` with a trait object as + // `qself`. In that case, we want to avoid registering a WF obligation + // for `dyn MyTrait`, since we don't actually need the trait + // to be object-safe. + // We manually call `register_wf_obligation` in the success path + // below. + (>::ast_ty_to_ty_in_path(self, qself), qself, segment) + } + QPath::LangItem(..) => { + bug!("`resolve_ty_and_res_fully_qualified_call` called on `LangItem`") + } + }; + if let Some(&cached_result) = self.typeck_results.borrow().type_dependent_defs().get(hir_id) + { + self.register_wf_obligation(ty.into(), qself.span, traits::WellFormed(None)); + // Return directly on cache hit. This is useful to avoid doubly reporting + // errors with default match binding modes. See #44614. 
+ let def = cached_result.map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)); + return (def, Some(ty), slice::from_ref(&**item_segment)); + } + let item_name = item_segment.ident; + let result = self + .resolve_fully_qualified_call(span, item_name, ty, qself.span, hir_id) + .or_else(|error| { + let result = match error { + method::MethodError::PrivateMatch(kind, def_id, _) => Ok((kind, def_id)), + _ => Err(ErrorGuaranteed::unchecked_claim_error_was_emitted()), + }; + + // If we have a path like `MyTrait::missing_method`, then don't register + // a WF obligation for `dyn MyTrait` when method lookup fails. Otherwise, + // register a WF obligation so that we can detect any additional + // errors in the self type. + if !(matches!(error, method::MethodError::NoMatch(_)) && ty.is_trait()) { + self.register_wf_obligation(ty.into(), qself.span, traits::WellFormed(None)); + } + if item_name.name != kw::Empty { + if let Some(mut e) = self.report_method_error( + span, + ty, + item_name, + SelfSource::QPath(qself), + error, + None, + ) { + e.emit(); + } + } + result + }); + + if result.is_ok() { + self.register_wf_obligation(ty.into(), qself.span, traits::WellFormed(None)); + } + + // Write back the new resolution. + self.write_resolution(hir_id, result); + ( + result.map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)), + Some(ty), + slice::from_ref(&**item_segment), + ) + } + + /// Given a function `Node`, return its `FnDecl` if it exists, or `None` otherwise. + pub(in super::super) fn get_node_fn_decl( + &self, + node: Node<'tcx>, + ) -> Option<(&'tcx hir::FnDecl<'tcx>, Ident, bool)> { + match node { + Node::Item(&hir::Item { ident, kind: hir::ItemKind::Fn(ref sig, ..), .. }) => { + // This is less than ideal, it will not suggest a return type span on any + // method called `main`, regardless of whether it is actually the entry point, + // but it will still present it as the reason for the expected type. + Some((&sig.decl, ident, ident.name != sym::main)) + } + Node::TraitItem(&hir::TraitItem { + ident, + kind: hir::TraitItemKind::Fn(ref sig, ..), + .. + }) => Some((&sig.decl, ident, true)), + Node::ImplItem(&hir::ImplItem { + ident, + kind: hir::ImplItemKind::Fn(ref sig, ..), + .. + }) => Some((&sig.decl, ident, false)), + _ => None, + } + } + + /// Given a `HirId`, return the `FnDecl` of the method it is enclosed by and whether a + /// suggestion can be made, `None` otherwise. + pub fn get_fn_decl(&self, blk_id: hir::HirId) -> Option<(&'tcx hir::FnDecl<'tcx>, bool)> { + // Get enclosing Fn, if it is a function or a trait method, unless there's a `loop` or + // `while` before reaching it, as block tail returns are not available in them. 
+ self.tcx.hir().get_return_block(blk_id).and_then(|blk_id| { + let parent = self.tcx.hir().get(blk_id); + self.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main)) + }) + } + + pub(in super::super) fn note_internal_mutation_in_method( + &self, + err: &mut Diagnostic, + expr: &hir::Expr<'_>, + expected: Ty<'tcx>, + found: Ty<'tcx>, + ) { + if found != self.tcx.types.unit { + return; + } + if let ExprKind::MethodCall(path_segment, [rcvr, ..], _) = expr.kind { + if self + .typeck_results + .borrow() + .expr_ty_adjusted_opt(rcvr) + .map_or(true, |ty| expected.peel_refs() != ty.peel_refs()) + { + return; + } + let mut sp = MultiSpan::from_span(path_segment.ident.span); + sp.push_span_label( + path_segment.ident.span, + format!( + "this call modifies {} in-place", + match rcvr.kind { + ExprKind::Path(QPath::Resolved( + None, + hir::Path { segments: [segment], .. }, + )) => format!("`{}`", segment.ident), + _ => "its receiver".to_string(), + } + ), + ); + sp.push_span_label( + rcvr.span, + "you probably want to use this value after calling the method...", + ); + err.span_note( + sp, + &format!("method `{}` modifies its receiver in-place", path_segment.ident), + ); + err.note(&format!("...instead of the `()` output of method `{}`", path_segment.ident)); + } + } + + pub(in super::super) fn note_need_for_fn_pointer( + &self, + err: &mut Diagnostic, + expected: Ty<'tcx>, + found: Ty<'tcx>, + ) { + let (sig, did, substs) = match (&expected.kind(), &found.kind()) { + (ty::FnDef(did1, substs1), ty::FnDef(did2, substs2)) => { + let sig1 = self.tcx.bound_fn_sig(*did1).subst(self.tcx, substs1); + let sig2 = self.tcx.bound_fn_sig(*did2).subst(self.tcx, substs2); + if sig1 != sig2 { + return; + } + err.note( + "different `fn` items always have unique types, even if their signatures are \ + the same", + ); + (sig1, *did1, substs1) + } + (ty::FnDef(did, substs), ty::FnPtr(sig2)) => { + let sig1 = self.tcx.bound_fn_sig(*did).subst(self.tcx, substs); + if sig1 != *sig2 { + return; + } + (sig1, *did, substs) + } + _ => return, + }; + err.help(&format!("change the expected type to be function pointer `{}`", sig)); + err.help(&format!( + "if the expected type is due to type inference, cast the expected `fn` to a function \ + pointer: `{} as {}`", + self.tcx.def_path_str_with_substs(did, substs), + sig + )); + } + + // Instantiates the given path, which must refer to an item with the given + // number of type parameters and type. 
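For intuition about what "instantiating a value path" means at the language level, here is a minimal standalone example (not rustc internals; `identity` is an invented function): the same generic value path is used once with explicit generic arguments and once with an omitted argument that is filled in by inference, which is exactly the distinction the function below has to handle.

fn identity<T>(x: T) -> T {
    x
}

fn main() {
    // Explicit generic arguments on the value path: the substitution is
    // spelled out by the user ("turbofish").
    let a = identity::<u8>(5);
    // No arguments provided: a fresh inference variable is substituted for
    // `T` and later resolved to `u16` from the annotation on `b`.
    let b: u16 = identity(5);
    println!("{a} {b}");
}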
+ #[instrument(skip(self, span), level = "debug")] + pub fn instantiate_value_path( + &self, + segments: &[hir::PathSegment<'_>], + self_ty: Option>, + res: Res, + span: Span, + hir_id: hir::HirId, + ) -> (Ty<'tcx>, Res) { + let tcx = self.tcx; + + let path_segs = match res { + Res::Local(_) | Res::SelfCtor(_) => vec![], + Res::Def(kind, def_id) => >::def_ids_for_value_path_segments( + self, segments, self_ty, kind, def_id, + ), + _ => bug!("instantiate_value_path on {:?}", res), + }; + + let mut user_self_ty = None; + let mut is_alias_variant_ctor = false; + match res { + Res::Def(DefKind::Ctor(CtorOf::Variant, _), _) + if let Some(self_ty) = self_ty => + { + let adt_def = self_ty.ty_adt_def().unwrap(); + user_self_ty = Some(UserSelfTy { impl_def_id: adt_def.did(), self_ty }); + is_alias_variant_ctor = true; + } + Res::Def(DefKind::AssocFn | DefKind::AssocConst, def_id) => { + let assoc_item = tcx.associated_item(def_id); + let container = assoc_item.container; + let container_id = assoc_item.container_id(tcx); + debug!(?def_id, ?container, ?container_id); + match container { + ty::TraitContainer => { + callee::check_legal_trait_for_method_call(tcx, span, None, span, container_id) + } + ty::ImplContainer => { + if segments.len() == 1 { + // `::assoc` will end up here, and so + // can `T::assoc`. It this came from an + // inherent impl, we need to record the + // `T` for posterity (see `UserSelfTy` for + // details). + let self_ty = self_ty.expect("UFCS sugared assoc missing Self"); + user_self_ty = Some(UserSelfTy { impl_def_id: container_id, self_ty }); + } + } + } + } + _ => {} + } + + // Now that we have categorized what space the parameters for each + // segment belong to, let's sort out the parameters that the user + // provided (if any) into their appropriate spaces. We'll also report + // errors if type parameters are provided in an inappropriate place. + + let generic_segs: FxHashSet<_> = path_segs.iter().map(|PathSeg(_, index)| index).collect(); + let generics_has_err = >::prohibit_generics( + self, + segments.iter().enumerate().filter_map(|(index, seg)| { + if !generic_segs.contains(&index) || is_alias_variant_ctor { + Some(seg) + } else { + None + } + }), + |_| {}, + ); + + if let Res::Local(hid) = res { + let ty = self.local_ty(span, hid).decl_ty; + let ty = self.normalize_associated_types_in(span, ty); + self.write_ty(hir_id, ty); + return (ty, res); + } + + if generics_has_err { + // Don't try to infer type parameters when prohibited generic arguments were given. + user_self_ty = None; + } + + // Now we have to compare the types that the user *actually* + // provided against the types that were *expected*. If the user + // did not provide any types, then we want to substitute inference + // variables. If the user provided some types, we may still need + // to add defaults. If the user provided *too many* types, that's + // a problem. + + let mut infer_args_for_err = FxHashSet::default(); + + let mut explicit_late_bound = ExplicitLateBound::No; + for &PathSeg(def_id, index) in &path_segs { + let seg = &segments[index]; + let generics = tcx.generics_of(def_id); + + // Argument-position `impl Trait` is treated as a normal generic + // parameter internally, but we don't allow users to specify the + // parameter's value explicitly, so we have to do some error- + // checking here. 
+ let arg_count = >::check_generic_arg_count_for_call( + tcx, + span, + def_id, + &generics, + seg, + IsMethodCall::No, + ); + + if let ExplicitLateBound::Yes = arg_count.explicit_late_bound { + explicit_late_bound = ExplicitLateBound::Yes; + } + + if let Err(GenericArgCountMismatch { reported: Some(_), .. }) = arg_count.correct { + infer_args_for_err.insert(index); + self.set_tainted_by_errors(); // See issue #53251. + } + } + + let has_self = path_segs + .last() + .map(|PathSeg(def_id, _)| tcx.generics_of(*def_id).has_self) + .unwrap_or(false); + + let (res, self_ctor_substs) = if let Res::SelfCtor(impl_def_id) = res { + let ty = self.normalize_ty(span, tcx.at(span).type_of(impl_def_id)); + match *ty.kind() { + ty::Adt(adt_def, substs) if adt_def.has_ctor() => { + let variant = adt_def.non_enum_variant(); + let ctor_def_id = variant.ctor_def_id.unwrap(); + ( + Res::Def(DefKind::Ctor(CtorOf::Struct, variant.ctor_kind), ctor_def_id), + Some(substs), + ) + } + _ => { + let mut err = tcx.sess.struct_span_err( + span, + "the `Self` constructor can only be used with tuple or unit structs", + ); + if let Some(adt_def) = ty.ty_adt_def() { + match adt_def.adt_kind() { + AdtKind::Enum => { + err.help("did you mean to use one of the enum's variants?"); + } + AdtKind::Struct | AdtKind::Union => { + err.span_suggestion( + span, + "use curly brackets", + "Self { /* fields */ }", + Applicability::HasPlaceholders, + ); + } + } + } + err.emit(); + + return (tcx.ty_error(), res); + } + } + } else { + (res, None) + }; + let def_id = res.def_id(); + + // The things we are substituting into the type should not contain + // escaping late-bound regions, and nor should the base type scheme. + let ty = tcx.type_of(def_id); + + let arg_count = GenericArgCountResult { + explicit_late_bound, + correct: if infer_args_for_err.is_empty() { + Ok(()) + } else { + Err(GenericArgCountMismatch::default()) + }, + }; + + struct CreateCtorSubstsContext<'a, 'tcx> { + fcx: &'a FnCtxt<'a, 'tcx>, + span: Span, + path_segs: &'a [PathSeg], + infer_args_for_err: &'a FxHashSet, + segments: &'a [hir::PathSegment<'a>], + } + impl<'tcx, 'a> CreateSubstsForGenericArgsCtxt<'a, 'tcx> for CreateCtorSubstsContext<'a, 'tcx> { + fn args_for_def_id( + &mut self, + def_id: DefId, + ) -> (Option<&'a hir::GenericArgs<'a>>, bool) { + if let Some(&PathSeg(_, index)) = + self.path_segs.iter().find(|&PathSeg(did, _)| *did == def_id) + { + // If we've encountered an `impl Trait`-related error, we're just + // going to infer the arguments for better error messages. + if !self.infer_args_for_err.contains(&index) { + // Check whether the user has provided generic arguments. + if let Some(ref data) = self.segments[index].args { + return (Some(data), self.segments[index].infer_args); + } + } + return (None, self.segments[index].infer_args); + } + + (None, true) + } + + fn provided_kind( + &mut self, + param: &ty::GenericParamDef, + arg: &GenericArg<'_>, + ) -> subst::GenericArg<'tcx> { + match (¶m.kind, arg) { + (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => { + >::ast_region_to_region(self.fcx, lt, Some(param)).into() + } + (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => { + self.fcx.to_ty(ty).into() + } + (GenericParamDefKind::Const { .. }, GenericArg::Const(ct)) => { + self.fcx.const_arg_to_const(&ct.value, param.def_id).into() + } + (GenericParamDefKind::Type { .. }, GenericArg::Infer(inf)) => { + self.fcx.ty_infer(Some(param), inf.span).into() + } + (GenericParamDefKind::Const { .. 
}, GenericArg::Infer(inf)) => { + let tcx = self.fcx.tcx(); + self.fcx.ct_infer(tcx.type_of(param.def_id), Some(param), inf.span).into() + } + _ => unreachable!(), + } + } + + fn inferred_kind( + &mut self, + substs: Option<&[subst::GenericArg<'tcx>]>, + param: &ty::GenericParamDef, + infer_args: bool, + ) -> subst::GenericArg<'tcx> { + let tcx = self.fcx.tcx(); + match param.kind { + GenericParamDefKind::Lifetime => { + self.fcx.re_infer(Some(param), self.span).unwrap().into() + } + GenericParamDefKind::Type { has_default, .. } => { + if !infer_args && has_default { + // If we have a default, then we it doesn't matter that we're not + // inferring the type arguments: we provide the default where any + // is missing. + let default = tcx.bound_type_of(param.def_id); + self.fcx + .normalize_ty(self.span, default.subst(tcx, substs.unwrap())) + .into() + } else { + // If no type arguments were provided, we have to infer them. + // This case also occurs as a result of some malformed input, e.g. + // a lifetime argument being given instead of a type parameter. + // Using inference instead of `Error` gives better error messages. + self.fcx.var_for_def(self.span, param) + } + } + GenericParamDefKind::Const { has_default } => { + if !infer_args && has_default { + tcx.bound_const_param_default(param.def_id) + .subst(tcx, substs.unwrap()) + .into() + } else { + self.fcx.var_for_def(self.span, param) + } + } + } + } + } + + let substs = self_ctor_substs.unwrap_or_else(|| { + >::create_substs_for_generic_args( + tcx, + def_id, + &[], + has_self, + self_ty, + &arg_count, + &mut CreateCtorSubstsContext { + fcx: self, + span, + path_segs: &path_segs, + infer_args_for_err: &infer_args_for_err, + segments, + }, + ) + }); + assert!(!substs.has_escaping_bound_vars()); + assert!(!ty.has_escaping_bound_vars()); + + // First, store the "user substs" for later. + self.write_user_type_annotation_from_substs(hir_id, def_id, substs, user_self_ty); + + self.add_required_obligations(span, def_id, &substs); + + // Substitute the values for the type parameters into the type of + // the referenced item. + let ty_substituted = self.instantiate_type_scheme(span, &substs, ty); + + if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty { + // In the case of `Foo::method` and `>::method`, if `method` + // is inherent, there is no `Self` parameter; instead, the impl needs + // type parameters, which we can infer by unifying the provided `Self` + // with the substituted impl type. + // This also occurs for an enum variant on a type alias. + let ty = tcx.type_of(impl_def_id); + + let impl_ty = self.instantiate_type_scheme(span, &substs, ty); + match self.at(&self.misc(span), self.param_env).eq(impl_ty, self_ty) { + Ok(ok) => self.register_infer_ok_obligations(ok), + Err(_) => { + self.tcx.sess.delay_span_bug( + span, + &format!( + "instantiate_value_path: (UFCS) {:?} was a subtype of {:?} but now is not?", + self_ty, + impl_ty, + ), + ); + } + } + } + + debug!("instantiate_value_path: type of {:?} is {:?}", hir_id, ty_substituted); + self.write_substs(hir_id, substs); + + (ty_substituted, res) + } + + /// Add all the obligations that are required, substituting and normalized appropriately. 
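The "required obligations" mentioned in the doc comment above are, at the language level, the bounds of the referenced item with the substitution applied. A minimal standalone example (nothing rustc-specific; `show` is an invented function): calling a bounded generic function obliges the caller's concrete type to satisfy the bound.

use std::fmt::Display;

// The bound `T: Display` becomes an obligation at every use of `show`,
// with `T` replaced by the concrete argument type.
fn show<T: Display>(value: T) -> String {
    format!("{value}")
}

fn main() {
    // Here the substituted obligation is `i32: Display`, which holds.
    // A call like `show(vec![1])` would be rejected, since
    // `Vec<i32>: Display` cannot be proven.
    println!("{}", show(42));
}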
+ pub(crate) fn add_required_obligations( + &self, + span: Span, + def_id: DefId, + substs: &SubstsRef<'tcx>, + ) { + self.add_required_obligations_with_code( + span, + def_id, + substs, + traits::ItemObligation(def_id), + ) + } + + #[tracing::instrument(level = "debug", skip(self, span, def_id, substs))] + fn add_required_obligations_with_code( + &self, + span: Span, + def_id: DefId, + substs: &SubstsRef<'tcx>, + code: ObligationCauseCode<'tcx>, + ) { + let (bounds, _) = self.instantiate_bounds(span, def_id, &substs); + + for obligation in traits::predicates_for_generics( + traits::ObligationCause::new(span, self.body_id, code), + self.param_env, + bounds, + ) { + self.register_predicate(obligation); + } + } + + /// Resolves `typ` by a single level if `typ` is a type variable. + /// If no resolution is possible, then an error is reported. + /// Numeric inference variables may be left unresolved. + pub fn structurally_resolved_type(&self, sp: Span, ty: Ty<'tcx>) -> Ty<'tcx> { + let ty = self.resolve_vars_with_obligations(ty); + if !ty.is_ty_var() { + ty + } else { + if !self.is_tainted_by_errors() { + self.emit_inference_failure_err((**self).body_id, sp, ty.into(), E0282, true) + .emit(); + } + let err = self.tcx.ty_error(); + self.demand_suptype(sp, err, ty); + err + } + } + + pub(in super::super) fn with_breakable_ctxt R, R>( + &self, + id: hir::HirId, + ctxt: BreakableCtxt<'tcx>, + f: F, + ) -> (BreakableCtxt<'tcx>, R) { + let index; + { + let mut enclosing_breakables = self.enclosing_breakables.borrow_mut(); + index = enclosing_breakables.stack.len(); + enclosing_breakables.by_id.insert(id, index); + enclosing_breakables.stack.push(ctxt); + } + let result = f(); + let ctxt = { + let mut enclosing_breakables = self.enclosing_breakables.borrow_mut(); + debug_assert!(enclosing_breakables.stack.len() == index + 1); + enclosing_breakables.by_id.remove(&id).expect("missing breakable context"); + enclosing_breakables.stack.pop().expect("missing breakable context") + }; + (ctxt, result) + } + + /// Instantiate a QueryResponse in a probe context, without a + /// good ObligationCause. + pub(in super::super) fn probe_instantiate_query_response( + &self, + span: Span, + original_values: &OriginalQueryValues<'tcx>, + query_result: &Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>, + ) -> InferResult<'tcx, Ty<'tcx>> { + self.instantiate_query_response_and_region_obligations( + &traits::ObligationCause::misc(span, self.body_id), + self.param_env, + original_values, + query_result, + ) + } + + /// Returns `true` if an expression is contained inside the LHS of an assignment expression. + pub(in super::super) fn expr_in_place(&self, mut expr_id: hir::HirId) -> bool { + let mut contained_in_place = false; + + while let hir::Node::Expr(parent_expr) = + self.tcx.hir().get(self.tcx.hir().get_parent_node(expr_id)) + { + match &parent_expr.kind { + hir::ExprKind::Assign(lhs, ..) | hir::ExprKind::AssignOp(_, lhs, ..) => { + if lhs.hir_id == expr_id { + contained_in_place = true; + break; + } + } + _ => (), + } + expr_id = parent_expr.hir_id; + } + + contained_in_place + } +} diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/arg_matrix.rs b/compiler/rustc_typeck/src/check/fn_ctxt/arg_matrix.rs new file mode 100644 index 000000000..7602f2550 --- /dev/null +++ b/compiler/rustc_typeck/src/check/fn_ctxt/arg_matrix.rs @@ -0,0 +1,376 @@ +use std::cmp; + +use rustc_index::vec::IndexVec; +use rustc_middle::ty::error::TypeError; + +rustc_index::newtype_index! 
{ + pub(crate) struct ExpectedIdx { + DEBUG_FORMAT = "ExpectedIdx({})", + } +} + +rustc_index::newtype_index! { + pub(crate) struct ProvidedIdx { + DEBUG_FORMAT = "ProvidedIdx({})", + } +} + +impl ExpectedIdx { + pub fn to_provided_idx(self) -> ProvidedIdx { + ProvidedIdx::from_usize(self.as_usize()) + } +} + +// An issue that might be found in the compatibility matrix +#[derive(Debug)] +enum Issue { + /// The given argument is of an invalid type for the input + Invalid(usize), + /// There is a missing input + Missing(usize), + /// There's a superfluous argument + Extra(usize), + /// Two arguments should be swapped + Swap(usize, usize), + /// Several arguments should be reordered + Permutation(Vec<Option<usize>>), +} + +#[derive(Clone, Debug)] +pub(crate) enum Compatibility<'tcx> { + Compatible, + Incompatible(Option<TypeError<'tcx>>), +} + +/// Similar to `Issue`, but contains some extra information +#[derive(Debug)] +pub(crate) enum Error<'tcx> { + /// The provided argument is of an invalid type for the expected input + Invalid(ProvidedIdx, ExpectedIdx, Compatibility<'tcx>), + /// There is a missing input + Missing(ExpectedIdx), + /// There's a superfluous argument + Extra(ProvidedIdx), + /// Two arguments should be swapped + Swap(ProvidedIdx, ProvidedIdx, ExpectedIdx, ExpectedIdx), + /// Several arguments should be reordered + Permutation(Vec<(ExpectedIdx, ProvidedIdx)>), +} + +pub(crate) struct ArgMatrix<'tcx> { + /// Maps the indices in the `compatibility_matrix` rows to the indices of + /// the *user provided* inputs + provided_indices: Vec<ProvidedIdx>, + /// Maps the indices in the `compatibility_matrix` columns to the indices + /// of the *expected* args + expected_indices: Vec<ExpectedIdx>, + /// The first dimension (rows) are the remaining user provided inputs to + /// match and the second dimension (cols) are the remaining expected args + /// to match + compatibility_matrix: Vec<Vec<Compatibility<'tcx>>>, +} + +impl<'tcx> ArgMatrix<'tcx> { + pub(crate) fn new<F: FnMut(ProvidedIdx, ExpectedIdx) -> Compatibility<'tcx>>( + provided_count: usize, + expected_input_count: usize, + mut is_compatible: F, + ) -> Self { + let compatibility_matrix = (0..provided_count) + .map(|i| { + (0..expected_input_count) + .map(|j| is_compatible(ProvidedIdx::from_usize(i), ExpectedIdx::from_usize(j))) + .collect() + }) + .collect(); + ArgMatrix { + provided_indices: (0..provided_count).map(ProvidedIdx::from_usize).collect(), + expected_indices: (0..expected_input_count).map(ExpectedIdx::from_usize).collect(), + compatibility_matrix, + } + } + + /// Remove a given input from consideration + fn eliminate_provided(&mut self, idx: usize) { + self.provided_indices.remove(idx); + self.compatibility_matrix.remove(idx); + } + + /// Remove a given argument from consideration + fn eliminate_expected(&mut self, idx: usize) { + self.expected_indices.remove(idx); + for row in &mut self.compatibility_matrix { + row.remove(idx); + } + } + + /// "satisfy" an input with a given arg, removing both from consideration + fn satisfy_input(&mut self, provided_idx: usize, expected_idx: usize) { + self.eliminate_provided(provided_idx); + self.eliminate_expected(expected_idx); + } + + // Returns a `Vec` of (user input, expected arg) of matched arguments. These + // are inputs on the remaining diagonal that match.
+ fn eliminate_satisfied(&mut self) -> Vec<(ProvidedIdx, ExpectedIdx)> { + let num_args = cmp::min(self.provided_indices.len(), self.expected_indices.len()); + let mut eliminated = vec![]; + for i in (0..num_args).rev() { + if matches!(self.compatibility_matrix[i][i], Compatibility::Compatible) { + eliminated.push((self.provided_indices[i], self.expected_indices[i])); + self.satisfy_input(i, i); + } + } + eliminated + } + + // Find some issue in the compatibility matrix + fn find_issue(&self) -> Option { + let mat = &self.compatibility_matrix; + let ai = &self.expected_indices; + let ii = &self.provided_indices; + + for i in 0..cmp::max(ai.len(), ii.len()) { + // If we eliminate the last row, any left-over inputs are considered missing + if i >= mat.len() { + return Some(Issue::Missing(i)); + } + // If we eliminate the last column, any left-over arguments are extra + if mat[i].len() == 0 { + return Some(Issue::Extra(i)); + } + + // Make sure we don't pass the bounds of our matrix + let is_arg = i < ai.len(); + let is_input = i < ii.len(); + if is_arg && is_input && matches!(mat[i][i], Compatibility::Compatible) { + // This is a satisfied input, so move along + continue; + } + + let mut useless = true; + let mut unsatisfiable = true; + if is_arg { + for j in 0..ii.len() { + // If we find at least one input this argument could satisfy + // this argument isn't unsatisfiable + if matches!(mat[j][i], Compatibility::Compatible) { + unsatisfiable = false; + break; + } + } + } + if is_input { + for j in 0..ai.len() { + // If we find at least one argument that could satisfy this input + // this argument isn't useless + if matches!(mat[i][j], Compatibility::Compatible) { + useless = false; + break; + } + } + } + + match (is_input, is_arg, useless, unsatisfiable) { + // If an argument is unsatisfied, and the input in its position is useless + // then the most likely explanation is that we just got the types wrong + (true, true, true, true) => return Some(Issue::Invalid(i)), + // Otherwise, if an input is useless, then indicate that this is an extra argument + (true, _, true, _) => return Some(Issue::Extra(i)), + // Otherwise, if an argument is unsatisfiable, indicate that it's missing + (_, true, _, true) => return Some(Issue::Missing(i)), + (true, true, _, _) => { + // The argument isn't useless, and the input isn't unsatisfied, + // so look for a parameter we might swap it with + // We look for swaps explicitly, instead of just falling back on permutations + // so that cases like (A,B,C,D) given (B,A,D,C) show up as two swaps, + // instead of a large permutation of 4 elements. 
+ for j in 0..cmp::min(ai.len(), ii.len()) { + if i == j || matches!(mat[j][j], Compatibility::Compatible) { + continue; + } + if matches!(mat[i][j], Compatibility::Compatible) + && matches!(mat[j][i], Compatibility::Compatible) + { + return Some(Issue::Swap(i, j)); + } + } + } + _ => { + continue; + } + } + } + + // We didn't find any of the individual issues above, but + // there might be a larger permutation of parameters, so we now check for that + // by checking for cycles + // We use a double option at position i in this vec to represent: + // - None: We haven't computed anything about this argument yet + // - Some(None): This argument definitely doesn't participate in a cycle + // - Some(Some(x)): the i-th argument could permute to the x-th position + let mut permutation: Vec<Option<Option<usize>>> = vec![None; mat.len()]; + let mut permutation_found = false; + for i in 0..mat.len() { + if permutation[i].is_some() { + // We've already decided whether this argument is or is not in a loop + continue; + } + + let mut stack = vec![]; + let mut j = i; + let mut last = i; + let mut is_cycle = true; + loop { + stack.push(j); + // Look for params this one could slot into + let compat: Vec<_> = + mat[j] + .iter() + .enumerate() + .filter_map(|(i, c)| { + if matches!(c, Compatibility::Compatible) { Some(i) } else { None } + }) + .collect(); + if compat.len() != 1 { + // this could go into multiple slots, don't bother exploring both + is_cycle = false; + break; + } + j = compat[0]; + if stack.contains(&j) { + last = j; + break; + } + } + if stack.len() <= 2 { + // If we encounter a cycle of 1 or 2 elements, we'll let the + // "satisfy" and "swap" code above handle those + is_cycle = false; + } + // We've built up some chain, some of which might be a cycle + // ex: [1,2,3,4]; last = 2; j = 2; + // So, we want to mark 4, 3, and 2 as part of a permutation + permutation_found = is_cycle; + while let Some(x) = stack.pop() { + if is_cycle { + permutation[x] = Some(Some(j)); + j = x; + if j == last { + // From here on out, we're a tail leading into a cycle, + // not the cycle itself + is_cycle = false; + } + } else { + // Some(None) ensures we save time by skipping this argument again + permutation[x] = Some(None); + } + } + } + + if permutation_found { + // Map unwrap to remove the first layer of Some + let final_permutation: Vec<Option<usize>> = + permutation.into_iter().map(|x| x.unwrap()).collect(); + return Some(Issue::Permutation(final_permutation)); + } + return None; + } + + // Obviously, detecting exact user intention is impossible, so the goal here is to + // come up with as likely of a story as we can to be helpful. + // + // We'll iteratively remove "satisfied" input/argument pairs, + // then check for the cases above, until we've eliminated the entire grid + // + // We'll want to know which arguments and inputs these rows and columns correspond to + // even after we delete them.
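To make the elimination strategy described in the comment above concrete, here is a deliberately simplified, standalone sketch. It uses plain `bool`s and `Vec`s rather than the `Compatibility` matrix and `IndexVec` types in this file, and the matrix values are made up; it only shows the diagonal pass followed by swap detection.

fn main() {
    // compat[provided][expected]: can provided argument i satisfy expected input j?
    let mut compat = vec![
        vec![false, true, false],
        vec![true, false, false],
        vec![false, false, true],
    ];
    let mut provided: Vec<usize> = (0..3).collect();
    let mut expected: Vec<usize> = (0..3).collect();

    // First pass: remove entries that are already satisfied on the diagonal
    // (only index 2 here), shrinking both the rows and the columns.
    let mut i = 0;
    while i < provided.len().min(expected.len()) {
        if compat[i][i] {
            compat.remove(i);
            for row in &mut compat {
                row.remove(i);
            }
            provided.remove(i);
            expected.remove(i);
        } else {
            i += 1;
        }
    }

    // Second pass: in the remaining 2x2 grid, two off-diagonal entries that
    // are compatible with each other's slots are reported as a swap.
    if compat.len() == 2 && compat[0][1] && compat[1][0] {
        println!("swap provided arguments {} and {}", provided[0], provided[1]);
    }
}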
+ pub(crate) fn find_errors( + mut self, + ) -> (Vec<Error<'tcx>>, IndexVec<ExpectedIdx, Option<ProvidedIdx>>) { + let provided_arg_count = self.provided_indices.len(); + + let mut errors: Vec<Error<'tcx>> = vec![]; + // For each expected argument, the matched *actual* input + let mut matched_inputs: IndexVec<ExpectedIdx, Option<ProvidedIdx>> = + IndexVec::from_elem_n(None, self.expected_indices.len()); + + // Before we start looking for issues, eliminate any arguments that are already satisfied, + // so that an argument which is already spoken for by the input it's in doesn't + // spill over into another similarly typed input + // ex: + // fn some_func(_a: i32, _b: i32) {} + // some_func(1, ""); + // Without this elimination, the first argument causes the second argument + // to show up as both a missing input and extra argument, rather than + // just an invalid type. + for (provided, expected) in self.eliminate_satisfied() { + matched_inputs[expected] = Some(provided); + } + + while !self.provided_indices.is_empty() || !self.expected_indices.is_empty() { + match self.find_issue() { + Some(Issue::Invalid(idx)) => { + let compatibility = self.compatibility_matrix[idx][idx].clone(); + let input_idx = self.provided_indices[idx]; + let arg_idx = self.expected_indices[idx]; + self.satisfy_input(idx, idx); + errors.push(Error::Invalid(input_idx, arg_idx, compatibility)); + } + Some(Issue::Extra(idx)) => { + let input_idx = self.provided_indices[idx]; + self.eliminate_provided(idx); + errors.push(Error::Extra(input_idx)); + } + Some(Issue::Missing(idx)) => { + let arg_idx = self.expected_indices[idx]; + self.eliminate_expected(idx); + errors.push(Error::Missing(arg_idx)); + } + Some(Issue::Swap(idx, other)) => { + let input_idx = self.provided_indices[idx]; + let other_input_idx = self.provided_indices[other]; + let arg_idx = self.expected_indices[idx]; + let other_arg_idx = self.expected_indices[other]; + let (min, max) = (cmp::min(idx, other), cmp::max(idx, other)); + self.satisfy_input(min, max); + // Subtract 1 because we already removed the "min" row + self.satisfy_input(max - 1, min); + errors.push(Error::Swap(input_idx, other_input_idx, arg_idx, other_arg_idx)); + matched_inputs[other_arg_idx] = Some(input_idx); + matched_inputs[arg_idx] = Some(other_input_idx); + } + Some(Issue::Permutation(args)) => { + let mut idxs: Vec<usize> = args.iter().filter_map(|&a| a).collect(); + + let mut real_idxs: IndexVec<ProvidedIdx, Option<(ExpectedIdx, ProvidedIdx)>> = + IndexVec::from_elem_n(None, provided_arg_count); + for (src, dst) in + args.iter().enumerate().filter_map(|(src, dst)| dst.map(|dst| (src, dst))) + { + let src_input_idx = self.provided_indices[src]; + let dst_input_idx = self.provided_indices[dst]; + let dest_arg_idx = self.expected_indices[dst]; + real_idxs[src_input_idx] = Some((dest_arg_idx, dst_input_idx)); + matched_inputs[dest_arg_idx] = Some(src_input_idx); + } + idxs.sort(); + idxs.reverse(); + for i in idxs { + self.satisfy_input(i, i); + } + errors.push(Error::Permutation(real_idxs.into_iter().flatten().collect())); + } + None => { + // We didn't find any issues, so we need to push the algorithm forward + // First, eliminate any arguments that currently satisfy their inputs + for (inp, arg) in self.eliminate_satisfied() { + matched_inputs[arg] = Some(inp); + } + } + }; + } + + return (errors, matched_inputs); + } +} diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs new file mode 100644 index 000000000..660e7e4e3 --- /dev/null +++ b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs @@ -0,0 +1,1900 @@ +use crate::astconv::AstConv; +use
crate::check::coercion::CoerceMany; +use crate::check::fn_ctxt::arg_matrix::{ + ArgMatrix, Compatibility, Error, ExpectedIdx, ProvidedIdx, +}; +use crate::check::gather_locals::Declaration; +use crate::check::intrinsicck::InlineAsmCtxt; +use crate::check::method::MethodCallee; +use crate::check::Expectation::*; +use crate::check::TupleArgumentsFlag::*; +use crate::check::{ + potentially_plural_count, struct_span_err, BreakableCtxt, Diverges, Expectation, FnCtxt, + LocalTy, Needs, TupleArgumentsFlag, +}; +use crate::structured_errors::StructuredDiagnostic; + +use rustc_ast as ast; +use rustc_errors::{pluralize, Applicability, Diagnostic, DiagnosticId, MultiSpan}; +use rustc_hir as hir; +use rustc_hir::def::{CtorOf, DefKind, Res}; +use rustc_hir::def_id::DefId; +use rustc_hir::{ExprKind, Node, QPath}; +use rustc_index::vec::IndexVec; +use rustc_infer::infer::error_reporting::{FailureCode, ObligationCauseExt}; +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_infer::infer::InferOk; +use rustc_infer::infer::TypeTrace; +use rustc_middle::ty::adjustment::AllowTwoPhase; +use rustc_middle::ty::visit::TypeVisitable; +use rustc_middle::ty::{self, DefIdTree, IsSuggestable, Ty}; +use rustc_session::Session; +use rustc_span::symbol::Ident; +use rustc_span::{self, Span}; +use rustc_trait_selection::traits::{self, ObligationCauseCode, SelectionContext}; + +use std::iter; +use std::slice; + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + pub(in super::super) fn check_casts(&self) { + let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut(); + debug!("FnCtxt::check_casts: {} deferred checks", deferred_cast_checks.len()); + for cast in deferred_cast_checks.drain(..) { + cast.check(self); + } + } + + pub(in super::super) fn check_transmutes(&self) { + let mut deferred_transmute_checks = self.deferred_transmute_checks.borrow_mut(); + debug!("FnCtxt::check_transmutes: {} deferred checks", deferred_transmute_checks.len()); + for (from, to, span) in deferred_transmute_checks.drain(..) { + self.check_transmute(span, from, to); + } + } + + pub(in super::super) fn check_asms(&self) { + let mut deferred_asm_checks = self.deferred_asm_checks.borrow_mut(); + debug!("FnCtxt::check_asm: {} deferred checks", deferred_asm_checks.len()); + for (asm, hir_id) in deferred_asm_checks.drain(..) { + let enclosing_id = self.tcx.hir().enclosing_body_owner(hir_id); + InlineAsmCtxt::new_in_fn(self) + .check_asm(asm, self.tcx.hir().local_def_id_to_hir_id(enclosing_id)); + } + } + + pub(in super::super) fn check_method_argument_types( + &self, + sp: Span, + expr: &'tcx hir::Expr<'tcx>, + method: Result, ()>, + args_no_rcvr: &'tcx [hir::Expr<'tcx>], + tuple_arguments: TupleArgumentsFlag, + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + let has_error = match method { + Ok(method) => method.substs.references_error() || method.sig.references_error(), + Err(_) => true, + }; + if has_error { + let err_inputs = self.err_args(args_no_rcvr.len()); + + let err_inputs = match tuple_arguments { + DontTupleArguments => err_inputs, + TupleArguments => vec![self.tcx.intern_tup(&err_inputs)], + }; + + self.check_argument_types( + sp, + expr, + &err_inputs, + None, + args_no_rcvr, + false, + tuple_arguments, + method.ok().map(|method| method.def_id), + ); + return self.tcx.ty_error(); + } + + let method = method.unwrap(); + // HACK(eddyb) ignore self in the definition (see above). 
+ let expected_input_tys = self.expected_inputs_for_expected_output( + sp, + expected, + method.sig.output(), + &method.sig.inputs()[1..], + ); + self.check_argument_types( + sp, + expr, + &method.sig.inputs()[1..], + expected_input_tys, + args_no_rcvr, + method.sig.c_variadic, + tuple_arguments, + Some(method.def_id), + ); + method.sig.output() + } + + /// Generic function that factors out common logic from function calls, + /// method calls and overloaded operators. + pub(in super::super) fn check_argument_types( + &self, + // Span enclosing the call site + call_span: Span, + // Expression of the call site + call_expr: &'tcx hir::Expr<'tcx>, + // Types (as defined in the *signature* of the target function) + formal_input_tys: &[Ty<'tcx>], + // More specific expected types, after unifying with caller output types + expected_input_tys: Option<Vec<Ty<'tcx>>>, + // The expressions for each provided argument + provided_args: &'tcx [hir::Expr<'tcx>], + // Whether the function is variadic, for example when imported from C + c_variadic: bool, + // Whether the arguments have been bundled in a tuple (ex: closures) + tuple_arguments: TupleArgumentsFlag, + // The DefId for the function being called, for better error messages + fn_def_id: Option<DefId>, + ) { + let tcx = self.tcx; + + // Conceptually, we've got some number of expected inputs, and some number of provided arguments + // and we can form a grid of whether each argument could satisfy a given input: + // in1 | in2 | in3 | ... + // arg1 ? | | | + // arg2 | ? | | + // arg3 | | ? | + // ... + // Initially, we just check the diagonal, because in the case of correct code + // these are the only checks that matter + // However, in the unhappy path, we'll fill in this whole grid to attempt to provide + // better error messages about invalid method calls. + + // All the input types from the fn signature must outlive the call + // so as to validate implied bounds.
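For a sense of what "implied bounds" means here, consider this standalone example (plain Rust, not rustc internals; `first` is an invented function). The outlives relationship is never written in the signature, yet both sides of the call rely on it, which is roughly what the well-formedness obligations registered in the loop that follows are there to guarantee.

// `T: 'a` is not written anywhere; it is implied by the well-formedness of
// the reference types appearing in the signature.
fn first<'a, T>(xs: &'a [T]) -> Option<&'a T> {
    xs.first()
}

fn main() {
    let owned = vec![String::from("well-formed")];
    // At this call the input type `&[String]` must be well-formed for the
    // duration of the borrow, loosely mirroring the per-argument WF
    // obligations registered below.
    if let Some(s) = first(&owned) {
        println!("{s}");
    }
}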
+ for (&fn_input_ty, arg_expr) in iter::zip(formal_input_tys, provided_args) { + self.register_wf_obligation(fn_input_ty.into(), arg_expr.span, traits::MiscObligation); + } + + let mut err_code = "E0061"; + + // If the arguments should be wrapped in a tuple (ex: closures), unwrap them here + let (formal_input_tys, expected_input_tys) = if tuple_arguments == TupleArguments { + let tuple_type = self.structurally_resolved_type(call_span, formal_input_tys[0]); + match tuple_type.kind() { + // We expected a tuple and got a tuple + ty::Tuple(arg_types) => { + // Argument length differs + if arg_types.len() != provided_args.len() { + err_code = "E0057"; + } + let expected_input_tys = match expected_input_tys { + Some(expected_input_tys) => match expected_input_tys.get(0) { + Some(ty) => match ty.kind() { + ty::Tuple(tys) => Some(tys.iter().collect()), + _ => None, + }, + None => None, + }, + None => None, + }; + (arg_types.iter().collect(), expected_input_tys) + } + _ => { + // Otherwise, there's a mismatch, so clear out what we're expecting, and set + // our input types to err_args so we don't blow up the error messages + struct_span_err!( + tcx.sess, + call_span, + E0059, + "cannot use call notation; the first type parameter \ + for the function trait is neither a tuple nor unit" + ) + .emit(); + (self.err_args(provided_args.len()), None) + } + } + } else { + (formal_input_tys.to_vec(), expected_input_tys) + }; + + // If there are no external expectations at the call site, just use the types from the function defn + let expected_input_tys = if let Some(expected_input_tys) = expected_input_tys { + assert_eq!(expected_input_tys.len(), formal_input_tys.len()); + expected_input_tys + } else { + formal_input_tys.clone() + }; + + let minimum_input_count = expected_input_tys.len(); + let provided_arg_count = provided_args.len(); + + // We introduce a helper function to demand that a given argument satisfy a given input + // This is more complicated than just checking type equality, as arguments could be coerced + // This version writes those types back so further type checking uses the narrowed types + let demand_compatible = |idx| { + let formal_input_ty: Ty<'tcx> = formal_input_tys[idx]; + let expected_input_ty: Ty<'tcx> = expected_input_tys[idx]; + let provided_arg = &provided_args[idx]; + + debug!("checking argument {}: {:?} = {:?}", idx, provided_arg, formal_input_ty); + + // We're on the happy path here, so we'll do a more involved check and write back types + // To check compatibility, we'll do 3 things: + // 1. Unify the provided argument with the expected type + let expectation = Expectation::rvalue_hint(self, expected_input_ty); + + let checked_ty = self.check_expr_with_expectation(provided_arg, expectation); + + // 2. Coerce to the most detailed type that could be coerced + // to, which is `expected_ty` if `rvalue_hint` returns an + // `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise. + let coerced_ty = expectation.only_has_type(self).unwrap_or(formal_input_ty); + + // Cause selection errors caused by resolving a single argument to point at the + // argument and not the call. This lets us customize the span pointed to in the + // fulfillment error to be more accurate. 
+ let coerced_ty = + self.resolve_vars_with_obligations_and_mutate_fulfillment(coerced_ty, |errors| { + self.point_at_type_arg_instead_of_call_if_possible(errors, call_expr); + self.point_at_arg_instead_of_call_if_possible( + errors, + call_expr, + call_span, + provided_args, + &expected_input_tys, + ); + }); + + let coerce_error = self + .try_coerce(provided_arg, checked_ty, coerced_ty, AllowTwoPhase::Yes, None) + .err(); + + if coerce_error.is_some() { + return Compatibility::Incompatible(coerce_error); + } + + // 3. Check if the formal type is a supertype of the checked one + // and register any such obligations for future type checks + let supertype_error = self + .at(&self.misc(provided_arg.span), self.param_env) + .sup(formal_input_ty, coerced_ty); + let subtyping_error = match supertype_error { + Ok(InferOk { obligations, value: () }) => { + self.register_predicates(obligations); + None + } + Err(err) => Some(err), + }; + + // If neither check failed, the types are compatible + match subtyping_error { + None => Compatibility::Compatible, + Some(_) => Compatibility::Incompatible(subtyping_error), + } + }; + + // To start, we only care "along the diagonal", where we expect every + // provided arg to be in the right spot + let mut compatibility_diagonal = + vec![Compatibility::Incompatible(None); provided_args.len()]; + + // Keep track of whether we *could possibly* be satisfied, i.e. whether we're on the happy path + // if the wrong number of arguments were supplied, we CAN'T be satisfied, + // and if we're c_variadic, the supplied arguments must be >= the minimum count from the function + // otherwise, they need to be identical, because rust doesn't currently support variadic functions + let mut call_appears_satisfied = if c_variadic { + provided_arg_count >= minimum_input_count + } else { + provided_arg_count == minimum_input_count + }; + + // Check the arguments. + // We do this in a pretty awful way: first we type-check any arguments + // that are not closures, then we type-check the closures. This is so + // that we have more information about the types of arguments when we + // type-check the functions. This isn't really the right way to do this. + for check_closures in [false, true] { + // More awful hacks: before we check argument types, try to do + // an "opportunistic" trait resolution of any trait bounds on + // the call. This helps coercions. + if check_closures { + self.select_obligations_where_possible(false, |errors| { + self.point_at_type_arg_instead_of_call_if_possible(errors, call_expr); + self.point_at_arg_instead_of_call_if_possible( + errors, + call_expr, + call_span, + &provided_args, + &expected_input_tys, + ); + }) + } + + // Check each argument, to satisfy the input it was provided for + // Visually, we're traveling down the diagonal of the compatibility matrix + for (idx, arg) in provided_args.iter().enumerate() { + // Warn only for the first loop (the "no closures" one). + // Closure arguments themselves can't be diverging, but + // a previous argument can, e.g., `foo(panic!(), || {})`. + if !check_closures { + self.warn_if_unreachable(arg.hir_id, arg.span, "expression"); + } + + // For C-variadic functions, we don't have a declared type for all of + // the arguments hence we only do our usual type checking with + // the arguments who's types we do know. However, we *can* check + // for unreachable expressions (see above). 
+ // FIXME: unreachable warning currently isn't emitted + if idx >= minimum_input_count { + continue; + } + + let is_closure = matches!(arg.kind, ExprKind::Closure { .. }); + if is_closure != check_closures { + continue; + } + + let compatible = demand_compatible(idx); + let is_compatible = matches!(compatible, Compatibility::Compatible); + compatibility_diagonal[idx] = compatible; + + if !is_compatible { + call_appears_satisfied = false; + } + } + } + + if c_variadic && provided_arg_count < minimum_input_count { + err_code = "E0060"; + } + + for arg in provided_args.iter().skip(minimum_input_count) { + // Make sure we've checked this expr at least once. + let arg_ty = self.check_expr(&arg); + + // If the function is c-style variadic, we skipped a bunch of arguments + // so we need to check those, and write out the types + // Ideally this would be folded into the above, for uniform style + // but c-variadic is already a corner case + if c_variadic { + fn variadic_error<'tcx>( + sess: &'tcx Session, + span: Span, + ty: Ty<'tcx>, + cast_ty: &str, + ) { + use crate::structured_errors::MissingCastForVariadicArg; + + MissingCastForVariadicArg { sess, span, ty, cast_ty }.diagnostic().emit(); + } + + // There are a few types which get autopromoted when passed via varargs + // in C but we just error out instead and require explicit casts. + let arg_ty = self.structurally_resolved_type(arg.span, arg_ty); + match arg_ty.kind() { + ty::Float(ty::FloatTy::F32) => { + variadic_error(tcx.sess, arg.span, arg_ty, "c_double"); + } + ty::Int(ty::IntTy::I8 | ty::IntTy::I16) | ty::Bool => { + variadic_error(tcx.sess, arg.span, arg_ty, "c_int"); + } + ty::Uint(ty::UintTy::U8 | ty::UintTy::U16) => { + variadic_error(tcx.sess, arg.span, arg_ty, "c_uint"); + } + ty::FnDef(..) => { + let ptr_ty = self.tcx.mk_fn_ptr(arg_ty.fn_sig(self.tcx)); + let ptr_ty = self.resolve_vars_if_possible(ptr_ty); + variadic_error(tcx.sess, arg.span, arg_ty, &ptr_ty.to_string()); + } + _ => {} + } + } + } + + if !call_appears_satisfied { + let compatibility_diagonal = IndexVec::from_raw(compatibility_diagonal); + let provided_args = IndexVec::from_iter(provided_args.iter().take(if c_variadic { + minimum_input_count + } else { + provided_arg_count + })); + debug_assert_eq!( + formal_input_tys.len(), + expected_input_tys.len(), + "expected formal_input_tys to be the same size as expected_input_tys" + ); + let formal_and_expected_inputs = IndexVec::from_iter( + formal_input_tys + .iter() + .copied() + .zip(expected_input_tys.iter().copied()) + .map(|vars| self.resolve_vars_if_possible(vars)), + ); + + self.report_arg_errors( + compatibility_diagonal, + formal_and_expected_inputs, + provided_args, + c_variadic, + err_code, + fn_def_id, + call_span, + call_expr, + ); + } + } + + fn report_arg_errors( + &self, + compatibility_diagonal: IndexVec<ProvidedIdx, Compatibility<'tcx>>, + formal_and_expected_inputs: IndexVec<ExpectedIdx, (Ty<'tcx>, Ty<'tcx>)>, + provided_args: IndexVec<ProvidedIdx, &'tcx hir::Expr<'tcx>>, + c_variadic: bool, + err_code: &str, + fn_def_id: Option<DefId>, + call_span: Span, + call_expr: &hir::Expr<'tcx>, + ) { + // Next, let's construct the error + let (error_span, full_call_span, ctor_of) = match &call_expr.kind { + hir::ExprKind::Call( + hir::Expr { hir_id, span, kind: hir::ExprKind::Path(qpath), .. }, + _, + ) => { + if let Res::Def(DefKind::Ctor(of, _), _) = + self.typeck_results.borrow().qpath_res(qpath, *hir_id) + { + (call_span, *span, Some(of)) + } else { + (call_span, *span, None) + } + } + hir::ExprKind::Call(hir::Expr { span, ..
}, _) => (call_span, *span, None), + hir::ExprKind::MethodCall(path_segment, _, span) => { + let ident_span = path_segment.ident.span; + let ident_span = if let Some(args) = path_segment.args { + ident_span.with_hi(args.span_ext.hi()) + } else { + ident_span + }; + ( + *span, ident_span, None, // methods are never ctors + ) + } + k => span_bug!(call_span, "checking argument types on a non-call: `{:?}`", k), + }; + let args_span = error_span.trim_start(full_call_span).unwrap_or(error_span); + let call_name = match ctor_of { + Some(CtorOf::Struct) => "struct", + Some(CtorOf::Variant) => "enum variant", + None => "function", + }; + + // Don't print if it has error types or is just plain `_` + fn has_error_or_infer<'tcx>(tys: impl IntoIterator>) -> bool { + tys.into_iter().any(|ty| ty.references_error() || ty.is_ty_var()) + } + + self.set_tainted_by_errors(); + let tcx = self.tcx; + + // Get the argument span in the context of the call span so that + // suggestions and labels are (more) correct when an arg is a + // macro invocation. + let normalize_span = |span: Span| -> Span { + let normalized_span = span.find_ancestor_inside(error_span).unwrap_or(span); + // Sometimes macros mess up the spans, so do not normalize the + // arg span to equal the error span, because that's less useful + // than pointing out the arg expr in the wrong context. + if normalized_span.source_equal(error_span) { span } else { normalized_span } + }; + + // Precompute the provided types and spans, since that's all we typically need for below + let provided_arg_tys: IndexVec, Span)> = provided_args + .iter() + .map(|expr| { + let ty = self + .typeck_results + .borrow() + .expr_ty_adjusted_opt(*expr) + .unwrap_or_else(|| tcx.ty_error()); + (self.resolve_vars_if_possible(ty), normalize_span(expr.span)) + }) + .collect(); + let callee_expr = match &call_expr.peel_blocks().kind { + hir::ExprKind::Call(callee, _) => Some(*callee), + hir::ExprKind::MethodCall(_, callee, _) => { + if let Some((DefKind::AssocFn, def_id)) = + self.typeck_results.borrow().type_dependent_def(call_expr.hir_id) + && let Some(assoc) = tcx.opt_associated_item(def_id) + && assoc.fn_has_self_parameter + { + Some(&callee[0]) + } else { + None + } + } + _ => None, + }; + let callee_ty = callee_expr + .and_then(|callee_expr| self.typeck_results.borrow().expr_ty_adjusted_opt(callee_expr)); + + // A "softer" version of the `demand_compatible`, which checks types without persisting them, + // and treats error types differently + // This will allow us to "probe" for other argument orders that would likely have been correct + let check_compatible = |provided_idx: ProvidedIdx, expected_idx: ExpectedIdx| { + if provided_idx.as_usize() == expected_idx.as_usize() { + return compatibility_diagonal[provided_idx].clone(); + } + + let (formal_input_ty, expected_input_ty) = formal_and_expected_inputs[expected_idx]; + // If either is an error type, we defy the usual convention and consider them to *not* be + // coercible. This prevents our error message heuristic from trying to pass errors into + // every argument. 
+ if (formal_input_ty, expected_input_ty).references_error() { + return Compatibility::Incompatible(None); + } + + let (arg_ty, arg_span) = provided_arg_tys[provided_idx]; + + let expectation = Expectation::rvalue_hint(self, expected_input_ty); + let coerced_ty = expectation.only_has_type(self).unwrap_or(formal_input_ty); + let can_coerce = self.can_coerce(arg_ty, coerced_ty); + if !can_coerce { + return Compatibility::Incompatible(None); + } + + // Using probe here, since we don't want this subtyping to affect inference. + let subtyping_error = self.probe(|_| { + self.at(&self.misc(arg_span), self.param_env).sup(formal_input_ty, coerced_ty).err() + }); + + // Same as above: if either the coerce type or the checked type is an error type, + // consider them *not* compatible. + let references_error = (coerced_ty, arg_ty).references_error(); + match (references_error, subtyping_error) { + (false, None) => Compatibility::Compatible, + (_, subtyping_error) => Compatibility::Incompatible(subtyping_error), + } + }; + + // The algorithm here is inspired by levenshtein distance and longest common subsequence. + // We'll try to detect 4 different types of mistakes: + // - An extra parameter has been provided that doesn't satisfy *any* of the other inputs + // - An input is missing, which isn't satisfied by *any* of the other arguments + // - Some number of arguments have been provided in the wrong order + // - A type is straight up invalid + + // First, let's find the errors + let (mut errors, matched_inputs) = + ArgMatrix::new(provided_args.len(), formal_and_expected_inputs.len(), check_compatible) + .find_errors(); + + // First, check if we just need to wrap some arguments in a tuple. + if let Some((mismatch_idx, terr)) = + compatibility_diagonal.iter().enumerate().find_map(|(i, c)| { + if let Compatibility::Incompatible(Some(terr)) = c { Some((i, terr)) } else { None } + }) + { + // Is the first bad expected argument a tuple? + // Do we have as many extra provided arguments as the tuple's length? + // If so, we might have just forgotten to wrap some args in a tuple. + if let Some(ty::Tuple(tys)) = + formal_and_expected_inputs.get(mismatch_idx.into()).map(|tys| tys.1.kind()) + // If the tuple is unit, we're not actually wrapping any arguments. + && !tys.is_empty() + && provided_arg_tys.len() == formal_and_expected_inputs.len() - 1 + tys.len() + { + // Wrap up the N provided arguments starting at this position in a tuple. + let provided_as_tuple = tcx.mk_tup( + provided_arg_tys.iter().map(|(ty, _)| *ty).skip(mismatch_idx).take(tys.len()), + ); + + let mut satisfied = true; + // Check if the newly wrapped tuple + rest of the arguments are compatible. + for ((_, expected_ty), provided_ty) in std::iter::zip( + formal_and_expected_inputs.iter().skip(mismatch_idx), + [provided_as_tuple].into_iter().chain( + provided_arg_tys.iter().map(|(ty, _)| *ty).skip(mismatch_idx + tys.len()), + ), + ) { + if !self.can_coerce(provided_ty, *expected_ty) { + satisfied = false; + break; + } + } + + // If they're compatible, suggest wrapping in an arg, and we're done! + // Take some care with spans, so we don't suggest wrapping a macro's + // innards in parenthesis, for example. + if satisfied + && let Some((_, lo)) = + provided_arg_tys.get(ProvidedIdx::from_usize(mismatch_idx)) + && let Some((_, hi)) = + provided_arg_tys.get(ProvidedIdx::from_usize(mismatch_idx + tys.len() - 1)) + { + let mut err; + if tys.len() == 1 { + // A tuple wrap suggestion actually occurs within, + // so don't do anything special here. 
+ err = self.report_and_explain_type_error( + TypeTrace::types( + &self.misc(*lo), + true, + formal_and_expected_inputs[mismatch_idx.into()].1, + provided_arg_tys[mismatch_idx.into()].0, + ), + terr, + ); + err.span_label( + full_call_span, + format!("arguments to this {} are incorrect", call_name), + ); + } else { + err = tcx.sess.struct_span_err_with_code( + full_call_span, + &format!( + "this {} takes {}{} but {} {} supplied", + call_name, + if c_variadic { "at least " } else { "" }, + potentially_plural_count( + formal_and_expected_inputs.len(), + "argument" + ), + potentially_plural_count(provided_args.len(), "argument"), + pluralize!("was", provided_args.len()) + ), + DiagnosticId::Error(err_code.to_owned()), + ); + err.multipart_suggestion_verbose( + "wrap these arguments in parentheses to construct a tuple", + vec![ + (lo.shrink_to_lo(), "(".to_string()), + (hi.shrink_to_hi(), ")".to_string()), + ], + Applicability::MachineApplicable, + ); + }; + self.label_fn_like(&mut err, fn_def_id, callee_ty); + err.emit(); + return; + } + } + } + + // Okay, so here's where it gets complicated in regards to what errors + // we emit and how. + // There are 3 different "types" of errors we might encounter. + // 1) Missing/extra/swapped arguments + // 2) Valid but incorrect arguments + // 3) Invalid arguments + // - Currently I think this only comes up with `CyclicTy` + // + // We first need to go through, remove those from (3) and emit those + // as their own error, particularly since they're error code and + // message is special. From what I can tell, we *must* emit these + // here (vs somewhere prior to this function) since the arguments + // become invalid *because* of how they get used in the function. + // It is what it is. + + if errors.is_empty() { + if cfg!(debug_assertions) { + span_bug!(error_span, "expected errors from argument matrix"); + } else { + tcx.sess + .struct_span_err( + error_span, + "argument type mismatch was detected, \ + but rustc had trouble determining where", + ) + .note( + "we would appreciate a bug report: \ + https://github.com/rust-lang/rust/issues/new", + ) + .emit(); + } + return; + } + + errors.drain_filter(|error| { + let Error::Invalid(provided_idx, expected_idx, Compatibility::Incompatible(error)) = error else { return false }; + let (provided_ty, provided_span) = provided_arg_tys[*provided_idx]; + let (expected_ty, _) = formal_and_expected_inputs[*expected_idx]; + let cause = &self.misc(provided_span); + let trace = TypeTrace::types(cause, true, expected_ty, provided_ty); + if let Some(e) = error { + if !matches!(trace.cause.as_failure_code(e), FailureCode::Error0308(_)) { + self.report_and_explain_type_error(trace, e).emit(); + return true; + } + } + false + }); + + // We're done if we found errors, but we already emitted them. + if errors.is_empty() { + return; + } + + // Okay, now that we've emitted the special errors separately, we + // are only left missing/extra/swapped and mismatched arguments, both + // can be collated pretty easily if needed. + + // Next special case: if there is only one "Incompatible" error, just emit that + if let [ + Error::Invalid(provided_idx, expected_idx, Compatibility::Incompatible(Some(err))), + ] = &errors[..] 
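+ // (With exactly one mismatched argument we can reuse the full type-mismatch machinery,
+ // including coercion suggestions, instead of the label-based summary built further below.)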
+ { + let (formal_ty, expected_ty) = formal_and_expected_inputs[*expected_idx]; + let (provided_ty, provided_arg_span) = provided_arg_tys[*provided_idx]; + let cause = &self.misc(provided_arg_span); + let trace = TypeTrace::types(cause, true, expected_ty, provided_ty); + let mut err = self.report_and_explain_type_error(trace, err); + self.emit_coerce_suggestions( + &mut err, + &provided_args[*provided_idx], + provided_ty, + Expectation::rvalue_hint(self, expected_ty) + .only_has_type(self) + .unwrap_or(formal_ty), + None, + None, + ); + err.span_label( + full_call_span, + format!("arguments to this {} are incorrect", call_name), + ); + // Call out where the function is defined + self.label_fn_like(&mut err, fn_def_id, callee_ty); + err.emit(); + return; + } + + let mut err = if formal_and_expected_inputs.len() == provided_args.len() { + struct_span_err!( + tcx.sess, + full_call_span, + E0308, + "arguments to this {} are incorrect", + call_name, + ) + } else { + tcx.sess.struct_span_err_with_code( + full_call_span, + &format!( + "this {} takes {}{} but {} {} supplied", + call_name, + if c_variadic { "at least " } else { "" }, + potentially_plural_count(formal_and_expected_inputs.len(), "argument"), + potentially_plural_count(provided_args.len(), "argument"), + pluralize!("was", provided_args.len()) + ), + DiagnosticId::Error(err_code.to_owned()), + ) + }; + + // As we encounter issues, keep track of what we want to provide for the suggestion + let mut labels = vec![]; + // If there is a single error, we give a specific suggestion; otherwise, we change to + // "did you mean" with the suggested function call + enum SuggestionText { + None, + Provide(bool), + Remove(bool), + Swap, + Reorder, + DidYouMean, + } + let mut suggestion_text = SuggestionText::None; + + let mut errors = errors.into_iter().peekable(); + while let Some(error) = errors.next() { + match error { + Error::Invalid(provided_idx, expected_idx, compatibility) => { + let (formal_ty, expected_ty) = formal_and_expected_inputs[expected_idx]; + let (provided_ty, provided_span) = provided_arg_tys[provided_idx]; + if let Compatibility::Incompatible(error) = &compatibility { + let cause = &self.misc(provided_span); + let trace = TypeTrace::types(cause, true, expected_ty, provided_ty); + if let Some(e) = error { + self.note_type_err( + &mut err, + &trace.cause, + None, + Some(trace.values), + e, + false, + true, + ); + } + } + + self.emit_coerce_suggestions( + &mut err, + &provided_args[provided_idx], + provided_ty, + Expectation::rvalue_hint(self, expected_ty) + .only_has_type(self) + .unwrap_or(formal_ty), + None, + None, + ); + } + Error::Extra(arg_idx) => { + let (provided_ty, provided_span) = provided_arg_tys[arg_idx]; + let provided_ty_name = if !has_error_or_infer([provided_ty]) { + // FIXME: not suggestable, use something else + format!(" of type `{}`", provided_ty) + } else { + "".to_string() + }; + labels + .push((provided_span, format!("argument{} unexpected", provided_ty_name))); + suggestion_text = match suggestion_text { + SuggestionText::None => SuggestionText::Remove(false), + SuggestionText::Remove(_) => SuggestionText::Remove(true), + _ => SuggestionText::DidYouMean, + }; + } + Error::Missing(expected_idx) => { + // If there are multiple missing arguments adjacent to each other, + // then we can provide a single error. 
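+ // For example, calling some `fn f(a: u8, b: u8, c: u8)` as `f()` should yield one
+ // "three arguments of type `u8`, `u8`, and `u8` are missing" label rather than three
+ // separate "an argument is missing" labels.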
+ + let mut missing_idxs = vec![expected_idx]; + while let Some(e) = errors.next_if(|e| { + matches!(e, Error::Missing(next_expected_idx) + if *next_expected_idx == *missing_idxs.last().unwrap() + 1) + }) { + match e { + Error::Missing(expected_idx) => missing_idxs.push(expected_idx), + _ => unreachable!(), + } + } + + // NOTE: Because we might be re-arranging arguments, might have extra + // arguments, etc. it's hard to *really* know where we should provide + // this error label, so as a heuristic, we point to the provided arg, or + // to the call if the missing inputs pass the provided args. + match &missing_idxs[..] { + &[expected_idx] => { + let (_, input_ty) = formal_and_expected_inputs[expected_idx]; + let span = if let Some((_, arg_span)) = + provided_arg_tys.get(expected_idx.to_provided_idx()) + { + *arg_span + } else { + args_span + }; + let rendered = if !has_error_or_infer([input_ty]) { + format!(" of type `{}`", input_ty) + } else { + "".to_string() + }; + labels.push((span, format!("an argument{} is missing", rendered))); + suggestion_text = match suggestion_text { + SuggestionText::None => SuggestionText::Provide(false), + SuggestionText::Provide(_) => SuggestionText::Provide(true), + _ => SuggestionText::DidYouMean, + }; + } + &[first_idx, second_idx] => { + let (_, first_expected_ty) = formal_and_expected_inputs[first_idx]; + let (_, second_expected_ty) = formal_and_expected_inputs[second_idx]; + let span = if let (Some((_, first_span)), Some((_, second_span))) = ( + provided_arg_tys.get(first_idx.to_provided_idx()), + provided_arg_tys.get(second_idx.to_provided_idx()), + ) { + first_span.to(*second_span) + } else { + args_span + }; + let rendered = + if !has_error_or_infer([first_expected_ty, second_expected_ty]) { + format!( + " of type `{}` and `{}`", + first_expected_ty, second_expected_ty + ) + } else { + "".to_string() + }; + labels.push((span, format!("two arguments{} are missing", rendered))); + suggestion_text = match suggestion_text { + SuggestionText::None | SuggestionText::Provide(_) => { + SuggestionText::Provide(true) + } + _ => SuggestionText::DidYouMean, + }; + } + &[first_idx, second_idx, third_idx] => { + let (_, first_expected_ty) = formal_and_expected_inputs[first_idx]; + let (_, second_expected_ty) = formal_and_expected_inputs[second_idx]; + let (_, third_expected_ty) = formal_and_expected_inputs[third_idx]; + let span = if let (Some((_, first_span)), Some((_, third_span))) = ( + provided_arg_tys.get(first_idx.to_provided_idx()), + provided_arg_tys.get(third_idx.to_provided_idx()), + ) { + first_span.to(*third_span) + } else { + args_span + }; + let rendered = if !has_error_or_infer([ + first_expected_ty, + second_expected_ty, + third_expected_ty, + ]) { + format!( + " of type `{}`, `{}`, and `{}`", + first_expected_ty, second_expected_ty, third_expected_ty + ) + } else { + "".to_string() + }; + labels.push((span, format!("three arguments{} are missing", rendered))); + suggestion_text = match suggestion_text { + SuggestionText::None | SuggestionText::Provide(_) => { + SuggestionText::Provide(true) + } + _ => SuggestionText::DidYouMean, + }; + } + missing_idxs => { + let first_idx = *missing_idxs.first().unwrap(); + let last_idx = *missing_idxs.last().unwrap(); + // NOTE: Because we might be re-arranging arguments, might have extra arguments, etc. 
+ // It's hard to *really* know where we should provide this error label, so this is a + // decent heuristic + let span = if let (Some((_, first_span)), Some((_, last_span))) = ( + provided_arg_tys.get(first_idx.to_provided_idx()), + provided_arg_tys.get(last_idx.to_provided_idx()), + ) { + first_span.to(*last_span) + } else { + args_span + }; + labels.push((span, format!("multiple arguments are missing"))); + suggestion_text = match suggestion_text { + SuggestionText::None | SuggestionText::Provide(_) => { + SuggestionText::Provide(true) + } + _ => SuggestionText::DidYouMean, + }; + } + } + } + Error::Swap( + first_provided_idx, + second_provided_idx, + first_expected_idx, + second_expected_idx, + ) => { + let (first_provided_ty, first_span) = provided_arg_tys[first_provided_idx]; + let (_, first_expected_ty) = formal_and_expected_inputs[first_expected_idx]; + let first_provided_ty_name = if !has_error_or_infer([first_provided_ty]) { + format!(", found `{}`", first_provided_ty) + } else { + String::new() + }; + labels.push(( + first_span, + format!("expected `{}`{}", first_expected_ty, first_provided_ty_name), + )); + + let (second_provided_ty, second_span) = provided_arg_tys[second_provided_idx]; + let (_, second_expected_ty) = formal_and_expected_inputs[second_expected_idx]; + let second_provided_ty_name = if !has_error_or_infer([second_provided_ty]) { + format!(", found `{}`", second_provided_ty) + } else { + String::new() + }; + labels.push(( + second_span, + format!("expected `{}`{}", second_expected_ty, second_provided_ty_name), + )); + + suggestion_text = match suggestion_text { + SuggestionText::None => SuggestionText::Swap, + _ => SuggestionText::DidYouMean, + }; + } + Error::Permutation(args) => { + for (dst_arg, dest_input) in args { + let (_, expected_ty) = formal_and_expected_inputs[dst_arg]; + let (provided_ty, provided_span) = provided_arg_tys[dest_input]; + let provided_ty_name = if !has_error_or_infer([provided_ty]) { + format!(", found `{}`", provided_ty) + } else { + String::new() + }; + labels.push(( + provided_span, + format!("expected `{}`{}", expected_ty, provided_ty_name), + )); + } + + suggestion_text = match suggestion_text { + SuggestionText::None => SuggestionText::Reorder, + _ => SuggestionText::DidYouMean, + }; + } + } + } + + // If we have less than 5 things to say, it would be useful to call out exactly what's wrong + if labels.len() <= 5 { + for (span, label) in labels { + err.span_label(span, label); + } + } + + // Call out where the function is defined + self.label_fn_like(&mut err, fn_def_id, callee_ty); + + // And add a suggestion block for all of the parameters + let suggestion_text = match suggestion_text { + SuggestionText::None => None, + SuggestionText::Provide(plural) => { + Some(format!("provide the argument{}", if plural { "s" } else { "" })) + } + SuggestionText::Remove(plural) => { + Some(format!("remove the extra argument{}", if plural { "s" } else { "" })) + } + SuggestionText::Swap => Some("swap these arguments".to_string()), + SuggestionText::Reorder => Some("reorder these arguments".to_string()), + SuggestionText::DidYouMean => Some("did you mean".to_string()), + }; + if let Some(suggestion_text) = suggestion_text { + let source_map = self.sess().source_map(); + let mut suggestion = format!( + "{}(", + source_map.span_to_snippet(full_call_span).unwrap_or_else(|_| fn_def_id + .map_or("".to_string(), |fn_def_id| tcx.item_name(fn_def_id).to_string())) + ); + let mut needs_comma = false; + for (expected_idx, provided_idx) in 
matched_inputs.iter_enumerated() { + if needs_comma { + suggestion += ", "; + } else { + needs_comma = true; + } + let suggestion_text = if let Some(provided_idx) = provided_idx + && let (_, provided_span) = provided_arg_tys[*provided_idx] + && let Ok(arg_text) = + source_map.span_to_snippet(provided_span) + { + arg_text + } else { + // Propose a placeholder of the correct type + let (_, expected_ty) = formal_and_expected_inputs[expected_idx]; + if expected_ty.is_unit() { + "()".to_string() + } else if expected_ty.is_suggestable(tcx, false) { + format!("/* {} */", expected_ty) + } else { + "/* value */".to_string() + } + }; + suggestion += &suggestion_text; + } + suggestion += ")"; + err.span_suggestion_verbose( + error_span, + &suggestion_text, + suggestion, + Applicability::HasPlaceholders, + ); + } + + err.emit(); + } + + // AST fragment checking + pub(in super::super) fn check_lit( + &self, + lit: &hir::Lit, + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + let tcx = self.tcx; + + match lit.node { + ast::LitKind::Str(..) => tcx.mk_static_str(), + ast::LitKind::ByteStr(ref v) => { + tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.u8, v.len() as u64)) + } + ast::LitKind::Byte(_) => tcx.types.u8, + ast::LitKind::Char(_) => tcx.types.char, + ast::LitKind::Int(_, ast::LitIntType::Signed(t)) => tcx.mk_mach_int(ty::int_ty(t)), + ast::LitKind::Int(_, ast::LitIntType::Unsigned(t)) => tcx.mk_mach_uint(ty::uint_ty(t)), + ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => { + let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() { + ty::Int(_) | ty::Uint(_) => Some(ty), + ty::Char => Some(tcx.types.u8), + ty::RawPtr(..) => Some(tcx.types.usize), + ty::FnDef(..) | ty::FnPtr(_) => Some(tcx.types.usize), + _ => None, + }); + opt_ty.unwrap_or_else(|| self.next_int_var()) + } + ast::LitKind::Float(_, ast::LitFloatType::Suffixed(t)) => { + tcx.mk_mach_float(ty::float_ty(t)) + } + ast::LitKind::Float(_, ast::LitFloatType::Unsuffixed) => { + let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() { + ty::Float(_) => Some(ty), + _ => None, + }); + opt_ty.unwrap_or_else(|| self.next_float_var()) + } + ast::LitKind::Bool(_) => tcx.types.bool, + ast::LitKind::Err(_) => tcx.ty_error(), + } + } + + pub fn check_struct_path( + &self, + qpath: &QPath<'_>, + hir_id: hir::HirId, + ) -> Option<(&'tcx ty::VariantDef, Ty<'tcx>)> { + let path_span = qpath.span(); + let (def, ty) = self.finish_resolving_struct_path(qpath, path_span, hir_id); + let variant = match def { + Res::Err => { + self.set_tainted_by_errors(); + return None; + } + Res::Def(DefKind::Variant, _) => match ty.kind() { + ty::Adt(adt, substs) => Some((adt.variant_of_res(def), adt.did(), substs)), + _ => bug!("unexpected type: {:?}", ty), + }, + Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _) + | Res::SelfTy { .. } => match ty.kind() { + ty::Adt(adt, substs) if !adt.is_enum() => { + Some((adt.non_enum_variant(), adt.did(), substs)) + } + _ => None, + }, + _ => bug!("unexpected definition: {:?}", def), + }; + + if let Some((variant, did, substs)) = variant { + debug!("check_struct_path: did={:?} substs={:?}", did, substs); + self.write_user_type_annotation_from_substs(hir_id, did, substs, None); + + // Check bounds on type arguments used in the path. 
+ self.add_required_obligations(path_span, did, substs); + + Some((variant, ty)) + } else { + match ty.kind() { + ty::Error(_) => { + // E0071 might be caused by a spelling error, which will have + // already caused an error message and probably a suggestion + // elsewhere. Refrain from emitting more unhelpful errors here + // (issue #88844). + } + _ => { + struct_span_err!( + self.tcx.sess, + path_span, + E0071, + "expected struct, variant or union type, found {}", + ty.sort_string(self.tcx) + ) + .span_label(path_span, "not a struct") + .emit(); + } + } + None + } + } + + pub fn check_decl_initializer( + &self, + hir_id: hir::HirId, + pat: &'tcx hir::Pat<'tcx>, + init: &'tcx hir::Expr<'tcx>, + ) -> Ty<'tcx> { + // FIXME(tschottdorf): `contains_explicit_ref_binding()` must be removed + // for #42640 (default match binding modes). + // + // See #44848. + let ref_bindings = pat.contains_explicit_ref_binding(); + + let local_ty = self.local_ty(init.span, hir_id).revealed_ty; + if let Some(m) = ref_bindings { + // Somewhat subtle: if we have a `ref` binding in the pattern, + // we want to avoid introducing coercions for the RHS. This is + // both because it helps preserve sanity and, in the case of + // ref mut, for soundness (issue #23116). In particular, in + // the latter case, we need to be clear that the type of the + // referent for the reference that results is *equal to* the + // type of the place it is referencing, and not some + // supertype thereof. + let init_ty = self.check_expr_with_needs(init, Needs::maybe_mut_place(m)); + self.demand_eqtype(init.span, local_ty, init_ty); + init_ty + } else { + self.check_expr_coercable_to_type(init, local_ty, None) + } + } + + pub(in super::super) fn check_decl(&self, decl: Declaration<'tcx>) { + // Determine and write the type which we'll check the pattern against. + let decl_ty = self.local_ty(decl.span, decl.hir_id).decl_ty; + self.write_ty(decl.hir_id, decl_ty); + + // Type check the initializer. + if let Some(ref init) = decl.init { + let init_ty = self.check_decl_initializer(decl.hir_id, decl.pat, &init); + self.overwrite_local_ty_if_err(decl.hir_id, decl.pat, decl_ty, init_ty); + } + + // Does the expected pattern type originate from an expression and what is the span? + let (origin_expr, ty_span) = match (decl.ty, decl.init) { + (Some(ty), _) => (false, Some(ty.span)), // Bias towards the explicit user type. + (_, Some(init)) => { + (true, Some(init.span.find_ancestor_inside(decl.span).unwrap_or(init.span))) + } // No explicit type; so use the scrutinee. + _ => (false, None), // We have `let $pat;`, so the expected type is unconstrained. + }; + + // Type check the pattern. Override if necessary to avoid knock-on errors. + self.check_pat_top(&decl.pat, decl_ty, ty_span, origin_expr); + let pat_ty = self.node_ty(decl.pat.hir_id); + self.overwrite_local_ty_if_err(decl.hir_id, decl.pat, decl_ty, pat_ty); + + if let Some(blk) = decl.els { + let previous_diverges = self.diverges.get(); + let else_ty = self.check_block_with_expected(blk, NoExpectation); + let cause = self.cause(blk.span, ObligationCauseCode::LetElse); + if let Some(mut err) = + self.demand_eqtype_with_origin(&cause, self.tcx.types.never, else_ty) + { + err.emit(); + } + self.diverges.set(previous_diverges); + } + } + + /// Type check a `let` statement. 
+ pub fn check_decl_local(&self, local: &'tcx hir::Local<'tcx>) { + self.check_decl(local.into()); + } + + pub fn check_stmt(&self, stmt: &'tcx hir::Stmt<'tcx>, is_last: bool) { + // Don't do all the complex logic below for `DeclItem`. + match stmt.kind { + hir::StmtKind::Item(..) => return, + hir::StmtKind::Local(..) | hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {} + } + + self.warn_if_unreachable(stmt.hir_id, stmt.span, "statement"); + + // Hide the outer diverging and `has_errors` flags. + let old_diverges = self.diverges.replace(Diverges::Maybe); + let old_has_errors = self.has_errors.replace(false); + + match stmt.kind { + hir::StmtKind::Local(l) => { + self.check_decl_local(l); + } + // Ignore for now. + hir::StmtKind::Item(_) => {} + hir::StmtKind::Expr(ref expr) => { + // Check with expected type of `()`. + self.check_expr_has_type_or_error(&expr, self.tcx.mk_unit(), |err| { + if expr.can_have_side_effects() { + self.suggest_semicolon_at_end(expr.span, err); + } + }); + } + hir::StmtKind::Semi(ref expr) => { + // All of this is equivalent to calling `check_expr`, but it is inlined out here + // in order to capture the fact that this `match` is the last statement in its + // function. This is done for better suggestions to remove the `;`. + let expectation = match expr.kind { + hir::ExprKind::Match(..) if is_last => IsLast(stmt.span), + _ => NoExpectation, + }; + self.check_expr_with_expectation(expr, expectation); + } + } + + // Combine the diverging and `has_error` flags. + self.diverges.set(self.diverges.get() | old_diverges); + self.has_errors.set(self.has_errors.get() | old_has_errors); + } + + pub fn check_block_no_value(&self, blk: &'tcx hir::Block<'tcx>) { + let unit = self.tcx.mk_unit(); + let ty = self.check_block_with_expected(blk, ExpectHasType(unit)); + + // if the block produces a `!` value, that can always be + // (effectively) coerced to unit. + if !ty.is_never() { + self.demand_suptype(blk.span, unit, ty); + } + } + + pub(in super::super) fn check_block_with_expected( + &self, + blk: &'tcx hir::Block<'tcx>, + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + let prev = self.ps.replace(self.ps.get().recurse(blk)); + + // In some cases, blocks have just one exit, but other blocks + // can be targeted by multiple breaks. This can happen both + // with labeled blocks as well as when we desugar + // a `try { ... }` expression. + // + // Example 1: + // + // 'a: { if true { break 'a Err(()); } Ok(()) } + // + // Here we would wind up with two coercions, one from + // `Err(())` and the other from the tail expression + // `Ok(())`. If the tail expression is omitted, that's a + // "forced unit" -- unless the block diverges, in which + // case we can ignore the tail expression (e.g., `'a: { + // break 'a 22; }` would not force the type of the block + // to be `()`). 
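+ // Example 2 (roughly): in `try { x?; 1 }`, the error path of the `?` breaks out of the
+ // desugared `try` block, so its value is a coercion site alongside the wrapped tail `1`.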
+ let tail_expr = blk.expr.as_ref(); + let coerce_to_ty = expected.coercion_target_type(self, blk.span); + let coerce = if blk.targeted_by_break { + CoerceMany::new(coerce_to_ty) + } else { + let tail_expr: &[&hir::Expr<'_>] = match tail_expr { + Some(e) => slice::from_ref(e), + None => &[], + }; + CoerceMany::with_coercion_sites(coerce_to_ty, tail_expr) + }; + + let prev_diverges = self.diverges.get(); + let ctxt = BreakableCtxt { coerce: Some(coerce), may_break: false }; + + let (ctxt, ()) = self.with_breakable_ctxt(blk.hir_id, ctxt, || { + for (pos, s) in blk.stmts.iter().enumerate() { + self.check_stmt(s, blk.stmts.len() - 1 == pos); + } + + // check the tail expression **without** holding the + // `enclosing_breakables` lock below. + let tail_expr_ty = tail_expr.map(|t| self.check_expr_with_expectation(t, expected)); + + let mut enclosing_breakables = self.enclosing_breakables.borrow_mut(); + let ctxt = enclosing_breakables.find_breakable(blk.hir_id); + let coerce = ctxt.coerce.as_mut().unwrap(); + if let Some(tail_expr_ty) = tail_expr_ty { + let tail_expr = tail_expr.unwrap(); + let span = self.get_expr_coercion_span(tail_expr); + let cause = self.cause(span, ObligationCauseCode::BlockTailExpression(blk.hir_id)); + let ty_for_diagnostic = coerce.merged_ty(); + // We use coerce_inner here because we want to augment the error + // suggesting to wrap the block in square brackets if it might've + // been mistaken for array syntax + coerce.coerce_inner( + self, + &cause, + Some(tail_expr), + tail_expr_ty, + Some(&mut |diag: &mut Diagnostic| { + self.suggest_block_to_brackets(diag, blk, tail_expr_ty, ty_for_diagnostic); + }), + false, + ); + } else { + // Subtle: if there is no explicit tail expression, + // that is typically equivalent to a tail expression + // of `()` -- except if the block diverges. In that + // case, there is no value supplied from the tail + // expression (assuming there are no other breaks, + // this implies that the type of the block will be + // `!`). + // + // #41425 -- label the implicit `()` as being the + // "found type" here, rather than the "expected type". + if !self.diverges.get().is_always() { + // #50009 -- Do not point at the entire fn block span, point at the return type + // span, as it is the cause of the requirement, and + // `consider_hint_about_removing_semicolon` will point at the last expression + // if it were a relevant part of the error. This improves usability in editors + // that highlight errors inline. + let mut sp = blk.span; + let mut fn_span = None; + if let Some((decl, ident)) = self.get_parent_fn_decl(blk.hir_id) { + let ret_sp = decl.output.span(); + if let Some(block_sp) = self.parent_item_span(blk.hir_id) { + // HACK: in some cases (`ui/liveness/liveness-issue-2163.rs`) the + // output would otherwise be incorrect and even misleading. Make sure + // the span we're aiming at corresponds to a `fn` body. + if block_sp == blk.span { + sp = ret_sp; + fn_span = Some(ident.span); + } + } + } + coerce.coerce_forced_unit( + self, + &self.misc(sp), + &mut |err| { + if let Some(expected_ty) = expected.only_has_type(self) { + if !self.consider_removing_semicolon(blk, expected_ty, err) { + self.consider_returning_binding(blk, expected_ty, err); + } + if expected_ty == self.tcx.types.bool { + // If this is caused by a missing `let` in a `while let`, + // silence this redundant error, as we already emit E0070.
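+ // e.g. a missing `let` as in `while Some(x) = iter.next() { .. }`: the bad assignment
+ // is already reported (E0070), so also flagging the `()` vs `bool` condition mismatch
+ // would only add noise.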
+ + // Our block must be a `assign desugar local; assignment` + if let Some(hir::Node::Block(hir::Block { + stmts: + [ + hir::Stmt { + kind: + hir::StmtKind::Local(hir::Local { + source: + hir::LocalSource::AssignDesugar(_), + .. + }), + .. + }, + hir::Stmt { + kind: + hir::StmtKind::Expr(hir::Expr { + kind: hir::ExprKind::Assign(..), + .. + }), + .. + }, + ], + .. + })) = self.tcx.hir().find(blk.hir_id) + { + self.comes_from_while_condition(blk.hir_id, |_| { + err.downgrade_to_delayed_bug(); + }) + } + } + } + if let Some(fn_span) = fn_span { + err.span_label( + fn_span, + "implicitly returns `()` as its body has no tail or `return` \ + expression", + ); + } + }, + false, + ); + } + } + }); + + if ctxt.may_break { + // If we can break from the block, then the block's exit is always reachable + // (... as long as the entry is reachable) - regardless of the tail of the block. + self.diverges.set(prev_diverges); + } + + let mut ty = ctxt.coerce.unwrap().complete(self); + + if self.has_errors.get() || ty.references_error() { + ty = self.tcx.ty_error() + } + + self.write_ty(blk.hir_id, ty); + + self.ps.set(prev); + ty + } + + fn parent_item_span(&self, id: hir::HirId) -> Option { + let node = self.tcx.hir().get_by_def_id(self.tcx.hir().get_parent_item(id)); + match node { + Node::Item(&hir::Item { kind: hir::ItemKind::Fn(_, _, body_id), .. }) + | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(_, body_id), .. }) => { + let body = self.tcx.hir().body(body_id); + if let ExprKind::Block(block, _) = &body.value.kind { + return Some(block.span); + } + } + _ => {} + } + None + } + + /// Given a function block's `HirId`, returns its `FnDecl` if it exists, or `None` otherwise. + fn get_parent_fn_decl(&self, blk_id: hir::HirId) -> Option<(&'tcx hir::FnDecl<'tcx>, Ident)> { + let parent = self.tcx.hir().get_by_def_id(self.tcx.hir().get_parent_item(blk_id)); + self.get_node_fn_decl(parent).map(|(fn_decl, ident, _)| (fn_decl, ident)) + } + + /// If `expr` is a `match` expression that has only one non-`!` arm, use that arm's tail + /// expression's `Span`, otherwise return `expr.span`. This is done to give better errors + /// when given code like the following: + /// ```text + /// if false { return 0i32; } else { 1u32 } + /// // ^^^^ point at this instead of the whole `if` expression + /// ``` + fn get_expr_coercion_span(&self, expr: &hir::Expr<'_>) -> rustc_span::Span { + let check_in_progress = |elem: &hir::Expr<'_>| { + self.typeck_results.borrow().node_type_opt(elem.hir_id).filter(|ty| !ty.is_never()).map( + |_| match elem.kind { + // Point at the tail expression when possible. + hir::ExprKind::Block(block, _) => block.expr.map_or(block.span, |e| e.span), + _ => elem.span, + }, + ) + }; + + if let hir::ExprKind::If(_, _, Some(el)) = expr.kind { + if let Some(rslt) = check_in_progress(el) { + return rslt; + } + } + + if let hir::ExprKind::Match(_, arms, _) = expr.kind { + let mut iter = arms.iter().filter_map(|arm| check_in_progress(arm.body)); + if let Some(span) = iter.next() { + if iter.next().is_none() { + return span; + } + } + } + + expr.span + } + + fn overwrite_local_ty_if_err( + &self, + hir_id: hir::HirId, + pat: &'tcx hir::Pat<'tcx>, + decl_ty: Ty<'tcx>, + ty: Ty<'tcx>, + ) { + if ty.references_error() { + // Override the types everywhere with `err()` to avoid knock on errors. 
+ self.write_ty(hir_id, ty); + self.write_ty(pat.hir_id, ty); + let local_ty = LocalTy { decl_ty, revealed_ty: ty }; + self.locals.borrow_mut().insert(hir_id, local_ty); + self.locals.borrow_mut().insert(pat.hir_id, local_ty); + } + } + + // Finish resolving a path in a struct expression or pattern `S::A { .. }` if necessary. + // The newly resolved definition is written into `type_dependent_defs`. + fn finish_resolving_struct_path( + &self, + qpath: &QPath<'_>, + path_span: Span, + hir_id: hir::HirId, + ) -> (Res, Ty<'tcx>) { + match *qpath { + QPath::Resolved(ref maybe_qself, ref path) => { + let self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself)); + let ty = <dyn AstConv<'_>>::res_to_ty(self, self_ty, path, true); + (path.res, ty) + } + QPath::TypeRelative(ref qself, ref segment) => { + let ty = self.to_ty(qself); + + let result = <dyn AstConv<'_>>::associated_path_to_ty( + self, hir_id, path_span, ty, qself, segment, true, + ); + let ty = result.map(|(ty, _, _)| ty).unwrap_or_else(|_| self.tcx().ty_error()); + let result = result.map(|(_, kind, def_id)| (kind, def_id)); + + // Write back the new resolution. + self.write_resolution(hir_id, result); + + (result.map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)), ty) + } + QPath::LangItem(lang_item, span, id) => { + self.resolve_lang_item_path(lang_item, span, hir_id, id) + } + } + } + + /// Given a vec of evaluated `FulfillmentError`s and an `fn` call's argument expressions, we walk + /// the checked and coerced types for each argument to see if any of the `FulfillmentError`s + /// reference a type argument. The reason to also walk the checked type is that the coerced type + /// may not be easily comparable with the predicate type (because of coercion). If the types match + /// for either the checked or the coerced type, and there's only *one* argument that does, we point at + /// the corresponding argument's expression span instead of the `fn` call path span. + fn point_at_arg_instead_of_call_if_possible( + &self, + errors: &mut Vec<traits::FulfillmentError<'tcx>>, + expr: &'tcx hir::Expr<'tcx>, + call_sp: Span, + args: &'tcx [hir::Expr<'tcx>], + expected_tys: &[Ty<'tcx>], + ) { + // We *do not* do this for desugared call spans to keep good diagnostics when involving + // the `?` operator. + if call_sp.desugaring_kind().is_some() { + return; + } + + 'outer: for error in errors { + // Only if the cause is somewhere inside the expression do we want to try to point at the arg. + // Otherwise, it means that the cause is somewhere else and we should not change + // anything because we could break the correct span. + if !call_sp.contains(error.obligation.cause.span) { + continue; + } + + // Peel derived obligation, because it's the type that originally + // started this inference chain that matters, not the one we wound + // up with at the end.
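+ // For instance, if an argument of type `Vec<T>` kicked off a chain that ultimately
+ // failed on a nested `T: Trait` bound, it is the original `Vec<T>` obligation we want
+ // to match against the argument types below, not the derived `T` one.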
+ fn unpeel_to_top<'a, 'tcx>( + mut code: &'a ObligationCauseCode<'tcx>, + ) -> &'a ObligationCauseCode<'tcx> { + let mut result_code = code; + loop { + let parent = match code { + ObligationCauseCode::ImplDerivedObligation(c) => &c.derived.parent_code, + ObligationCauseCode::BuiltinDerivedObligation(c) + | ObligationCauseCode::DerivedObligation(c) => &c.parent_code, + _ => break result_code, + }; + (result_code, code) = (code, parent); + } + } + let self_: ty::subst::GenericArg<'_> = + match unpeel_to_top(error.obligation.cause.code()) { + ObligationCauseCode::BuiltinDerivedObligation(code) + | ObligationCauseCode::DerivedObligation(code) => { + code.parent_trait_pred.self_ty().skip_binder().into() + } + ObligationCauseCode::ImplDerivedObligation(code) => { + code.derived.parent_trait_pred.self_ty().skip_binder().into() + } + _ if let ty::PredicateKind::Trait(predicate) = + error.obligation.predicate.kind().skip_binder() => + { + predicate.self_ty().into() + } + _ => continue, + }; + let self_ = self.resolve_vars_if_possible(self_); + let ty_matches_self = |ty: Ty<'tcx>| ty.walk().any(|arg| arg == self_); + + let typeck_results = self.typeck_results.borrow(); + + for (idx, arg) in args.iter().enumerate() { + // Don't adjust the span if we already have a more precise span + // within one of the args. + if arg.span.contains(error.obligation.cause.span) { + let references_arg = + typeck_results.expr_ty_opt(arg).map_or(false, &ty_matches_self) + || expected_tys.get(idx).copied().map_or(false, &ty_matches_self); + if references_arg && !arg.span.from_expansion() { + error.obligation.cause.map_code(|parent_code| { + ObligationCauseCode::FunctionArgumentObligation { + arg_hir_id: args[idx].hir_id, + call_hir_id: expr.hir_id, + parent_code, + } + }) + } + continue 'outer; + } + } + + // Collect the argument position for all arguments that could have caused this + // `FulfillmentError`. + let mut referenced_in: Vec<_> = std::iter::zip(expected_tys, args) + .enumerate() + .flat_map(|(idx, (expected_ty, arg))| { + if let Some(arg_ty) = typeck_results.expr_ty_opt(arg) { + vec![(idx, arg_ty), (idx, *expected_ty)] + } else { + vec![] + } + }) + .filter_map(|(i, ty)| { + let ty = self.resolve_vars_if_possible(ty); + // We walk the argument type because the argument's type could have + // been `Option`, but the `FulfillmentError` references `T`. + if ty_matches_self(ty) { Some(i) } else { None } + }) + .collect(); + + // Both checked and coerced types could have matched, thus we need to remove + // duplicates. + + // We sort primitive type usize here and can use unstable sort + referenced_in.sort_unstable(); + referenced_in.dedup(); + + if let &[idx] = &referenced_in[..] { + // Do not point at the inside of a macro. + // That would often result in poor error messages. + if args[idx].span.from_expansion() { + continue; + } + // We make sure that only *one* argument matches the obligation failure + // and we assign the obligation's span to its expression's. + error.obligation.cause.span = args[idx].span; + error.obligation.cause.map_code(|parent_code| { + ObligationCauseCode::FunctionArgumentObligation { + arg_hir_id: args[idx].hir_id, + call_hir_id: expr.hir_id, + parent_code, + } + }); + } else if error.obligation.cause.span == call_sp { + // Make function calls point at the callee, not the whole thing. 
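+ // e.g. point at `foo` in `foo(a, b)` rather than at the whole call expression.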
+ if let hir::ExprKind::Call(callee, _) = expr.kind { + error.obligation.cause.span = callee.span; + } + } + } + } + + /// Given a vec of evaluated `FulfillmentError`s and an `fn` call expression, we walk the + /// `PathSegment`s and resolve their type parameters to see if any of the `FulfillmentError`s + /// were caused by them. If they were, we point at the corresponding type argument's span + /// instead of the `fn` call path span. + fn point_at_type_arg_instead_of_call_if_possible( + &self, + errors: &mut Vec>, + call_expr: &'tcx hir::Expr<'tcx>, + ) { + if let hir::ExprKind::Call(path, _) = &call_expr.kind { + if let hir::ExprKind::Path(hir::QPath::Resolved(_, path)) = &path.kind { + for error in errors { + if let ty::PredicateKind::Trait(predicate) = + error.obligation.predicate.kind().skip_binder() + { + // If any of the type arguments in this path segment caused the + // `FulfillmentError`, point at its span (#61860). + for arg in path + .segments + .iter() + .filter_map(|seg| seg.args.as_ref()) + .flat_map(|a| a.args.iter()) + { + if let hir::GenericArg::Type(hir_ty) = &arg + && let Some(ty) = + self.typeck_results.borrow().node_type_opt(hir_ty.hir_id) + && self.resolve_vars_if_possible(ty) == predicate.self_ty() + { + error.obligation.cause.span = hir_ty.span; + break; + } + } + } + } + } + } + } + + fn label_fn_like( + &self, + err: &mut rustc_errors::DiagnosticBuilder<'tcx, rustc_errors::ErrorGuaranteed>, + callable_def_id: Option, + callee_ty: Option>, + ) { + let Some(mut def_id) = callable_def_id else { + return; + }; + + if let Some(assoc_item) = self.tcx.opt_associated_item(def_id) + // Possibly points at either impl or trait item, so try to get it + // to point to trait item, then get the parent. + // This parent might be an impl in the case of an inherent function, + // but the next check will fail. + && let maybe_trait_item_def_id = assoc_item.trait_item_def_id.unwrap_or(def_id) + && let maybe_trait_def_id = self.tcx.parent(maybe_trait_item_def_id) + // Just an easy way to check "trait_def_id == Fn/FnMut/FnOnce" + && let Some(call_kind) = ty::ClosureKind::from_def_id(self.tcx, maybe_trait_def_id) + && let Some(callee_ty) = callee_ty + { + let callee_ty = callee_ty.peel_refs(); + match *callee_ty.kind() { + ty::Param(param) => { + let param = + self.tcx.generics_of(self.body_id.owner).type_param(¶m, self.tcx); + if param.kind.is_synthetic() { + // if it's `impl Fn() -> ..` then just fall down to the def-id based logic + def_id = param.def_id; + } else { + // Otherwise, find the predicate that makes this generic callable, + // and point at that. + let instantiated = self + .tcx + .explicit_predicates_of(self.body_id.owner) + .instantiate_identity(self.tcx); + // FIXME(compiler-errors): This could be problematic if something has two + // fn-like predicates with different args, but callable types really never + // do that, so it's OK. + for (predicate, span) in + std::iter::zip(instantiated.predicates, instantiated.spans) + { + if let ty::PredicateKind::Trait(pred) = predicate.kind().skip_binder() + && pred.self_ty().peel_refs() == callee_ty + && ty::ClosureKind::from_def_id(self.tcx, pred.def_id()).is_some() + { + err.span_note(span, "callable defined here"); + return; + } + } + } + } + ty::Opaque(new_def_id, _) + | ty::Closure(new_def_id, _) + | ty::FnDef(new_def_id, _) => { + def_id = new_def_id; + } + _ => { + // Look for a user-provided impl of a `Fn` trait, and point to it. 
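+ // Sketch of what follows: probe a `callee_ty: Fn*(_)` obligation with a fresh inference
+ // variable standing in for the argument tuple; if selection finds a `UserDefined` impl
+ // source, point the "defined here" note at that impl.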
+ let new_def_id = self.probe(|_| { + let trait_ref = ty::TraitRef::new( + call_kind.to_def_id(self.tcx), + self.tcx.mk_substs([ + ty::GenericArg::from(callee_ty), + self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::MiscVariable, + span: rustc_span::DUMMY_SP, + }) + .into(), + ].into_iter()), + ); + let obligation = traits::Obligation::new( + traits::ObligationCause::dummy(), + self.param_env, + ty::Binder::dummy(ty::TraitPredicate { + trait_ref, + constness: ty::BoundConstness::NotConst, + polarity: ty::ImplPolarity::Positive, + }), + ); + match SelectionContext::new(&self).select(&obligation) { + Ok(Some(traits::ImplSource::UserDefined(impl_source))) => { + Some(impl_source.impl_def_id) + } + _ => None + } + }); + if let Some(new_def_id) = new_def_id { + def_id = new_def_id; + } else { + return; + } + } + } + } + + if let Some(def_span) = self.tcx.def_ident_span(def_id) && !def_span.is_dummy() { + let mut spans: MultiSpan = def_span.into(); + + let params = self + .tcx + .hir() + .get_if_local(def_id) + .and_then(|node| node.body_id()) + .into_iter() + .flat_map(|id| self.tcx.hir().body(id).params); + + for param in params { + spans.push_span_label(param.span, ""); + } + + let def_kind = self.tcx.def_kind(def_id); + err.span_note(spans, &format!("{} defined here", def_kind.descr(def_id))); + } else { + let def_kind = self.tcx.def_kind(def_id); + err.span_note( + self.tcx.def_span(def_id), + &format!("{} defined here", def_kind.descr(def_id)), + ); + } + } +} diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs b/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs new file mode 100644 index 000000000..05bcc710e --- /dev/null +++ b/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs @@ -0,0 +1,296 @@ +mod _impl; +mod arg_matrix; +mod checks; +mod suggestions; + +pub use _impl::*; +pub use suggestions::*; + +use crate::astconv::AstConv; +use crate::check::coercion::DynamicCoerceMany; +use crate::check::{Diverges, EnclosingBreakables, Inherited, UnsafetyState}; + +use rustc_hir as hir; +use rustc_hir::def_id::DefId; +use rustc_infer::infer; +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind}; +use rustc_middle::ty::subst::GenericArgKind; +use rustc_middle::ty::visit::TypeVisitable; +use rustc_middle::ty::{self, Const, Ty, TyCtxt}; +use rustc_session::Session; +use rustc_span::symbol::Ident; +use rustc_span::{self, Span}; +use rustc_trait_selection::traits::{ObligationCause, ObligationCauseCode}; + +use std::cell::{Cell, RefCell}; +use std::ops::Deref; + +pub struct FnCtxt<'a, 'tcx> { + pub(super) body_id: hir::HirId, + + /// The parameter environment used for proving trait obligations + /// in this function. This can change when we descend into + /// closures (as they bring new things into scope), hence it is + /// not part of `Inherited` (as of the time of this writing, + /// closures do not yet change the environment, but they will + /// eventually). + pub(super) param_env: ty::ParamEnv<'tcx>, + + /// Number of errors that had been reported when we started + /// checking this function. On exit, if we find that *more* errors + /// have been reported, we will skip regionck and other work that + /// expects the types within the function to be consistent. + // FIXME(matthewjasper) This should not exist, and it's not correct + // if type checking is run in parallel. 
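+ // (Compared against `tcx.sess.err_count()` by `errors_reported_since_creation`.)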
+ err_count_on_creation: usize, + + /// If `Some`, this stores coercion information for returned + /// expressions. If `None`, this is in a context where return is + /// inappropriate, such as a const expression. + /// + /// This is a `RefCell`, which means that we + /// can track all the return expressions and then use them to + /// compute a useful coercion from the set, similar to a match + /// expression or other branching context. You can use methods + /// like `expected_ty` to access the declared return type (if + /// any). + pub(super) ret_coercion: Option>>, + + pub(super) ret_type_span: Option, + + /// Used exclusively to reduce cost of advanced evaluation used for + /// more helpful diagnostics. + pub(super) in_tail_expr: bool, + + /// First span of a return site that we find. Used in error messages. + pub(super) ret_coercion_span: Cell>, + + pub(super) resume_yield_tys: Option<(Ty<'tcx>, Ty<'tcx>)>, + + pub(super) ps: Cell, + + /// Whether the last checked node generates a divergence (e.g., + /// `return` will set this to `Always`). In general, when entering + /// an expression or other node in the tree, the initial value + /// indicates whether prior parts of the containing expression may + /// have diverged. It is then typically set to `Maybe` (and the + /// old value remembered) for processing the subparts of the + /// current expression. As each subpart is processed, they may set + /// the flag to `Always`, etc. Finally, at the end, we take the + /// result and "union" it with the original value, so that when we + /// return the flag indicates if any subpart of the parent + /// expression (up to and including this part) has diverged. So, + /// if you read it after evaluating a subexpression `X`, the value + /// you get indicates whether any subexpression that was + /// evaluating up to and including `X` diverged. + /// + /// We currently use this flag only for diagnostic purposes: + /// + /// - To warn about unreachable code: if, after processing a + /// sub-expression but before we have applied the effects of the + /// current node, we see that the flag is set to `Always`, we + /// can issue a warning. This corresponds to something like + /// `foo(return)`; we warn on the `foo()` expression. (We then + /// update the flag to `WarnedAlways` to suppress duplicate + /// reports.) Similarly, if we traverse to a fresh statement (or + /// tail expression) from an `Always` setting, we will issue a + /// warning. This corresponds to something like `{return; + /// foo();}` or `{return; 22}`, where we would warn on the + /// `foo()` or `22`. + /// + /// An expression represents dead code if, after checking it, + /// the diverges flag is set to something other than `Maybe`. + pub(super) diverges: Cell, + + /// Whether any child nodes have any type errors. + pub(super) has_errors: Cell, + + pub(super) enclosing_breakables: RefCell>, + + pub(super) inh: &'a Inherited<'a, 'tcx>, + + /// True if the function or closure's return type is known before + /// entering the function/closure, i.e. if the return type is + /// either given explicitly or inferred from, say, an `Fn*` trait + /// bound. Used for diagnostic purposes only. 
+ pub(super) return_type_pre_known: bool, + + /// True if the return type has an Opaque type + pub(super) return_type_has_opaque: bool, +} + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + pub fn new( + inh: &'a Inherited<'a, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + body_id: hir::HirId, + ) -> FnCtxt<'a, 'tcx> { + FnCtxt { + body_id, + param_env, + err_count_on_creation: inh.tcx.sess.err_count(), + ret_coercion: None, + ret_type_span: None, + in_tail_expr: false, + ret_coercion_span: Cell::new(None), + resume_yield_tys: None, + ps: Cell::new(UnsafetyState::function(hir::Unsafety::Normal, hir::CRATE_HIR_ID)), + diverges: Cell::new(Diverges::Maybe), + has_errors: Cell::new(false), + enclosing_breakables: RefCell::new(EnclosingBreakables { + stack: Vec::new(), + by_id: Default::default(), + }), + inh, + return_type_pre_known: true, + return_type_has_opaque: false, + } + } + + pub fn cause(&self, span: Span, code: ObligationCauseCode<'tcx>) -> ObligationCause<'tcx> { + ObligationCause::new(span, self.body_id, code) + } + + pub fn misc(&self, span: Span) -> ObligationCause<'tcx> { + self.cause(span, ObligationCauseCode::MiscObligation) + } + + pub fn sess(&self) -> &Session { + &self.tcx.sess + } + + pub fn errors_reported_since_creation(&self) -> bool { + self.tcx.sess.err_count() > self.err_count_on_creation + } +} + +impl<'a, 'tcx> Deref for FnCtxt<'a, 'tcx> { + type Target = Inherited<'a, 'tcx>; + fn deref(&self) -> &Self::Target { + &self.inh + } +} + +impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'tcx> { + self.tcx + } + + fn item_def_id(&self) -> Option { + None + } + + fn get_type_parameter_bounds( + &self, + _: Span, + def_id: DefId, + _: Ident, + ) -> ty::GenericPredicates<'tcx> { + let tcx = self.tcx; + let item_def_id = tcx.hir().ty_param_owner(def_id.expect_local()); + let generics = tcx.generics_of(item_def_id); + let index = generics.param_def_id_to_index[&def_id]; + ty::GenericPredicates { + parent: None, + predicates: tcx.arena.alloc_from_iter( + self.param_env.caller_bounds().iter().filter_map(|predicate| { + match predicate.kind().skip_binder() { + ty::PredicateKind::Trait(data) if data.self_ty().is_param(index) => { + // HACK(eddyb) should get the original `Span`. 
+ let span = tcx.def_span(def_id); + Some((predicate, span)) + } + _ => None, + } + }), + ), + } + } + + fn re_infer(&self, def: Option<&ty::GenericParamDef>, span: Span) -> Option> { + let v = match def { + Some(def) => infer::EarlyBoundRegion(span, def.name), + None => infer::MiscVariable(span), + }; + Some(self.next_region_var(v)) + } + + fn allow_ty_infer(&self) -> bool { + true + } + + fn ty_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx> { + if let Some(param) = param { + if let GenericArgKind::Type(ty) = self.var_for_def(span, param).unpack() { + return ty; + } + unreachable!() + } else { + self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::TypeInference, + span, + }) + } + } + + fn ct_infer( + &self, + ty: Ty<'tcx>, + param: Option<&ty::GenericParamDef>, + span: Span, + ) -> Const<'tcx> { + if let Some(param) = param { + if let GenericArgKind::Const(ct) = self.var_for_def(span, param).unpack() { + return ct; + } + unreachable!() + } else { + self.next_const_var( + ty, + ConstVariableOrigin { kind: ConstVariableOriginKind::ConstInference, span }, + ) + } + } + + fn projected_ty_from_poly_trait_ref( + &self, + span: Span, + item_def_id: DefId, + item_segment: &hir::PathSegment<'_>, + poly_trait_ref: ty::PolyTraitRef<'tcx>, + ) -> Ty<'tcx> { + let trait_ref = self.replace_bound_vars_with_fresh_vars( + span, + infer::LateBoundRegionConversionTime::AssocTypeProjection(item_def_id), + poly_trait_ref, + ); + + let item_substs = >::create_substs_for_associated_item( + self, + self.tcx, + span, + item_def_id, + item_segment, + trait_ref.substs, + ); + + self.tcx().mk_projection(item_def_id, item_substs) + } + + fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx> { + if ty.has_escaping_bound_vars() { + ty // FIXME: normalization and escaping regions + } else { + self.normalize_associated_types_in(span, ty) + } + } + + fn set_tainted_by_errors(&self) { + self.infcx.set_tainted_by_errors() + } + + fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, _span: Span) { + self.write_ty(hir_id, ty) + } +} diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs b/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs new file mode 100644 index 000000000..57771e096 --- /dev/null +++ b/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs @@ -0,0 +1,912 @@ +use super::FnCtxt; +use crate::astconv::AstConv; +use crate::errors::{AddReturnTypeSuggestion, ExpectedReturnTypeLabel}; + +use rustc_ast::util::parser::ExprPrecedence; +use rustc_errors::{Applicability, Diagnostic, MultiSpan}; +use rustc_hir as hir; +use rustc_hir::def::{CtorOf, DefKind}; +use rustc_hir::lang_items::LangItem; +use rustc_hir::{ + Expr, ExprKind, GenericBound, Node, Path, QPath, Stmt, StmtKind, TyKind, WherePredicate, +}; +use rustc_infer::infer::{self, TyCtxtInferExt}; +use rustc_infer::traits::{self, StatementAsExpression}; +use rustc_middle::lint::in_external_macro; +use rustc_middle::ty::{self, Binder, IsSuggestable, Subst, ToPredicate, Ty}; +use rustc_span::symbol::sym; +use rustc_span::Span; +use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _; + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + pub(in super::super) fn suggest_semicolon_at_end(&self, span: Span, err: &mut Diagnostic) { + err.span_suggestion_short( + span.shrink_to_hi(), + "consider using a semicolon here", + ";", + Applicability::MachineApplicable, + ); + } + + /// On implicit return expressions with mismatched types, provides the following suggestions: + /// + /// - Points out 
the method's return type as the reason for the expected type. + /// - Possible missing semicolon. + /// - Possible missing return type if the return type is the default, and not `fn main()`. + pub fn suggest_mismatched_types_on_tail( + &self, + err: &mut Diagnostic, + expr: &'tcx hir::Expr<'tcx>, + expected: Ty<'tcx>, + found: Ty<'tcx>, + blk_id: hir::HirId, + ) -> bool { + let expr = expr.peel_drop_temps(); + self.suggest_missing_semicolon(err, expr, expected, false); + let mut pointing_at_return_type = false; + if let Some((fn_decl, can_suggest)) = self.get_fn_decl(blk_id) { + let fn_id = self.tcx.hir().get_return_block(blk_id).unwrap(); + pointing_at_return_type = self.suggest_missing_return_type( + err, + &fn_decl, + expected, + found, + can_suggest, + fn_id, + ); + self.suggest_missing_break_or_return_expr( + err, expr, &fn_decl, expected, found, blk_id, fn_id, + ); + } + pointing_at_return_type + } + + /// When encountering an fn-like ctor that needs to unify with a value, check whether calling + /// the ctor would successfully solve the type mismatch and if so, suggest it: + /// ```compile_fail,E0308 + /// fn foo(x: usize) -> usize { x } + /// let x: usize = foo; // suggest calling the `foo` function: `foo(42)` + /// ``` + fn suggest_fn_call( + &self, + err: &mut Diagnostic, + expr: &hir::Expr<'_>, + expected: Ty<'tcx>, + found: Ty<'tcx>, + ) -> bool { + let (def_id, output, inputs) = match *found.kind() { + ty::FnDef(def_id, _) => { + let fn_sig = found.fn_sig(self.tcx); + (def_id, fn_sig.output(), fn_sig.inputs().skip_binder().len()) + } + ty::Closure(def_id, substs) => { + let fn_sig = substs.as_closure().sig(); + (def_id, fn_sig.output(), fn_sig.inputs().skip_binder().len() - 1) + } + ty::Opaque(def_id, substs) => { + let sig = self.tcx.bound_item_bounds(def_id).subst(self.tcx, substs).iter().find_map(|pred| { + if let ty::PredicateKind::Projection(proj) = pred.kind().skip_binder() + && Some(proj.projection_ty.item_def_id) == self.tcx.lang_items().fn_once_output() + // args tuple will always be substs[1] + && let ty::Tuple(args) = proj.projection_ty.substs.type_at(1).kind() + { + Some(( + pred.kind().rebind(proj.term.ty().unwrap()), + args.len(), + )) + } else { + None + } + }); + if let Some((output, inputs)) = sig { + (def_id, output, inputs) + } else { + return false; + } + } + _ => return false, + }; + + let output = self.replace_bound_vars_with_fresh_vars(expr.span, infer::FnCall, output); + let output = self.normalize_associated_types_in(expr.span, output); + if !output.is_ty_var() && self.can_coerce(output, expected) { + let (sugg_call, mut applicability) = match inputs { + 0 => ("".to_string(), Applicability::MachineApplicable), + 1..=4 => ( + (0..inputs).map(|_| "_").collect::>().join(", "), + Applicability::MachineApplicable, + ), + _ => ("...".to_string(), Applicability::HasPlaceholders), + }; + + let msg = match self.tcx.def_kind(def_id) { + DefKind::Fn => "call this function", + DefKind::Closure | DefKind::OpaqueTy => "call this closure", + DefKind::Ctor(CtorOf::Struct, _) => "instantiate this tuple struct", + DefKind::Ctor(CtorOf::Variant, _) => "instantiate this tuple variant", + _ => "call this function", + }; + + let sugg = match expr.kind { + hir::ExprKind::Call(..) + | hir::ExprKind::Path(..) + | hir::ExprKind::Index(..) + | hir::ExprKind::Lit(..) => { + vec![(expr.span.shrink_to_hi(), format!("({sugg_call})"))] + } + hir::ExprKind::Closure { .. 
} => { + // Might be `{ expr } || { bool }` + applicability = Applicability::MaybeIncorrect; + vec![ + (expr.span.shrink_to_lo(), "(".to_string()), + (expr.span.shrink_to_hi(), format!(")({sugg_call})")), + ] + } + _ => { + vec![ + (expr.span.shrink_to_lo(), "(".to_string()), + (expr.span.shrink_to_hi(), format!(")({sugg_call})")), + ] + } + }; + + err.multipart_suggestion_verbose( + format!("use parentheses to {msg}"), + sugg, + applicability, + ); + + return true; + } + false + } + + pub fn suggest_deref_ref_or_into( + &self, + err: &mut Diagnostic, + expr: &hir::Expr<'tcx>, + expected: Ty<'tcx>, + found: Ty<'tcx>, + expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>, + ) { + let expr = expr.peel_blocks(); + if let Some((sp, msg, suggestion, applicability, verbose)) = + self.check_ref(expr, found, expected) + { + if verbose { + err.span_suggestion_verbose(sp, &msg, suggestion, applicability); + } else { + err.span_suggestion(sp, &msg, suggestion, applicability); + } + } else if let (ty::FnDef(def_id, ..), true) = + (&found.kind(), self.suggest_fn_call(err, expr, expected, found)) + { + if let Some(sp) = self.tcx.hir().span_if_local(*def_id) { + err.span_label(sp, format!("{found} defined here")); + } + } else if !self.check_for_cast(err, expr, found, expected, expected_ty_expr) { + let methods = self.get_conversion_methods(expr.span, expected, found, expr.hir_id); + if !methods.is_empty() { + let mut suggestions = methods.iter() + .filter_map(|conversion_method| { + let receiver_method_ident = expr.method_ident(); + if let Some(method_ident) = receiver_method_ident + && method_ident.name == conversion_method.name + { + return None // do not suggest code that is already there (#53348) + } + + let method_call_list = [sym::to_vec, sym::to_string]; + let mut sugg = if let ExprKind::MethodCall(receiver_method, ..) = expr.kind + && receiver_method.ident.name == sym::clone + && method_call_list.contains(&conversion_method.name) + // If receiver is `.clone()` and found type has one of those methods, + // we guess that the user wants to convert from a slice type (`&[]` or `&str`) + // to an owned type (`Vec` or `String`). These conversions clone internally, + // so we remove the user's `clone` call. 
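+                    // Illustrative example of the rewrite described above (assumed user code,
+                    // not taken from upstream rustc): for `let s: String = "hi".clone();` the
+                    // found type is `&str`, so the `clone` call is replaced and the suggestion
+                    // becomes `let s: String = "hi".to_string();`.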
+ { + vec![( + receiver_method.ident.span, + conversion_method.name.to_string() + )] + } else if expr.precedence().order() + < ExprPrecedence::MethodCall.order() + { + vec![ + (expr.span.shrink_to_lo(), "(".to_string()), + (expr.span.shrink_to_hi(), format!(").{}()", conversion_method.name)), + ] + } else { + vec![(expr.span.shrink_to_hi(), format!(".{}()", conversion_method.name))] + }; + let struct_pat_shorthand_field = self.maybe_get_struct_pattern_shorthand_field(expr); + if let Some(name) = struct_pat_shorthand_field { + sugg.insert( + 0, + (expr.span.shrink_to_lo(), format!("{}: ", name)), + ); + } + Some(sugg) + }) + .peekable(); + if suggestions.peek().is_some() { + err.multipart_suggestions( + "try using a conversion method", + suggestions, + Applicability::MaybeIncorrect, + ); + } + } else if let ty::Adt(found_adt, found_substs) = found.kind() + && self.tcx.is_diagnostic_item(sym::Option, found_adt.did()) + && let ty::Adt(expected_adt, expected_substs) = expected.kind() + && self.tcx.is_diagnostic_item(sym::Option, expected_adt.did()) + && let ty::Ref(_, inner_ty, _) = expected_substs.type_at(0).kind() + && inner_ty.is_str() + { + let ty = found_substs.type_at(0); + let mut peeled = ty; + let mut ref_cnt = 0; + while let ty::Ref(_, inner, _) = peeled.kind() { + peeled = *inner; + ref_cnt += 1; + } + if let ty::Adt(adt, _) = peeled.kind() + && self.tcx.is_diagnostic_item(sym::String, adt.did()) + { + err.span_suggestion_verbose( + expr.span.shrink_to_hi(), + "try converting the passed type into a `&str`", + format!(".map(|x| &*{}x)", "*".repeat(ref_cnt)), + Applicability::MaybeIncorrect, + ); + } + } + } + } + + /// When encountering the expected boxed value allocated in the stack, suggest allocating it + /// in the heap by calling `Box::new()`. + pub(in super::super) fn suggest_boxing_when_appropriate( + &self, + err: &mut Diagnostic, + expr: &hir::Expr<'_>, + expected: Ty<'tcx>, + found: Ty<'tcx>, + ) { + if self.tcx.hir().is_inside_const_context(expr.hir_id) { + // Do not suggest `Box::new` in const context. + return; + } + if !expected.is_box() || found.is_box() { + return; + } + let boxed_found = self.tcx.mk_box(found); + if self.can_coerce(boxed_found, expected) { + err.multipart_suggestion( + "store this in the heap by calling `Box::new`", + vec![ + (expr.span.shrink_to_lo(), "Box::new(".to_string()), + (expr.span.shrink_to_hi(), ")".to_string()), + ], + Applicability::MachineApplicable, + ); + err.note( + "for more on the distinction between the stack and the heap, read \ + https://doc.rust-lang.org/book/ch15-01-box.html, \ + https://doc.rust-lang.org/rust-by-example/std/box.html, and \ + https://doc.rust-lang.org/std/boxed/index.html", + ); + } + } + + /// When encountering a closure that captures variables, where a FnPtr is expected, + /// suggest a non-capturing closure + pub(in super::super) fn suggest_no_capture_closure( + &self, + err: &mut Diagnostic, + expected: Ty<'tcx>, + found: Ty<'tcx>, + ) { + if let (ty::FnPtr(_), ty::Closure(def_id, _)) = (expected.kind(), found.kind()) { + if let Some(upvars) = self.tcx.upvars_mentioned(*def_id) { + // Report upto four upvars being captured to reduce the amount error messages + // reported back to the user. 
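+                // Illustrative (assumed) user code that reaches this path:
+                //     let x = 1;
+                //     let f: fn() -> i32 = || x;
+                // The closure captures `x`, so it cannot be coerced to `fn() -> i32`; the spans
+                // of up to four such captures are collected below and attached to the note.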
+ let spans_and_labels = upvars + .iter() + .take(4) + .map(|(var_hir_id, upvar)| { + let var_name = self.tcx.hir().name(*var_hir_id).to_string(); + let msg = format!("`{}` captured here", var_name); + (upvar.span, msg) + }) + .collect::>(); + + let mut multi_span: MultiSpan = + spans_and_labels.iter().map(|(sp, _)| *sp).collect::>().into(); + for (sp, label) in spans_and_labels { + multi_span.push_span_label(sp, label); + } + err.span_note( + multi_span, + "closures can only be coerced to `fn` types if they do not capture any variables" + ); + } + } + } + + /// When encountering an `impl Future` where `BoxFuture` is expected, suggest `Box::pin`. + #[instrument(skip(self, err))] + pub(in super::super) fn suggest_calling_boxed_future_when_appropriate( + &self, + err: &mut Diagnostic, + expr: &hir::Expr<'_>, + expected: Ty<'tcx>, + found: Ty<'tcx>, + ) -> bool { + // Handle #68197. + + if self.tcx.hir().is_inside_const_context(expr.hir_id) { + // Do not suggest `Box::new` in const context. + return false; + } + let pin_did = self.tcx.lang_items().pin_type(); + // This guards the `unwrap` and `mk_box` below. + if pin_did.is_none() || self.tcx.lang_items().owned_box().is_none() { + return false; + } + let box_found = self.tcx.mk_box(found); + let pin_box_found = self.tcx.mk_lang_item(box_found, LangItem::Pin).unwrap(); + let pin_found = self.tcx.mk_lang_item(found, LangItem::Pin).unwrap(); + match expected.kind() { + ty::Adt(def, _) if Some(def.did()) == pin_did => { + if self.can_coerce(pin_box_found, expected) { + debug!("can coerce {:?} to {:?}, suggesting Box::pin", pin_box_found, expected); + match found.kind() { + ty::Adt(def, _) if def.is_box() => { + err.help("use `Box::pin`"); + } + _ => { + err.multipart_suggestion( + "you need to pin and box this expression", + vec![ + (expr.span.shrink_to_lo(), "Box::pin(".to_string()), + (expr.span.shrink_to_hi(), ")".to_string()), + ], + Applicability::MaybeIncorrect, + ); + } + } + true + } else if self.can_coerce(pin_found, expected) { + match found.kind() { + ty::Adt(def, _) if def.is_box() => { + err.help("use `Box::pin`"); + true + } + _ => false, + } + } else { + false + } + } + ty::Adt(def, _) if def.is_box() && self.can_coerce(box_found, expected) => { + // Check if the parent expression is a call to Pin::new. If it + // is and we were expecting a Box, ergo Pin>, we + // can suggest Box::pin. + let parent = self.tcx.hir().get_parent_node(expr.hir_id); + let Some(Node::Expr(Expr { kind: ExprKind::Call(fn_name, _), .. })) = self.tcx.hir().find(parent) else { + return false; + }; + match fn_name.kind { + ExprKind::Path(QPath::TypeRelative( + hir::Ty { + kind: TyKind::Path(QPath::Resolved(_, Path { res: recv_ty, .. })), + .. + }, + method, + )) if recv_ty.opt_def_id() == pin_did && method.ident.name == sym::new => { + err.span_suggestion( + fn_name.span, + "use `Box::pin` to pin and box this expression", + "Box::pin", + Applicability::MachineApplicable, + ); + true + } + _ => false, + } + } + _ => false, + } + } + + /// A common error is to forget to add a semicolon at the end of a block, e.g., + /// + /// ```compile_fail,E0308 + /// # fn bar_that_returns_u32() -> u32 { 4 } + /// fn foo() { + /// bar_that_returns_u32() + /// } + /// ``` + /// + /// This routine checks if the return expression in a block would make sense on its own as a + /// statement and the return type has been left as default or has been specified as `()`. If so, + /// it suggests adding a semicolon. 
+ /// + /// If the expression is the expression of a closure without block (`|| expr`), a + /// block is needed to be added too (`|| { expr; }`). This is denoted by `needs_block`. + pub fn suggest_missing_semicolon( + &self, + err: &mut Diagnostic, + expression: &'tcx hir::Expr<'tcx>, + expected: Ty<'tcx>, + needs_block: bool, + ) { + if expected.is_unit() { + // `BlockTailExpression` only relevant if the tail expr would be + // useful on its own. + match expression.kind { + ExprKind::Call(..) + | ExprKind::MethodCall(..) + | ExprKind::Loop(..) + | ExprKind::If(..) + | ExprKind::Match(..) + | ExprKind::Block(..) + if expression.can_have_side_effects() + // If the expression is from an external macro, then do not suggest + // adding a semicolon, because there's nowhere to put it. + // See issue #81943. + && !in_external_macro(self.tcx.sess, expression.span) => + { + if needs_block { + err.multipart_suggestion( + "consider using a semicolon here", + vec![ + (expression.span.shrink_to_lo(), "{ ".to_owned()), + (expression.span.shrink_to_hi(), "; }".to_owned()), + ], + Applicability::MachineApplicable, + ); + } else { + err.span_suggestion( + expression.span.shrink_to_hi(), + "consider using a semicolon here", + ";", + Applicability::MachineApplicable, + ); + } + } + _ => (), + } + } + } + + /// A possible error is to forget to add a return type that is needed: + /// + /// ```compile_fail,E0308 + /// # fn bar_that_returns_u32() -> u32 { 4 } + /// fn foo() { + /// bar_that_returns_u32() + /// } + /// ``` + /// + /// This routine checks if the return type is left as default, the method is not part of an + /// `impl` block and that it isn't the `main` method. If so, it suggests setting the return + /// type. + pub(in super::super) fn suggest_missing_return_type( + &self, + err: &mut Diagnostic, + fn_decl: &hir::FnDecl<'_>, + expected: Ty<'tcx>, + found: Ty<'tcx>, + can_suggest: bool, + fn_id: hir::HirId, + ) -> bool { + let found = + self.resolve_numeric_literals_with_default(self.resolve_vars_if_possible(found)); + // Only suggest changing the return type for methods that + // haven't set a return type at all (and aren't `fn main()` or an impl). + match ( + &fn_decl.output, + found.is_suggestable(self.tcx, false), + can_suggest, + expected.is_unit(), + ) { + (&hir::FnRetTy::DefaultReturn(span), true, true, true) => { + err.subdiagnostic(AddReturnTypeSuggestion::Add { span, found }); + true + } + (&hir::FnRetTy::DefaultReturn(span), false, true, true) => { + // FIXME: if `found` could be `impl Iterator` or `impl Fn*`, we should suggest + // that. + err.subdiagnostic(AddReturnTypeSuggestion::MissingHere { span }); + true + } + (&hir::FnRetTy::DefaultReturn(span), _, false, true) => { + // `fn main()` must return `()`, do not suggest changing return type + err.subdiagnostic(ExpectedReturnTypeLabel::Unit { span }); + true + } + // expectation was caused by something else, not the default return + (&hir::FnRetTy::DefaultReturn(_), _, _, false) => false, + (&hir::FnRetTy::Return(ref ty), _, _, _) => { + // Only point to return type if the expected type is the return type, as if they + // are not, the expectation must have been caused by something else. 
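+                // Illustrative (assumed) example: for `fn foo() -> String { 42 }` the expected
+                // `String` comes from the declared `-> String`, so the label below points at the
+                // return type; if the expectation came from somewhere else and does not coerce
+                // to the declared type, no label is added.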
+ debug!("suggest_missing_return_type: return type {:?} node {:?}", ty, ty.kind); + let span = ty.span; + let ty = >::ast_ty_to_ty(self, ty); + debug!("suggest_missing_return_type: return type {:?}", ty); + debug!("suggest_missing_return_type: expected type {:?}", ty); + let bound_vars = self.tcx.late_bound_vars(fn_id); + let ty = Binder::bind_with_vars(ty, bound_vars); + let ty = self.normalize_associated_types_in(span, ty); + let ty = self.tcx.erase_late_bound_regions(ty); + if self.can_coerce(expected, ty) { + err.subdiagnostic(ExpectedReturnTypeLabel::Other { span, expected }); + self.try_suggest_return_impl_trait(err, expected, ty, fn_id); + return true; + } + false + } + } + } + + /// check whether the return type is a generic type with a trait bound + /// only suggest this if the generic param is not present in the arguments + /// if this is true, hint them towards changing the return type to `impl Trait` + /// ```compile_fail,E0308 + /// fn cant_name_it u32>() -> T { + /// || 3 + /// } + /// ``` + fn try_suggest_return_impl_trait( + &self, + err: &mut Diagnostic, + expected: Ty<'tcx>, + found: Ty<'tcx>, + fn_id: hir::HirId, + ) { + // Only apply the suggestion if: + // - the return type is a generic parameter + // - the generic param is not used as a fn param + // - the generic param has at least one bound + // - the generic param doesn't appear in any other bounds where it's not the Self type + // Suggest: + // - Changing the return type to be `impl ` + + debug!("try_suggest_return_impl_trait, expected = {:?}, found = {:?}", expected, found); + + let ty::Param(expected_ty_as_param) = expected.kind() else { return }; + + let fn_node = self.tcx.hir().find(fn_id); + + let Some(hir::Node::Item(hir::Item { + kind: + hir::ItemKind::Fn( + hir::FnSig { decl: hir::FnDecl { inputs: fn_parameters, output: fn_return, .. }, .. }, + hir::Generics { params, predicates, .. }, + _body_id, + ), + .. + })) = fn_node else { return }; + + if params.get(expected_ty_as_param.index as usize).is_none() { + return; + }; + + // get all where BoundPredicates here, because they are used in to cases below + let where_predicates = predicates + .iter() + .filter_map(|p| match p { + WherePredicate::BoundPredicate(hir::WhereBoundPredicate { + bounds, + bounded_ty, + .. 
+ }) => { + // FIXME: Maybe these calls to `ast_ty_to_ty` can be removed (and the ones below) + let ty = >::ast_ty_to_ty(self, bounded_ty); + Some((ty, bounds)) + } + _ => None, + }) + .map(|(ty, bounds)| match ty.kind() { + ty::Param(param_ty) if param_ty == expected_ty_as_param => Ok(Some(bounds)), + // check whether there is any predicate that contains our `T`, like `Option: Send` + _ => match ty.contains(expected) { + true => Err(()), + false => Ok(None), + }, + }) + .collect::, _>>(); + + let Ok(where_predicates) = where_predicates else { return }; + + // now get all predicates in the same types as the where bounds, so we can chain them + let predicates_from_where = + where_predicates.iter().flatten().flat_map(|bounds| bounds.iter()); + + // extract all bounds from the source code using their spans + let all_matching_bounds_strs = predicates_from_where + .filter_map(|bound| match bound { + GenericBound::Trait(_, _) => { + self.tcx.sess.source_map().span_to_snippet(bound.span()).ok() + } + _ => None, + }) + .collect::>(); + + if all_matching_bounds_strs.len() == 0 { + return; + } + + let all_bounds_str = all_matching_bounds_strs.join(" + "); + + let ty_param_used_in_fn_params = fn_parameters.iter().any(|param| { + let ty = >::ast_ty_to_ty(self, param); + matches!(ty.kind(), ty::Param(fn_param_ty_param) if expected_ty_as_param == fn_param_ty_param) + }); + + if ty_param_used_in_fn_params { + return; + } + + err.span_suggestion( + fn_return.span(), + "consider using an impl return type", + format!("impl {}", all_bounds_str), + Applicability::MaybeIncorrect, + ); + } + + pub(in super::super) fn suggest_missing_break_or_return_expr( + &self, + err: &mut Diagnostic, + expr: &'tcx hir::Expr<'tcx>, + fn_decl: &hir::FnDecl<'_>, + expected: Ty<'tcx>, + found: Ty<'tcx>, + id: hir::HirId, + fn_id: hir::HirId, + ) { + if !expected.is_unit() { + return; + } + let found = self.resolve_vars_with_obligations(found); + + let in_loop = self.is_loop(id) + || self.tcx.hir().parent_iter(id).any(|(parent_id, _)| self.is_loop(parent_id)); + + let in_local_statement = self.is_local_statement(id) + || self + .tcx + .hir() + .parent_iter(id) + .any(|(parent_id, _)| self.is_local_statement(parent_id)); + + if in_loop && in_local_statement { + err.multipart_suggestion( + "you might have meant to break the loop with this value", + vec![ + (expr.span.shrink_to_lo(), "break ".to_string()), + (expr.span.shrink_to_hi(), ";".to_string()), + ], + Applicability::MaybeIncorrect, + ); + return; + } + + if let hir::FnRetTy::Return(ty) = fn_decl.output { + let ty = >::ast_ty_to_ty(self, ty); + let bound_vars = self.tcx.late_bound_vars(fn_id); + let ty = self.tcx.erase_late_bound_regions(Binder::bind_with_vars(ty, bound_vars)); + let ty = self.normalize_associated_types_in(expr.span, ty); + let ty = match self.tcx.asyncness(fn_id.owner) { + hir::IsAsync::Async => self + .tcx + .infer_ctxt() + .enter(|infcx| { + infcx.get_impl_future_output_ty(ty).unwrap_or_else(|| { + span_bug!( + fn_decl.output.span(), + "failed to get output type of async function" + ) + }) + }) + .skip_binder(), + hir::IsAsync::NotAsync => ty, + }; + if self.can_coerce(found, ty) { + err.multipart_suggestion( + "you might have meant to return this value", + vec![ + (expr.span.shrink_to_lo(), "return ".to_string()), + (expr.span.shrink_to_hi(), ";".to_string()), + ], + Applicability::MaybeIncorrect, + ); + } + } + } + + pub(in super::super) fn suggest_missing_parentheses( + &self, + err: &mut Diagnostic, + expr: &hir::Expr<'_>, + ) { + let sp = 
self.tcx.sess.source_map().start_point(expr.span); + if let Some(sp) = self.tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp) { + // `{ 42 } &&x` (#61475) or `{ 42 } && if x { 1 } else { 0 }` + self.tcx.sess.parse_sess.expr_parentheses_needed(err, *sp); + } + } + + /// Given an expression type mismatch, peel any `&` expressions until we get to + /// a block expression, and then suggest replacing the braces with square braces + /// if it was possibly mistaken array syntax. + pub(crate) fn suggest_block_to_brackets_peeling_refs( + &self, + diag: &mut Diagnostic, + mut expr: &hir::Expr<'_>, + mut expr_ty: Ty<'tcx>, + mut expected_ty: Ty<'tcx>, + ) { + loop { + match (&expr.kind, expr_ty.kind(), expected_ty.kind()) { + ( + hir::ExprKind::AddrOf(_, _, inner_expr), + ty::Ref(_, inner_expr_ty, _), + ty::Ref(_, inner_expected_ty, _), + ) => { + expr = *inner_expr; + expr_ty = *inner_expr_ty; + expected_ty = *inner_expected_ty; + } + (hir::ExprKind::Block(blk, _), _, _) => { + self.suggest_block_to_brackets(diag, *blk, expr_ty, expected_ty); + break; + } + _ => break, + } + } + } + + /// Suggest wrapping the block in square brackets instead of curly braces + /// in case the block was mistaken array syntax, e.g. `{ 1 }` -> `[ 1 ]`. + pub(crate) fn suggest_block_to_brackets( + &self, + diag: &mut Diagnostic, + blk: &hir::Block<'_>, + blk_ty: Ty<'tcx>, + expected_ty: Ty<'tcx>, + ) { + if let ty::Slice(elem_ty) | ty::Array(elem_ty, _) = expected_ty.kind() { + if self.can_coerce(blk_ty, *elem_ty) + && blk.stmts.is_empty() + && blk.rules == hir::BlockCheckMode::DefaultBlock + { + let source_map = self.tcx.sess.source_map(); + if let Ok(snippet) = source_map.span_to_snippet(blk.span) { + if snippet.starts_with('{') && snippet.ends_with('}') { + diag.multipart_suggestion_verbose( + "to create an array, use square brackets instead of curly braces", + vec![ + ( + blk.span + .shrink_to_lo() + .with_hi(rustc_span::BytePos(blk.span.lo().0 + 1)), + "[".to_string(), + ), + ( + blk.span + .shrink_to_hi() + .with_lo(rustc_span::BytePos(blk.span.hi().0 - 1)), + "]".to_string(), + ), + ], + Applicability::MachineApplicable, + ); + } + } + } + } + } + + fn is_loop(&self, id: hir::HirId) -> bool { + let node = self.tcx.hir().get(id); + matches!(node, Node::Expr(Expr { kind: ExprKind::Loop(..), .. })) + } + + fn is_local_statement(&self, id: hir::HirId) -> bool { + let node = self.tcx.hir().get(id); + matches!(node, Node::Stmt(Stmt { kind: StmtKind::Local(..), .. })) + } + + /// Suggest that `&T` was cloned instead of `T` because `T` does not implement `Clone`, + /// which is a side-effect of autoref. + pub(crate) fn note_type_is_not_clone( + &self, + diag: &mut Diagnostic, + expected_ty: Ty<'tcx>, + found_ty: Ty<'tcx>, + expr: &hir::Expr<'_>, + ) { + let hir::ExprKind::MethodCall(segment, &[ref callee_expr], _) = expr.kind else { return; }; + let Some(clone_trait_did) = self.tcx.lang_items().clone_trait() else { return; }; + let ty::Ref(_, pointee_ty, _) = found_ty.kind() else { return }; + let results = self.typeck_results.borrow(); + // First, look for a `Clone::clone` call + if segment.ident.name == sym::clone + && results.type_dependent_def_id(expr.hir_id).map_or( + false, + |did| { + let assoc_item = self.tcx.associated_item(did); + assoc_item.container == ty::AssocItemContainer::TraitContainer + && assoc_item.container_id(self.tcx) == clone_trait_did + }, + ) + // If that clone call hasn't already dereferenced the self type (i.e. 
don't give this + // diagnostic in cases where we have `(&&T).clone()` and we expect `T`). + && !results.expr_adjustments(callee_expr).iter().any(|adj| matches!(adj.kind, ty::adjustment::Adjust::Deref(..))) + // Check that we're in fact trying to clone into the expected type + && self.can_coerce(*pointee_ty, expected_ty) + // And the expected type doesn't implement `Clone` + && !self.predicate_must_hold_considering_regions(&traits::Obligation { + cause: traits::ObligationCause::dummy(), + param_env: self.param_env, + recursion_depth: 0, + predicate: ty::Binder::dummy(ty::TraitRef { + def_id: clone_trait_did, + substs: self.tcx.mk_substs([expected_ty.into()].iter()), + }) + .without_const() + .to_predicate(self.tcx), + }) + { + diag.span_note( + callee_expr.span, + &format!( + "`{expected_ty}` does not implement `Clone`, so `{found_ty}` was cloned instead" + ), + ); + } + } + + /// A common error is to add an extra semicolon: + /// + /// ```compile_fail,E0308 + /// fn foo() -> usize { + /// 22; + /// } + /// ``` + /// + /// This routine checks if the final statement in a block is an + /// expression with an explicit semicolon whose type is compatible + /// with `expected_ty`. If so, it suggests removing the semicolon. + pub(crate) fn consider_removing_semicolon( + &self, + blk: &'tcx hir::Block<'tcx>, + expected_ty: Ty<'tcx>, + err: &mut Diagnostic, + ) -> bool { + if let Some((span_semi, boxed)) = self.could_remove_semicolon(blk, expected_ty) { + if let StatementAsExpression::NeedsBoxing = boxed { + err.span_suggestion_verbose( + span_semi, + "consider removing this semicolon and boxing the expression", + "", + Applicability::HasPlaceholders, + ); + } else { + err.span_suggestion_short( + span_semi, + "remove this semicolon", + "", + Applicability::MachineApplicable, + ); + } + true + } else { + false + } + } +} diff --git a/compiler/rustc_typeck/src/check/gather_locals.rs b/compiler/rustc_typeck/src/check/gather_locals.rs new file mode 100644 index 000000000..8f34a970f --- /dev/null +++ b/compiler/rustc_typeck/src/check/gather_locals.rs @@ -0,0 +1,160 @@ +use crate::check::{FnCtxt, LocalTy, UserType}; +use rustc_hir as hir; +use rustc_hir::intravisit::{self, Visitor}; +use rustc_hir::PatKind; +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_middle::ty::Ty; +use rustc_span::Span; +use rustc_trait_selection::traits; + +/// A declaration is an abstraction of [hir::Local] and [hir::Let]. +/// +/// It must have a hir_id, as this is how we connect gather_locals to the check functions. +pub(super) struct Declaration<'a> { + pub hir_id: hir::HirId, + pub pat: &'a hir::Pat<'a>, + pub ty: Option<&'a hir::Ty<'a>>, + pub span: Span, + pub init: Option<&'a hir::Expr<'a>>, + pub els: Option<&'a hir::Block<'a>>, +} + +impl<'a> From<&'a hir::Local<'a>> for Declaration<'a> { + fn from(local: &'a hir::Local<'a>) -> Self { + let hir::Local { hir_id, pat, ty, span, init, els, source: _ } = *local; + Declaration { hir_id, pat, ty, span, init, els } + } +} + +impl<'a> From<&'a hir::Let<'a>> for Declaration<'a> { + fn from(let_expr: &'a hir::Let<'a>) -> Self { + let hir::Let { hir_id, pat, ty, span, init } = *let_expr; + Declaration { hir_id, pat, ty, span, init: Some(init), els: None } + } +} + +pub(super) struct GatherLocalsVisitor<'a, 'tcx> { + fcx: &'a FnCtxt<'a, 'tcx>, + // parameters are special cases of patterns, but we want to handle them as + // *distinct* cases. so track when we are hitting a pattern *within* an fn + // parameter. 
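+    // Illustrative (assumed) example: in `fn f((a, b): (u32, u32)) {}` the tuple pattern is
+    // visited while this is `Some(span of the parameter's type)`, so the bindings `a` and `b`
+    // are required to be `Sized` as argument types rather than as ordinary locals.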
+ outermost_fn_param_pat: Option, +} + +impl<'a, 'tcx> GatherLocalsVisitor<'a, 'tcx> { + pub(super) fn new(fcx: &'a FnCtxt<'a, 'tcx>) -> Self { + Self { fcx, outermost_fn_param_pat: None } + } + + fn assign(&mut self, span: Span, nid: hir::HirId, ty_opt: Option>) -> Ty<'tcx> { + match ty_opt { + None => { + // Infer the variable's type. + let var_ty = self.fcx.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::TypeInference, + span, + }); + self.fcx + .locals + .borrow_mut() + .insert(nid, LocalTy { decl_ty: var_ty, revealed_ty: var_ty }); + var_ty + } + Some(typ) => { + // Take type that the user specified. + self.fcx.locals.borrow_mut().insert(nid, typ); + typ.revealed_ty + } + } + } + + /// Allocates a [LocalTy] for a declaration, which may have a type annotation. If it does have + /// a type annotation, then the LocalTy stored will be the resolved type. This may be found + /// again during type checking by querying [FnCtxt::local_ty] for the same hir_id. + fn declare(&mut self, decl: Declaration<'tcx>) { + let local_ty = match decl.ty { + Some(ref ty) => { + let o_ty = self.fcx.to_ty(&ty); + + let c_ty = self.fcx.inh.infcx.canonicalize_user_type_annotation(UserType::Ty(o_ty)); + debug!("visit_local: ty.hir_id={:?} o_ty={:?} c_ty={:?}", ty.hir_id, o_ty, c_ty); + self.fcx + .typeck_results + .borrow_mut() + .user_provided_types_mut() + .insert(ty.hir_id, c_ty); + + Some(LocalTy { decl_ty: o_ty, revealed_ty: o_ty }) + } + None => None, + }; + self.assign(decl.span, decl.hir_id, local_ty); + + debug!( + "local variable {:?} is assigned type {}", + decl.pat, + self.fcx.ty_to_string(self.fcx.locals.borrow().get(&decl.hir_id).unwrap().decl_ty) + ); + } +} + +impl<'a, 'tcx> Visitor<'tcx> for GatherLocalsVisitor<'a, 'tcx> { + // Add explicitly-declared locals. + fn visit_local(&mut self, local: &'tcx hir::Local<'tcx>) { + self.declare(local.into()); + intravisit::walk_local(self, local) + } + + fn visit_let_expr(&mut self, let_expr: &'tcx hir::Let<'tcx>) { + self.declare(let_expr.into()); + intravisit::walk_let_expr(self, let_expr); + } + + fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) { + let old_outermost_fn_param_pat = self.outermost_fn_param_pat.replace(param.ty_span); + intravisit::walk_param(self, param); + self.outermost_fn_param_pat = old_outermost_fn_param_pat; + } + + // Add pattern bindings. + fn visit_pat(&mut self, p: &'tcx hir::Pat<'tcx>) { + if let PatKind::Binding(_, _, ident, _) = p.kind { + let var_ty = self.assign(p.span, p.hir_id, None); + + if let Some(ty_span) = self.outermost_fn_param_pat { + if !self.fcx.tcx.features().unsized_fn_params { + self.fcx.require_type_is_sized( + var_ty, + p.span, + traits::SizedArgumentType(Some(ty_span)), + ); + } + } else { + if !self.fcx.tcx.features().unsized_locals { + self.fcx.require_type_is_sized(var_ty, p.span, traits::VariableType(p.hir_id)); + } + } + + debug!( + "pattern binding {} is assigned to {} with type {:?}", + ident, + self.fcx.ty_to_string(self.fcx.locals.borrow().get(&p.hir_id).unwrap().decl_ty), + var_ty + ); + } + let old_outermost_fn_param_pat = self.outermost_fn_param_pat.take(); + intravisit::walk_pat(self, p); + self.outermost_fn_param_pat = old_outermost_fn_param_pat; + } + + // Don't descend into the bodies of nested closures. 
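+    // Illustrative (assumed) example: in `let f = |x: u32| { let y = x; y };` the closure's own
+    // locals (`x`, `y`) are gathered when the closure body itself is type-checked, not while
+    // gathering the locals of the enclosing function, which is why `visit_fn` below is a no-op.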
+ fn visit_fn( + &mut self, + _: intravisit::FnKind<'tcx>, + _: &'tcx hir::FnDecl<'tcx>, + _: hir::BodyId, + _: Span, + _: hir::HirId, + ) { + } +} diff --git a/compiler/rustc_typeck/src/check/generator_interior.rs b/compiler/rustc_typeck/src/check/generator_interior.rs new file mode 100644 index 000000000..d4f800149 --- /dev/null +++ b/compiler/rustc_typeck/src/check/generator_interior.rs @@ -0,0 +1,632 @@ +//! This calculates the types which has storage which lives across a suspension point in a +//! generator from the perspective of typeck. The actual types used at runtime +//! is calculated in `rustc_mir_transform::generator` and may be a subset of the +//! types computed here. + +use self::drop_ranges::DropRanges; +use super::FnCtxt; +use rustc_data_structures::fx::{FxHashSet, FxIndexSet}; +use rustc_errors::pluralize; +use rustc_hir as hir; +use rustc_hir::def::{CtorKind, DefKind, Res}; +use rustc_hir::def_id::DefId; +use rustc_hir::hir_id::HirIdSet; +use rustc_hir::intravisit::{self, Visitor}; +use rustc_hir::{Arm, Expr, ExprKind, Guard, HirId, Pat, PatKind}; +use rustc_middle::middle::region::{self, Scope, ScopeData, YieldData}; +use rustc_middle::ty::{self, RvalueScopes, Ty, TyCtxt, TypeVisitable}; +use rustc_span::symbol::sym; +use rustc_span::Span; +use tracing::debug; + +mod drop_ranges; + +struct InteriorVisitor<'a, 'tcx> { + fcx: &'a FnCtxt<'a, 'tcx>, + region_scope_tree: &'a region::ScopeTree, + types: FxIndexSet>, + rvalue_scopes: &'a RvalueScopes, + expr_count: usize, + kind: hir::GeneratorKind, + prev_unresolved_span: Option, + linted_values: HirIdSet, + drop_ranges: DropRanges, +} + +impl<'a, 'tcx> InteriorVisitor<'a, 'tcx> { + fn record( + &mut self, + ty: Ty<'tcx>, + hir_id: HirId, + scope: Option, + expr: Option<&'tcx Expr<'tcx>>, + source_span: Span, + ) { + use rustc_span::DUMMY_SP; + + let ty = self.fcx.resolve_vars_if_possible(ty); + + debug!( + "attempting to record type ty={:?}; hir_id={:?}; scope={:?}; expr={:?}; source_span={:?}; expr_count={:?}", + ty, hir_id, scope, expr, source_span, self.expr_count, + ); + + let live_across_yield = scope + .map(|s| { + self.region_scope_tree.yield_in_scope(s).and_then(|yield_data| { + // If we are recording an expression that is the last yield + // in the scope, or that has a postorder CFG index larger + // than the one of all of the yields, then its value can't + // be storage-live (and therefore live) at any of the yields. + // + // See the mega-comment at `yield_in_scope` for a proof. + + yield_data + .iter() + .find(|yield_data| { + debug!( + "comparing counts yield: {} self: {}, source_span = {:?}", + yield_data.expr_and_pat_count, self.expr_count, source_span + ); + + if self.fcx.sess().opts.unstable_opts.drop_tracking + && self + .drop_ranges + .is_dropped_at(hir_id, yield_data.expr_and_pat_count) + { + debug!("value is dropped at yield point; not recording"); + return false; + } + + // If it is a borrowing happening in the guard, + // it needs to be recorded regardless because they + // do live across this yield point. 
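+                            // Illustrative (assumed) example: in
+                            //     async fn f() { let x = String::new(); g().await; drop(x); }
+                            // `x` is created before the `.await` (a yield point) and used after
+                            // it, so its type must be recorded as part of the generator interior.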
+ yield_data.expr_and_pat_count >= self.expr_count + }) + .cloned() + }) + }) + .unwrap_or_else(|| { + Some(YieldData { span: DUMMY_SP, expr_and_pat_count: 0, source: self.kind.into() }) + }); + + if let Some(yield_data) = live_across_yield { + debug!( + "type in expr = {:?}, scope = {:?}, type = {:?}, count = {}, yield_span = {:?}", + expr, scope, ty, self.expr_count, yield_data.span + ); + + if let Some((unresolved_type, unresolved_type_span)) = + self.fcx.unresolved_type_vars(&ty) + { + // If unresolved type isn't a ty_var then unresolved_type_span is None + let span = self + .prev_unresolved_span + .unwrap_or_else(|| unresolved_type_span.unwrap_or(source_span)); + + // If we encounter an int/float variable, then inference fallback didn't + // finish due to some other error. Don't emit spurious additional errors. + if let ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(_)) = + unresolved_type.kind() + { + self.fcx + .tcx + .sess + .delay_span_bug(span, &format!("Encountered var {:?}", unresolved_type)); + } else { + let note = format!( + "the type is part of the {} because of this {}", + self.kind, yield_data.source + ); + + self.fcx + .need_type_info_err_in_generator(self.kind, span, unresolved_type) + .span_note(yield_data.span, &*note) + .emit(); + } + } else { + // Insert the type into the ordered set. + let scope_span = scope.map(|s| s.span(self.fcx.tcx, self.region_scope_tree)); + + if !self.linted_values.contains(&hir_id) { + check_must_not_suspend_ty( + self.fcx, + ty, + hir_id, + SuspendCheckData { + expr, + source_span, + yield_span: yield_data.span, + plural_len: 1, + ..Default::default() + }, + ); + self.linted_values.insert(hir_id); + } + + self.types.insert(ty::GeneratorInteriorTypeCause { + span: source_span, + ty, + scope_span, + yield_span: yield_data.span, + expr: expr.map(|e| e.hir_id), + }); + } + } else { + debug!( + "no type in expr = {:?}, count = {:?}, span = {:?}", + expr, + self.expr_count, + expr.map(|e| e.span) + ); + if let Some((unresolved_type, unresolved_type_span)) = + self.fcx.unresolved_type_vars(&ty) + { + debug!( + "remained unresolved_type = {:?}, unresolved_type_span: {:?}", + unresolved_type, unresolved_type_span + ); + self.prev_unresolved_span = unresolved_type_span; + } + } + } +} + +pub fn resolve_interior<'a, 'tcx>( + fcx: &'a FnCtxt<'a, 'tcx>, + def_id: DefId, + body_id: hir::BodyId, + interior: Ty<'tcx>, + kind: hir::GeneratorKind, +) { + let body = fcx.tcx.hir().body(body_id); + let typeck_results = fcx.inh.typeck_results.borrow(); + let mut visitor = InteriorVisitor { + fcx, + types: FxIndexSet::default(), + region_scope_tree: fcx.tcx.region_scope_tree(def_id), + rvalue_scopes: &typeck_results.rvalue_scopes, + expr_count: 0, + kind, + prev_unresolved_span: None, + linted_values: <_>::default(), + drop_ranges: drop_ranges::compute_drop_ranges(fcx, def_id, body), + }; + intravisit::walk_body(&mut visitor, body); + + // Check that we visited the same amount of expressions as the RegionResolutionVisitor + let region_expr_count = fcx.tcx.region_scope_tree(def_id).body_expr_count(body_id).unwrap(); + assert_eq!(region_expr_count, visitor.expr_count); + + // The types are already kept in insertion order. + let types = visitor.types; + + // The types in the generator interior contain lifetimes local to the generator itself, + // which should not be exposed outside of the generator. Therefore, we replace these + // lifetimes with existentially-bound lifetimes, which reflect the exact value of the + // lifetimes not being known by users. 
+ // + // These lifetimes are used in auto trait impl checking (for example, + // if a Sync generator contains an &'α T, we need to check whether &'α T: Sync), + // so knowledge of the exact relationships between them isn't particularly important. + + debug!("types in generator {:?}, span = {:?}", types, body.value.span); + + let mut counter = 0; + let mut captured_tys = FxHashSet::default(); + let type_causes: Vec<_> = types + .into_iter() + .filter_map(|mut cause| { + // Erase regions and canonicalize late-bound regions to deduplicate as many types as we + // can. + let erased = fcx.tcx.erase_regions(cause.ty); + if captured_tys.insert(erased) { + // Replace all regions inside the generator interior with late bound regions. + // Note that each region slot in the types gets a new fresh late bound region, + // which means that none of the regions inside relate to any other, even if + // typeck had previously found constraints that would cause them to be related. + let folded = fcx.tcx.fold_regions(erased, |_, current_depth| { + let br = ty::BoundRegion { + var: ty::BoundVar::from_u32(counter), + kind: ty::BrAnon(counter), + }; + let r = fcx.tcx.mk_region(ty::ReLateBound(current_depth, br)); + counter += 1; + r + }); + + cause.ty = folded; + Some(cause) + } else { + None + } + }) + .collect(); + + // Extract type components to build the witness type. + let type_list = fcx.tcx.mk_type_list(type_causes.iter().map(|cause| cause.ty)); + let bound_vars = fcx.tcx.mk_bound_variable_kinds( + (0..counter).map(|i| ty::BoundVariableKind::Region(ty::BrAnon(i))), + ); + let witness = + fcx.tcx.mk_generator_witness(ty::Binder::bind_with_vars(type_list, bound_vars.clone())); + + drop(typeck_results); + // Store the generator types and spans into the typeck results for this generator. + fcx.inh.typeck_results.borrow_mut().generator_interior_types = + ty::Binder::bind_with_vars(type_causes, bound_vars); + + debug!( + "types in generator after region replacement {:?}, span = {:?}", + witness, body.value.span + ); + + // Unify the type variable inside the generator with the new witness + match fcx.at(&fcx.misc(body.value.span), fcx.param_env).eq(interior, witness) { + Ok(ok) => fcx.register_infer_ok_obligations(ok), + _ => bug!(), + } +} + +// This visitor has to have the same visit_expr calls as RegionResolutionVisitor in +// librustc_middle/middle/region.rs since `expr_count` is compared against the results +// there. +impl<'a, 'tcx> Visitor<'tcx> for InteriorVisitor<'a, 'tcx> { + fn visit_arm(&mut self, arm: &'tcx Arm<'tcx>) { + let Arm { guard, pat, body, .. } = arm; + self.visit_pat(pat); + if let Some(ref g) = guard { + { + // If there is a guard, we need to count all variables bound in the pattern as + // borrowed for the entire guard body, regardless of whether they are accessed. + // We do this by walking the pattern bindings and recording `&T` for any `x: T` + // that is bound. + + struct ArmPatCollector<'a, 'b, 'tcx> { + interior_visitor: &'a mut InteriorVisitor<'b, 'tcx>, + scope: Scope, + } + + impl<'a, 'b, 'tcx> Visitor<'tcx> for ArmPatCollector<'a, 'b, 'tcx> { + fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) { + intravisit::walk_pat(self, pat); + if let PatKind::Binding(_, id, ident, ..) = pat.kind { + let ty = + self.interior_visitor.fcx.typeck_results.borrow().node_type(id); + let tcx = self.interior_visitor.fcx.tcx; + let ty = tcx.mk_ref( + // Use `ReErased` as `resolve_interior` is going to replace all the + // regions anyway. 
+ tcx.mk_region(ty::ReErased), + ty::TypeAndMut { ty, mutbl: hir::Mutability::Not }, + ); + self.interior_visitor.record( + ty, + id, + Some(self.scope), + None, + ident.span, + ); + } + } + } + + ArmPatCollector { + interior_visitor: self, + scope: Scope { id: g.body().hir_id.local_id, data: ScopeData::Node }, + } + .visit_pat(pat); + } + + match g { + Guard::If(ref e) => { + self.visit_expr(e); + } + Guard::IfLet(ref l) => { + self.visit_let_expr(l); + } + } + } + self.visit_expr(body); + } + + fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) { + intravisit::walk_pat(self, pat); + + self.expr_count += 1; + + if let PatKind::Binding(..) = pat.kind { + let scope = self.region_scope_tree.var_scope(pat.hir_id.local_id).unwrap(); + let ty = self.fcx.typeck_results.borrow().pat_ty(pat); + self.record(ty, pat.hir_id, Some(scope), None, pat.span); + } + } + + fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) { + match &expr.kind { + ExprKind::Call(callee, args) => match &callee.kind { + ExprKind::Path(qpath) => { + let res = self.fcx.typeck_results.borrow().qpath_res(qpath, callee.hir_id); + match res { + // Direct calls never need to keep the callee `ty::FnDef` + // ZST in a temporary, so skip its type, just in case it + // can significantly complicate the generator type. + Res::Def( + DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(_, CtorKind::Fn), + _, + ) => { + // NOTE(eddyb) this assumes a path expression has + // no nested expressions to keep track of. + self.expr_count += 1; + + // Record the rest of the call expression normally. + for arg in *args { + self.visit_expr(arg); + } + } + _ => intravisit::walk_expr(self, expr), + } + } + _ => intravisit::walk_expr(self, expr), + }, + _ => intravisit::walk_expr(self, expr), + } + + self.expr_count += 1; + + debug!("is_borrowed_temporary: {:?}", self.drop_ranges.is_borrowed_temporary(expr)); + + let ty = self.fcx.typeck_results.borrow().expr_ty_adjusted_opt(expr); + let may_need_drop = |ty: Ty<'tcx>| { + // Avoid ICEs in needs_drop. + let ty = self.fcx.resolve_vars_if_possible(ty); + let ty = self.fcx.tcx.erase_regions(ty); + if ty.needs_infer() { + return true; + } + ty.needs_drop(self.fcx.tcx, self.fcx.param_env) + }; + + // Typically, the value produced by an expression is consumed by its parent in some way, + // so we only have to check if the parent contains a yield (note that the parent may, for + // example, store the value into a local variable, but then we already consider local + // variables to be live across their scope). + // + // However, in the case of temporary values, we are going to store the value into a + // temporary on the stack that is live for the current temporary scope and then return a + // reference to it. That value may be live across the entire temporary scope. + // + // There's another subtlety: if the type has an observable drop, it must be dropped after + // the yield, even if it's not borrowed or referenced after the yield. Ideally this would + // *only* happen for types with observable drop, not all types which wrap them, but that + // doesn't match the behavior of MIR borrowck and causes ICEs. See the FIXME comment in + // src/test/ui/generator/drop-tracking-parent-expression.rs. 
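+        // Illustrative (assumed) example: in `async { f(&make_guard()).await }` the temporary
+        // returned by `make_guard()` is kept in a stack slot for the enclosing temporary scope,
+        // so it is treated as live across the `.await` even though it is never named.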
+ let scope = if self.drop_ranges.is_borrowed_temporary(expr) + || ty.map_or(true, |ty| { + let needs_drop = may_need_drop(ty); + debug!(?needs_drop, ?ty); + needs_drop + }) { + self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id) + } else { + debug!("parent_node: {:?}", self.fcx.tcx.hir().find_parent_node(expr.hir_id)); + match self.fcx.tcx.hir().find_parent_node(expr.hir_id) { + Some(parent) => Some(Scope { id: parent.local_id, data: ScopeData::Node }), + None => { + self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id) + } + } + }; + + // If there are adjustments, then record the final type -- + // this is the actual value that is being produced. + if let Some(adjusted_ty) = ty { + self.record(adjusted_ty, expr.hir_id, scope, Some(expr), expr.span); + } + + // Also record the unadjusted type (which is the only type if + // there are no adjustments). The reason for this is that the + // unadjusted value is sometimes a "temporary" that would wind + // up in a MIR temporary. + // + // As an example, consider an expression like `vec![].push(x)`. + // Here, the `vec![]` would wind up MIR stored into a + // temporary variable `t` which we can borrow to invoke + // `>::push(&mut t, x)`. + // + // Note that an expression can have many adjustments, and we + // are just ignoring those intermediate types. This is because + // those intermediate values are always linearly "consumed" by + // the other adjustments, and hence would never be directly + // captured in the MIR. + // + // (Note that this partly relies on the fact that the `Deref` + // traits always return references, which means their content + // can be reborrowed without needing to spill to a temporary. + // If this were not the case, then we could conceivably have + // to create intermediate temporaries.) + // + // The type table might not have information for this expression + // if it is in a malformed scope. (#66387) + if let Some(ty) = self.fcx.typeck_results.borrow().expr_ty_opt(expr) { + self.record(ty, expr.hir_id, scope, Some(expr), expr.span); + } else { + self.fcx.tcx.sess.delay_span_bug(expr.span, "no type for node"); + } + } +} + +#[derive(Default)] +pub struct SuspendCheckData<'a, 'tcx> { + expr: Option<&'tcx Expr<'tcx>>, + source_span: Span, + yield_span: Span, + descr_pre: &'a str, + descr_post: &'a str, + plural_len: usize, +} + +// Returns whether it emitted a diagnostic or not +// Note that this fn and the proceeding one are based on the code +// for creating must_use diagnostics +// +// Note that this technique was chosen over things like a `Suspend` marker trait +// as it is simpler and has precedent in the compiler +pub fn check_must_not_suspend_ty<'tcx>( + fcx: &FnCtxt<'_, 'tcx>, + ty: Ty<'tcx>, + hir_id: HirId, + data: SuspendCheckData<'_, 'tcx>, +) -> bool { + if ty.is_unit() + // FIXME: should this check `is_ty_uninhabited_from`. This query is not available in this stage + // of typeck (before ReVar and RePlaceholder are removed), but may remove noise, like in + // `must_use` + // || fcx.tcx.is_ty_uninhabited_from(fcx.tcx.parent_module(hir_id).to_def_id(), ty, fcx.param_env) + { + return false; + } + + let plural_suffix = pluralize!(data.plural_len); + + match *ty.kind() { + ty::Adt(..) 
if ty.is_box() => { + let boxed_ty = ty.boxed_ty(); + let descr_pre = &format!("{}boxed ", data.descr_pre); + check_must_not_suspend_ty(fcx, boxed_ty, hir_id, SuspendCheckData { descr_pre, ..data }) + } + ty::Adt(def, _) => check_must_not_suspend_def(fcx.tcx, def.did(), hir_id, data), + // FIXME: support adding the attribute to TAITs + ty::Opaque(def, _) => { + let mut has_emitted = false; + for &(predicate, _) in fcx.tcx.explicit_item_bounds(def) { + // We only look at the `DefId`, so it is safe to skip the binder here. + if let ty::PredicateKind::Trait(ref poly_trait_predicate) = + predicate.kind().skip_binder() + { + let def_id = poly_trait_predicate.trait_ref.def_id; + let descr_pre = &format!("{}implementer{} of ", data.descr_pre, plural_suffix); + if check_must_not_suspend_def( + fcx.tcx, + def_id, + hir_id, + SuspendCheckData { descr_pre, ..data }, + ) { + has_emitted = true; + break; + } + } + } + has_emitted + } + ty::Dynamic(binder, _) => { + let mut has_emitted = false; + for predicate in binder.iter() { + if let ty::ExistentialPredicate::Trait(ref trait_ref) = predicate.skip_binder() { + let def_id = trait_ref.def_id; + let descr_post = &format!(" trait object{}{}", plural_suffix, data.descr_post); + if check_must_not_suspend_def( + fcx.tcx, + def_id, + hir_id, + SuspendCheckData { descr_post, ..data }, + ) { + has_emitted = true; + break; + } + } + } + has_emitted + } + ty::Tuple(fields) => { + let mut has_emitted = false; + let comps = match data.expr.map(|e| &e.kind) { + Some(hir::ExprKind::Tup(comps)) => { + debug_assert_eq!(comps.len(), fields.len()); + Some(comps) + } + _ => None, + }; + for (i, ty) in fields.iter().enumerate() { + let descr_post = &format!(" in tuple element {i}"); + let span = comps.and_then(|c| c.get(i)).map(|e| e.span).unwrap_or(data.source_span); + if check_must_not_suspend_ty( + fcx, + ty, + hir_id, + SuspendCheckData { + descr_post, + expr: comps.and_then(|comps| comps.get(i)), + source_span: span, + ..data + }, + ) { + has_emitted = true; + } + } + has_emitted + } + ty::Array(ty, len) => { + let descr_pre = &format!("{}array{} of ", data.descr_pre, plural_suffix); + check_must_not_suspend_ty( + fcx, + ty, + hir_id, + SuspendCheckData { + descr_pre, + plural_len: len.try_eval_usize(fcx.tcx, fcx.param_env).unwrap_or(0) as usize + + 1, + ..data + }, + ) + } + _ => false, + } +} + +fn check_must_not_suspend_def( + tcx: TyCtxt<'_>, + def_id: DefId, + hir_id: HirId, + data: SuspendCheckData<'_, '_>, +) -> bool { + if let Some(attr) = tcx.get_attr(def_id, sym::must_not_suspend) { + tcx.struct_span_lint_hir( + rustc_session::lint::builtin::MUST_NOT_SUSPEND, + hir_id, + data.source_span, + |lint| { + let msg = format!( + "{}`{}`{} held across a suspend point, but should not be", + data.descr_pre, + tcx.def_path_str(def_id), + data.descr_post, + ); + let mut err = lint.build(&msg); + + // add span pointing to the offending yield/await + err.span_label(data.yield_span, "the value is held across this suspend point"); + + // Add optional reason note + if let Some(note) = attr.value_str() { + // FIXME(guswynn): consider formatting this better + err.span_note(data.source_span, note.as_str()); + } + + // Add some quick suggestions on what to do + // FIXME: can `drop` work as a suggestion here as well? + err.span_help( + data.source_span, + "consider using a block (`{ ... 
}`) \ + to shrink the value's scope, ending before the suspend point", + ); + + err.emit(); + }, + ); + + true + } else { + false + } +} diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges.rs b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges.rs new file mode 100644 index 000000000..518cd7342 --- /dev/null +++ b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges.rs @@ -0,0 +1,309 @@ +//! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped +//! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the +//! generator type. See `InteriorVisitor::record` for where the results of this analysis are used. +//! +//! There are three phases to this analysis: +//! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed. +//! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized, +//! and also build a control flow graph. +//! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through +//! the CFG and find the exact points where we know a value is definitely dropped. +//! +//! The end result is a data structure that maps the post-order index of each node in the HIR tree +//! to a set of values that are known to be dropped at that location. + +use self::cfg_build::build_control_flow_graph; +use self::record_consumed_borrow::find_consumed_and_borrowed; +use crate::check::FnCtxt; +use hir::def_id::DefId; +use hir::{Body, HirId, HirIdMap, Node}; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_hir as hir; +use rustc_index::bit_set::BitSet; +use rustc_index::vec::IndexVec; +use rustc_middle::hir::map::Map; +use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId}; +use rustc_middle::ty; +use std::collections::BTreeMap; +use std::fmt::Debug; + +mod cfg_build; +mod cfg_propagate; +mod cfg_visualize; +mod record_consumed_borrow; + +pub fn compute_drop_ranges<'a, 'tcx>( + fcx: &'a FnCtxt<'a, 'tcx>, + def_id: DefId, + body: &'tcx Body<'tcx>, +) -> DropRanges { + if fcx.sess().opts.unstable_opts.drop_tracking { + let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body); + + let typeck_results = &fcx.typeck_results.borrow(); + let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0); + let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph( + fcx.tcx.hir(), + fcx.tcx, + typeck_results, + consumed_borrowed_places, + body, + num_exprs, + ); + + drop_ranges.propagate_to_fixpoint(); + + debug!("borrowed_temporaries = {borrowed_temporaries:?}"); + DropRanges { + tracked_value_map: drop_ranges.tracked_value_map, + nodes: drop_ranges.nodes, + borrowed_temporaries: Some(borrowed_temporaries), + } + } else { + // If drop range tracking is not enabled, skip all the analysis and produce an + // empty set of DropRanges. + DropRanges { + tracked_value_map: FxHashMap::default(), + nodes: IndexVec::new(), + borrowed_temporaries: None, + } + } +} + +/// Applies `f` to consumable node in the HIR subtree pointed to by `place`. +/// +/// This includes the place itself, and if the place is a reference to a local +/// variable then `f` is also called on the HIR node for that variable as well. +/// +/// For example, if `place` points to `foo()`, then `f` is called once for the +/// result of `foo`. 
On the other hand, if `place` points to `x` then `f` will +/// be called both on the `ExprKind::Path` node that represents the expression +/// as well as the HirId of the local `x` itself. +fn for_each_consumable<'tcx>(hir: Map<'tcx>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) { + f(place); + let node = hir.find(place.hir_id()); + if let Some(Node::Expr(expr)) = node { + match expr.kind { + hir::ExprKind::Path(hir::QPath::Resolved( + _, + hir::Path { res: hir::def::Res::Local(hir_id), .. }, + )) => { + f(TrackedValue::Variable(*hir_id)); + } + _ => (), + } + } +} + +rustc_index::newtype_index! { + pub struct PostOrderId { + DEBUG_FORMAT = "id({})", + } +} + +rustc_index::newtype_index! { + pub struct TrackedValueIndex { + DEBUG_FORMAT = "hidx({})", + } +} + +/// Identifies a value whose drop state we need to track. +#[derive(PartialEq, Eq, Hash, Clone, Copy)] +enum TrackedValue { + /// Represents a named variable, such as a let binding, parameter, or upvar. + /// + /// The HirId points to the variable's definition site. + Variable(HirId), + /// A value produced as a result of an expression. + /// + /// The HirId points to the expression that returns this value. + Temporary(HirId), +} + +impl Debug for TrackedValue { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + ty::tls::with_opt(|opt_tcx| { + if let Some(tcx) = opt_tcx { + write!(f, "{}", tcx.hir().node_to_string(self.hir_id())) + } else { + match self { + Self::Variable(hir_id) => write!(f, "Variable({:?})", hir_id), + Self::Temporary(hir_id) => write!(f, "Temporary({:?})", hir_id), + } + } + }) + } +} + +impl TrackedValue { + fn hir_id(&self) -> HirId { + match self { + TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id, + } + } + + fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self { + match place_with_id.place.base { + PlaceBase::Rvalue | PlaceBase::StaticItem => { + TrackedValue::Temporary(place_with_id.hir_id) + } + PlaceBase::Local(hir_id) + | PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => { + TrackedValue::Variable(hir_id) + } + } + } +} + +/// Represents a reason why we might not be able to convert a HirId or Place +/// into a tracked value. +#[derive(Debug)] +enum TrackedValueConversionError { + /// Place projects are not currently supported. + /// + /// The reasoning around these is kind of subtle, so we choose to be more + /// conservative around these for now. There is no reason in theory we + /// cannot support these, we just have not implemented it yet. 
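+    // Illustrative (assumed) example: `drop(a.0)` consumes the projected place `a.0`; such
+    // projections are not tracked, only whole variables like `a` and whole temporaries.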
+    PlaceProjectionsNotSupported,
+}
+
+impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue {
+    type Error = TrackedValueConversionError;
+
+    fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> {
+        if !place_with_id.place.projections.is_empty() {
+            debug!(
+                "TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.",
+                place_with_id
+            );
+            return Err(TrackedValueConversionError::PlaceProjectionsNotSupported);
+        }
+
+        Ok(TrackedValue::from_place_with_projections_allowed(place_with_id))
+    }
+}
+
+pub struct DropRanges {
+    tracked_value_map: FxHashMap<TrackedValue, TrackedValueIndex>,
+    nodes: IndexVec<PostOrderId, NodeInfo>,
+    borrowed_temporaries: Option<FxHashSet<HirId>>,
+}
+
+impl DropRanges {
+    pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool {
+        self.tracked_value_map
+            .get(&TrackedValue::Temporary(hir_id))
+            .or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id)))
+            .cloned()
+            .map_or(false, |tracked_value_id| {
+                self.expect_node(location.into()).drop_state.contains(tracked_value_id)
+            })
+    }
+
+    pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool {
+        if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true }
+    }
+
+    /// Returns a reference to the NodeInfo for a node, panicking if it does not exist
+    fn expect_node(&self, id: PostOrderId) -> &NodeInfo {
+        &self.nodes[id]
+    }
+}
+
+/// Tracks information needed to compute drop ranges.
+struct DropRangesBuilder {
+    /// The core of DropRangesBuilder is a set of nodes, which each represent
+    /// one expression. We primarily refer to them by their index in a
+    /// post-order traversal of the HIR tree, since this is what
+    /// generator_interior uses to talk about yield positions.
+    ///
+    /// This IndexVec keeps the relevant details for each node. See the
+    /// NodeInfo struct for more details, but this information includes things
+    /// such as the set of control-flow successors, which variables are dropped
+    /// or reinitialized, and whether each variable has been inferred to be
+    /// known-dropped or potentially reinitialized at each point.
+    nodes: IndexVec<PostOrderId, NodeInfo>,
+    /// We refer to values whose drop state we are tracking by the HirId of
+    /// where they are defined. Within a NodeInfo, however, we store the
+    /// drop-state in a bit vector indexed by a TrackedValueIndex
+    /// (see NodeInfo::drop_state). The tracked_value_map field stores the mapping
+    /// from tracked values to the TrackedValueIndex that is used to represent that
+    /// value in the bitvector.
+    tracked_value_map: FxHashMap<TrackedValue, TrackedValueIndex>,
+
+    /// When building the control flow graph, we don't always know the
+    /// post-order index of the target node at the point we encounter it.
+    /// For example, this happens with break and continue. In those cases,
+    /// we store a pair of the PostOrderId of the source and the HirId
+    /// of the target. Once we have gathered all of these edges, we make a
+    /// pass over the set of deferred edges (see process_deferred_edges in
+    /// cfg_build.rs), look up the PostOrderId for the target (since now the
+    /// post-order index for all nodes is known), and add missing control flow
+    /// edges.
+    deferred_edges: Vec<(PostOrderId, HirId)>,
+    /// This maps HirIds of expressions to their post-order index. It is
+    /// used in process_deferred_edges to correctly add back-edges.
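+    // Illustrative (assumed) example: a `break` inside `loop { if c { break; } }` produces a
+    // deferred edge keyed by the loop's HirId, because the loop's post-order index is only
+    // known once the whole loop has been visited; this map is what resolves that HirId to a
+    // PostOrderId in process_deferred_edges.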
+ post_order_map: HirIdMap, +} + +impl Debug for DropRangesBuilder { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DropRanges") + .field("hir_id_map", &self.tracked_value_map) + .field("post_order_maps", &self.post_order_map) + .field("nodes", &self.nodes.iter_enumerated().collect::>()) + .finish() + } +} + +/// DropRanges keeps track of what values are definitely dropped at each point in the code. +/// +/// Values of interest are defined by the hir_id of their place. Locations in code are identified +/// by their index in the post-order traversal. At its core, DropRanges maps +/// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely +/// dropped at the point of the node identified by post_order_id. +impl DropRangesBuilder { + /// Returns the number of values (hir_ids) that are tracked + fn num_values(&self) -> usize { + self.tracked_value_map.len() + } + + fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo { + let size = self.num_values(); + self.nodes.ensure_contains_elem(id, || NodeInfo::new(size)); + &mut self.nodes[id] + } + + fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) { + trace!("adding control edge from {:?} to {:?}", from, to); + self.node_mut(from).successors.push(to); + } +} + +#[derive(Debug)] +struct NodeInfo { + /// IDs of nodes that can follow this one in the control flow + /// + /// If the vec is empty, then control proceeds to the next node. + successors: Vec, + + /// List of hir_ids that are dropped by this node. + drops: Vec, + + /// List of hir_ids that are reinitialized by this node. + reinits: Vec, + + /// Set of values that are definitely dropped at this point. + drop_state: BitSet, +} + +impl NodeInfo { + fn new(num_values: usize) -> Self { + Self { + successors: vec![], + drops: vec![], + reinits: vec![], + drop_state: BitSet::new_filled(num_values), + } + } +} diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_build.rs b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_build.rs new file mode 100644 index 000000000..a2c23db16 --- /dev/null +++ b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_build.rs @@ -0,0 +1,560 @@ +use super::{ + for_each_consumable, record_consumed_borrow::ConsumedAndBorrowedPlaces, DropRangesBuilder, + NodeInfo, PostOrderId, TrackedValue, TrackedValueIndex, +}; +use hir::{ + intravisit::{self, Visitor}, + Body, Expr, ExprKind, Guard, HirId, LoopIdError, +}; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_hir as hir; +use rustc_index::vec::IndexVec; +use rustc_middle::{ + hir::map::Map, + ty::{TyCtxt, TypeckResults}, +}; +use std::mem::swap; + +/// Traverses the body to find the control flow graph and locations for the +/// relevant places are dropped or reinitialized. +/// +/// The resulting structure still needs to be iterated to a fixed point, which +/// can be done with propagate_to_fixpoint in cfg_propagate. 
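+///
+/// A hedged sketch of how the pieces fit together (the argument list below is
+/// simplified, not the exact call made by the caller in drop_ranges.rs):
+///
+/// ```ignore (illustrative)
+/// let (mut drop_ranges, borrowed_temporaries) =
+///     build_control_flow_graph(hir, tcx, typeck_results, consumed_borrowed, body, num_exprs);
+/// drop_ranges.propagate_to_fixpoint();
+/// // after the fixpoint, callers can ask whether a tracked value is
+/// // definitely dropped at a given post-order location
+/// ```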
+pub(super) fn build_control_flow_graph<'tcx>( + hir: Map<'tcx>, + tcx: TyCtxt<'tcx>, + typeck_results: &TypeckResults<'tcx>, + consumed_borrowed_places: ConsumedAndBorrowedPlaces, + body: &'tcx Body<'tcx>, + num_exprs: usize, +) -> (DropRangesBuilder, FxHashSet) { + let mut drop_range_visitor = + DropRangeVisitor::new(hir, tcx, typeck_results, consumed_borrowed_places, num_exprs); + intravisit::walk_body(&mut drop_range_visitor, body); + + drop_range_visitor.drop_ranges.process_deferred_edges(); + if let Some(filename) = &tcx.sess.opts.unstable_opts.dump_drop_tracking_cfg { + super::cfg_visualize::write_graph_to_file(&drop_range_visitor.drop_ranges, filename, tcx); + } + + (drop_range_visitor.drop_ranges, drop_range_visitor.places.borrowed_temporaries) +} + +/// This struct is used to gather the information for `DropRanges` to determine the regions of the +/// HIR tree for which a value is dropped. +/// +/// We are interested in points where a variables is dropped or initialized, and the control flow +/// of the code. We identify locations in code by their post-order traversal index, so it is +/// important for this traversal to match that in `RegionResolutionVisitor` and `InteriorVisitor`. +/// +/// We make several simplifying assumptions, with the goal of being more conservative than +/// necessary rather than less conservative (since being less conservative is unsound, but more +/// conservative is still safe). These assumptions are: +/// +/// 1. Moving a variable `a` counts as a move of the whole variable. +/// 2. Moving a partial path like `a.b.c` is ignored. +/// 3. Reinitializing through a field (e.g. `a.b.c = 5`) counts as a reinitialization of all of +/// `a`. +/// +/// Some examples: +/// +/// Rule 1: +/// ```rust +/// let mut a = (vec![0], vec![0]); +/// drop(a); +/// // `a` is not considered initialized. +/// ``` +/// +/// Rule 2: +/// ```rust +/// let mut a = (vec![0], vec![0]); +/// drop(a.0); +/// drop(a.1); +/// // `a` is still considered initialized. +/// ``` +/// +/// Rule 3: +/// ```compile_fail,E0382 +/// let mut a = (vec![0], vec![0]); +/// drop(a); +/// a.1 = vec![1]; +/// // all of `a` is considered initialized +/// ``` + +struct DropRangeVisitor<'a, 'tcx> { + hir: Map<'tcx>, + places: ConsumedAndBorrowedPlaces, + drop_ranges: DropRangesBuilder, + expr_index: PostOrderId, + tcx: TyCtxt<'tcx>, + typeck_results: &'a TypeckResults<'tcx>, + label_stack: Vec<(Option, PostOrderId)>, +} + +impl<'a, 'tcx> DropRangeVisitor<'a, 'tcx> { + fn new( + hir: Map<'tcx>, + tcx: TyCtxt<'tcx>, + typeck_results: &'a TypeckResults<'tcx>, + places: ConsumedAndBorrowedPlaces, + num_exprs: usize, + ) -> Self { + debug!("consumed_places: {:?}", places.consumed); + let drop_ranges = DropRangesBuilder::new( + places.consumed.iter().flat_map(|(_, places)| places.iter().cloned()), + hir, + num_exprs, + ); + Self { + hir, + places, + drop_ranges, + expr_index: PostOrderId::from_u32(0), + typeck_results, + tcx, + label_stack: vec![], + } + } + + fn record_drop(&mut self, value: TrackedValue) { + if self.places.borrowed.contains(&value) { + debug!("not marking {:?} as dropped because it is borrowed at some point", value); + } else { + debug!("marking {:?} as dropped at {:?}", value, self.expr_index); + let count = self.expr_index; + self.drop_ranges.drop_at(value, count); + } + } + + /// ExprUseVisitor's consume callback doesn't go deep enough for our purposes in all + /// expressions. This method consumes a little deeper into the expression when needed. 
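+ ///
+ /// As an illustrative example (not from the original source), for the call
+ /// below the place consumed at the `drop(x)` expression is the path
+ /// expression `x`; `for_each_consumable` then also reports the local
+ /// variable `x`, so both are recorded as dropped at this node:
+ ///
+ /// ```ignore (illustrative)
+ /// let x = vec![0];
+ /// drop(x); // both the `x` path expression and the variable `x` are marked dropped
+ /// ```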
+ fn consume_expr(&mut self, expr: &hir::Expr<'_>) { + debug!("consuming expr {:?}, count={:?}", expr.kind, self.expr_index); + let places = self + .places + .consumed + .get(&expr.hir_id) + .map_or(vec![], |places| places.iter().cloned().collect()); + for place in places { + trace!(?place, "consuming place"); + for_each_consumable(self.hir, place, |value| self.record_drop(value)); + } + } + + /// Marks an expression as being reinitialized. + /// + /// Note that we always approximated on the side of things being more + /// initialized than they actually are, as opposed to less. In cases such + /// as `x.y = ...`, we would consider all of `x` as being initialized + /// instead of just the `y` field. + /// + /// This is because it is always safe to consider something initialized + /// even when it is not, but the other way around will cause problems. + /// + /// In the future, we will hopefully tighten up these rules to be more + /// precise. + fn reinit_expr(&mut self, expr: &hir::Expr<'_>) { + // Walk the expression to find the base. For example, in an expression + // like `*a[i].x`, we want to find the `a` and mark that as + // reinitialized. + match expr.kind { + ExprKind::Path(hir::QPath::Resolved( + _, + hir::Path { res: hir::def::Res::Local(hir_id), .. }, + )) => { + // This is the base case, where we have found an actual named variable. + + let location = self.expr_index; + debug!("reinitializing {:?} at {:?}", hir_id, location); + self.drop_ranges.reinit_at(TrackedValue::Variable(*hir_id), location); + } + + ExprKind::Field(base, _) => self.reinit_expr(base), + + // Most expressions do not refer to something where we need to track + // reinitializations. + // + // Some of these may be interesting in the future + ExprKind::Path(..) + | ExprKind::Box(..) + | ExprKind::ConstBlock(..) + | ExprKind::Array(..) + | ExprKind::Call(..) + | ExprKind::MethodCall(..) + | ExprKind::Tup(..) + | ExprKind::Binary(..) + | ExprKind::Unary(..) + | ExprKind::Lit(..) + | ExprKind::Cast(..) + | ExprKind::Type(..) + | ExprKind::DropTemps(..) + | ExprKind::Let(..) + | ExprKind::If(..) + | ExprKind::Loop(..) + | ExprKind::Match(..) + | ExprKind::Closure { .. } + | ExprKind::Block(..) + | ExprKind::Assign(..) + | ExprKind::AssignOp(..) + | ExprKind::Index(..) + | ExprKind::AddrOf(..) + | ExprKind::Break(..) + | ExprKind::Continue(..) + | ExprKind::Ret(..) + | ExprKind::InlineAsm(..) + | ExprKind::Struct(..) + | ExprKind::Repeat(..) + | ExprKind::Yield(..) + | ExprKind::Err => (), + } + } + + /// For an expression with an uninhabited return type (e.g. a function that returns !), + /// this adds a self edge to to the CFG to model the fact that the function does not + /// return. + fn handle_uninhabited_return(&mut self, expr: &Expr<'tcx>) { + let ty = self.typeck_results.expr_ty(expr); + let ty = self.tcx.erase_regions(ty); + let m = self.tcx.parent_module(expr.hir_id).to_def_id(); + let param_env = self.tcx.param_env(m.expect_local()); + if self.tcx.is_ty_uninhabited_from(m, ty, param_env) { + // This function will not return. We model this fact as an infinite loop. + self.drop_ranges.add_control_edge(self.expr_index + 1, self.expr_index + 1); + } + } + + /// Map a Destination to an equivalent expression node + /// + /// The destination field of a Break or Continue expression can target either an + /// expression or a block. The drop range analysis, however, only deals in + /// expression nodes, so blocks that might be the destination of a Break or Continue + /// will not have a PostOrderId. 
+ /// + /// If the destination is an expression, this function will simply return that expression's + /// hir_id. If the destination is a block, this function will return the hir_id of last + /// expression in the block. + fn find_target_expression_from_destination( + &self, + destination: hir::Destination, + ) -> Result { + destination.target_id.map(|target| { + let node = self.hir.get(target); + match node { + hir::Node::Expr(_) => target, + hir::Node::Block(b) => find_last_block_expression(b), + hir::Node::Param(..) + | hir::Node::Item(..) + | hir::Node::ForeignItem(..) + | hir::Node::TraitItem(..) + | hir::Node::ImplItem(..) + | hir::Node::Variant(..) + | hir::Node::Field(..) + | hir::Node::AnonConst(..) + | hir::Node::Stmt(..) + | hir::Node::PathSegment(..) + | hir::Node::Ty(..) + | hir::Node::TypeBinding(..) + | hir::Node::TraitRef(..) + | hir::Node::Pat(..) + | hir::Node::Arm(..) + | hir::Node::Local(..) + | hir::Node::Ctor(..) + | hir::Node::Lifetime(..) + | hir::Node::GenericParam(..) + | hir::Node::Crate(..) + | hir::Node::Infer(..) => bug!("Unsupported branch target: {:?}", node), + } + }) + } +} + +fn find_last_block_expression(block: &hir::Block<'_>) -> HirId { + block.expr.map_or_else( + // If there is no tail expression, there will be at least one statement in the + // block because the block contains a break or continue statement. + || block.stmts.last().unwrap().hir_id, + |expr| expr.hir_id, + ) +} + +impl<'a, 'tcx> Visitor<'tcx> for DropRangeVisitor<'a, 'tcx> { + fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) { + let mut reinit = None; + match expr.kind { + ExprKind::Assign(lhs, rhs, _) => { + self.visit_expr(lhs); + self.visit_expr(rhs); + + reinit = Some(lhs); + } + + ExprKind::If(test, if_true, if_false) => { + self.visit_expr(test); + + let fork = self.expr_index; + + self.drop_ranges.add_control_edge(fork, self.expr_index + 1); + self.visit_expr(if_true); + let true_end = self.expr_index; + + self.drop_ranges.add_control_edge(fork, self.expr_index + 1); + if let Some(if_false) = if_false { + self.visit_expr(if_false); + } + + self.drop_ranges.add_control_edge(true_end, self.expr_index + 1); + } + ExprKind::Match(scrutinee, arms, ..) => { + // We walk through the match expression almost like a chain of if expressions. + // Here's a diagram to follow along with: + // + // ┌─┐ + // match │A│ { + // ┌───┴─┘ + // │ + // ┌▼┌───►┌─┐ ┌─┐ + // │B│ if │C│ =>│D│, + // └─┘ ├─┴──►└─┴──────┐ + // ┌──┘ │ + // ┌──┘ │ + // │ │ + // ┌▼┌───►┌─┐ ┌─┐ │ + // │E│ if │F│ =>│G│, │ + // └─┘ ├─┴──►└─┴┐ │ + // │ │ │ + // } ▼ ▼ │ + // ┌─┐◄───────────────────┘ + // │H│ + // └─┘ + // + // The order we want is that the scrutinee (A) flows into the first pattern (B), + // which flows into the guard (C). Then the guard either flows into the arm body + // (D) or into the start of the next arm (E). Finally, the body flows to the end + // of the match block (H). + // + // The subsequent arms follow the same ordering. First we go to the pattern, then + // the guard (if present, otherwise it flows straight into the body), then into + // the body and then to the end of the match expression. + // + // The comments below show which edge is being added. + self.visit_expr(scrutinee); + + let (guard_exit, arm_end_ids) = arms.iter().fold( + (self.expr_index, vec![]), + |(incoming_edge, mut arm_end_ids), hir::Arm { pat, body, guard, .. 
}| { + // A -> B, or C -> E + self.drop_ranges.add_control_edge(incoming_edge, self.expr_index + 1); + self.visit_pat(pat); + // B -> C and E -> F are added implicitly due to the traversal order. + match guard { + Some(Guard::If(expr)) => self.visit_expr(expr), + Some(Guard::IfLet(let_expr)) => { + self.visit_let_expr(let_expr); + } + None => (), + } + // Likewise, C -> D and F -> G are added implicitly. + + // Save C, F, so we can add the other outgoing edge. + let to_next_arm = self.expr_index; + + // The default edge does not get added since we also have an explicit edge, + // so we also need to add an edge to the next node as well. + // + // This adds C -> D, F -> G + self.drop_ranges.add_control_edge(self.expr_index, self.expr_index + 1); + self.visit_expr(body); + + // Save the end of the body so we can add the exit edge once we know where + // the exit is. + arm_end_ids.push(self.expr_index); + + // Pass C to the next iteration, as well as vec![D] + // + // On the last round through, we pass F and vec![D, G] so that we can + // add all the exit edges. + (to_next_arm, arm_end_ids) + }, + ); + // F -> H + self.drop_ranges.add_control_edge(guard_exit, self.expr_index + 1); + + arm_end_ids.into_iter().for_each(|arm_end| { + // D -> H, G -> H + self.drop_ranges.add_control_edge(arm_end, self.expr_index + 1) + }); + } + + ExprKind::Loop(body, label, ..) => { + let loop_begin = self.expr_index + 1; + self.label_stack.push((label, loop_begin)); + if body.stmts.is_empty() && body.expr.is_none() { + // For empty loops we won't have updated self.expr_index after visiting the + // body, meaning we'd get an edge from expr_index to expr_index + 1, but + // instead we want an edge from expr_index + 1 to expr_index + 1. + self.drop_ranges.add_control_edge(loop_begin, loop_begin); + } else { + self.visit_block(body); + self.drop_ranges.add_control_edge(self.expr_index, loop_begin); + } + self.label_stack.pop(); + } + // Find the loop entry by searching through the label stack for either the last entry + // (if label is none), or the first entry where the label matches this one. The Loop + // case maintains this stack mapping labels to the PostOrderId for the loop entry. + ExprKind::Continue(hir::Destination { label, .. }, ..) => self + .label_stack + .iter() + .rev() + .find(|(loop_label, _)| label.is_none() || *loop_label == label) + .map_or((), |(_, target)| { + self.drop_ranges.add_control_edge(self.expr_index, *target) + }), + + ExprKind::Break(destination, ..) => { + // destination either points to an expression or to a block. We use + // find_target_expression_from_destination to use the last expression of the block + // if destination points to a block. + // + // We add an edge to the hir_id of the expression/block we are breaking out of, and + // then in process_deferred_edges we will map this hir_id to its PostOrderId, which + // will refer to the end of the block due to the post order traversal. + self.find_target_expression_from_destination(destination).map_or((), |target| { + self.drop_ranges.add_control_edge_hir_id(self.expr_index, target) + }) + } + + ExprKind::Call(f, args) => { + self.visit_expr(f); + for arg in args { + self.visit_expr(arg); + } + + self.handle_uninhabited_return(expr); + } + ExprKind::MethodCall(_, exprs, _) => { + for expr in exprs { + self.visit_expr(expr); + } + + self.handle_uninhabited_return(expr); + } + + ExprKind::AddrOf(..) + | ExprKind::Array(..) + | ExprKind::AssignOp(..) + | ExprKind::Binary(..) + | ExprKind::Block(..) + | ExprKind::Box(..) 
+ | ExprKind::Cast(..) + | ExprKind::Closure { .. } + | ExprKind::ConstBlock(..) + | ExprKind::DropTemps(..) + | ExprKind::Err + | ExprKind::Field(..) + | ExprKind::Index(..) + | ExprKind::InlineAsm(..) + | ExprKind::Let(..) + | ExprKind::Lit(..) + | ExprKind::Path(..) + | ExprKind::Repeat(..) + | ExprKind::Ret(..) + | ExprKind::Struct(..) + | ExprKind::Tup(..) + | ExprKind::Type(..) + | ExprKind::Unary(..) + | ExprKind::Yield(..) => intravisit::walk_expr(self, expr), + } + + self.expr_index = self.expr_index + 1; + self.drop_ranges.add_node_mapping(expr.hir_id, self.expr_index); + self.consume_expr(expr); + if let Some(expr) = reinit { + self.reinit_expr(expr); + } + } + + fn visit_pat(&mut self, pat: &'tcx hir::Pat<'tcx>) { + intravisit::walk_pat(self, pat); + + // Increment expr_count here to match what InteriorVisitor expects. + self.expr_index = self.expr_index + 1; + } +} + +impl DropRangesBuilder { + fn new( + tracked_values: impl Iterator, + hir: Map<'_>, + num_exprs: usize, + ) -> Self { + let mut tracked_value_map = FxHashMap::<_, TrackedValueIndex>::default(); + let mut next = <_>::from(0u32); + for value in tracked_values { + for_each_consumable(hir, value, |value| { + if !tracked_value_map.contains_key(&value) { + tracked_value_map.insert(value, next); + next = next + 1; + } + }); + } + debug!("hir_id_map: {:?}", tracked_value_map); + let num_values = tracked_value_map.len(); + Self { + tracked_value_map, + nodes: IndexVec::from_fn_n(|_| NodeInfo::new(num_values), num_exprs + 1), + deferred_edges: <_>::default(), + post_order_map: <_>::default(), + } + } + + fn tracked_value_index(&self, tracked_value: TrackedValue) -> TrackedValueIndex { + *self.tracked_value_map.get(&tracked_value).unwrap() + } + + /// Adds an entry in the mapping from HirIds to PostOrderIds + /// + /// Needed so that `add_control_edge_hir_id` can work. + fn add_node_mapping(&mut self, node_hir_id: HirId, post_order_id: PostOrderId) { + self.post_order_map.insert(node_hir_id, post_order_id); + } + + /// Like add_control_edge, but uses a hir_id as the target. + /// + /// This can be used for branches where we do not know the PostOrderId of the target yet, + /// such as when handling `break` or `continue`. + fn add_control_edge_hir_id(&mut self, from: PostOrderId, to: HirId) { + self.deferred_edges.push((from, to)); + } + + fn drop_at(&mut self, value: TrackedValue, location: PostOrderId) { + let value = self.tracked_value_index(value); + self.node_mut(location).drops.push(value); + } + + fn reinit_at(&mut self, value: TrackedValue, location: PostOrderId) { + let value = match self.tracked_value_map.get(&value) { + Some(value) => *value, + // If there's no value, this is never consumed and therefore is never dropped. We can + // ignore this. + None => return, + }; + self.node_mut(location).reinits.push(value); + } + + /// Looks up PostOrderId for any control edges added by HirId and adds a proper edge for them. + /// + /// Should be called after visiting the HIR but before solving the control flow, otherwise some + /// edges will be missed. + fn process_deferred_edges(&mut self) { + trace!("processing deferred edges. 
post_order_map={:#?}", self.post_order_map); + let mut edges = vec![]; + swap(&mut edges, &mut self.deferred_edges); + edges.into_iter().for_each(|(from, to)| { + trace!("Adding deferred edge from {:?} to {:?}", from, to); + let to = *self.post_order_map.get(&to).expect("Expression ID not found"); + trace!("target edge PostOrderId={:?}", to); + self.add_control_edge(from, to) + }); + } +} diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_propagate.rs b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_propagate.rs new file mode 100644 index 000000000..139d17d2e --- /dev/null +++ b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_propagate.rs @@ -0,0 +1,92 @@ +use super::{DropRangesBuilder, PostOrderId}; +use rustc_index::{bit_set::BitSet, vec::IndexVec}; +use std::collections::BTreeMap; + +impl DropRangesBuilder { + pub fn propagate_to_fixpoint(&mut self) { + trace!("before fixpoint: {:#?}", self); + let preds = self.compute_predecessors(); + + trace!("predecessors: {:#?}", preds.iter_enumerated().collect::>()); + + let mut new_state = BitSet::new_empty(self.num_values()); + let mut changed_nodes = BitSet::new_empty(self.nodes.len()); + let mut unchanged_mask = BitSet::new_filled(self.nodes.len()); + changed_nodes.insert(0u32.into()); + + let mut propagate = || { + let mut changed = false; + unchanged_mask.insert_all(); + for id in self.nodes.indices() { + trace!("processing {:?}, changed_nodes: {:?}", id, changed_nodes); + // Check if any predecessor has changed, and if not then short-circuit. + // + // We handle the start node specially, since it doesn't have any predecessors, + // but we need to start somewhere. + if match id.index() { + 0 => !changed_nodes.contains(id), + _ => !preds[id].iter().any(|pred| changed_nodes.contains(*pred)), + } { + trace!("short-circuiting because none of {:?} have changed", preds[id]); + unchanged_mask.remove(id); + continue; + } + + if id.index() == 0 { + new_state.clear(); + } else { + // If we are not the start node and we have no predecessors, treat + // everything as dropped because there's no way to get here anyway. + new_state.insert_all(); + }; + + for pred in &preds[id] { + new_state.intersect(&self.nodes[*pred].drop_state); + } + + for drop in &self.nodes[id].drops { + new_state.insert(*drop); + } + + for reinit in &self.nodes[id].reinits { + new_state.remove(*reinit); + } + + if self.nodes[id].drop_state.intersect(&new_state) { + changed_nodes.insert(id); + changed = true; + } else { + unchanged_mask.remove(id); + } + } + + changed_nodes.intersect(&unchanged_mask); + changed + }; + + while propagate() { + trace!("drop_state changed, re-running propagation"); + } + + trace!("after fixpoint: {:#?}", self); + } + + fn compute_predecessors(&self) -> IndexVec> { + let mut preds = IndexVec::from_fn_n(|_| vec![], self.nodes.len()); + for (id, node) in self.nodes.iter_enumerated() { + // If the node has no explicit successors, we assume that control + // will from this node into the next one. + // + // If there are successors listed, then we assume that all + // possible successors are given and we do not include the default. 
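+ //
+ // An illustrative example with made-up indices: a node 4 whose
+ // `successors` list is empty contributes only the fallthrough edge
+ // 4 -> 5 (so `preds[5]` gains 4), while a node 3 with explicit
+ // successors `[7]` contributes only 3 -> 7 (so `preds[7]` gains 3).
+ // The very last node never falls through past the end of `nodes`.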
+ if node.successors.len() == 0 && id.index() != self.nodes.len() - 1 { + preds[id + 1].push(id); + } else { + for succ in &node.successors { + preds[*succ].push(id); + } + } + } + preds + } +} diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_visualize.rs b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_visualize.rs new file mode 100644 index 000000000..c0a0bfe8e --- /dev/null +++ b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_visualize.rs @@ -0,0 +1,91 @@ +//! Implementation of GraphWalk for DropRanges so we can visualize the control +//! flow graph when needed for debugging. + +use rustc_graphviz as dot; +use rustc_middle::ty::TyCtxt; + +use super::{DropRangesBuilder, PostOrderId}; + +/// Writes the CFG for DropRangesBuilder to a .dot file for visualization. +/// +/// It is not normally called, but is kept around to easily add debugging +/// code when needed. +pub(super) fn write_graph_to_file( + drop_ranges: &DropRangesBuilder, + filename: &str, + tcx: TyCtxt<'_>, +) { + dot::render( + &DropRangesGraph { drop_ranges, tcx }, + &mut std::fs::File::create(filename).unwrap(), + ) + .unwrap(); +} + +struct DropRangesGraph<'a, 'tcx> { + drop_ranges: &'a DropRangesBuilder, + tcx: TyCtxt<'tcx>, +} + +impl<'a> dot::GraphWalk<'a> for DropRangesGraph<'_, '_> { + type Node = PostOrderId; + + type Edge = (PostOrderId, PostOrderId); + + fn nodes(&'a self) -> dot::Nodes<'a, Self::Node> { + self.drop_ranges.nodes.iter_enumerated().map(|(i, _)| i).collect() + } + + fn edges(&'a self) -> dot::Edges<'a, Self::Edge> { + self.drop_ranges + .nodes + .iter_enumerated() + .flat_map(|(i, node)| { + if node.successors.len() == 0 { + vec![(i, i + 1)] + } else { + node.successors.iter().map(move |&s| (i, s)).collect() + } + }) + .collect() + } + + fn source(&'a self, edge: &Self::Edge) -> Self::Node { + edge.0 + } + + fn target(&'a self, edge: &Self::Edge) -> Self::Node { + edge.1 + } +} + +impl<'a> dot::Labeller<'a> for DropRangesGraph<'_, '_> { + type Node = PostOrderId; + + type Edge = (PostOrderId, PostOrderId); + + fn graph_id(&'a self) -> dot::Id<'a> { + dot::Id::new("drop_ranges").unwrap() + } + + fn node_id(&'a self, n: &Self::Node) -> dot::Id<'a> { + dot::Id::new(format!("id{}", n.index())).unwrap() + } + + fn node_label(&'a self, n: &Self::Node) -> dot::LabelText<'a> { + dot::LabelText::LabelStr( + format!( + "{n:?}: {}", + self.drop_ranges + .post_order_map + .iter() + .find(|(_hir_id, &post_order_id)| post_order_id == *n) + .map_or("".into(), |(hir_id, _)| self + .tcx + .hir() + .node_to_string(*hir_id)) + ) + .into(), + ) + } +} diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/record_consumed_borrow.rs b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/record_consumed_borrow.rs new file mode 100644 index 000000000..ded0888c3 --- /dev/null +++ b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/record_consumed_borrow.rs @@ -0,0 +1,232 @@ +use super::TrackedValue; +use crate::{ + check::FnCtxt, + expr_use_visitor::{self, ExprUseVisitor}, +}; +use hir::{def_id::DefId, Body, HirId, HirIdMap}; +use rustc_data_structures::fx::FxHashSet; +use rustc_hir as hir; +use rustc_middle::hir::place::{PlaceBase, Projection, ProjectionKind}; +use rustc_middle::ty::{ParamEnv, TyCtxt}; + +pub(super) fn find_consumed_and_borrowed<'a, 'tcx>( + fcx: &'a FnCtxt<'a, 'tcx>, + def_id: DefId, + body: &'tcx Body<'tcx>, +) -> ConsumedAndBorrowedPlaces { + let mut expr_use_visitor = 
ExprUseDelegate::new(fcx.tcx, fcx.param_env); + expr_use_visitor.consume_body(fcx, def_id, body); + expr_use_visitor.places +} + +pub(super) struct ConsumedAndBorrowedPlaces { + /// Records the variables/expressions that are dropped by a given expression. + /// + /// The key is the hir-id of the expression, and the value is a set or hir-ids for variables + /// or values that are consumed by that expression. + /// + /// Note that this set excludes "partial drops" -- for example, a statement like `drop(x.y)` is + /// not considered a drop of `x`, although it would be a drop of `x.y`. + pub(super) consumed: HirIdMap>, + + /// A set of hir-ids of values or variables that are borrowed at some point within the body. + pub(super) borrowed: FxHashSet, + + /// A set of hir-ids of values or variables that are borrowed at some point within the body. + pub(super) borrowed_temporaries: FxHashSet, +} + +/// Works with ExprUseVisitor to find interesting values for the drop range analysis. +/// +/// Interesting values are those that are either dropped or borrowed. For dropped values, we also +/// record the parent expression, which is the point where the drop actually takes place. +struct ExprUseDelegate<'tcx> { + tcx: TyCtxt<'tcx>, + param_env: ParamEnv<'tcx>, + places: ConsumedAndBorrowedPlaces, +} + +impl<'tcx> ExprUseDelegate<'tcx> { + fn new(tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Self { + Self { + tcx, + param_env, + places: ConsumedAndBorrowedPlaces { + consumed: <_>::default(), + borrowed: <_>::default(), + borrowed_temporaries: <_>::default(), + }, + } + } + + fn consume_body(&mut self, fcx: &'_ FnCtxt<'_, 'tcx>, def_id: DefId, body: &'tcx Body<'tcx>) { + // Run ExprUseVisitor to find where values are consumed. + ExprUseVisitor::new( + self, + &fcx.infcx, + def_id.expect_local(), + fcx.param_env, + &fcx.typeck_results.borrow(), + ) + .consume_body(body); + } + + fn mark_consumed(&mut self, consumer: HirId, target: TrackedValue) { + self.places.consumed.entry(consumer).or_insert_with(|| <_>::default()); + + debug!(?consumer, ?target, "mark_consumed"); + self.places.consumed.get_mut(&consumer).map(|places| places.insert(target)); + } + + fn borrow_place(&mut self, place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>) { + self.places + .borrowed + .insert(TrackedValue::from_place_with_projections_allowed(place_with_id)); + + // Ordinarily a value is consumed by it's parent, but in the special case of a + // borrowed RValue, we create a reference that lives as long as the temporary scope + // for that expression (typically, the innermost statement, but sometimes the enclosing + // block). We record this fact here so that later in generator_interior + // we can use the correct scope. + // + // We special case borrows through a dereference (`&*x`, `&mut *x` where `x` is + // some rvalue expression), since these are essentially a copy of a pointer. + // In other words, this borrow does not refer to the + // temporary (`*x`), but to the referent (whatever `x` is a borrow of). + // + // We were considering that we might encounter problems down the line if somehow, + // some part of the compiler were to look at this result and try to use it to + // drive a borrowck-like analysis (this does not currently happen, as of this writing). + // But even this should be fine, because the lifetime of the dereferenced reference + // found in the rvalue is only significant as an intermediate 'link' to the value we + // are producing, and we separately track whether that value is live over a yield. 
+ // Example: + // + // ```notrust + // fn identity(x: &mut T) -> &mut T { x } + // let a: A = ...; + // let y: &'y mut A = &mut *identity(&'a mut a); + // ^^^^^^^^^^^^^^^^^^^^^^^^^ the borrow we are talking about + // ``` + // + // The expression `*identity(...)` is a deref of an rvalue, + // where the `identity(...)` (the rvalue) produces a return type + // of `&'rv mut A`, where `'a: 'rv`. We then assign this result to + // `'y`, resulting in (transitively) `'a: 'y` (i.e., while `y` is in use, + // `a` will be considered borrowed). Other parts of the code will ensure + // that if `y` is live over a yield, `&'y mut A` appears in the generator + // state. If `'y` is live, then any sound region analysis must conclude + // that `'a` is also live. So if this causes a bug, blame some other + // part of the code! + let is_deref = place_with_id + .place + .projections + .iter() + .any(|Projection { kind, .. }| *kind == ProjectionKind::Deref); + + if let (false, PlaceBase::Rvalue) = (is_deref, place_with_id.place.base) { + self.places.borrowed_temporaries.insert(place_with_id.hir_id); + } + } +} + +impl<'tcx> expr_use_visitor::Delegate<'tcx> for ExprUseDelegate<'tcx> { + fn consume( + &mut self, + place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>, + diag_expr_id: HirId, + ) { + let hir = self.tcx.hir(); + let parent = match hir.find_parent_node(place_with_id.hir_id) { + Some(parent) => parent, + None => place_with_id.hir_id, + }; + debug!( + "consume {:?}; diag_expr_id={}, using parent {}", + place_with_id, + hir.node_to_string(diag_expr_id), + hir.node_to_string(parent) + ); + place_with_id + .try_into() + .map_or((), |tracked_value| self.mark_consumed(parent, tracked_value)); + } + + fn borrow( + &mut self, + place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>, + diag_expr_id: HirId, + bk: rustc_middle::ty::BorrowKind, + ) { + debug!( + "borrow: place_with_id = {place_with_id:?}, diag_expr_id={diag_expr_id:?}, \ + borrow_kind={bk:?}" + ); + + self.borrow_place(place_with_id); + } + + fn copy( + &mut self, + place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>, + _diag_expr_id: HirId, + ) { + debug!("copy: place_with_id = {place_with_id:?}"); + + self.places + .borrowed + .insert(TrackedValue::from_place_with_projections_allowed(place_with_id)); + + // For copied we treat this mostly like a borrow except that we don't add the place + // to borrowed_temporaries because the copy is consumed. + } + + fn mutate( + &mut self, + assignee_place: &expr_use_visitor::PlaceWithHirId<'tcx>, + diag_expr_id: HirId, + ) { + debug!("mutate {assignee_place:?}; diag_expr_id={diag_expr_id:?}"); + + if assignee_place.place.base == PlaceBase::Rvalue + && assignee_place.place.projections.is_empty() + { + // Assigning to an Rvalue is illegal unless done through a dereference. We would have + // already gotten a type error, so we will just return here. + return; + } + + // If the type being assigned needs dropped, then the mutation counts as a borrow + // since it is essentially doing `Drop::drop(&mut x); x = new_value;`. 
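+ //
+ // A concrete illustration (not code from the original source):
+ //
+ // ```ignore (illustrative)
+ // let mut x = vec![1];
+ // x = vec![2]; // drops the old `vec![1]` through `&mut x` before storing
+ // ```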
+ if assignee_place.place.base_ty.needs_drop(self.tcx, self.param_env) { + self.places + .borrowed + .insert(TrackedValue::from_place_with_projections_allowed(assignee_place)); + } + } + + fn bind( + &mut self, + binding_place: &expr_use_visitor::PlaceWithHirId<'tcx>, + diag_expr_id: HirId, + ) { + debug!("bind {binding_place:?}; diag_expr_id={diag_expr_id:?}"); + } + + fn fake_read( + &mut self, + place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>, + cause: rustc_middle::mir::FakeReadCause, + diag_expr_id: HirId, + ) { + debug!( + "fake_read place_with_id={place_with_id:?}; cause={cause:?}; diag_expr_id={diag_expr_id:?}" + ); + + // fake reads happen in places like the scrutinee of a match expression. + // we treat those as a borrow, much like a copy: the idea is that we are + // transiently creating a `&T` ref that we can read from to observe the current + // value (this `&T` is immediately dropped afterwards). + self.borrow_place(place_with_id); + } +} diff --git a/compiler/rustc_typeck/src/check/inherited.rs b/compiler/rustc_typeck/src/check/inherited.rs new file mode 100644 index 000000000..cd152eb97 --- /dev/null +++ b/compiler/rustc_typeck/src/check/inherited.rs @@ -0,0 +1,183 @@ +use super::callee::DeferredCallResolution; + +use rustc_data_structures::fx::FxHashSet; +use rustc_hir as hir; +use rustc_hir::def_id::LocalDefId; +use rustc_hir::HirIdMap; +use rustc_infer::infer; +use rustc_infer::infer::{InferCtxt, InferOk, TyCtxtInferExt}; +use rustc_middle::ty::fold::TypeFoldable; +use rustc_middle::ty::visit::TypeVisitable; +use rustc_middle::ty::{self, Ty, TyCtxt}; +use rustc_span::def_id::LocalDefIdMap; +use rustc_span::{self, Span}; +use rustc_trait_selection::infer::InferCtxtExt as _; +use rustc_trait_selection::traits::{self, ObligationCause, TraitEngine, TraitEngineExt}; + +use std::cell::RefCell; +use std::ops::Deref; + +/// Closures defined within the function. For example: +/// ```ignore (illustrative) +/// fn foo() { +/// bar(move|| { ... }) +/// } +/// ``` +/// Here, the function `foo()` and the closure passed to +/// `bar()` will each have their own `FnCtxt`, but they will +/// share the inherited fields. +pub struct Inherited<'a, 'tcx> { + pub(super) infcx: InferCtxt<'a, 'tcx>, + + pub(super) typeck_results: &'a RefCell>, + + pub(super) locals: RefCell>>, + + pub(super) fulfillment_cx: RefCell>>, + + // Some additional `Sized` obligations badly affect type inference. + // These obligations are added in a later stage of typeck. + pub(super) deferred_sized_obligations: + RefCell, Span, traits::ObligationCauseCode<'tcx>)>>, + + // When we process a call like `c()` where `c` is a closure type, + // we may not have decided yet whether `c` is a `Fn`, `FnMut`, or + // `FnOnce` closure. In that case, we defer full resolution of the + // call until upvar inference can kick in and make the + // decision. We keep these deferred resolutions grouped by the + // def-id of the closure, so that once we decide, we can easily go + // back and process them. + pub(super) deferred_call_resolutions: RefCell>>>, + + pub(super) deferred_cast_checks: RefCell>>, + + pub(super) deferred_transmute_checks: RefCell, Ty<'tcx>, Span)>>, + + pub(super) deferred_asm_checks: RefCell, hir::HirId)>>, + + pub(super) deferred_generator_interiors: + RefCell, hir::GeneratorKind)>>, + + pub(super) body_id: Option, + + /// Whenever we introduce an adjustment from `!` into a type variable, + /// we record that type variable here. This is later used to inform + /// fallback. 
See the `fallback` module for details. + pub(super) diverging_type_vars: RefCell>>, +} + +impl<'a, 'tcx> Deref for Inherited<'a, 'tcx> { + type Target = InferCtxt<'a, 'tcx>; + fn deref(&self) -> &Self::Target { + &self.infcx + } +} + +/// A temporary returned by `Inherited::build(...)`. This is necessary +/// for multiple `InferCtxt` to share the same `in_progress_typeck_results` +/// without using `Rc` or something similar. +pub struct InheritedBuilder<'tcx> { + infcx: infer::InferCtxtBuilder<'tcx>, + def_id: LocalDefId, +} + +impl<'tcx> Inherited<'_, 'tcx> { + pub fn build(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> InheritedBuilder<'tcx> { + let hir_owner = tcx.hir().local_def_id_to_hir_id(def_id).owner; + + InheritedBuilder { + infcx: tcx + .infer_ctxt() + .ignoring_regions() + .with_fresh_in_progress_typeck_results(hir_owner), + def_id, + } + } +} + +impl<'tcx> InheritedBuilder<'tcx> { + pub fn enter(&mut self, f: F) -> R + where + F: for<'a> FnOnce(Inherited<'a, 'tcx>) -> R, + { + let def_id = self.def_id; + self.infcx.enter(|infcx| f(Inherited::new(infcx, def_id))) + } +} + +impl<'a, 'tcx> Inherited<'a, 'tcx> { + fn new(infcx: InferCtxt<'a, 'tcx>, def_id: LocalDefId) -> Self { + let tcx = infcx.tcx; + let body_id = tcx.hir().maybe_body_owned_by(def_id); + let typeck_results = + infcx.in_progress_typeck_results.expect("building `FnCtxt` without typeck results"); + + Inherited { + typeck_results, + infcx, + fulfillment_cx: RefCell::new(>::new(tcx)), + locals: RefCell::new(Default::default()), + deferred_sized_obligations: RefCell::new(Vec::new()), + deferred_call_resolutions: RefCell::new(Default::default()), + deferred_cast_checks: RefCell::new(Vec::new()), + deferred_transmute_checks: RefCell::new(Vec::new()), + deferred_asm_checks: RefCell::new(Vec::new()), + deferred_generator_interiors: RefCell::new(Vec::new()), + diverging_type_vars: RefCell::new(Default::default()), + body_id, + } + } + + #[instrument(level = "debug", skip(self))] + pub(super) fn register_predicate(&self, obligation: traits::PredicateObligation<'tcx>) { + if obligation.has_escaping_bound_vars() { + span_bug!(obligation.cause.span, "escaping bound vars in predicate {:?}", obligation); + } + self.fulfillment_cx.borrow_mut().register_predicate_obligation(self, obligation); + } + + pub(super) fn register_predicates(&self, obligations: I) + where + I: IntoIterator>, + { + for obligation in obligations { + self.register_predicate(obligation); + } + } + + pub(super) fn register_infer_ok_obligations(&self, infer_ok: InferOk<'tcx, T>) -> T { + self.register_predicates(infer_ok.obligations); + infer_ok.value + } + + pub(super) fn normalize_associated_types_in( + &self, + span: Span, + body_id: hir::HirId, + param_env: ty::ParamEnv<'tcx>, + value: T, + ) -> T + where + T: TypeFoldable<'tcx>, + { + self.normalize_associated_types_in_with_cause( + ObligationCause::misc(span, body_id), + param_env, + value, + ) + } + + pub(super) fn normalize_associated_types_in_with_cause( + &self, + cause: ObligationCause<'tcx>, + param_env: ty::ParamEnv<'tcx>, + value: T, + ) -> T + where + T: TypeFoldable<'tcx>, + { + let ok = self.partially_normalize_associated_types_in(cause, param_env, value); + debug!(?ok); + self.register_infer_ok_obligations(ok) + } +} diff --git a/compiler/rustc_typeck/src/check/intrinsic.rs b/compiler/rustc_typeck/src/check/intrinsic.rs new file mode 100644 index 000000000..3f2a0da8d --- /dev/null +++ b/compiler/rustc_typeck/src/check/intrinsic.rs @@ -0,0 +1,517 @@ +//! 
Type-checking for the rust-intrinsic and platform-intrinsic +//! intrinsics that the compiler exposes. + +use crate::errors::{ + UnrecognizedAtomicOperation, UnrecognizedIntrinsicFunction, + WrongNumberOfGenericArgumentsToIntrinsic, +}; +use crate::require_same_types; + +use rustc_errors::struct_span_err; +use rustc_hir as hir; +use rustc_middle::traits::{ObligationCause, ObligationCauseCode}; +use rustc_middle::ty::subst::Subst; +use rustc_middle::ty::{self, TyCtxt}; +use rustc_span::symbol::{kw, sym, Symbol}; +use rustc_target::spec::abi::Abi; + +use std::iter; + +fn equate_intrinsic_type<'tcx>( + tcx: TyCtxt<'tcx>, + it: &hir::ForeignItem<'_>, + n_tps: usize, + n_lts: usize, + sig: ty::PolyFnSig<'tcx>, +) { + let (own_counts, span) = match &it.kind { + hir::ForeignItemKind::Fn(.., generics) => { + let own_counts = tcx.generics_of(it.def_id.to_def_id()).own_counts(); + (own_counts, generics.span) + } + _ => { + struct_span_err!(tcx.sess, it.span, E0622, "intrinsic must be a function") + .span_label(it.span, "expected a function") + .emit(); + return; + } + }; + + let gen_count_ok = |found: usize, expected: usize, descr: &str| -> bool { + if found != expected { + tcx.sess.emit_err(WrongNumberOfGenericArgumentsToIntrinsic { + span, + found, + expected, + descr, + }); + false + } else { + true + } + }; + + if gen_count_ok(own_counts.lifetimes, n_lts, "lifetime") + && gen_count_ok(own_counts.types, n_tps, "type") + && gen_count_ok(own_counts.consts, 0, "const") + { + let fty = tcx.mk_fn_ptr(sig); + let cause = ObligationCause::new(it.span, it.hir_id(), ObligationCauseCode::IntrinsicType); + require_same_types(tcx, &cause, tcx.mk_fn_ptr(tcx.fn_sig(it.def_id)), fty); + } +} + +/// Returns the unsafety of the given intrinsic. +pub fn intrinsic_operation_unsafety(intrinsic: Symbol) -> hir::Unsafety { + match intrinsic { + // When adding a new intrinsic to this list, + // it's usually worth updating that intrinsic's documentation + // to note that it's safe to call, since + // safe extern fns are otherwise unprecedented. + sym::abort + | sym::size_of + | sym::min_align_of + | sym::needs_drop + | sym::caller_location + | sym::add_with_overflow + | sym::sub_with_overflow + | sym::mul_with_overflow + | sym::wrapping_add + | sym::wrapping_sub + | sym::wrapping_mul + | sym::saturating_add + | sym::saturating_sub + | sym::rotate_left + | sym::rotate_right + | sym::ctpop + | sym::ctlz + | sym::cttz + | sym::bswap + | sym::bitreverse + | sym::discriminant_value + | sym::type_id + | sym::likely + | sym::unlikely + | sym::ptr_guaranteed_eq + | sym::ptr_guaranteed_ne + | sym::minnumf32 + | sym::minnumf64 + | sym::maxnumf32 + | sym::rustc_peek + | sym::maxnumf64 + | sym::type_name + | sym::forget + | sym::black_box + | sym::variant_count => hir::Unsafety::Normal, + _ => hir::Unsafety::Unsafe, + } +} + +/// Remember to add all intrinsics here, in `compiler/rustc_codegen_llvm/src/intrinsic.rs`, +/// and in `library/core/src/intrinsics.rs`. 
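+///
+/// As a hedged illustration (the declaration below is an example, not code
+/// taken from this file), this function builds the expected signature for an
+/// intrinsic from its name (for `transmute`, two type parameters and the
+/// signature `fn(T) -> U`) and then equates it with the signature the user
+/// actually wrote in the `extern "rust-intrinsic"` block:
+///
+/// ```ignore (illustrative)
+/// extern "rust-intrinsic" {
+///     fn transmute<T, U>(value: T) -> U;
+/// }
+/// ```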
+pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) { + let param = |n| tcx.mk_ty_param(n, Symbol::intern(&format!("P{}", n))); + let intrinsic_name = tcx.item_name(it.def_id.to_def_id()); + let name_str = intrinsic_name.as_str(); + + let bound_vars = tcx.mk_bound_variable_kinds( + [ty::BoundVariableKind::Region(ty::BrAnon(0)), ty::BoundVariableKind::Region(ty::BrEnv)] + .iter() + .copied(), + ); + let mk_va_list_ty = |mutbl| { + tcx.lang_items().va_list().map(|did| { + let region = tcx.mk_region(ty::ReLateBound( + ty::INNERMOST, + ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(0) }, + )); + let env_region = tcx.mk_region(ty::ReLateBound( + ty::INNERMOST, + ty::BoundRegion { var: ty::BoundVar::from_u32(1), kind: ty::BrEnv }, + )); + let va_list_ty = tcx.bound_type_of(did).subst(tcx, &[region.into()]); + (tcx.mk_ref(env_region, ty::TypeAndMut { ty: va_list_ty, mutbl }), va_list_ty) + }) + }; + + let (n_tps, n_lts, inputs, output, unsafety) = if name_str.starts_with("atomic_") { + let split: Vec<&str> = name_str.split('_').collect(); + assert!(split.len() >= 2, "Atomic intrinsic in an incorrect format"); + + //We only care about the operation here + let (n_tps, inputs, output) = match split[1] { + "cxchg" | "cxchgweak" => ( + 1, + vec![tcx.mk_mut_ptr(param(0)), param(0), param(0)], + tcx.intern_tup(&[param(0), tcx.types.bool]), + ), + "load" => (1, vec![tcx.mk_imm_ptr(param(0))], param(0)), + "store" => (1, vec![tcx.mk_mut_ptr(param(0)), param(0)], tcx.mk_unit()), + + "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | "min" | "umax" + | "umin" => (1, vec![tcx.mk_mut_ptr(param(0)), param(0)], param(0)), + "fence" | "singlethreadfence" => (0, Vec::new(), tcx.mk_unit()), + op => { + tcx.sess.emit_err(UnrecognizedAtomicOperation { span: it.span, op }); + return; + } + }; + (n_tps, 0, inputs, output, hir::Unsafety::Unsafe) + } else { + let unsafety = intrinsic_operation_unsafety(intrinsic_name); + let (n_tps, inputs, output) = match intrinsic_name { + sym::abort => (0, Vec::new(), tcx.types.never), + sym::unreachable => (0, Vec::new(), tcx.types.never), + sym::breakpoint => (0, Vec::new(), tcx.mk_unit()), + sym::size_of | sym::pref_align_of | sym::min_align_of | sym::variant_count => { + (1, Vec::new(), tcx.types.usize) + } + sym::size_of_val | sym::min_align_of_val => { + (1, vec![tcx.mk_imm_ptr(param(0))], tcx.types.usize) + } + sym::rustc_peek => (1, vec![param(0)], param(0)), + sym::caller_location => (0, vec![], tcx.caller_location_ty()), + sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => { + (1, Vec::new(), tcx.mk_unit()) + } + sym::forget => (1, vec![param(0)], tcx.mk_unit()), + sym::transmute => (2, vec![param(0)], param(1)), + sym::prefetch_read_data + | sym::prefetch_write_data + | sym::prefetch_read_instruction + | sym::prefetch_write_instruction => ( + 1, + vec![ + tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }), + tcx.types.i32, + ], + tcx.mk_unit(), + ), + sym::drop_in_place => (1, vec![tcx.mk_mut_ptr(param(0))], tcx.mk_unit()), + sym::needs_drop => (1, Vec::new(), tcx.types.bool), + + sym::type_name => (1, Vec::new(), tcx.mk_static_str()), + sym::type_id => (1, Vec::new(), tcx.types.u64), + sym::offset | sym::arith_offset => ( + 1, + vec![ + tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }), + tcx.types.isize, + ], + tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }), + ), + sym::copy | sym::copy_nonoverlapping => ( + 1, + vec![ 
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }), + tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Mut }), + tcx.types.usize, + ], + tcx.mk_unit(), + ), + sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => ( + 1, + vec![ + tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Mut }), + tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }), + tcx.types.usize, + ], + tcx.mk_unit(), + ), + sym::write_bytes | sym::volatile_set_memory => ( + 1, + vec![ + tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Mut }), + tcx.types.u8, + tcx.types.usize, + ], + tcx.mk_unit(), + ), + sym::sqrtf32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::sqrtf64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::powif32 => (0, vec![tcx.types.f32, tcx.types.i32], tcx.types.f32), + sym::powif64 => (0, vec![tcx.types.f64, tcx.types.i32], tcx.types.f64), + sym::sinf32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::sinf64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::cosf32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::cosf64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::powf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32), + sym::powf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64), + sym::expf32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::expf64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::exp2f32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::exp2f64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::logf32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::logf64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::log10f32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::log10f64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::log2f32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::log2f64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::fmaf32 => (0, vec![tcx.types.f32, tcx.types.f32, tcx.types.f32], tcx.types.f32), + sym::fmaf64 => (0, vec![tcx.types.f64, tcx.types.f64, tcx.types.f64], tcx.types.f64), + sym::fabsf32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::fabsf64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::minnumf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32), + sym::minnumf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64), + sym::maxnumf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32), + sym::maxnumf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64), + sym::copysignf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32), + sym::copysignf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64), + sym::floorf32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::floorf64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::ceilf32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::ceilf64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::truncf32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::truncf64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::rintf32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::rintf64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::nearbyintf32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::nearbyintf64 => (0, vec![tcx.types.f64], tcx.types.f64), + sym::roundf32 => (0, vec![tcx.types.f32], tcx.types.f32), + sym::roundf64 => (0, vec![tcx.types.f64], tcx.types.f64), + + sym::volatile_load | sym::unaligned_volatile_load => { + (1, vec![tcx.mk_imm_ptr(param(0))], param(0)) + } + sym::volatile_store | 
sym::unaligned_volatile_store => { + (1, vec![tcx.mk_mut_ptr(param(0)), param(0)], tcx.mk_unit()) + } + + sym::ctpop + | sym::ctlz + | sym::ctlz_nonzero + | sym::cttz + | sym::cttz_nonzero + | sym::bswap + | sym::bitreverse => (1, vec![param(0)], param(0)), + + sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => { + (1, vec![param(0), param(0)], tcx.intern_tup(&[param(0), tcx.types.bool])) + } + + sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => { + (1, vec![tcx.mk_imm_ptr(param(0)), tcx.mk_imm_ptr(param(0))], tcx.types.bool) + } + + sym::const_allocate => { + (0, vec![tcx.types.usize, tcx.types.usize], tcx.mk_mut_ptr(tcx.types.u8)) + } + sym::const_deallocate => ( + 0, + vec![tcx.mk_mut_ptr(tcx.types.u8), tcx.types.usize, tcx.types.usize], + tcx.mk_unit(), + ), + + sym::ptr_offset_from => { + (1, vec![tcx.mk_imm_ptr(param(0)), tcx.mk_imm_ptr(param(0))], tcx.types.isize) + } + sym::ptr_offset_from_unsigned => { + (1, vec![tcx.mk_imm_ptr(param(0)), tcx.mk_imm_ptr(param(0))], tcx.types.usize) + } + sym::unchecked_div | sym::unchecked_rem | sym::exact_div => { + (1, vec![param(0), param(0)], param(0)) + } + sym::unchecked_shl | sym::unchecked_shr | sym::rotate_left | sym::rotate_right => { + (1, vec![param(0), param(0)], param(0)) + } + sym::unchecked_add | sym::unchecked_sub | sym::unchecked_mul => { + (1, vec![param(0), param(0)], param(0)) + } + sym::wrapping_add | sym::wrapping_sub | sym::wrapping_mul => { + (1, vec![param(0), param(0)], param(0)) + } + sym::saturating_add | sym::saturating_sub => (1, vec![param(0), param(0)], param(0)), + sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => { + (1, vec![param(0), param(0)], param(0)) + } + sym::float_to_int_unchecked => (2, vec![param(0)], param(1)), + + sym::assume => (0, vec![tcx.types.bool], tcx.mk_unit()), + sym::likely => (0, vec![tcx.types.bool], tcx.types.bool), + sym::unlikely => (0, vec![tcx.types.bool], tcx.types.bool), + + sym::discriminant_value => { + let assoc_items = tcx.associated_item_def_ids( + tcx.require_lang_item(hir::LangItem::DiscriminantKind, None), + ); + let discriminant_def_id = assoc_items[0]; + + let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(0) }; + ( + 1, + vec![ + tcx.mk_imm_ref(tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)), param(0)), + ], + tcx.mk_projection(discriminant_def_id, tcx.mk_substs([param(0).into()].iter())), + ) + } + + kw::Try => { + let mut_u8 = tcx.mk_mut_ptr(tcx.types.u8); + let try_fn_ty = ty::Binder::dummy(tcx.mk_fn_sig( + iter::once(mut_u8), + tcx.mk_unit(), + false, + hir::Unsafety::Normal, + Abi::Rust, + )); + let catch_fn_ty = ty::Binder::dummy(tcx.mk_fn_sig( + [mut_u8, mut_u8].iter().cloned(), + tcx.mk_unit(), + false, + hir::Unsafety::Normal, + Abi::Rust, + )); + ( + 0, + vec![tcx.mk_fn_ptr(try_fn_ty), mut_u8, tcx.mk_fn_ptr(catch_fn_ty)], + tcx.types.i32, + ) + } + + sym::va_start | sym::va_end => match mk_va_list_ty(hir::Mutability::Mut) { + Some((va_list_ref_ty, _)) => (0, vec![va_list_ref_ty], tcx.mk_unit()), + None => bug!("`va_list` language item needed for C-variadic intrinsics"), + }, + + sym::va_copy => match mk_va_list_ty(hir::Mutability::Not) { + Some((va_list_ref_ty, va_list_ty)) => { + let va_list_ptr_ty = tcx.mk_mut_ptr(va_list_ty); + (0, vec![va_list_ptr_ty, va_list_ref_ty], tcx.mk_unit()) + } + None => bug!("`va_list` language item needed for C-variadic intrinsics"), + }, + + sym::va_arg => match mk_va_list_ty(hir::Mutability::Mut) { + Some((va_list_ref_ty, _)) => (1, 
vec![va_list_ref_ty], param(0)), + None => bug!("`va_list` language item needed for C-variadic intrinsics"), + }, + + sym::nontemporal_store => (1, vec![tcx.mk_mut_ptr(param(0)), param(0)], tcx.mk_unit()), + + sym::raw_eq => { + let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(0) }; + let param_ty = + tcx.mk_imm_ref(tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)), param(0)); + (1, vec![param_ty; 2], tcx.types.bool) + } + + sym::black_box => (1, vec![param(0)], param(0)), + + sym::const_eval_select => (4, vec![param(0), param(1), param(2)], param(3)), + + sym::vtable_size | sym::vtable_align => { + (0, vec![tcx.mk_imm_ptr(tcx.mk_unit())], tcx.types.usize) + } + + other => { + tcx.sess.emit_err(UnrecognizedIntrinsicFunction { span: it.span, name: other }); + return; + } + }; + (n_tps, 0, inputs, output, unsafety) + }; + let sig = tcx.mk_fn_sig(inputs.into_iter(), output, false, unsafety, Abi::RustIntrinsic); + let sig = ty::Binder::bind_with_vars(sig, bound_vars); + equate_intrinsic_type(tcx, it, n_tps, n_lts, sig) +} + +/// Type-check `extern "platform-intrinsic" { ... }` functions. +pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) { + let param = |n| { + let name = Symbol::intern(&format!("P{}", n)); + tcx.mk_ty_param(n, name) + }; + + let name = it.ident.name; + + let (n_tps, inputs, output) = match name { + sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => { + (2, vec![param(0), param(0)], param(1)) + } + sym::simd_add + | sym::simd_sub + | sym::simd_mul + | sym::simd_rem + | sym::simd_div + | sym::simd_shl + | sym::simd_shr + | sym::simd_and + | sym::simd_or + | sym::simd_xor + | sym::simd_fmin + | sym::simd_fmax + | sym::simd_fpow + | sym::simd_saturating_add + | sym::simd_saturating_sub => (1, vec![param(0), param(0)], param(0)), + sym::simd_arith_offset => (2, vec![param(0), param(1)], param(0)), + sym::simd_neg + | sym::simd_fsqrt + | sym::simd_fsin + | sym::simd_fcos + | sym::simd_fexp + | sym::simd_fexp2 + | sym::simd_flog2 + | sym::simd_flog10 + | sym::simd_flog + | sym::simd_fabs + | sym::simd_ceil + | sym::simd_floor + | sym::simd_round + | sym::simd_trunc => (1, vec![param(0)], param(0)), + sym::simd_fpowi => (1, vec![param(0), tcx.types.i32], param(0)), + sym::simd_fma => (1, vec![param(0), param(0), param(0)], param(0)), + sym::simd_gather => (3, vec![param(0), param(1), param(2)], param(0)), + sym::simd_scatter => (3, vec![param(0), param(1), param(2)], tcx.mk_unit()), + sym::simd_insert => (2, vec![param(0), tcx.types.u32, param(1)], param(0)), + sym::simd_extract => (2, vec![param(0), tcx.types.u32], param(1)), + sym::simd_cast | sym::simd_as => (2, vec![param(0)], param(1)), + sym::simd_bitmask => (2, vec![param(0)], param(1)), + sym::simd_select | sym::simd_select_bitmask => { + (2, vec![param(0), param(1), param(1)], param(1)) + } + sym::simd_reduce_all | sym::simd_reduce_any => (1, vec![param(0)], tcx.types.bool), + sym::simd_reduce_add_ordered | sym::simd_reduce_mul_ordered => { + (2, vec![param(0), param(1)], param(1)) + } + sym::simd_reduce_add_unordered + | sym::simd_reduce_mul_unordered + | sym::simd_reduce_and + | sym::simd_reduce_or + | sym::simd_reduce_xor + | sym::simd_reduce_min + | sym::simd_reduce_max + | sym::simd_reduce_min_nanless + | sym::simd_reduce_max_nanless => (2, vec![param(0)], param(1)), + sym::simd_shuffle => (3, vec![param(0), param(0), param(1)], param(2)), + name if name.as_str().starts_with("simd_shuffle") => { + match 
name.as_str()["simd_shuffle".len()..].parse() { + Ok(n) => { + let params = vec![param(0), param(0), tcx.mk_array(tcx.types.u32, n)]; + (2, params, param(1)) + } + Err(_) => { + let msg = + format!("unrecognized platform-specific intrinsic function: `{name}`"); + tcx.sess.struct_span_err(it.span, &msg).emit(); + return; + } + } + } + _ => { + let msg = format!("unrecognized platform-specific intrinsic function: `{name}`"); + tcx.sess.struct_span_err(it.span, &msg).emit(); + return; + } + }; + + let sig = tcx.mk_fn_sig( + inputs.into_iter(), + output, + false, + hir::Unsafety::Unsafe, + Abi::PlatformIntrinsic, + ); + let sig = ty::Binder::dummy(sig); + equate_intrinsic_type(tcx, it, n_tps, 0, sig) +} diff --git a/compiler/rustc_typeck/src/check/intrinsicck.rs b/compiler/rustc_typeck/src/check/intrinsicck.rs new file mode 100644 index 000000000..df94abbaf --- /dev/null +++ b/compiler/rustc_typeck/src/check/intrinsicck.rs @@ -0,0 +1,530 @@ +use rustc_ast::InlineAsmTemplatePiece; +use rustc_data_structures::fx::FxHashSet; +use rustc_errors::struct_span_err; +use rustc_hir as hir; +use rustc_index::vec::Idx; +use rustc_middle::ty::layout::{LayoutError, SizeSkeleton}; +use rustc_middle::ty::{self, Article, FloatTy, IntTy, Ty, TyCtxt, TypeVisitable, UintTy}; +use rustc_session::lint; +use rustc_span::{Span, Symbol, DUMMY_SP}; +use rustc_target::abi::{Pointer, VariantIdx}; +use rustc_target::asm::{InlineAsmReg, InlineAsmRegClass, InlineAsmRegOrRegClass, InlineAsmType}; +use rustc_trait_selection::infer::InferCtxtExt; + +use super::FnCtxt; + +/// If the type is `Option`, it will return `T`, otherwise +/// the type itself. Works on most `Option`-like types. +fn unpack_option_like<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> { + let ty::Adt(def, substs) = *ty.kind() else { return ty }; + + if def.variants().len() == 2 && !def.repr().c() && def.repr().int.is_none() { + let data_idx; + + let one = VariantIdx::new(1); + let zero = VariantIdx::new(0); + + if def.variant(zero).fields.is_empty() { + data_idx = one; + } else if def.variant(one).fields.is_empty() { + data_idx = zero; + } else { + return ty; + } + + if def.variant(data_idx).fields.len() == 1 { + return def.variant(data_idx).fields[0].ty(tcx, substs); + } + } + + ty +} + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + pub fn check_transmute(&self, span: Span, from: Ty<'tcx>, to: Ty<'tcx>) { + let convert = |ty: Ty<'tcx>| { + let ty = self.resolve_vars_if_possible(ty); + let ty = self.tcx.normalize_erasing_regions(self.param_env, ty); + (SizeSkeleton::compute(ty, self.tcx, self.param_env), ty) + }; + let (sk_from, from) = convert(from); + let (sk_to, to) = convert(to); + + // Check for same size using the skeletons. + if let (Ok(sk_from), Ok(sk_to)) = (sk_from, sk_to) { + if sk_from.same_size(sk_to) { + return; + } + + // Special-case transmuting from `typeof(function)` and + // `Option` to present a clearer error. + let from = unpack_option_like(self.tcx, from); + if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (from.kind(), sk_to) && size_to == Pointer.size(&self.tcx) { + struct_span_err!(self.tcx.sess, span, E0591, "can't transmute zero-sized type") + .note(&format!("source type: {from}")) + .note(&format!("target type: {to}")) + .help("cast with `as` to a pointer instead") + .emit(); + return; + } + } + + // Try to display a sensible error with as much information as possible. + let skeleton_string = |ty: Ty<'tcx>, sk| match sk { + Ok(SizeSkeleton::Known(size)) => format!("{} bits", size.bits()), + Ok(SizeSkeleton::Pointer { tail, .. 
}) => format!("pointer to `{tail}`"), + Err(LayoutError::Unknown(bad)) => { + if bad == ty { + "this type does not have a fixed size".to_owned() + } else { + format!("size can vary because of {bad}") + } + } + Err(err) => err.to_string(), + }; + + let mut err = struct_span_err!( + self.tcx.sess, + span, + E0512, + "cannot transmute between types of different sizes, \ + or dependently-sized types" + ); + if from == to { + err.note(&format!("`{from}` does not have a fixed size")); + } else { + err.note(&format!("source type: `{}` ({})", from, skeleton_string(from, sk_from))) + .note(&format!("target type: `{}` ({})", to, skeleton_string(to, sk_to))); + } + err.emit(); + } + + // FIXME(compiler-errors): This could use `<$ty as Pointee>::Metadata == ()` + fn is_thin_ptr_ty(&self, ty: Ty<'tcx>) -> bool { + // Type still may have region variables, but `Sized` does not depend + // on those, so just erase them before querying. + if self.tcx.erase_regions(ty).is_sized(self.tcx.at(DUMMY_SP), self.param_env) { + return true; + } + if let ty::Foreign(..) = ty.kind() { + return true; + } + false + } +} + +pub struct InlineAsmCtxt<'a, 'tcx> { + tcx: TyCtxt<'tcx>, + fcx: Option<&'a FnCtxt<'a, 'tcx>>, +} + +impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { + pub fn new_global_asm(tcx: TyCtxt<'tcx>) -> Self { + InlineAsmCtxt { tcx, fcx: None } + } + + pub fn new_in_fn(fcx: &'a FnCtxt<'a, 'tcx>) -> Self { + InlineAsmCtxt { tcx: fcx.tcx, fcx: Some(fcx) } + } + + fn check_asm_operand_type( + &self, + idx: usize, + reg: InlineAsmRegOrRegClass, + expr: &hir::Expr<'tcx>, + template: &[InlineAsmTemplatePiece], + is_input: bool, + tied_input: Option<(&hir::Expr<'tcx>, Option)>, + target_features: &FxHashSet, + ) -> Option { + let fcx = self.fcx.unwrap_or_else(|| span_bug!(expr.span, "asm operand for global asm")); + // Check the type against the allowed types for inline asm. + let ty = fcx.typeck_results.borrow().expr_ty_adjusted(expr); + let ty = fcx.resolve_vars_if_possible(ty); + let asm_ty_isize = match self.tcx.sess.target.pointer_width { + 16 => InlineAsmType::I16, + 32 => InlineAsmType::I32, + 64 => InlineAsmType::I64, + _ => unreachable!(), + }; + + // Expect types to be fully resolved, no const or type variables. 
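As a user-level illustration (not part of the patch) of the size check performed by `check_transmute` earlier in this file: both sides must have the same `SizeSkeleton`, otherwise E0512 is emitted with the skeleton strings built above.

    fn to_bytes(x: u32) -> [u8; 4] {
        // 4 bytes -> 4 bytes: the two SizeSkeletons agree, so the check passes.
        unsafe { std::mem::transmute(x) }
    }

    // fn too_wide(x: u32) -> [u8; 8] {
    //     // Rejected at type-check time with E0512:
    //     // "cannot transmute between types of different sizes, or dependently-sized types",
    //     // with the "source type ... target type ..." notes produced above.
    //     unsafe { std::mem::transmute(x) }
    // }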
+ if ty.has_infer_types_or_consts() { + assert!(fcx.is_tainted_by_errors()); + return None; + } + + let asm_ty = match *ty.kind() { + // `!` is allowed for input but not for output (issue #87802) + ty::Never if is_input => return None, + ty::Error(_) => return None, + ty::Int(IntTy::I8) | ty::Uint(UintTy::U8) => Some(InlineAsmType::I8), + ty::Int(IntTy::I16) | ty::Uint(UintTy::U16) => Some(InlineAsmType::I16), + ty::Int(IntTy::I32) | ty::Uint(UintTy::U32) => Some(InlineAsmType::I32), + ty::Int(IntTy::I64) | ty::Uint(UintTy::U64) => Some(InlineAsmType::I64), + ty::Int(IntTy::I128) | ty::Uint(UintTy::U128) => Some(InlineAsmType::I128), + ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => Some(asm_ty_isize), + ty::Float(FloatTy::F32) => Some(InlineAsmType::F32), + ty::Float(FloatTy::F64) => Some(InlineAsmType::F64), + ty::FnPtr(_) => Some(asm_ty_isize), + ty::RawPtr(ty::TypeAndMut { ty, mutbl: _ }) if fcx.is_thin_ptr_ty(ty) => { + Some(asm_ty_isize) + } + ty::Adt(adt, substs) if adt.repr().simd() => { + let fields = &adt.non_enum_variant().fields; + let elem_ty = fields[0].ty(self.tcx, substs); + match elem_ty.kind() { + ty::Never | ty::Error(_) => return None, + ty::Int(IntTy::I8) | ty::Uint(UintTy::U8) => { + Some(InlineAsmType::VecI8(fields.len() as u64)) + } + ty::Int(IntTy::I16) | ty::Uint(UintTy::U16) => { + Some(InlineAsmType::VecI16(fields.len() as u64)) + } + ty::Int(IntTy::I32) | ty::Uint(UintTy::U32) => { + Some(InlineAsmType::VecI32(fields.len() as u64)) + } + ty::Int(IntTy::I64) | ty::Uint(UintTy::U64) => { + Some(InlineAsmType::VecI64(fields.len() as u64)) + } + ty::Int(IntTy::I128) | ty::Uint(UintTy::U128) => { + Some(InlineAsmType::VecI128(fields.len() as u64)) + } + ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => { + Some(match self.tcx.sess.target.pointer_width { + 16 => InlineAsmType::VecI16(fields.len() as u64), + 32 => InlineAsmType::VecI32(fields.len() as u64), + 64 => InlineAsmType::VecI64(fields.len() as u64), + _ => unreachable!(), + }) + } + ty::Float(FloatTy::F32) => Some(InlineAsmType::VecF32(fields.len() as u64)), + ty::Float(FloatTy::F64) => Some(InlineAsmType::VecF64(fields.len() as u64)), + _ => None, + } + } + ty::Infer(_) => unreachable!(), + _ => None, + }; + let Some(asm_ty) = asm_ty else { + let msg = &format!("cannot use value of type `{ty}` for inline assembly"); + let mut err = self.tcx.sess.struct_span_err(expr.span, msg); + err.note( + "only integers, floats, SIMD vectors, pointers and function pointers \ + can be used as arguments for inline assembly", + ); + err.emit(); + return None; + }; + + // Check that the type implements Copy. The only case where this can + // possibly fail is for SIMD types which don't #[derive(Copy)]. + if !fcx.infcx.type_is_copy_modulo_regions(fcx.param_env, ty, DUMMY_SP) { + let msg = "arguments for inline assembly must be copyable"; + let mut err = self.tcx.sess.struct_span_err(expr.span, msg); + err.note(&format!("`{ty}` does not implement the Copy trait")); + err.emit(); + } + + // Ideally we wouldn't need to do this, but LLVM's register allocator + // really doesn't like it when tied operands have different types. + // + // This is purely an LLVM limitation, but we have to live with it since + // there is no way to hide this with implicit conversions. + // + // For the purposes of this check we only look at the `InlineAsmType`, + // which means that pointers and integers are treated as identical (modulo + // size). 
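A sketch (assuming x86_64; other targets have different register classes) of the operand typing enforced by `check_asm_operand_type` above: scalar integers, floats, thin pointers and SIMD vectors map to an `InlineAsmType`, anything else is reported.

    #[cfg(target_arch = "x86_64")]
    fn add_one(x: u64) -> u64 {
        let out: u64;
        // u64 maps to InlineAsmType::I64, which the general-purpose `reg`
        // class accepts, so the check above is satisfied.
        unsafe { std::arch::asm!("lea {o}, [{x} + 1]", x = in(reg) x, o = out(reg) out) };
        out
    }
    // Using e.g. a `String` operand instead would produce
    // "cannot use value of type `String` for inline assembly".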
+ if let Some((in_expr, Some(in_asm_ty))) = tied_input { + if in_asm_ty != asm_ty { + let msg = "incompatible types for asm inout argument"; + let mut err = self.tcx.sess.struct_span_err(vec![in_expr.span, expr.span], msg); + + let in_expr_ty = fcx.typeck_results.borrow().expr_ty_adjusted(in_expr); + let in_expr_ty = fcx.resolve_vars_if_possible(in_expr_ty); + err.span_label(in_expr.span, &format!("type `{in_expr_ty}`")); + err.span_label(expr.span, &format!("type `{ty}`")); + err.note( + "asm inout arguments must have the same type, \ + unless they are both pointers or integers of the same size", + ); + err.emit(); + } + + // All of the later checks have already been done on the input, so + // let's not emit errors and warnings twice. + return Some(asm_ty); + } + + // Check the type against the list of types supported by the selected + // register class. + let asm_arch = self.tcx.sess.asm_arch.unwrap(); + let reg_class = reg.reg_class(); + let supported_tys = reg_class.supported_types(asm_arch); + let Some((_, feature)) = supported_tys.iter().find(|&&(t, _)| t == asm_ty) else { + let msg = &format!("type `{ty}` cannot be used with this register class"); + let mut err = self.tcx.sess.struct_span_err(expr.span, msg); + let supported_tys: Vec<_> = + supported_tys.iter().map(|(t, _)| t.to_string()).collect(); + err.note(&format!( + "register class `{}` supports these types: {}", + reg_class.name(), + supported_tys.join(", "), + )); + if let Some(suggest) = reg_class.suggest_class(asm_arch, asm_ty) { + err.help(&format!( + "consider using the `{}` register class instead", + suggest.name() + )); + } + err.emit(); + return Some(asm_ty); + }; + + // Check whether the selected type requires a target feature. Note that + // this is different from the feature check we did earlier. While the + // previous check checked that this register class is usable at all + // with the currently enabled features, some types may only be usable + // with a register class when a certain feature is enabled. We check + // this here since it depends on the results of typeck. + // + // Also note that this check isn't run when the operand type is never + // (!). In that case we still need the earlier check to verify that the + // register class is usable at all. + if let Some(feature) = feature { + if !target_features.contains(&feature) { + let msg = &format!("`{}` target feature is not enabled", feature); + let mut err = self.tcx.sess.struct_span_err(expr.span, msg); + err.note(&format!( + "this is required to use type `{}` with register class `{}`", + ty, + reg_class.name(), + )); + err.emit(); + return Some(asm_ty); + } + } + + // Check whether a modifier is suggested for using this type. + if let Some((suggested_modifier, suggested_result)) = + reg_class.suggest_modifier(asm_arch, asm_ty) + { + // Search for any use of this operand without a modifier and emit + // the suggestion for them. 
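A sketch (again assuming x86_64) of the tied-operand rule checked just above: the input and output halves of an `inout` pair must map to the same `InlineAsmType`.

    #[cfg(target_arch = "x86_64")]
    fn double(mut x: u64) -> u64 {
        // A plain `inout` reuses one expression for input and output, so both
        // sides trivially have the same InlineAsmType (I64 here).
        unsafe { std::arch::asm!("add {0}, {0}", inout(reg) x) };
        x
    }
    // A split pair with mismatched widths, e.g. `inout(reg) some_u64 => some_u32`,
    // is reported above as "incompatible types for asm inout argument".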
+ let mut spans = vec![]; + for piece in template { + if let &InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span } = piece + { + if operand_idx == idx && modifier.is_none() { + spans.push(span); + } + } + } + if !spans.is_empty() { + let (default_modifier, default_result) = + reg_class.default_modifier(asm_arch).unwrap(); + self.tcx.struct_span_lint_hir( + lint::builtin::ASM_SUB_REGISTER, + expr.hir_id, + spans, + |lint| { + let msg = "formatting may not be suitable for sub-register argument"; + let mut err = lint.build(msg); + err.span_label(expr.span, "for this argument"); + err.help(&format!( + "use the `{suggested_modifier}` modifier to have the register formatted as `{suggested_result}`", + )); + err.help(&format!( + "or use the `{default_modifier}` modifier to keep the default formatting of `{default_result}`", + )); + err.emit(); + }, + ); + } + } + + Some(asm_ty) + } + + pub fn check_asm(&self, asm: &hir::InlineAsm<'tcx>, enclosing_id: hir::HirId) { + let hir = self.tcx.hir(); + let enclosing_def_id = hir.local_def_id(enclosing_id).to_def_id(); + let target_features = self.tcx.asm_target_features(enclosing_def_id); + let Some(asm_arch) = self.tcx.sess.asm_arch else { + self.tcx.sess.delay_span_bug(DUMMY_SP, "target architecture does not support asm"); + return; + }; + for (idx, (op, op_sp)) in asm.operands.iter().enumerate() { + // Validate register classes against currently enabled target + // features. We check that at least one type is available for + // the enabled features. + // + // We ignore target feature requirements for clobbers: if the + // feature is disabled then the compiler doesn't care what we + // do with the registers. + // + // Note that this is only possible for explicit register + // operands, which cannot be used in the asm string. + if let Some(reg) = op.reg() { + // Some explicit registers cannot be used depending on the + // target. Reject those here. + if let InlineAsmRegOrRegClass::Reg(reg) = reg { + if let InlineAsmReg::Err = reg { + // `validate` will panic on `Err`, as an error must + // already have been reported. + continue; + } + if let Err(msg) = reg.validate( + asm_arch, + self.tcx.sess.relocation_model(), + &target_features, + &self.tcx.sess.target, + op.is_clobber(), + ) { + let msg = format!("cannot use register `{}`: {}", reg.name(), msg); + self.tcx.sess.struct_span_err(*op_sp, &msg).emit(); + continue; + } + } + + if !op.is_clobber() { + let mut missing_required_features = vec![]; + let reg_class = reg.reg_class(); + if let InlineAsmRegClass::Err = reg_class { + continue; + } + for &(_, feature) in reg_class.supported_types(asm_arch) { + match feature { + Some(feature) => { + if target_features.contains(&feature) { + missing_required_features.clear(); + break; + } else { + missing_required_features.push(feature); + } + } + None => { + missing_required_features.clear(); + break; + } + } + } + + // We are sorting primitive strs here and can use unstable sort here + missing_required_features.sort_unstable(); + missing_required_features.dedup(); + match &missing_required_features[..] 
{ + [] => {} + [feature] => { + let msg = format!( + "register class `{}` requires the `{}` target feature", + reg_class.name(), + feature + ); + self.tcx.sess.struct_span_err(*op_sp, &msg).emit(); + // register isn't enabled, don't do more checks + continue; + } + features => { + let msg = format!( + "register class `{}` requires at least one of the following target features: {}", + reg_class.name(), + features + .iter() + .map(|f| f.as_str()) + .intersperse(", ") + .collect::(), + ); + self.tcx.sess.struct_span_err(*op_sp, &msg).emit(); + // register isn't enabled, don't do more checks + continue; + } + } + } + } + + match *op { + hir::InlineAsmOperand::In { reg, ref expr } => { + self.check_asm_operand_type( + idx, + reg, + expr, + asm.template, + true, + None, + &target_features, + ); + } + hir::InlineAsmOperand::Out { reg, late: _, ref expr } => { + if let Some(expr) = expr { + self.check_asm_operand_type( + idx, + reg, + expr, + asm.template, + false, + None, + &target_features, + ); + } + } + hir::InlineAsmOperand::InOut { reg, late: _, ref expr } => { + self.check_asm_operand_type( + idx, + reg, + expr, + asm.template, + false, + None, + &target_features, + ); + } + hir::InlineAsmOperand::SplitInOut { reg, late: _, ref in_expr, ref out_expr } => { + let in_ty = self.check_asm_operand_type( + idx, + reg, + in_expr, + asm.template, + true, + None, + &target_features, + ); + if let Some(out_expr) = out_expr { + self.check_asm_operand_type( + idx, + reg, + out_expr, + asm.template, + false, + Some((in_expr, in_ty)), + &target_features, + ); + } + } + // No special checking is needed for these: + // - Typeck has checked that Const operands are integers. + // - AST lowering guarantees that SymStatic points to a static. + hir::InlineAsmOperand::Const { .. } | hir::InlineAsmOperand::SymStatic { .. } => {} + // Check that sym actually points to a function. Later passes + // depend on this. + hir::InlineAsmOperand::SymFn { anon_const } => { + let ty = self.tcx.typeck_body(anon_const.body).node_type(anon_const.hir_id); + match ty.kind() { + ty::Never | ty::Error(_) => {} + ty::FnDef(..) 
=> {} + _ => { + let mut err = + self.tcx.sess.struct_span_err(*op_sp, "invalid `sym` operand"); + err.span_label( + self.tcx.hir().span(anon_const.body.hir_id), + &format!("is {} `{}`", ty.kind().article(), ty), + ); + err.help("`sym` operands must refer to either a function or a static"); + err.emit(); + } + }; + } + } + } + } +} diff --git a/compiler/rustc_typeck/src/check/method/confirm.rs b/compiler/rustc_typeck/src/check/method/confirm.rs new file mode 100644 index 000000000..2c89b63ae --- /dev/null +++ b/compiler/rustc_typeck/src/check/method/confirm.rs @@ -0,0 +1,582 @@ +use super::{probe, MethodCallee}; + +use crate::astconv::{AstConv, CreateSubstsForGenericArgsCtxt, IsMethodCall}; +use crate::check::{callee, FnCtxt}; +use rustc_hir as hir; +use rustc_hir::def_id::DefId; +use rustc_hir::GenericArg; +use rustc_infer::infer::{self, InferOk}; +use rustc_middle::traits::{ObligationCauseCode, UnifyReceiverContext}; +use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCast}; +use rustc_middle::ty::adjustment::{AllowTwoPhase, AutoBorrow, AutoBorrowMutability}; +use rustc_middle::ty::fold::TypeFoldable; +use rustc_middle::ty::subst::{self, Subst, SubstsRef}; +use rustc_middle::ty::{self, GenericParamDefKind, Ty}; +use rustc_span::Span; +use rustc_trait_selection::traits; + +use std::iter; +use std::ops::Deref; + +struct ConfirmContext<'a, 'tcx> { + fcx: &'a FnCtxt<'a, 'tcx>, + span: Span, + self_expr: &'tcx hir::Expr<'tcx>, + call_expr: &'tcx hir::Expr<'tcx>, +} + +impl<'a, 'tcx> Deref for ConfirmContext<'a, 'tcx> { + type Target = FnCtxt<'a, 'tcx>; + fn deref(&self) -> &Self::Target { + self.fcx + } +} + +#[derive(Debug)] +pub struct ConfirmResult<'tcx> { + pub callee: MethodCallee<'tcx>, + pub illegal_sized_bound: Option, +} + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + pub fn confirm_method( + &self, + span: Span, + self_expr: &'tcx hir::Expr<'tcx>, + call_expr: &'tcx hir::Expr<'tcx>, + unadjusted_self_ty: Ty<'tcx>, + pick: probe::Pick<'tcx>, + segment: &hir::PathSegment<'_>, + ) -> ConfirmResult<'tcx> { + debug!( + "confirm(unadjusted_self_ty={:?}, pick={:?}, generic_args={:?})", + unadjusted_self_ty, pick, segment.args, + ); + + let mut confirm_cx = ConfirmContext::new(self, span, self_expr, call_expr); + confirm_cx.confirm(unadjusted_self_ty, pick, segment) + } +} + +impl<'a, 'tcx> ConfirmContext<'a, 'tcx> { + fn new( + fcx: &'a FnCtxt<'a, 'tcx>, + span: Span, + self_expr: &'tcx hir::Expr<'tcx>, + call_expr: &'tcx hir::Expr<'tcx>, + ) -> ConfirmContext<'a, 'tcx> { + ConfirmContext { fcx, span, self_expr, call_expr } + } + + fn confirm( + &mut self, + unadjusted_self_ty: Ty<'tcx>, + pick: probe::Pick<'tcx>, + segment: &hir::PathSegment<'_>, + ) -> ConfirmResult<'tcx> { + // Adjust the self expression the user provided and obtain the adjusted type. + let self_ty = self.adjust_self_ty(unadjusted_self_ty, &pick); + + // Create substitutions for the method's type parameters. + let rcvr_substs = self.fresh_receiver_substs(self_ty, &pick); + let all_substs = self.instantiate_method_substs(&pick, segment, rcvr_substs); + + debug!("rcvr_substs={rcvr_substs:?}, all_substs={all_substs:?}"); + + // Create the final signature for the method, replacing late-bound regions. 
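A sketch of the `SymFn` rule enforced at the end of `check_asm` above: a `sym` operand must name a function (statics are handled separately as `SymStatic`). The function names below are made up for illustration, and the snippet assumes x86_64.

    #[cfg(target_arch = "x86_64")]
    fn sym_operand() {
        extern "C" fn callee() {}
        // `callee` has a ty::FnDef type, so the SymFn check above accepts it.
        unsafe { std::arch::asm!("call {}", sym callee, clobber_abi("C")) };
    }
    // A `sym` path that is neither a function nor a static (for example an
    // associated const) is rejected with "invalid `sym` operand".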
+ let (method_sig, method_predicates) = self.instantiate_method_sig(&pick, all_substs); + + // If there is a `Self: Sized` bound and `Self` is a trait object, it is possible that + // something which derefs to `Self` actually implements the trait and the caller + // wanted to make a static dispatch on it but forgot to import the trait. + // See test `src/test/ui/issue-35976.rs`. + // + // In that case, we'll error anyway, but we'll also re-run the search with all traits + // in scope, and if we find another method which can be used, we'll output an + // appropriate hint suggesting to import the trait. + let filler_substs = rcvr_substs + .extend_to(self.tcx, pick.item.def_id, |def, _| self.tcx.mk_param_from_def(def)); + let illegal_sized_bound = self.predicates_require_illegal_sized_bound( + &self.tcx.predicates_of(pick.item.def_id).instantiate(self.tcx, filler_substs), + ); + + // Unify the (adjusted) self type with what the method expects. + // + // SUBTLE: if we want good error messages, because of "guessing" while matching + // traits, no trait system method can be called before this point because they + // could alter our Self-type, except for normalizing the receiver from the + // signature (which is also done during probing). + let method_sig_rcvr = self.normalize_associated_types_in(self.span, method_sig.inputs()[0]); + debug!( + "confirm: self_ty={:?} method_sig_rcvr={:?} method_sig={:?} method_predicates={:?}", + self_ty, method_sig_rcvr, method_sig, method_predicates + ); + self.unify_receivers(self_ty, method_sig_rcvr, &pick, all_substs); + + let (method_sig, method_predicates) = + self.normalize_associated_types_in(self.span, (method_sig, method_predicates)); + let method_sig = ty::Binder::dummy(method_sig); + + // Make sure nobody calls `drop()` explicitly. + self.enforce_illegal_method_limitations(&pick); + + // Add any trait/regions obligations specified on the method's type parameters. + // We won't add these if we encountered an illegal sized bound, so that we can use + // a custom error in that case. + if illegal_sized_bound.is_none() { + self.add_obligations( + self.tcx.mk_fn_ptr(method_sig), + all_substs, + method_predicates, + pick.item.def_id, + ); + } + + // Create the final `MethodCallee`. + let callee = MethodCallee { + def_id: pick.item.def_id, + substs: all_substs, + sig: method_sig.skip_binder(), + }; + ConfirmResult { callee, illegal_sized_bound } + } + + /////////////////////////////////////////////////////////////////////////// + // ADJUSTMENTS + + fn adjust_self_ty( + &mut self, + unadjusted_self_ty: Ty<'tcx>, + pick: &probe::Pick<'tcx>, + ) -> Ty<'tcx> { + // Commit the autoderefs by calling `autoderef` again, but this + // time writing the results into the various typeck results. 
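A user-level sketch (all item names made up) of the effect of `add_obligations` in `confirm` above: the method's own where-clauses are registered as obligations at the call site and must hold for the chosen receiver type.

    struct Wrapper<T>(T);

    impl<T> Wrapper<T> {
        fn print_it(&self)
        where
            T: std::fmt::Debug,
        {
            println!("{:?}", self.0);
        }
    }

    struct NoDebug;

    fn obligations() {
        // T = i32: the `T: Debug` predicate registered for this call is satisfied.
        Wrapper(42).print_it();
        // Wrapper(NoDebug).print_it();
        //     ^ rejected: the registered obligation `NoDebug: Debug` does not hold.
    }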
+ let mut autoderef = + self.autoderef_overloaded_span(self.span, unadjusted_self_ty, self.call_expr.span); + let Some((ty, n)) = autoderef.nth(pick.autoderefs) else { + return self.tcx.ty_error_with_message( + rustc_span::DUMMY_SP, + &format!("failed autoderef {}", pick.autoderefs), + ); + }; + assert_eq!(n, pick.autoderefs); + + let mut adjustments = self.adjust_steps(&autoderef); + let mut target = self.structurally_resolved_type(autoderef.span(), ty); + + match pick.autoref_or_ptr_adjustment { + Some(probe::AutorefOrPtrAdjustment::Autoref { mutbl, unsize }) => { + let region = self.next_region_var(infer::Autoref(self.span)); + // Type we're wrapping in a reference, used later for unsizing + let base_ty = target; + + target = self.tcx.mk_ref(region, ty::TypeAndMut { mutbl, ty: target }); + let mutbl = match mutbl { + hir::Mutability::Not => AutoBorrowMutability::Not, + hir::Mutability::Mut => AutoBorrowMutability::Mut { + // Method call receivers are the primary use case + // for two-phase borrows. + allow_two_phase_borrow: AllowTwoPhase::Yes, + }, + }; + adjustments.push(Adjustment { + kind: Adjust::Borrow(AutoBorrow::Ref(region, mutbl)), + target, + }); + + if unsize { + let unsized_ty = if let ty::Array(elem_ty, _) = base_ty.kind() { + self.tcx.mk_slice(*elem_ty) + } else { + bug!( + "AutorefOrPtrAdjustment's unsize flag should only be set for array ty, found {}", + base_ty + ) + }; + target = self + .tcx + .mk_ref(region, ty::TypeAndMut { mutbl: mutbl.into(), ty: unsized_ty }); + adjustments + .push(Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), target }); + } + } + Some(probe::AutorefOrPtrAdjustment::ToConstPtr) => { + target = match target.kind() { + &ty::RawPtr(ty::TypeAndMut { ty, mutbl }) => { + assert_eq!(mutbl, hir::Mutability::Mut); + self.tcx.mk_ptr(ty::TypeAndMut { mutbl: hir::Mutability::Not, ty }) + } + other => panic!("Cannot adjust receiver type {:?} to const ptr", other), + }; + + adjustments.push(Adjustment { + kind: Adjust::Pointer(PointerCast::MutToConstPointer), + target, + }); + } + None => {} + } + + self.register_predicates(autoderef.into_obligations()); + + // Write out the final adjustments. + self.apply_adjustments(self.self_expr, adjustments); + + target + } + + /// Returns a set of substitutions for the method *receiver* where all type and region + /// parameters are instantiated with fresh variables. This substitution does not include any + /// parameters declared on the method itself. + /// + /// Note that this substitution may include late-bound regions from the impl level. If so, + /// these are instantiated later in the `instantiate_method_sig` routine. + fn fresh_receiver_substs( + &mut self, + self_ty: Ty<'tcx>, + pick: &probe::Pick<'tcx>, + ) -> SubstsRef<'tcx> { + match pick.kind { + probe::InherentImplPick => { + let impl_def_id = pick.item.container_id(self.tcx); + assert!( + self.tcx.impl_trait_ref(impl_def_id).is_none(), + "impl {:?} is not an inherent impl", + impl_def_id + ); + self.fresh_substs_for_item(self.span, impl_def_id) + } + + probe::ObjectPick => { + let trait_def_id = pick.item.container_id(self.tcx); + self.extract_existential_trait_ref(self_ty, |this, object_ty, principal| { + // The object data has no entry for the Self + // Type. For the purposes of this method call, we + // substitute the object type itself. 
This + // wouldn't be a sound substitution in all cases, + // since each instance of the object type is a + // different existential and hence could match + // distinct types (e.g., if `Self` appeared as an + // argument type), but those cases have already + // been ruled out when we deemed the trait to be + // "object safe". + let original_poly_trait_ref = principal.with_self_ty(this.tcx, object_ty); + let upcast_poly_trait_ref = this.upcast(original_poly_trait_ref, trait_def_id); + let upcast_trait_ref = + this.replace_bound_vars_with_fresh_vars(upcast_poly_trait_ref); + debug!( + "original_poly_trait_ref={:?} upcast_trait_ref={:?} target_trait={:?}", + original_poly_trait_ref, upcast_trait_ref, trait_def_id + ); + upcast_trait_ref.substs + }) + } + + probe::TraitPick => { + let trait_def_id = pick.item.container_id(self.tcx); + + // Make a trait reference `$0 : Trait<$1...$n>` + // consisting entirely of type variables. Later on in + // the process we will unify the transformed-self-type + // of the method with the actual type in order to + // unify some of these variables. + self.fresh_substs_for_item(self.span, trait_def_id) + } + + probe::WhereClausePick(poly_trait_ref) => { + // Where clauses can have bound regions in them. We need to instantiate + // those to convert from a poly-trait-ref to a trait-ref. + self.replace_bound_vars_with_fresh_vars(poly_trait_ref).substs + } + } + } + + fn extract_existential_trait_ref(&mut self, self_ty: Ty<'tcx>, mut closure: F) -> R + where + F: FnMut(&mut ConfirmContext<'a, 'tcx>, Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>) -> R, + { + // If we specified that this is an object method, then the + // self-type ought to be something that can be dereferenced to + // yield an object-type (e.g., `&Object` or `Box` + // etc). + + // FIXME: this feels, like, super dubious + self.fcx + .autoderef(self.span, self_ty) + .include_raw_pointers() + .find_map(|(ty, _)| match ty.kind() { + ty::Dynamic(data, ..) => Some(closure( + self, + ty, + data.principal().unwrap_or_else(|| { + span_bug!(self.span, "calling trait method on empty object?") + }), + )), + _ => None, + }) + .unwrap_or_else(|| { + span_bug!( + self.span, + "self-type `{}` for ObjectPick never dereferenced to an object", + self_ty + ) + }) + } + + fn instantiate_method_substs( + &mut self, + pick: &probe::Pick<'tcx>, + seg: &hir::PathSegment<'_>, + parent_substs: SubstsRef<'tcx>, + ) -> SubstsRef<'tcx> { + // Determine the values for the generic parameters of the method. + // If they were not explicitly supplied, just construct fresh + // variables. + let generics = self.tcx.generics_of(pick.item.def_id); + + let arg_count_correct = >::check_generic_arg_count_for_call( + self.tcx, + self.span, + pick.item.def_id, + generics, + seg, + IsMethodCall::Yes, + ); + + // Create subst for early-bound lifetime parameters, combining + // parameters from the type and those from the method. 
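A user-level sketch of the receiver adjustments recorded by `adjust_self_ty` earlier in this file; the adjustment descriptions are illustrative.

    fn receiver_adjustments() {
        let v = vec![1, 2, 3];
        // `first` is defined on `[T]`, so the receiver is autoderef'd
        // Vec<i32> -> [i32] and then autoref'd to &[i32].
        let _ = v.first();

        let a = [1, 2, 3];
        // For an array receiver the autoref is combined with an unsize step,
        // [i32; 3] -> &[i32; 3] -> &[i32] (the `unsize` flag handled above).
        let _ = a.first();
    }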
+ assert_eq!(generics.parent_count, parent_substs.len()); + + struct MethodSubstsCtxt<'a, 'tcx> { + cfcx: &'a ConfirmContext<'a, 'tcx>, + pick: &'a probe::Pick<'tcx>, + seg: &'a hir::PathSegment<'a>, + } + impl<'a, 'tcx> CreateSubstsForGenericArgsCtxt<'a, 'tcx> for MethodSubstsCtxt<'a, 'tcx> { + fn args_for_def_id( + &mut self, + def_id: DefId, + ) -> (Option<&'a hir::GenericArgs<'a>>, bool) { + if def_id == self.pick.item.def_id { + if let Some(data) = self.seg.args { + return (Some(data), false); + } + } + (None, false) + } + + fn provided_kind( + &mut self, + param: &ty::GenericParamDef, + arg: &GenericArg<'_>, + ) -> subst::GenericArg<'tcx> { + match (¶m.kind, arg) { + (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => { + >::ast_region_to_region(self.cfcx.fcx, lt, Some(param)) + .into() + } + (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => { + self.cfcx.to_ty(ty).into() + } + (GenericParamDefKind::Const { .. }, GenericArg::Const(ct)) => { + self.cfcx.const_arg_to_const(&ct.value, param.def_id).into() + } + (GenericParamDefKind::Type { .. }, GenericArg::Infer(inf)) => { + self.cfcx.ty_infer(Some(param), inf.span).into() + } + (GenericParamDefKind::Const { .. }, GenericArg::Infer(inf)) => { + let tcx = self.cfcx.tcx(); + self.cfcx.ct_infer(tcx.type_of(param.def_id), Some(param), inf.span).into() + } + _ => unreachable!(), + } + } + + fn inferred_kind( + &mut self, + _substs: Option<&[subst::GenericArg<'tcx>]>, + param: &ty::GenericParamDef, + _infer_args: bool, + ) -> subst::GenericArg<'tcx> { + self.cfcx.var_for_def(self.cfcx.span, param) + } + } + >::create_substs_for_generic_args( + self.tcx, + pick.item.def_id, + parent_substs, + false, + None, + &arg_count_correct, + &mut MethodSubstsCtxt { cfcx: self, pick, seg }, + ) + } + + fn unify_receivers( + &mut self, + self_ty: Ty<'tcx>, + method_self_ty: Ty<'tcx>, + pick: &probe::Pick<'tcx>, + substs: SubstsRef<'tcx>, + ) { + debug!( + "unify_receivers: self_ty={:?} method_self_ty={:?} span={:?} pick={:?}", + self_ty, method_self_ty, self.span, pick + ); + let cause = self.cause( + self.span, + ObligationCauseCode::UnifyReceiver(Box::new(UnifyReceiverContext { + assoc_item: pick.item, + param_env: self.param_env, + substs, + })), + ); + match self.at(&cause, self.param_env).sup(method_self_ty, self_ty) { + Ok(InferOk { obligations, value: () }) => { + self.register_predicates(obligations); + } + Err(_) => { + span_bug!( + self.span, + "{} was a subtype of {} but now is not?", + self_ty, + method_self_ty + ); + } + } + } + + // NOTE: this returns the *unnormalized* predicates and method sig. Because of + // inference guessing, the predicates and method signature can't be normalized + // until we unify the `Self` type. + fn instantiate_method_sig( + &mut self, + pick: &probe::Pick<'tcx>, + all_substs: SubstsRef<'tcx>, + ) -> (ty::FnSig<'tcx>, ty::InstantiatedPredicates<'tcx>) { + debug!("instantiate_method_sig(pick={:?}, all_substs={:?})", pick, all_substs); + + // Instantiate the bounds on the method with the + // type/early-bound-regions substitutions performed. There can + // be no late-bound regions appearing here. 
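In user terms, `instantiate_method_substs` above is the split between explicitly written method generics and inferred ones; a sketch, for illustration only:

    fn method_generics() {
        let v = vec![1i32, 2, 3];
        // Explicit: the collection type comes from the turbofish
        // (the `provided_kind` path above).
        let a = v.iter().copied().collect::<Vec<i32>>();
        // Inferred: the same parameter starts as a fresh inference variable
        // (the `inferred_kind` path above) and is resolved from the annotation.
        let b: Vec<i32> = v.iter().copied().collect();
        assert_eq!(a, b);
    }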
+ let def_id = pick.item.def_id; + let method_predicates = self.tcx.predicates_of(def_id).instantiate(self.tcx, all_substs); + + debug!("method_predicates after subst = {:?}", method_predicates); + + let sig = self.tcx.bound_fn_sig(def_id); + + let sig = sig.subst(self.tcx, all_substs); + debug!("type scheme substituted, sig={:?}", sig); + + let sig = self.replace_bound_vars_with_fresh_vars(sig); + debug!("late-bound lifetimes from method instantiated, sig={:?}", sig); + + (sig, method_predicates) + } + + fn add_obligations( + &mut self, + fty: Ty<'tcx>, + all_substs: SubstsRef<'tcx>, + method_predicates: ty::InstantiatedPredicates<'tcx>, + def_id: DefId, + ) { + debug!( + "add_obligations: fty={:?} all_substs={:?} method_predicates={:?} def_id={:?}", + fty, all_substs, method_predicates, def_id + ); + + // FIXME: could replace with the following, but we already calculated `method_predicates`, + // so we just call `predicates_for_generics` directly to avoid redoing work. + // `self.add_required_obligations(self.span, def_id, &all_substs);` + for obligation in traits::predicates_for_generics( + traits::ObligationCause::new(self.span, self.body_id, traits::ItemObligation(def_id)), + self.param_env, + method_predicates, + ) { + self.register_predicate(obligation); + } + + // this is a projection from a trait reference, so we have to + // make sure that the trait reference inputs are well-formed. + self.add_wf_bounds(all_substs, self.call_expr); + + // the function type must also be well-formed (this is not + // implied by the substs being well-formed because of inherent + // impls and late-bound regions - see issue #28609). + self.register_wf_obligation(fty.into(), self.span, traits::WellFormed(None)); + } + + /////////////////////////////////////////////////////////////////////////// + // MISCELLANY + + fn predicates_require_illegal_sized_bound( + &self, + predicates: &ty::InstantiatedPredicates<'tcx>, + ) -> Option { + let sized_def_id = self.tcx.lang_items().sized_trait()?; + + traits::elaborate_predicates(self.tcx, predicates.predicates.iter().copied()) + // We don't care about regions here. + .filter_map(|obligation| match obligation.predicate.kind().skip_binder() { + ty::PredicateKind::Trait(trait_pred) if trait_pred.def_id() == sized_def_id => { + let span = iter::zip(&predicates.predicates, &predicates.spans) + .find_map( + |(p, span)| { + if *p == obligation.predicate { Some(*span) } else { None } + }, + ) + .unwrap_or(rustc_span::DUMMY_SP); + Some((trait_pred, span)) + } + _ => None, + }) + .find_map(|(trait_pred, span)| match trait_pred.self_ty().kind() { + ty::Dynamic(..) => Some(span), + _ => None, + }) + } + + fn enforce_illegal_method_limitations(&self, pick: &probe::Pick<'_>) { + // Disallow calls to the method `drop` defined in the `Drop` trait. 
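The comment above introduces `enforce_illegal_method_limitations`, which (via `check_legal_trait_for_method_call`) forbids calling `Drop::drop` directly. A user-level sketch with made-up names:

    struct Token;

    impl Drop for Token {
        fn drop(&mut self) {
            println!("token dropped");
        }
    }

    fn consume(t: Token) {
        // t.drop();   // rejected with E0040: explicit use of destructor method
        drop(t);       // fine: `std::mem::drop` takes ownership and lets `t` go out of scope
    }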
+ if let Some(trait_def_id) = pick.item.trait_container(self.tcx) { + callee::check_legal_trait_for_method_call( + self.tcx, + self.span, + Some(self.self_expr.span), + self.call_expr.span, + trait_def_id, + ) + } + } + + fn upcast( + &mut self, + source_trait_ref: ty::PolyTraitRef<'tcx>, + target_trait_def_id: DefId, + ) -> ty::PolyTraitRef<'tcx> { + let upcast_trait_refs = + traits::upcast_choices(self.tcx, source_trait_ref, target_trait_def_id); + + // must be exactly one trait ref or we'd get an ambig error etc + if upcast_trait_refs.len() != 1 { + span_bug!( + self.span, + "cannot uniquely upcast `{:?}` to `{:?}`: `{:?}`", + source_trait_ref, + target_trait_def_id, + upcast_trait_refs + ); + } + + upcast_trait_refs.into_iter().next().unwrap() + } + + fn replace_bound_vars_with_fresh_vars(&self, value: ty::Binder<'tcx, T>) -> T + where + T: TypeFoldable<'tcx> + Copy, + { + self.fcx.replace_bound_vars_with_fresh_vars(self.span, infer::FnCall, value) + } +} diff --git a/compiler/rustc_typeck/src/check/method/mod.rs b/compiler/rustc_typeck/src/check/method/mod.rs new file mode 100644 index 000000000..0e678c41f --- /dev/null +++ b/compiler/rustc_typeck/src/check/method/mod.rs @@ -0,0 +1,658 @@ +//! Method lookup: the secret sauce of Rust. See the [rustc dev guide] for more information. +//! +//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/method-lookup.html + +mod confirm; +mod prelude2021; +pub mod probe; +mod suggest; + +pub use self::suggest::SelfSource; +pub use self::MethodError::*; + +use crate::check::{Expectation, FnCtxt}; +use crate::ObligationCause; +use rustc_data_structures::sync::Lrc; +use rustc_errors::{Applicability, Diagnostic}; +use rustc_hir as hir; +use rustc_hir::def::{CtorOf, DefKind, Namespace}; +use rustc_hir::def_id::DefId; +use rustc_infer::infer::{self, InferOk}; +use rustc_middle::ty::subst::Subst; +use rustc_middle::ty::subst::{InternalSubsts, SubstsRef}; +use rustc_middle::ty::{ + self, AssocKind, DefIdTree, GenericParamDefKind, ProjectionPredicate, ProjectionTy, Term, + ToPredicate, Ty, TypeVisitable, +}; +use rustc_span::symbol::Ident; +use rustc_span::Span; +use rustc_trait_selection::traits; +use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt; + +use self::probe::{IsSuggestion, ProbeScope}; + +pub fn provide(providers: &mut ty::query::Providers) { + probe::provide(providers); +} + +#[derive(Clone, Copy, Debug)] +pub struct MethodCallee<'tcx> { + /// Impl method ID, for inherent methods, or trait method ID, otherwise. + pub def_id: DefId, + pub substs: SubstsRef<'tcx>, + + /// Instantiated method signature, i.e., it has been + /// substituted, normalized, and has had late-bound + /// lifetimes replaced with inference variables. + pub sig: ty::FnSig<'tcx>, +} + +#[derive(Debug)] +pub enum MethodError<'tcx> { + // Did not find an applicable method, but we did find various near-misses that may work. + NoMatch(NoMatchData<'tcx>), + + // Multiple methods might apply. + Ambiguity(Vec), + + // Found an applicable method, but it is not visible. The third argument contains a list of + // not-in-scope traits which may work. + PrivateMatch(DefKind, DefId, Vec), + + // Found a `Self: Sized` bound where `Self` is a trait object, also the caller may have + // forgotten to import a trait. 
+ IllegalSizedBound(Vec, bool, Span), + + // Found a match, but the return type is wrong + BadReturnType, +} + +// Contains a list of static methods that may apply, a list of unsatisfied trait predicates which +// could lead to matches if satisfied, and a list of not-in-scope traits which may work. +#[derive(Debug)] +pub struct NoMatchData<'tcx> { + pub static_candidates: Vec, + pub unsatisfied_predicates: + Vec<(ty::Predicate<'tcx>, Option>, Option>)>, + pub out_of_scope_traits: Vec, + pub lev_candidate: Option, + pub mode: probe::Mode, +} + +// A pared down enum describing just the places from which a method +// candidate can arise. Used for error reporting only. +#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] +pub enum CandidateSource { + Impl(DefId), + Trait(DefId /* trait id */), +} + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + /// Determines whether the type `self_ty` supports a method name `method_name` or not. + #[instrument(level = "debug", skip(self))] + pub fn method_exists( + &self, + method_name: Ident, + self_ty: Ty<'tcx>, + call_expr_id: hir::HirId, + allow_private: bool, + ) -> bool { + let mode = probe::Mode::MethodCall; + match self.probe_for_name( + method_name.span, + mode, + method_name, + IsSuggestion(false), + self_ty, + call_expr_id, + ProbeScope::TraitsInScope, + ) { + Ok(..) => true, + Err(NoMatch(..)) => false, + Err(Ambiguity(..)) => true, + Err(PrivateMatch(..)) => allow_private, + Err(IllegalSizedBound(..)) => true, + Err(BadReturnType) => bug!("no return type expectations but got BadReturnType"), + } + } + + /// Adds a suggestion to call the given method to the provided diagnostic. + #[instrument(level = "debug", skip(self, err, call_expr))] + pub(crate) fn suggest_method_call( + &self, + err: &mut Diagnostic, + msg: &str, + method_name: Ident, + self_ty: Ty<'tcx>, + call_expr: &hir::Expr<'_>, + span: Option, + ) { + let params = self + .probe_for_name( + method_name.span, + probe::Mode::MethodCall, + method_name, + IsSuggestion(false), + self_ty, + call_expr.hir_id, + ProbeScope::TraitsInScope, + ) + .map(|pick| { + let sig = self.tcx.fn_sig(pick.item.def_id); + sig.inputs().skip_binder().len().saturating_sub(1) + }) + .unwrap_or(0); + + // Account for `foo.bar`; + let sugg_span = span.unwrap_or(call_expr.span).shrink_to_hi(); + let (suggestion, applicability) = ( + format!("({})", (0..params).map(|_| "_").collect::>().join(", ")), + if params > 0 { Applicability::HasPlaceholders } else { Applicability::MaybeIncorrect }, + ); + + err.span_suggestion_verbose(sugg_span, msg, suggestion, applicability); + } + + /// Performs method lookup. If lookup is successful, it will return the callee + /// and store an appropriate adjustment for the self-expr. In some cases it may + /// report an error (e.g., invoking the `drop` method). + /// + /// # Arguments + /// + /// Given a method call like `foo.bar::(a, b + 1, ...)`: + /// + /// * `self`: the surrounding `FnCtxt` (!) 
+ /// * `self_ty`: the (unadjusted) type of the self expression (`foo`) + /// * `segment`: the name and generic arguments of the method (`bar::`) + /// * `span`: the span for the method call + /// * `call_expr`: the complete method call: (`foo.bar::(...)`) + /// * `self_expr`: the self expression (`foo`) + /// * `args`: the expressions of the arguments (`a, b + 1, ...`) + #[instrument(level = "debug", skip(self, call_expr, self_expr))] + pub fn lookup_method( + &self, + self_ty: Ty<'tcx>, + segment: &hir::PathSegment<'_>, + span: Span, + call_expr: &'tcx hir::Expr<'tcx>, + self_expr: &'tcx hir::Expr<'tcx>, + args: &'tcx [hir::Expr<'tcx>], + ) -> Result, MethodError<'tcx>> { + debug!( + "lookup(method_name={}, self_ty={:?}, call_expr={:?}, self_expr={:?})", + segment.ident, self_ty, call_expr, self_expr + ); + + let pick = + self.lookup_probe(span, segment.ident, self_ty, call_expr, ProbeScope::TraitsInScope)?; + + self.lint_dot_call_from_2018(self_ty, segment, span, call_expr, self_expr, &pick, args); + + for import_id in &pick.import_ids { + debug!("used_trait_import: {:?}", import_id); + Lrc::get_mut(&mut self.typeck_results.borrow_mut().used_trait_imports) + .unwrap() + .insert(*import_id); + } + + self.tcx.check_stability(pick.item.def_id, Some(call_expr.hir_id), span, None); + + let result = + self.confirm_method(span, self_expr, call_expr, self_ty, pick.clone(), segment); + debug!("result = {:?}", result); + + if let Some(span) = result.illegal_sized_bound { + let mut needs_mut = false; + if let ty::Ref(region, t_type, mutability) = self_ty.kind() { + let trait_type = self + .tcx + .mk_ref(*region, ty::TypeAndMut { ty: *t_type, mutbl: mutability.invert() }); + // We probe again to see if there might be a borrow mutability discrepancy. + match self.lookup_probe( + span, + segment.ident, + trait_type, + call_expr, + ProbeScope::TraitsInScope, + ) { + Ok(ref new_pick) if *new_pick != pick => { + needs_mut = true; + } + _ => {} + } + } + + // We probe again, taking all traits into account (not only those in scope). + let mut candidates = match self.lookup_probe( + span, + segment.ident, + self_ty, + call_expr, + ProbeScope::AllTraits, + ) { + // If we find a different result the caller probably forgot to import a trait. + Ok(ref new_pick) if *new_pick != pick => vec![new_pick.item.container_id(self.tcx)], + Err(Ambiguity(ref sources)) => sources + .iter() + .filter_map(|source| { + match *source { + // Note: this cannot come from an inherent impl, + // because the first probing succeeded. 
+ CandidateSource::Impl(def) => self.tcx.trait_id_of_impl(def), + CandidateSource::Trait(_) => None, + } + }) + .collect(), + _ => Vec::new(), + }; + candidates.retain(|candidate| *candidate != self.tcx.parent(result.callee.def_id)); + + return Err(IllegalSizedBound(candidates, needs_mut, span)); + } + + Ok(result.callee) + } + + #[instrument(level = "debug", skip(self, call_expr))] + pub fn lookup_probe( + &self, + span: Span, + method_name: Ident, + self_ty: Ty<'tcx>, + call_expr: &'tcx hir::Expr<'tcx>, + scope: ProbeScope, + ) -> probe::PickResult<'tcx> { + let mode = probe::Mode::MethodCall; + let self_ty = self.resolve_vars_if_possible(self_ty); + self.probe_for_name( + span, + mode, + method_name, + IsSuggestion(false), + self_ty, + call_expr.hir_id, + scope, + ) + } + + pub(super) fn obligation_for_method( + &self, + span: Span, + trait_def_id: DefId, + self_ty: Ty<'tcx>, + opt_input_types: Option<&[Ty<'tcx>]>, + ) -> (traits::Obligation<'tcx, ty::Predicate<'tcx>>, &'tcx ty::List>) + { + // Construct a trait-reference `self_ty : Trait` + let substs = InternalSubsts::for_item(self.tcx, trait_def_id, |param, _| { + match param.kind { + GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => {} + GenericParamDefKind::Type { .. } => { + if param.index == 0 { + return self_ty.into(); + } else if let Some(input_types) = opt_input_types { + return input_types[param.index as usize - 1].into(); + } + } + } + self.var_for_def(span, param) + }); + + let trait_ref = ty::TraitRef::new(trait_def_id, substs); + + // Construct an obligation + let poly_trait_ref = ty::Binder::dummy(trait_ref); + ( + traits::Obligation::misc( + span, + self.body_id, + self.param_env, + poly_trait_ref.without_const().to_predicate(self.tcx), + ), + substs, + ) + } + + pub(super) fn obligation_for_op_method( + &self, + span: Span, + trait_def_id: DefId, + self_ty: Ty<'tcx>, + opt_input_type: Option>, + opt_input_expr: Option<&'tcx hir::Expr<'tcx>>, + expected: Expectation<'tcx>, + ) -> (traits::Obligation<'tcx, ty::Predicate<'tcx>>, &'tcx ty::List>) + { + // Construct a trait-reference `self_ty : Trait` + let substs = InternalSubsts::for_item(self.tcx, trait_def_id, |param, _| { + match param.kind { + GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => {} + GenericParamDefKind::Type { .. 
} => { + if param.index == 0 { + return self_ty.into(); + } else if let Some(input_type) = opt_input_type { + return input_type.into(); + } + } + } + self.var_for_def(span, param) + }); + + let trait_ref = ty::TraitRef::new(trait_def_id, substs); + + // Construct an obligation + let poly_trait_ref = ty::Binder::dummy(trait_ref); + let opt_output_ty = + expected.only_has_type(self).and_then(|ty| (!ty.needs_infer()).then(|| ty)); + let opt_output_assoc_item = self.tcx.associated_items(trait_def_id).find_by_name_and_kind( + self.tcx, + Ident::from_str("Output"), + AssocKind::Type, + trait_def_id, + ); + let output_pred = + opt_output_ty.zip(opt_output_assoc_item).map(|(output_ty, output_assoc_item)| { + ty::Binder::dummy(ty::PredicateKind::Projection(ProjectionPredicate { + projection_ty: ProjectionTy { substs, item_def_id: output_assoc_item.def_id }, + term: Term::Ty(output_ty), + })) + .to_predicate(self.tcx) + }); + + ( + traits::Obligation::new( + traits::ObligationCause::new( + span, + self.body_id, + traits::BinOp { + rhs_span: opt_input_expr.map(|expr| expr.span), + is_lit: opt_input_expr + .map_or(false, |expr| matches!(expr.kind, hir::ExprKind::Lit(_))), + output_pred, + }, + ), + self.param_env, + poly_trait_ref.without_const().to_predicate(self.tcx), + ), + substs, + ) + } + + /// `lookup_method_in_trait` is used for overloaded operators. + /// It does a very narrow slice of what the normal probe/confirm path does. + /// In particular, it doesn't really do any probing: it simply constructs + /// an obligation for a particular trait with the given self type and checks + /// whether that trait is implemented. + #[instrument(level = "debug", skip(self, span, opt_input_types))] + pub(super) fn lookup_method_in_trait( + &self, + span: Span, + m_name: Ident, + trait_def_id: DefId, + self_ty: Ty<'tcx>, + opt_input_types: Option<&[Ty<'tcx>]>, + ) -> Option>> { + debug!( + "lookup_in_trait_adjusted(self_ty={:?}, m_name={}, trait_def_id={:?}, opt_input_types={:?})", + self_ty, m_name, trait_def_id, opt_input_types + ); + + let (obligation, substs) = + self.obligation_for_method(span, trait_def_id, self_ty, opt_input_types); + self.construct_obligation_for_trait( + span, + m_name, + trait_def_id, + obligation, + substs, + None, + false, + ) + } + + pub(super) fn lookup_op_method_in_trait( + &self, + span: Span, + m_name: Ident, + trait_def_id: DefId, + self_ty: Ty<'tcx>, + opt_input_type: Option>, + opt_input_expr: Option<&'tcx hir::Expr<'tcx>>, + expected: Expectation<'tcx>, + ) -> Option>> { + let (obligation, substs) = self.obligation_for_op_method( + span, + trait_def_id, + self_ty, + opt_input_type, + opt_input_expr, + expected, + ); + self.construct_obligation_for_trait( + span, + m_name, + trait_def_id, + obligation, + substs, + opt_input_expr, + true, + ) + } + + // FIXME(#18741): it seems likely that we can consolidate some of this + // code with the other method-lookup code. In particular, the second half + // of this method is basically the same as confirmation. + fn construct_obligation_for_trait( + &self, + span: Span, + m_name: Ident, + trait_def_id: DefId, + obligation: traits::PredicateObligation<'tcx>, + substs: &'tcx ty::List>, + opt_input_expr: Option<&'tcx hir::Expr<'tcx>>, + is_op: bool, + ) -> Option>> { + debug!(?obligation); + + // Now we want to know if this can be matched + if !self.predicate_may_hold(&obligation) { + debug!("--> Cannot match obligation"); + // Cannot be matched, no such method resolution is possible. 
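A user-level sketch of what `obligation_for_op_method` above encodes for an operator expression: a trait obligation for the operator trait plus, when the expected type is known, a projection on its `Output` associated type. All names below are made up.

    use std::ops::Add;

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Meters(f64);

    impl Add for Meters {
        type Output = Meters;
        fn add(self, rhs: Meters) -> Meters {
            Meters(self.0 + rhs.0)
        }
    }

    fn op_demo() {
        // `a + b` type-checks by proving `Meters: Add<Meters>`; the associated
        // `Output = Meters` corresponds to the projection predicate built above.
        assert_eq!(Meters(1.0) + Meters(2.0), Meters(3.0));
    }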
+ return None; + } + + // Trait must have a method named `m_name` and it should not have + // type parameters or early-bound regions. + let tcx = self.tcx; + let Some(method_item) = self.associated_value(trait_def_id, m_name) else { + tcx.sess.delay_span_bug( + span, + "operator trait does not have corresponding operator method", + ); + return None; + }; + let def_id = method_item.def_id; + let generics = tcx.generics_of(def_id); + assert_eq!(generics.params.len(), 0); + + debug!("lookup_in_trait_adjusted: method_item={:?}", method_item); + let mut obligations = vec![]; + + // Instantiate late-bound regions and substitute the trait + // parameters into the method type to get the actual method type. + // + // N.B., instantiate late-bound regions first so that + // `instantiate_type_scheme` can normalize associated types that + // may reference those regions. + let fn_sig = tcx.bound_fn_sig(def_id); + let fn_sig = fn_sig.subst(self.tcx, substs); + let fn_sig = self.replace_bound_vars_with_fresh_vars(span, infer::FnCall, fn_sig); + + let InferOk { value, obligations: o } = if is_op { + self.normalize_op_associated_types_in_as_infer_ok(span, fn_sig, opt_input_expr) + } else { + self.normalize_associated_types_in_as_infer_ok(span, fn_sig) + }; + let fn_sig = { + obligations.extend(o); + value + }; + + // Register obligations for the parameters. This will include the + // `Self` parameter, which in turn has a bound of the main trait, + // so this also effectively registers `obligation` as well. (We + // used to register `obligation` explicitly, but that resulted in + // double error messages being reported.) + // + // Note that as the method comes from a trait, it should not have + // any late-bound regions appearing in its bounds. + let bounds = self.tcx.predicates_of(def_id).instantiate(self.tcx, substs); + + let InferOk { value, obligations: o } = if is_op { + self.normalize_op_associated_types_in_as_infer_ok(span, bounds, opt_input_expr) + } else { + self.normalize_associated_types_in_as_infer_ok(span, bounds) + }; + let bounds = { + obligations.extend(o); + value + }; + + assert!(!bounds.has_escaping_bound_vars()); + + let cause = if is_op { + ObligationCause::new( + span, + self.body_id, + traits::BinOp { + rhs_span: opt_input_expr.map(|expr| expr.span), + is_lit: opt_input_expr + .map_or(false, |expr| matches!(expr.kind, hir::ExprKind::Lit(_))), + output_pred: None, + }, + ) + } else { + traits::ObligationCause::misc(span, self.body_id) + }; + obligations.extend(traits::predicates_for_generics(cause.clone(), self.param_env, bounds)); + + // Also add an obligation for the method type being well-formed. + let method_ty = tcx.mk_fn_ptr(ty::Binder::dummy(fn_sig)); + debug!( + "lookup_in_trait_adjusted: matched method method_ty={:?} obligation={:?}", + method_ty, obligation + ); + obligations.push(traits::Obligation::new( + cause, + self.param_env, + ty::Binder::dummy(ty::PredicateKind::WellFormed(method_ty.into())).to_predicate(tcx), + )); + + let callee = MethodCallee { def_id, substs, sig: fn_sig }; + + debug!("callee = {:?}", callee); + + Some(InferOk { obligations, value: callee }) + } + + /// Performs a [full-qualified function call] (formerly "universal function call") lookup. If + /// lookup is successful, it will return the type of definition and the [`DefId`] of the found + /// function definition. 
+ /// + /// [full-qualified function call]: https://doc.rust-lang.org/reference/expressions/call-expr.html#disambiguating-function-calls + /// + /// # Arguments + /// + /// Given a function call like `Foo::bar::(...)`: + /// + /// * `self`: the surrounding `FnCtxt` (!) + /// * `span`: the span of the call, excluding arguments (`Foo::bar::`) + /// * `method_name`: the identifier of the function within the container type (`bar`) + /// * `self_ty`: the type to search within (`Foo`) + /// * `self_ty_span` the span for the type being searched within (span of `Foo`) + /// * `expr_id`: the [`hir::HirId`] of the expression composing the entire call + #[instrument(level = "debug", skip(self))] + pub fn resolve_fully_qualified_call( + &self, + span: Span, + method_name: Ident, + self_ty: Ty<'tcx>, + self_ty_span: Span, + expr_id: hir::HirId, + ) -> Result<(DefKind, DefId), MethodError<'tcx>> { + debug!( + "resolve_fully_qualified_call: method_name={:?} self_ty={:?} expr_id={:?}", + method_name, self_ty, expr_id, + ); + + let tcx = self.tcx; + + // Check if we have an enum variant. + if let ty::Adt(adt_def, _) = self_ty.kind() { + if adt_def.is_enum() { + let variant_def = adt_def + .variants() + .iter() + .find(|vd| tcx.hygienic_eq(method_name, vd.ident(tcx), adt_def.did())); + if let Some(variant_def) = variant_def { + // Braced variants generate unusable names in value namespace (reserved for + // possible future use), so variants resolved as associated items may refer to + // them as well. It's ok to use the variant's id as a ctor id since an + // error will be reported on any use of such resolution anyway. + let ctor_def_id = variant_def.ctor_def_id.unwrap_or(variant_def.def_id); + tcx.check_stability(ctor_def_id, Some(expr_id), span, Some(method_name.span)); + return Ok(( + DefKind::Ctor(CtorOf::Variant, variant_def.ctor_kind), + ctor_def_id, + )); + } + } + } + + let pick = self.probe_for_name( + span, + probe::Mode::Path, + method_name, + IsSuggestion(false), + self_ty, + expr_id, + ProbeScope::TraitsInScope, + )?; + + self.lint_fully_qualified_call_from_2018( + span, + method_name, + self_ty, + self_ty_span, + expr_id, + &pick, + ); + + debug!("resolve_fully_qualified_call: pick={:?}", pick); + { + let mut typeck_results = self.typeck_results.borrow_mut(); + let used_trait_imports = Lrc::get_mut(&mut typeck_results.used_trait_imports).unwrap(); + for import_id in pick.import_ids { + debug!("resolve_fully_qualified_call: used_trait_import: {:?}", import_id); + used_trait_imports.insert(import_id); + } + } + + let def_kind = pick.item.kind.as_def_kind(); + debug!( + "resolve_fully_qualified_call: def_kind={:?}, def_id={:?}", + def_kind, pick.item.def_id + ); + tcx.check_stability(pick.item.def_id, Some(expr_id), span, Some(method_name.span)); + Ok((def_kind, pick.item.def_id)) + } + + /// Finds item with name `item_name` defined in impl/trait `def_id` + /// and return it, or `None`, if no such item was defined there. 
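A user-level sketch of the kind of paths resolved by `resolve_fully_qualified_call` above (type-relative associated-function calls); the examples are illustrative.

    fn path_calls() {
        // Inherent associated function found through `probe::Mode::Path`.
        let n = i32::from_str_radix("2a", 16).unwrap();
        // Trait-associated function reached through the same path resolution.
        let s = String::default();
        assert_eq!((n, s.len()), (42, 0));
    }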
+ pub fn associated_value(&self, def_id: DefId, item_name: Ident) -> Option { + self.tcx + .associated_items(def_id) + .find_by_name_and_namespace(self.tcx, item_name, Namespace::ValueNS, def_id) + .copied() + } +} diff --git a/compiler/rustc_typeck/src/check/method/prelude2021.rs b/compiler/rustc_typeck/src/check/method/prelude2021.rs new file mode 100644 index 000000000..7c68d9304 --- /dev/null +++ b/compiler/rustc_typeck/src/check/method/prelude2021.rs @@ -0,0 +1,419 @@ +use hir::def_id::DefId; +use hir::HirId; +use hir::ItemKind; +use rustc_ast::Mutability; +use rustc_errors::Applicability; +use rustc_hir as hir; +use rustc_middle::ty::subst::InternalSubsts; +use rustc_middle::ty::{Adt, Array, Ref, Ty}; +use rustc_session::lint::builtin::RUST_2021_PRELUDE_COLLISIONS; +use rustc_span::symbol::kw::{Empty, Underscore}; +use rustc_span::symbol::{sym, Ident}; +use rustc_span::Span; +use rustc_trait_selection::infer::InferCtxtExt; + +use crate::check::{ + method::probe::{self, Pick}, + FnCtxt, +}; + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + pub(super) fn lint_dot_call_from_2018( + &self, + self_ty: Ty<'tcx>, + segment: &hir::PathSegment<'_>, + span: Span, + call_expr: &'tcx hir::Expr<'tcx>, + self_expr: &'tcx hir::Expr<'tcx>, + pick: &Pick<'tcx>, + args: &'tcx [hir::Expr<'tcx>], + ) { + debug!( + "lookup(method_name={}, self_ty={:?}, call_expr={:?}, self_expr={:?})", + segment.ident, self_ty, call_expr, self_expr + ); + + // Rust 2021 and later is already using the new prelude + if span.rust_2021() { + return; + } + + let prelude_or_array_lint = match segment.ident.name { + // `try_into` was added to the prelude in Rust 2021. + sym::try_into => RUST_2021_PRELUDE_COLLISIONS, + // `into_iter` wasn't added to the prelude, + // but `[T; N].into_iter()` doesn't resolve to IntoIterator::into_iter + // before Rust 2021, which results in the same problem. + // It is only a problem for arrays. + sym::into_iter if let Array(..) = self_ty.kind() => { + // In this case, it wasn't really a prelude addition that was the problem. + // Instead, the problem is that the array-into_iter hack will no longer apply in Rust 2021. + rustc_lint::ARRAY_INTO_ITER + } + _ => return, + }; + + // No need to lint if method came from std/core, as that will now be in the prelude + if matches!(self.tcx.crate_name(pick.item.def_id.krate), sym::std | sym::core) { + return; + } + + if matches!(pick.kind, probe::PickKind::InherentImplPick | probe::PickKind::ObjectPick) { + // avoid repeatedly adding unneeded `&*`s + if pick.autoderefs == 1 + && matches!( + pick.autoref_or_ptr_adjustment, + Some(probe::AutorefOrPtrAdjustment::Autoref { .. }) + ) + && matches!(self_ty.kind(), Ref(..)) + { + return; + } + + // if it's an inherent `self` method (not `&self` or `&mut self`), it will take + // precedence over the `TryInto` impl, and thus won't break in 2021 edition + if pick.autoderefs == 0 && pick.autoref_or_ptr_adjustment.is_none() { + return; + } + + // Inherent impls only require not relying on autoref and autoderef in order to + // ensure that the trait implementation won't be used + self.tcx.struct_span_lint_hir( + prelude_or_array_lint, + self_expr.hir_id, + self_expr.span, + |lint| { + let sp = self_expr.span; + + let mut lint = lint.build(&format!( + "trait method `{}` will become ambiguous in Rust 2021", + segment.ident.name + )); + + let derefs = "*".repeat(pick.autoderefs); + + let autoref = match pick.autoref_or_ptr_adjustment { + Some(probe::AutorefOrPtrAdjustment::Autoref { + mutbl: Mutability::Mut, + .. 
+ }) => "&mut ", + Some(probe::AutorefOrPtrAdjustment::Autoref { + mutbl: Mutability::Not, + .. + }) => "&", + Some(probe::AutorefOrPtrAdjustment::ToConstPtr) | None => "", + }; + if let Ok(self_expr) = self.sess().source_map().span_to_snippet(self_expr.span) + { + let self_adjusted = if let Some(probe::AutorefOrPtrAdjustment::ToConstPtr) = + pick.autoref_or_ptr_adjustment + { + format!("{}{} as *const _", derefs, self_expr) + } else { + format!("{}{}{}", autoref, derefs, self_expr) + }; + + lint.span_suggestion( + sp, + "disambiguate the method call", + format!("({})", self_adjusted), + Applicability::MachineApplicable, + ); + } else { + let self_adjusted = if let Some(probe::AutorefOrPtrAdjustment::ToConstPtr) = + pick.autoref_or_ptr_adjustment + { + format!("{}(...) as *const _", derefs) + } else { + format!("{}{}...", autoref, derefs) + }; + lint.span_help( + sp, + &format!("disambiguate the method call with `({})`", self_adjusted,), + ); + } + + lint.emit(); + }, + ); + } else { + // trait implementations require full disambiguation to not clash with the new prelude + // additions (i.e. convert from dot-call to fully-qualified call) + self.tcx.struct_span_lint_hir( + prelude_or_array_lint, + call_expr.hir_id, + call_expr.span, + |lint| { + let sp = call_expr.span; + let trait_name = self.trait_path_or_bare_name( + span, + call_expr.hir_id, + pick.item.container_id(self.tcx), + ); + + let mut lint = lint.build(&format!( + "trait method `{}` will become ambiguous in Rust 2021", + segment.ident.name + )); + + let (self_adjusted, precise) = self.adjust_expr(pick, self_expr, sp); + if precise { + let args = args + .iter() + .skip(1) + .map(|arg| { + let span = arg.span.find_ancestor_inside(sp).unwrap_or_default(); + format!( + ", {}", + self.sess().source_map().span_to_snippet(span).unwrap() + ) + }) + .collect::(); + + lint.span_suggestion( + sp, + "disambiguate the associated function", + format!( + "{}::{}{}({}{})", + trait_name, + segment.ident.name, + if let Some(args) = segment.args.as_ref().and_then(|args| self + .sess() + .source_map() + .span_to_snippet(args.span_ext) + .ok()) + { + // Keep turbofish. + format!("::{}", args) + } else { + String::new() + }, + self_adjusted, + args, + ), + Applicability::MachineApplicable, + ); + } else { + lint.span_help( + sp, + &format!( + "disambiguate the associated function with `{}::{}(...)`", + trait_name, segment.ident, + ), + ); + } + + lint.emit(); + }, + ); + } + } + + pub(super) fn lint_fully_qualified_call_from_2018( + &self, + span: Span, + method_name: Ident, + self_ty: Ty<'tcx>, + self_ty_span: Span, + expr_id: hir::HirId, + pick: &Pick<'tcx>, + ) { + // Rust 2021 and later is already using the new prelude + if span.rust_2021() { + return; + } + + // These are the fully qualified methods added to prelude in Rust 2021 + if !matches!(method_name.name, sym::try_into | sym::try_from | sym::from_iter) { + return; + } + + // No need to lint if method came from std/core, as that will now be in the prelude + if matches!(self.tcx.crate_name(pick.item.def_id.krate), sym::std | sym::core) { + return; + } + + // For from_iter, check if the type actually implements FromIterator. + // If we know it does not, we don't need to warn. 
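For the dot-call half of this lint, the user-facing shape is roughly the following sketch; the trait and type names are invented, and the real lint fires on any non-std trait supplying one of the colliding method names.

```rust
// Pre-2021 code of the kind `lint_dot_call_from_2018` warns about
// (trait and type names are hypothetical).
trait MyTryInto {
    fn try_into(self) -> u8;
}

struct Celsius(u8);

impl MyTryInto for &Celsius {
    fn try_into(self) -> u8 {
        self.0
    }
}

fn demo(c: &Celsius) -> u8 {
    // Before the 2021 edition this resolves to `MyTryInto::try_into`; once
    // `std::convert::TryInto` is in the prelude the call can become ambiguous.
    // Depending on the pick kind, the suggestion is either an explicit receiver
    // adjustment such as `(&*c).try_into()`, or the fully qualified form
    // `MyTryInto::try_into(c)`.
    c.try_into()
}
```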
+ if method_name.name == sym::from_iter { + if let Some(trait_def_id) = self.tcx.get_diagnostic_item(sym::FromIterator) { + if !self + .infcx + .type_implements_trait( + trait_def_id, + self_ty, + InternalSubsts::empty(), + self.param_env, + ) + .may_apply() + { + return; + } + } + } + + // No need to lint if this is an inherent method called on a specific type, like `Vec::foo(...)`, + // since such methods take precedence over trait methods. + if matches!(pick.kind, probe::PickKind::InherentImplPick) { + return; + } + + self.tcx.struct_span_lint_hir(RUST_2021_PRELUDE_COLLISIONS, expr_id, span, |lint| { + // "type" refers to either a type or, more likely, a trait from which + // the associated function or method is from. + let container_id = pick.item.container_id(self.tcx); + let trait_path = self.trait_path_or_bare_name(span, expr_id, container_id); + let trait_generics = self.tcx.generics_of(container_id); + + let trait_name = + if trait_generics.params.len() <= trait_generics.has_self as usize { + trait_path + } else { + let counts = trait_generics.own_counts(); + format!( + "{}<{}>", + trait_path, + std::iter::repeat("'_") + .take(counts.lifetimes) + .chain(std::iter::repeat("_").take( + counts.types + counts.consts - trait_generics.has_self as usize + )) + .collect::>() + .join(", ") + ) + }; + + let mut lint = lint.build(&format!( + "trait-associated function `{}` will become ambiguous in Rust 2021", + method_name.name + )); + + let mut self_ty_name = self_ty_span + .find_ancestor_inside(span) + .and_then(|span| self.sess().source_map().span_to_snippet(span).ok()) + .unwrap_or_else(|| self_ty.to_string()); + + // Get the number of generics the self type has (if an Adt) unless we can determine that + // the user has written the self type with generics already which we (naively) do by looking + // for a "<" in `self_ty_name`. + if !self_ty_name.contains('<') { + if let Adt(def, _) = self_ty.kind() { + let generics = self.tcx.generics_of(def.did()); + if !generics.params.is_empty() { + let counts = generics.own_counts(); + self_ty_name += &format!( + "<{}>", + std::iter::repeat("'_") + .take(counts.lifetimes) + .chain(std::iter::repeat("_").take(counts.types + counts.consts)) + .collect::>() + .join(", ") + ); + } + } + } + lint.span_suggestion( + span, + "disambiguate the associated function", + format!("<{} as {}>::{}", self_ty_name, trait_name, method_name.name,), + Applicability::MachineApplicable, + ); + + lint.emit(); + }); + } + + fn trait_path_or_bare_name( + &self, + span: Span, + expr_hir_id: HirId, + trait_def_id: DefId, + ) -> String { + self.trait_path(span, expr_hir_id, trait_def_id).unwrap_or_else(|| { + let key = self.tcx.def_key(trait_def_id); + format!("{}", key.disambiguated_data.data) + }) + } + + fn trait_path(&self, span: Span, expr_hir_id: HirId, trait_def_id: DefId) -> Option { + let applicable_traits = self.tcx.in_scope_traits(expr_hir_id)?; + let applicable_trait = applicable_traits.iter().find(|t| t.def_id == trait_def_id)?; + if applicable_trait.import_ids.is_empty() { + // The trait was declared within the module, we only need to use its name. + return None; + } + + let import_items: Vec<_> = applicable_trait + .import_ids + .iter() + .map(|&import_id| self.tcx.hir().expect_item(import_id)) + .collect(); + + // Find an identifier with which this trait was imported (note that `_` doesn't count). 
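The fully-qualified half has the same flavour. Below is a sketch of the kind of rewrite the suggestion machinery above produces for `from_iter`; the `_` placeholders mirror the `trait_name`/`self_ty_name` construction, and the concrete iterator is only for the demo.

```rust
use std::iter::FromIterator;

// A pre-2021 call site: `from_iter` resolved through a trait that is not yet
// in the prelude.
fn before() -> Vec<u32> {
    Vec::from_iter(0..3)
}

// The machine-applicable suggestion spells the trait out, inserting `_` for any
// generic parameters it cannot name precisely.
fn after() -> Vec<u32> {
    <Vec<_> as FromIterator<_>>::from_iter(0..3)
}
```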
+ let any_id = import_items + .iter() + .filter_map(|item| if item.ident.name != Underscore { Some(item.ident) } else { None }) + .next(); + if let Some(any_id) = any_id { + if any_id.name == Empty { + // Glob import, so just use its name. + return None; + } else { + return Some(format!("{}", any_id)); + } + } + + // All that is left is `_`! We need to use the full path. It doesn't matter which one we pick, + // so just take the first one. + match import_items[0].kind { + ItemKind::Use(path, _) => Some( + path.segments + .iter() + .map(|segment| segment.ident.to_string()) + .collect::>() + .join("::"), + ), + _ => { + span_bug!(span, "unexpected item kind, expected a use: {:?}", import_items[0].kind); + } + } + } + + /// Creates a string version of the `expr` that includes explicit adjustments. + /// Returns the string and also a bool indicating whether this is a *precise* + /// suggestion. + fn adjust_expr( + &self, + pick: &Pick<'tcx>, + expr: &hir::Expr<'tcx>, + outer: Span, + ) -> (String, bool) { + let derefs = "*".repeat(pick.autoderefs); + + let autoref = match pick.autoref_or_ptr_adjustment { + Some(probe::AutorefOrPtrAdjustment::Autoref { mutbl: Mutability::Mut, .. }) => "&mut ", + Some(probe::AutorefOrPtrAdjustment::Autoref { mutbl: Mutability::Not, .. }) => "&", + Some(probe::AutorefOrPtrAdjustment::ToConstPtr) | None => "", + }; + + let (expr_text, precise) = if let Some(expr_text) = expr + .span + .find_ancestor_inside(outer) + .and_then(|span| self.sess().source_map().span_to_snippet(span).ok()) + { + (expr_text, true) + } else { + ("(..)".to_string(), false) + }; + + let adjusted_text = if let Some(probe::AutorefOrPtrAdjustment::ToConstPtr) = + pick.autoref_or_ptr_adjustment + { + format!("{}{} as *const _", derefs, expr_text) + } else { + format!("{}{}{}", autoref, derefs, expr_text) + }; + + (adjusted_text, precise) + } +} diff --git a/compiler/rustc_typeck/src/check/method/probe.rs b/compiler/rustc_typeck/src/check/method/probe.rs new file mode 100644 index 000000000..efe15fec7 --- /dev/null +++ b/compiler/rustc_typeck/src/check/method/probe.rs @@ -0,0 +1,1932 @@ +use super::suggest; +use super::CandidateSource; +use super::MethodError; +use super::NoMatchData; + +use crate::check::FnCtxt; +use crate::errors::MethodCallOnUnknownType; +use crate::hir::def::DefKind; +use crate::hir::def_id::DefId; + +use rustc_data_structures::fx::FxHashSet; +use rustc_errors::Applicability; +use rustc_hir as hir; +use rustc_hir::def::Namespace; +use rustc_infer::infer::canonical::OriginalQueryValues; +use rustc_infer::infer::canonical::{Canonical, QueryResponse}; +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_infer::infer::{self, InferOk, TyCtxtInferExt}; +use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind}; +use rustc_middle::middle::stability; +use rustc_middle::ty::fast_reject::{simplify_type, TreatParams}; +use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef}; +use rustc_middle::ty::GenericParamDefKind; +use rustc_middle::ty::{self, ParamEnvAnd, ToPredicate, Ty, TyCtxt, TypeFoldable, TypeVisitable}; +use rustc_session::lint; +use rustc_span::def_id::LocalDefId; +use rustc_span::lev_distance::{ + find_best_match_for_name_with_substrings, lev_distance_with_substrings, +}; +use rustc_span::symbol::sym; +use rustc_span::{symbol::Ident, Span, Symbol, DUMMY_SP}; +use rustc_trait_selection::autoderef::{self, Autoderef}; +use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt; +use 
rustc_trait_selection::traits::query::method_autoderef::MethodAutoderefBadTy; +use rustc_trait_selection::traits::query::method_autoderef::{ + CandidateStep, MethodAutoderefStepsResult, +}; +use rustc_trait_selection::traits::query::CanonicalTyGoal; +use rustc_trait_selection::traits::{self, ObligationCause}; +use std::cmp::max; +use std::iter; +use std::mem; +use std::ops::Deref; + +use smallvec::{smallvec, SmallVec}; + +use self::CandidateKind::*; +pub use self::PickKind::*; + +/// Boolean flag used to indicate if this search is for a suggestion +/// or not. If true, we can allow ambiguity and so forth. +#[derive(Clone, Copy, Debug)] +pub struct IsSuggestion(pub bool); + +struct ProbeContext<'a, 'tcx> { + fcx: &'a FnCtxt<'a, 'tcx>, + span: Span, + mode: Mode, + method_name: Option, + return_type: Option>, + + /// This is the OriginalQueryValues for the steps queries + /// that are answered in steps. + orig_steps_var_values: OriginalQueryValues<'tcx>, + steps: &'tcx [CandidateStep<'tcx>], + + inherent_candidates: Vec>, + extension_candidates: Vec>, + impl_dups: FxHashSet, + + /// Collects near misses when the candidate functions are missing a `self` keyword and is only + /// used for error reporting + static_candidates: Vec, + + /// When probing for names, include names that are close to the + /// requested name (by Levensthein distance) + allow_similar_names: bool, + + /// Some(candidate) if there is a private candidate + private_candidate: Option<(DefKind, DefId)>, + + /// Collects near misses when trait bounds for type parameters are unsatisfied and is only used + /// for error reporting + unsatisfied_predicates: + Vec<(ty::Predicate<'tcx>, Option>, Option>)>, + + is_suggestion: IsSuggestion, + + scope_expr_id: hir::HirId, +} + +impl<'a, 'tcx> Deref for ProbeContext<'a, 'tcx> { + type Target = FnCtxt<'a, 'tcx>; + fn deref(&self) -> &Self::Target { + self.fcx + } +} + +#[derive(Debug, Clone)] +struct Candidate<'tcx> { + // Candidates are (I'm not quite sure, but they are mostly) basically + // some metadata on top of a `ty::AssocItem` (without substs). + // + // However, method probing wants to be able to evaluate the predicates + // for a function with the substs applied - for example, if a function + // has `where Self: Sized`, we don't want to consider it unless `Self` + // is actually `Sized`, and similarly, return-type suggestions want + // to consider the "actual" return type. + // + // The way this is handled is through `xform_self_ty`. It contains + // the receiver type of this candidate, but `xform_self_ty`, + // `xform_ret_ty` and `kind` (which contains the predicates) have the + // generic parameters of this candidate substituted with the *same set* + // of inference variables, which acts as some weird sort of "query". + // + // When we check out a candidate, we require `xform_self_ty` to be + // a subtype of the passed-in self-type, and this equates the type + // variables in the rest of the fields. + // + // For example, if we have this candidate: + // ``` + // trait Foo { + // fn foo(&self) where Self: Sized; + // } + // ``` + // + // Then `xform_self_ty` will be `&'erased ?X` and `kind` will contain + // the predicate `?X: Sized`, so if we are evaluating `Foo` for a + // the receiver `&T`, we'll do the subtyping which will make `?X` + // get the right value, then when we evaluate the predicate we'll check + // if `T: Sized`. 
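A compilable version of the `where Self: Sized` scenario sketched in the comment above; the `str` impl is invented for the example, but it shows how the substituted predicate ends up rejecting a candidate.

```rust
trait Foo {
    // The candidate for this method carries the predicate `Self: Sized`, so its
    // `xform_self_ty` is `&?X` together with `?X: Sized`.
    fn foo(&self) where Self: Sized {}
}

impl Foo for str {}

fn demo(s: &str) {
    // Relating the receiver `&str` with `&?X` pins `?X = str`; the probe then
    // has to evaluate `str: Sized`, which fails, so the candidate is rejected:
    // s.foo();   // error: the `foo` method cannot be called on `&str`
    let _ = s;
}
```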
+ xform_self_ty: Ty<'tcx>, + xform_ret_ty: Option>, + item: ty::AssocItem, + kind: CandidateKind<'tcx>, + import_ids: SmallVec<[LocalDefId; 1]>, +} + +#[derive(Debug, Clone)] +enum CandidateKind<'tcx> { + InherentImplCandidate( + SubstsRef<'tcx>, + // Normalize obligations + Vec>, + ), + ObjectCandidate, + TraitCandidate(ty::TraitRef<'tcx>), + WhereClauseCandidate( + // Trait + ty::PolyTraitRef<'tcx>, + ), +} + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +enum ProbeResult { + NoMatch, + BadReturnType, + Match, +} + +/// When adjusting a receiver we often want to do one of +/// +/// - Add a `&` (or `&mut`), converting the receiver from `T` to `&T` (or `&mut T`) +/// - If the receiver has type `*mut T`, convert it to `*const T` +/// +/// This type tells us which one to do. +/// +/// Note that in principle we could do both at the same time. For example, when the receiver has +/// type `T`, we could autoref it to `&T`, then convert to `*const T`. Or, when it has type `*mut +/// T`, we could convert it to `*const T`, then autoref to `&*const T`. However, currently we do +/// (at most) one of these. Either the receiver has type `T` and we convert it to `&T` (or with +/// `mut`), or it has type `*mut T` and we convert it to `*const T`. +#[derive(Debug, PartialEq, Copy, Clone)] +pub enum AutorefOrPtrAdjustment { + /// Receiver has type `T`, add `&` or `&mut` (it `T` is `mut`), and maybe also "unsize" it. + /// Unsizing is used to convert a `[T; N]` to `[T]`, which only makes sense when autorefing. + Autoref { + mutbl: hir::Mutability, + + /// Indicates that the source expression should be "unsized" to a target type. + /// This is special-cased for just arrays unsizing to slices. + unsize: bool, + }, + /// Receiver has type `*mut T`, convert to `*const T` + ToConstPtr, +} + +impl AutorefOrPtrAdjustment { + fn get_unsize(&self) -> bool { + match self { + AutorefOrPtrAdjustment::Autoref { mutbl: _, unsize } => *unsize, + AutorefOrPtrAdjustment::ToConstPtr => false, + } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct Pick<'tcx> { + pub item: ty::AssocItem, + pub kind: PickKind<'tcx>, + pub import_ids: SmallVec<[LocalDefId; 1]>, + + /// Indicates that the source expression should be autoderef'd N times + /// ```ignore (not-rust) + /// A = expr | *expr | **expr | ... + /// ``` + pub autoderefs: usize, + + /// Indicates that we want to add an autoref (and maybe also unsize it), or if the receiver is + /// `*mut T`, convert it to `*const T`. + pub autoref_or_ptr_adjustment: Option, + pub self_ty: Ty<'tcx>, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum PickKind<'tcx> { + InherentImplPick, + ObjectPick, + TraitPick, + WhereClausePick( + // Trait + ty::PolyTraitRef<'tcx>, + ), +} + +pub type PickResult<'tcx> = Result, MethodError<'tcx>>; + +#[derive(PartialEq, Eq, Copy, Clone, Debug)] +pub enum Mode { + // An expression of the form `receiver.method_name(...)`. + // Autoderefs are performed on `receiver`, lookup is done based on the + // `self` argument of the method, and static methods aren't considered. + MethodCall, + // An expression of the form `Type::item` or `::item`. + // No autoderefs are performed, lookup is done based on the type each + // implementation is for, and static methods are included. + Path, +} + +#[derive(PartialEq, Eq, Copy, Clone, Debug)] +pub enum ProbeScope { + // Assemble candidates coming only from traits in scope. + TraitsInScope, + + // Assemble candidates coming from all traits. 
+ AllTraits, +} + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + /// This is used to offer suggestions to users. It returns methods + /// that could have been called which have the desired return + /// type. Some effort is made to rule out methods that, if called, + /// would result in an error (basically, the same criteria we + /// would use to decide if a method is a plausible fit for + /// ambiguity purposes). + #[instrument(level = "debug", skip(self, scope_expr_id))] + pub fn probe_for_return_type( + &self, + span: Span, + mode: Mode, + return_type: Ty<'tcx>, + self_ty: Ty<'tcx>, + scope_expr_id: hir::HirId, + ) -> Vec { + debug!( + "probe(self_ty={:?}, return_type={}, scope_expr_id={})", + self_ty, return_type, scope_expr_id + ); + let method_names = self + .probe_op( + span, + mode, + None, + Some(return_type), + IsSuggestion(true), + self_ty, + scope_expr_id, + ProbeScope::AllTraits, + |probe_cx| Ok(probe_cx.candidate_method_names()), + ) + .unwrap_or_default(); + method_names + .iter() + .flat_map(|&method_name| { + self.probe_op( + span, + mode, + Some(method_name), + Some(return_type), + IsSuggestion(true), + self_ty, + scope_expr_id, + ProbeScope::AllTraits, + |probe_cx| probe_cx.pick(), + ) + .ok() + .map(|pick| pick.item) + }) + .collect() + } + + #[instrument(level = "debug", skip(self, scope_expr_id))] + pub fn probe_for_name( + &self, + span: Span, + mode: Mode, + item_name: Ident, + is_suggestion: IsSuggestion, + self_ty: Ty<'tcx>, + scope_expr_id: hir::HirId, + scope: ProbeScope, + ) -> PickResult<'tcx> { + debug!( + "probe(self_ty={:?}, item_name={}, scope_expr_id={})", + self_ty, item_name, scope_expr_id + ); + self.probe_op( + span, + mode, + Some(item_name), + None, + is_suggestion, + self_ty, + scope_expr_id, + scope, + |probe_cx| probe_cx.pick(), + ) + } + + fn probe_op( + &'a self, + span: Span, + mode: Mode, + method_name: Option, + return_type: Option>, + is_suggestion: IsSuggestion, + self_ty: Ty<'tcx>, + scope_expr_id: hir::HirId, + scope: ProbeScope, + op: OP, + ) -> Result> + where + OP: FnOnce(ProbeContext<'a, 'tcx>) -> Result>, + { + let mut orig_values = OriginalQueryValues::default(); + let param_env_and_self_ty = self.canonicalize_query( + ParamEnvAnd { param_env: self.param_env, value: self_ty }, + &mut orig_values, + ); + + let steps = if mode == Mode::MethodCall { + self.tcx.method_autoderef_steps(param_env_and_self_ty) + } else { + self.probe(|_| { + // Mode::Path - the deref steps is "trivial". This turns + // our CanonicalQuery into a "trivial" QueryResponse. This + // is a bit inefficient, but I don't think that writing + // special handling for this "trivial case" is a good idea. + + let infcx = &self.infcx; + let (ParamEnvAnd { param_env: _, value: self_ty }, canonical_inference_vars) = + infcx.instantiate_canonical_with_fresh_inference_vars( + span, + ¶m_env_and_self_ty, + ); + debug!( + "probe_op: Mode::Path, param_env_and_self_ty={:?} self_ty={:?}", + param_env_and_self_ty, self_ty + ); + MethodAutoderefStepsResult { + steps: infcx.tcx.arena.alloc_from_iter([CandidateStep { + self_ty: self.make_query_response_ignoring_pending_obligations( + canonical_inference_vars, + self_ty, + ), + autoderefs: 0, + from_unsafe_deref: false, + unsize: false, + }]), + opt_bad_ty: None, + reached_recursion_limit: false, + } + }) + }; + + // If our autoderef loop had reached the recursion limit, + // report an overflow error, but continue going on with + // the truncated autoderef list. 
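The recursion-limit case handled by the check that follows is reachable from safe user code; a minimal sketch (the type is invented) of a `Deref` chain that never bottoms out:

```rust
use std::ops::Deref;

struct Endless;

impl Deref for Endless {
    type Target = Endless;
    fn deref(&self) -> &Endless {
        self
    }
}

fn demo(e: Endless) {
    // Probing a method that `Endless` does not have keeps dereferencing
    // `Endless -> Endless -> ...` until the recursion limit is reached; the
    // handling below then reports the overflow but keeps the truncated step list.
    // e.missing_method();   // error[E0055]: reached the recursion limit
    let _ = e;
}
```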
+ if steps.reached_recursion_limit { + self.probe(|_| { + let ty = &steps + .steps + .last() + .unwrap_or_else(|| span_bug!(span, "reached the recursion limit in 0 steps?")) + .self_ty; + let ty = self + .probe_instantiate_query_response(span, &orig_values, ty) + .unwrap_or_else(|_| span_bug!(span, "instantiating {:?} failed?", ty)); + autoderef::report_autoderef_recursion_limit_error(self.tcx, span, ty.value); + }); + } + + // If we encountered an `_` type or an error type during autoderef, this is + // ambiguous. + if let Some(bad_ty) = &steps.opt_bad_ty { + if is_suggestion.0 { + // Ambiguity was encountered during a suggestion. Just keep going. + debug!("ProbeContext: encountered ambiguity in suggestion"); + } else if bad_ty.reached_raw_pointer && !self.tcx.features().arbitrary_self_types { + // this case used to be allowed by the compiler, + // so we do a future-compat lint here for the 2015 edition + // (see https://github.com/rust-lang/rust/issues/46906) + if self.tcx.sess.rust_2018() { + self.tcx.sess.emit_err(MethodCallOnUnknownType { span }); + } else { + self.tcx.struct_span_lint_hir( + lint::builtin::TYVAR_BEHIND_RAW_POINTER, + scope_expr_id, + span, + |lint| { + lint.build("type annotations needed").emit(); + }, + ); + } + } else { + // Encountered a real ambiguity, so abort the lookup. If `ty` is not + // an `Err`, report the right "type annotations needed" error pointing + // to it. + let ty = &bad_ty.ty; + let ty = self + .probe_instantiate_query_response(span, &orig_values, ty) + .unwrap_or_else(|_| span_bug!(span, "instantiating {:?} failed?", ty)); + let ty = self.structurally_resolved_type(span, ty.value); + assert!(matches!(ty.kind(), ty::Error(_))); + return Err(MethodError::NoMatch(NoMatchData { + static_candidates: Vec::new(), + unsatisfied_predicates: Vec::new(), + out_of_scope_traits: Vec::new(), + lev_candidate: None, + mode, + })); + } + } + + debug!("ProbeContext: steps for self_ty={:?} are {:?}", self_ty, steps); + + // this creates one big transaction so that all type variables etc + // that we create during the probe process are removed later + self.probe(|_| { + let mut probe_cx = ProbeContext::new( + self, + span, + mode, + method_name, + return_type, + orig_values, + steps.steps, + is_suggestion, + scope_expr_id, + ); + + probe_cx.assemble_inherent_candidates(); + match scope { + ProbeScope::TraitsInScope => { + probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id) + } + ProbeScope::AllTraits => probe_cx.assemble_extension_candidates_for_all_traits(), + }; + op(probe_cx) + }) + } +} + +pub fn provide(providers: &mut ty::query::Providers) { + providers.method_autoderef_steps = method_autoderef_steps; +} + +fn method_autoderef_steps<'tcx>( + tcx: TyCtxt<'tcx>, + goal: CanonicalTyGoal<'tcx>, +) -> MethodAutoderefStepsResult<'tcx> { + debug!("method_autoderef_steps({:?})", goal); + + tcx.infer_ctxt().enter_with_canonical(DUMMY_SP, &goal, |ref infcx, goal, inference_vars| { + let ParamEnvAnd { param_env, value: self_ty } = goal; + + let mut autoderef = + Autoderef::new(infcx, param_env, hir::CRATE_HIR_ID, DUMMY_SP, self_ty, DUMMY_SP) + .include_raw_pointers() + .silence_errors(); + let mut reached_raw_pointer = false; + let mut steps: Vec<_> = autoderef + .by_ref() + .map(|(ty, d)| { + let step = CandidateStep { + self_ty: infcx.make_query_response_ignoring_pending_obligations( + inference_vars.clone(), + ty, + ), + autoderefs: d, + from_unsafe_deref: reached_raw_pointer, + unsize: false, + }; + if let ty::RawPtr(_) = ty.kind() { + // all 
the subsequent steps will be from_unsafe_deref + reached_raw_pointer = true; + } + step + }) + .collect(); + + let final_ty = autoderef.final_ty(true); + let opt_bad_ty = match final_ty.kind() { + ty::Infer(ty::TyVar(_)) | ty::Error(_) => Some(MethodAutoderefBadTy { + reached_raw_pointer, + ty: infcx + .make_query_response_ignoring_pending_obligations(inference_vars, final_ty), + }), + ty::Array(elem_ty, _) => { + let dereferences = steps.len() - 1; + + steps.push(CandidateStep { + self_ty: infcx.make_query_response_ignoring_pending_obligations( + inference_vars, + infcx.tcx.mk_slice(*elem_ty), + ), + autoderefs: dereferences, + // this could be from an unsafe deref if we had + // a *mut/const [T; N] + from_unsafe_deref: reached_raw_pointer, + unsize: true, + }); + + None + } + _ => None, + }; + + debug!("method_autoderef_steps: steps={:?} opt_bad_ty={:?}", steps, opt_bad_ty); + + MethodAutoderefStepsResult { + steps: tcx.arena.alloc_from_iter(steps), + opt_bad_ty: opt_bad_ty.map(|ty| &*tcx.arena.alloc(ty)), + reached_recursion_limit: autoderef.reached_recursion_limit(), + } + }) +} + +impl<'a, 'tcx> ProbeContext<'a, 'tcx> { + fn new( + fcx: &'a FnCtxt<'a, 'tcx>, + span: Span, + mode: Mode, + method_name: Option, + return_type: Option>, + orig_steps_var_values: OriginalQueryValues<'tcx>, + steps: &'tcx [CandidateStep<'tcx>], + is_suggestion: IsSuggestion, + scope_expr_id: hir::HirId, + ) -> ProbeContext<'a, 'tcx> { + ProbeContext { + fcx, + span, + mode, + method_name, + return_type, + inherent_candidates: Vec::new(), + extension_candidates: Vec::new(), + impl_dups: FxHashSet::default(), + orig_steps_var_values, + steps, + static_candidates: Vec::new(), + allow_similar_names: false, + private_candidate: None, + unsatisfied_predicates: Vec::new(), + is_suggestion, + scope_expr_id, + } + } + + fn reset(&mut self) { + self.inherent_candidates.clear(); + self.extension_candidates.clear(); + self.impl_dups.clear(); + self.static_candidates.clear(); + self.private_candidate = None; + } + + /////////////////////////////////////////////////////////////////////////// + // CANDIDATE ASSEMBLY + + fn push_candidate(&mut self, candidate: Candidate<'tcx>, is_inherent: bool) { + let is_accessible = if let Some(name) = self.method_name { + let item = candidate.item; + let def_scope = self + .tcx + .adjust_ident_and_get_scope(name, item.container_id(self.tcx), self.body_id) + .1; + item.visibility(self.tcx).is_accessible_from(def_scope, self.tcx) + } else { + true + }; + if is_accessible { + if is_inherent { + self.inherent_candidates.push(candidate); + } else { + self.extension_candidates.push(candidate); + } + } else if self.private_candidate.is_none() { + self.private_candidate = + Some((candidate.item.kind.as_def_kind(), candidate.item.def_id)); + } + } + + fn assemble_inherent_candidates(&mut self) { + for step in self.steps.iter() { + self.assemble_probe(&step.self_ty); + } + } + + fn assemble_probe(&mut self, self_ty: &Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>) { + debug!("assemble_probe: self_ty={:?}", self_ty); + let raw_self_ty = self_ty.value.value; + match *raw_self_ty.kind() { + ty::Dynamic(data, ..) if let Some(p) = data.principal() => { + // Subtle: we can't use `instantiate_query_response` here: using it will + // commit to all of the type equalities assumed by inference going through + // autoderef (see the `method-probe-no-guessing` test). 
+ // + // However, in this code, it is OK if we end up with an object type that is + // "more general" than the object type that we are evaluating. For *every* + // object type `MY_OBJECT`, a function call that goes through a trait-ref + // of the form `::func` is a valid + // `ObjectCandidate`, and it should be discoverable "exactly" through one + // of the iterations in the autoderef loop, so there is no problem with it + // being discoverable in another one of these iterations. + // + // Using `instantiate_canonical_with_fresh_inference_vars` on our + // `Canonical>>` and then *throwing away* the + // `CanonicalVarValues` will exactly give us such a generalization - it + // will still match the original object type, but it won't pollute our + // type variables in any form, so just do that! + let (QueryResponse { value: generalized_self_ty, .. }, _ignored_var_values) = + self.fcx + .instantiate_canonical_with_fresh_inference_vars(self.span, self_ty); + + self.assemble_inherent_candidates_from_object(generalized_self_ty); + self.assemble_inherent_impl_candidates_for_type(p.def_id()); + if self.tcx.has_attr(p.def_id(), sym::rustc_has_incoherent_inherent_impls) { + self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty); + } + } + ty::Adt(def, _) => { + let def_id = def.did(); + self.assemble_inherent_impl_candidates_for_type(def_id); + if self.tcx.has_attr(def_id, sym::rustc_has_incoherent_inherent_impls) { + self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty); + } + } + ty::Foreign(did) => { + self.assemble_inherent_impl_candidates_for_type(did); + if self.tcx.has_attr(did, sym::rustc_has_incoherent_inherent_impls) { + self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty); + } + } + ty::Param(p) => { + self.assemble_inherent_candidates_from_param(p); + } + ty::Bool + | ty::Char + | ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Str + | ty::Array(..) + | ty::Slice(_) + | ty::RawPtr(_) + | ty::Ref(..) + | ty::Never + | ty::Tuple(..) => self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty), + _ => {} + } + } + + fn assemble_inherent_candidates_for_incoherent_ty(&mut self, self_ty: Ty<'tcx>) { + let Some(simp) = simplify_type(self.tcx, self_ty, TreatParams::AsInfer) else { + bug!("unexpected incoherent type: {:?}", self_ty) + }; + for &impl_def_id in self.tcx.incoherent_impls(simp) { + self.assemble_inherent_impl_probe(impl_def_id); + } + } + + fn assemble_inherent_impl_candidates_for_type(&mut self, def_id: DefId) { + let impl_def_ids = self.tcx.at(self.span).inherent_impls(def_id); + for &impl_def_id in impl_def_ids.iter() { + self.assemble_inherent_impl_probe(impl_def_id); + } + } + + fn assemble_inherent_impl_probe(&mut self, impl_def_id: DefId) { + if !self.impl_dups.insert(impl_def_id) { + return; // already visited + } + + debug!("assemble_inherent_impl_probe {:?}", impl_def_id); + + for item in self.impl_or_trait_item(impl_def_id) { + if !self.has_applicable_self(&item) { + // No receiver declared. Not a candidate. + self.record_static_candidate(CandidateSource::Impl(impl_def_id)); + continue; + } + + let (impl_ty, impl_substs) = self.impl_ty_and_substs(impl_def_id); + let impl_ty = impl_ty.subst(self.tcx, impl_substs); + + debug!("impl_ty: {:?}", impl_ty); + + // Determine the receiver type that the method itself expects. 
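The "receiver type that the method itself expects", computed next via `xform_self_ty`, can be pictured with a small invented impl: the method's declared receiver with the impl's generic parameters replaced by fresh inference variables.

```rust
// Illustrative only; `Wrapper` is made up.
struct Wrapper<T>(T);

impl<T> Wrapper<T> {
    fn get(&self) -> &T {
        &self.0
    }
}

fn demo(x: Wrapper<u8>) -> u8 {
    // The probe's `xform_self_ty` for this candidate is `&Wrapper<?T>` (the
    // declared receiver with the impl parameter replaced by a fresh inference
    // variable); relating it with the actual receiver `&Wrapper<u8>` pins
    // `?T = u8`.
    *x.get()
}
```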
+ let (xform_self_ty, xform_ret_ty) = self.xform_self_ty(&item, impl_ty, impl_substs); + debug!("xform_self_ty: {:?}, xform_ret_ty: {:?}", xform_self_ty, xform_ret_ty); + + // We can't use normalize_associated_types_in as it will pollute the + // fcx's fulfillment context after this probe is over. + // Note: we only normalize `xform_self_ty` here since the normalization + // of the return type can lead to inference results that prohibit + // valid candidates from being found, see issue #85671 + // FIXME Postponing the normalization of the return type likely only hides a deeper bug, + // which might be caused by the `param_env` itself. The clauses of the `param_env` + // maybe shouldn't include `Param`s, but rather fresh variables or be canonicalized, + // see issue #89650 + let cause = traits::ObligationCause::misc(self.span, self.body_id); + let selcx = &mut traits::SelectionContext::new(self.fcx); + let traits::Normalized { value: xform_self_ty, obligations } = + traits::normalize(selcx, self.param_env, cause, xform_self_ty); + debug!( + "assemble_inherent_impl_probe after normalization: xform_self_ty = {:?}/{:?}", + xform_self_ty, xform_ret_ty + ); + + self.push_candidate( + Candidate { + xform_self_ty, + xform_ret_ty, + item, + kind: InherentImplCandidate(impl_substs, obligations), + import_ids: smallvec![], + }, + true, + ); + } + } + + fn assemble_inherent_candidates_from_object(&mut self, self_ty: Ty<'tcx>) { + debug!("assemble_inherent_candidates_from_object(self_ty={:?})", self_ty); + + let principal = match self_ty.kind() { + ty::Dynamic(ref data, ..) => Some(data), + _ => None, + } + .and_then(|data| data.principal()) + .unwrap_or_else(|| { + span_bug!( + self.span, + "non-object {:?} in assemble_inherent_candidates_from_object", + self_ty + ) + }); + + // It is illegal to invoke a method on a trait instance that refers to + // the `Self` type. An [`ObjectSafetyViolation::SupertraitSelf`] error + // will be reported by `object_safety.rs` if the method refers to the + // `Self` type anywhere other than the receiver. Here, we use a + // substitution that replaces `Self` with the object type itself. Hence, + // a `&self` method will wind up with an argument type like `&dyn Trait`. + let trait_ref = principal.with_self_ty(self.tcx, self_ty); + self.elaborate_bounds(iter::once(trait_ref), |this, new_trait_ref, item| { + let new_trait_ref = this.erase_late_bound_regions(new_trait_ref); + + let (xform_self_ty, xform_ret_ty) = + this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs); + this.push_candidate( + Candidate { + xform_self_ty, + xform_ret_ty, + item, + kind: ObjectCandidate, + import_ids: smallvec![], + }, + true, + ); + }); + } + + fn assemble_inherent_candidates_from_param(&mut self, param_ty: ty::ParamTy) { + // FIXME: do we want to commit to this behavior for param bounds? + debug!("assemble_inherent_candidates_from_param(param_ty={:?})", param_ty); + + let bounds = self.param_env.caller_bounds().iter().filter_map(|predicate| { + let bound_predicate = predicate.kind(); + match bound_predicate.skip_binder() { + ty::PredicateKind::Trait(trait_predicate) => { + match *trait_predicate.trait_ref.self_ty().kind() { + ty::Param(p) if p == param_ty => { + Some(bound_predicate.rebind(trait_predicate.trait_ref)) + } + _ => None, + } + } + ty::PredicateKind::Subtype(..) + | ty::PredicateKind::Coerce(..) + | ty::PredicateKind::Projection(..) + | ty::PredicateKind::RegionOutlives(..) + | ty::PredicateKind::WellFormed(..) + | ty::PredicateKind::ObjectSafe(..) 
+ | ty::PredicateKind::ClosureKind(..) + | ty::PredicateKind::TypeOutlives(..) + | ty::PredicateKind::ConstEvaluatable(..) + | ty::PredicateKind::ConstEquate(..) + | ty::PredicateKind::TypeWellFormedFromEnv(..) => None, + } + }); + + self.elaborate_bounds(bounds, |this, poly_trait_ref, item| { + let trait_ref = this.erase_late_bound_regions(poly_trait_ref); + + let (xform_self_ty, xform_ret_ty) = + this.xform_self_ty(&item, trait_ref.self_ty(), trait_ref.substs); + + // Because this trait derives from a where-clause, it + // should not contain any inference variables or other + // artifacts. This means it is safe to put into the + // `WhereClauseCandidate` and (eventually) into the + // `WhereClausePick`. + assert!(!trait_ref.substs.needs_infer()); + + this.push_candidate( + Candidate { + xform_self_ty, + xform_ret_ty, + item, + kind: WhereClauseCandidate(poly_trait_ref), + import_ids: smallvec![], + }, + true, + ); + }); + } + + // Do a search through a list of bounds, using a callback to actually + // create the candidates. + fn elaborate_bounds( + &mut self, + bounds: impl Iterator>, + mut mk_cand: F, + ) where + F: for<'b> FnMut(&mut ProbeContext<'b, 'tcx>, ty::PolyTraitRef<'tcx>, ty::AssocItem), + { + let tcx = self.tcx; + for bound_trait_ref in traits::transitive_bounds(tcx, bounds) { + debug!("elaborate_bounds(bound_trait_ref={:?})", bound_trait_ref); + for item in self.impl_or_trait_item(bound_trait_ref.def_id()) { + if !self.has_applicable_self(&item) { + self.record_static_candidate(CandidateSource::Trait(bound_trait_ref.def_id())); + } else { + mk_cand(self, bound_trait_ref, item); + } + } + } + } + + fn assemble_extension_candidates_for_traits_in_scope(&mut self, expr_hir_id: hir::HirId) { + let mut duplicates = FxHashSet::default(); + let opt_applicable_traits = self.tcx.in_scope_traits(expr_hir_id); + if let Some(applicable_traits) = opt_applicable_traits { + for trait_candidate in applicable_traits.iter() { + let trait_did = trait_candidate.def_id; + if duplicates.insert(trait_did) { + self.assemble_extension_candidates_for_trait( + &trait_candidate.import_ids, + trait_did, + ); + } + } + } + } + + fn assemble_extension_candidates_for_all_traits(&mut self) { + let mut duplicates = FxHashSet::default(); + for trait_info in suggest::all_traits(self.tcx) { + if duplicates.insert(trait_info.def_id) { + self.assemble_extension_candidates_for_trait(&smallvec![], trait_info.def_id); + } + } + } + + pub fn matches_return_type( + &self, + method: &ty::AssocItem, + self_ty: Option>, + expected: Ty<'tcx>, + ) -> bool { + match method.kind { + ty::AssocKind::Fn => { + let fty = self.tcx.bound_fn_sig(method.def_id); + self.probe(|_| { + let substs = self.fresh_substs_for_item(self.span, method.def_id); + let fty = fty.subst(self.tcx, substs); + let fty = + self.replace_bound_vars_with_fresh_vars(self.span, infer::FnCall, fty); + + if let Some(self_ty) = self_ty { + if self + .at(&ObligationCause::dummy(), self.param_env) + .sup(fty.inputs()[0], self_ty) + .is_err() + { + return false; + } + } + self.can_sub(self.param_env, fty.output(), expected).is_ok() + }) + } + _ => false, + } + } + + fn assemble_extension_candidates_for_trait( + &mut self, + import_ids: &SmallVec<[LocalDefId; 1]>, + trait_def_id: DefId, + ) { + debug!("assemble_extension_candidates_for_trait(trait_def_id={:?})", trait_def_id); + let trait_substs = self.fresh_item_substs(trait_def_id); + let trait_ref = ty::TraitRef::new(trait_def_id, trait_substs); + + if self.tcx.is_trait_alias(trait_def_id) { + // For trait 
aliases, assume all supertraits are relevant. + let bounds = iter::once(ty::Binder::dummy(trait_ref)); + self.elaborate_bounds(bounds, |this, new_trait_ref, item| { + let new_trait_ref = this.erase_late_bound_regions(new_trait_ref); + + let (xform_self_ty, xform_ret_ty) = + this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs); + this.push_candidate( + Candidate { + xform_self_ty, + xform_ret_ty, + item, + import_ids: import_ids.clone(), + kind: TraitCandidate(new_trait_ref), + }, + false, + ); + }); + } else { + debug_assert!(self.tcx.is_trait(trait_def_id)); + for item in self.impl_or_trait_item(trait_def_id) { + // Check whether `trait_def_id` defines a method with suitable name. + if !self.has_applicable_self(&item) { + debug!("method has inapplicable self"); + self.record_static_candidate(CandidateSource::Trait(trait_def_id)); + continue; + } + + let (xform_self_ty, xform_ret_ty) = + self.xform_self_ty(&item, trait_ref.self_ty(), trait_substs); + self.push_candidate( + Candidate { + xform_self_ty, + xform_ret_ty, + item, + import_ids: import_ids.clone(), + kind: TraitCandidate(trait_ref), + }, + false, + ); + } + } + } + + fn candidate_method_names(&self) -> Vec { + let mut set = FxHashSet::default(); + let mut names: Vec<_> = self + .inherent_candidates + .iter() + .chain(&self.extension_candidates) + .filter(|candidate| { + if let Some(return_ty) = self.return_type { + self.matches_return_type(&candidate.item, None, return_ty) + } else { + true + } + }) + .map(|candidate| candidate.item.ident(self.tcx)) + .filter(|&name| set.insert(name)) + .collect(); + + // Sort them by the name so we have a stable result. + names.sort_by(|a, b| a.as_str().partial_cmp(b.as_str()).unwrap()); + names + } + + /////////////////////////////////////////////////////////////////////////// + // THE ACTUAL SEARCH + + fn pick(mut self) -> PickResult<'tcx> { + assert!(self.method_name.is_some()); + + if let Some(r) = self.pick_core() { + return r; + } + + debug!("pick: actual search failed, assemble diagnostics"); + + let static_candidates = mem::take(&mut self.static_candidates); + let private_candidate = self.private_candidate.take(); + let unsatisfied_predicates = mem::take(&mut self.unsatisfied_predicates); + + // things failed, so lets look at all traits, for diagnostic purposes now: + self.reset(); + + let span = self.span; + let tcx = self.tcx; + + self.assemble_extension_candidates_for_all_traits(); + + let out_of_scope_traits = match self.pick_core() { + Some(Ok(p)) => vec![p.item.container_id(self.tcx)], + //Some(Ok(p)) => p.iter().map(|p| p.item.container().id()).collect(), + Some(Err(MethodError::Ambiguity(v))) => v + .into_iter() + .map(|source| match source { + CandidateSource::Trait(id) => id, + CandidateSource::Impl(impl_id) => match tcx.trait_id_of_impl(impl_id) { + Some(id) => id, + None => span_bug!(span, "found inherent method when looking at traits"), + }, + }) + .collect(), + Some(Err(MethodError::NoMatch(NoMatchData { + out_of_scope_traits: others, .. 
+ }))) => { + assert!(others.is_empty()); + vec![] + } + _ => vec![], + }; + + if let Some((kind, def_id)) = private_candidate { + return Err(MethodError::PrivateMatch(kind, def_id, out_of_scope_traits)); + } + let lev_candidate = self.probe_for_lev_candidate()?; + + Err(MethodError::NoMatch(NoMatchData { + static_candidates, + unsatisfied_predicates, + out_of_scope_traits, + lev_candidate, + mode: self.mode, + })) + } + + fn pick_core(&mut self) -> Option> { + let mut unstable_candidates = Vec::new(); + let pick = self.pick_all_method(Some(&mut unstable_candidates)); + + // In this case unstable picking is done by `pick_method`. + if !self.tcx.sess.opts.unstable_opts.pick_stable_methods_before_any_unstable { + return pick; + } + + match pick { + // Emit a lint if there are unstable candidates alongside the stable ones. + // + // We suppress warning if we're picking the method only because it is a + // suggestion. + Some(Ok(ref p)) if !self.is_suggestion.0 && !unstable_candidates.is_empty() => { + self.emit_unstable_name_collision_hint(p, &unstable_candidates); + pick + } + Some(_) => pick, + None => self.pick_all_method(None), + } + } + + fn pick_all_method( + &mut self, + mut unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>, + ) -> Option> { + let steps = self.steps.clone(); + steps + .iter() + .filter(|step| { + debug!("pick_all_method: step={:?}", step); + // skip types that are from a type error or that would require dereferencing + // a raw pointer + !step.self_ty.references_error() && !step.from_unsafe_deref + }) + .flat_map(|step| { + let InferOk { value: self_ty, obligations: _ } = self + .fcx + .probe_instantiate_query_response( + self.span, + &self.orig_steps_var_values, + &step.self_ty, + ) + .unwrap_or_else(|_| { + span_bug!(self.span, "{:?} was applicable but now isn't?", step.self_ty) + }); + self.pick_by_value_method(step, self_ty, unstable_candidates.as_deref_mut()) + .or_else(|| { + self.pick_autorefd_method( + step, + self_ty, + hir::Mutability::Not, + unstable_candidates.as_deref_mut(), + ) + .or_else(|| { + self.pick_autorefd_method( + step, + self_ty, + hir::Mutability::Mut, + unstable_candidates.as_deref_mut(), + ) + }) + .or_else(|| { + self.pick_const_ptr_method( + step, + self_ty, + unstable_candidates.as_deref_mut(), + ) + }) + }) + }) + .next() + } + + /// For each type `T` in the step list, this attempts to find a method where + /// the (transformed) self type is exactly `T`. We do however do one + /// transformation on the adjustment: if we are passing a region pointer in, + /// we will potentially *reborrow* it to a shorter lifetime. This allows us + /// to transparently pass `&mut` pointers, in particular, without consuming + /// them for their entire lifetime. 
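To make the step/pick interplay described above concrete, a small user-level example assuming the usual std impls; the comment only fixes the search order, the pick itself is whatever the probe returns first.

```rust
use std::rc::Rc;

fn demo(v: Rc<Vec<u8>>) -> usize {
    // The candidate steps for this receiver are roughly
    //     Rc<Vec<u8>>  ->  Vec<u8>  ->  [u8]
    // and at each step the probe tries a by-value pick first, then `&` and
    // `&mut` autoref picks, then the `*mut T -> *const T` pick. `Vec::len`
    // matches at the `Vec<u8>` step with a `&` autoref.
    v.len()
}
```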
+ fn pick_by_value_method( + &mut self, + step: &CandidateStep<'tcx>, + self_ty: Ty<'tcx>, + unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>, + ) -> Option> { + if step.unsize { + return None; + } + + self.pick_method(self_ty, unstable_candidates).map(|r| { + r.map(|mut pick| { + pick.autoderefs = step.autoderefs; + + // Insert a `&*` or `&mut *` if this is a reference type: + if let ty::Ref(_, _, mutbl) = *step.self_ty.value.value.kind() { + pick.autoderefs += 1; + pick.autoref_or_ptr_adjustment = Some(AutorefOrPtrAdjustment::Autoref { + mutbl, + unsize: pick.autoref_or_ptr_adjustment.map_or(false, |a| a.get_unsize()), + }) + } + + pick + }) + }) + } + + fn pick_autorefd_method( + &mut self, + step: &CandidateStep<'tcx>, + self_ty: Ty<'tcx>, + mutbl: hir::Mutability, + unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>, + ) -> Option> { + let tcx = self.tcx; + + // In general, during probing we erase regions. + let region = tcx.lifetimes.re_erased; + + let autoref_ty = tcx.mk_ref(region, ty::TypeAndMut { ty: self_ty, mutbl }); + self.pick_method(autoref_ty, unstable_candidates).map(|r| { + r.map(|mut pick| { + pick.autoderefs = step.autoderefs; + pick.autoref_or_ptr_adjustment = + Some(AutorefOrPtrAdjustment::Autoref { mutbl, unsize: step.unsize }); + pick + }) + }) + } + + /// If `self_ty` is `*mut T` then this picks `*const T` methods. The reason why we have a + /// special case for this is because going from `*mut T` to `*const T` with autoderefs and + /// autorefs would require dereferencing the pointer, which is not safe. + fn pick_const_ptr_method( + &mut self, + step: &CandidateStep<'tcx>, + self_ty: Ty<'tcx>, + unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>, + ) -> Option> { + // Don't convert an unsized reference to ptr + if step.unsize { + return None; + } + + let &ty::RawPtr(ty::TypeAndMut { ty, mutbl: hir::Mutability::Mut }) = self_ty.kind() else { + return None; + }; + + let const_self_ty = ty::TypeAndMut { ty, mutbl: hir::Mutability::Not }; + let const_ptr_ty = self.tcx.mk_ptr(const_self_ty); + self.pick_method(const_ptr_ty, unstable_candidates).map(|r| { + r.map(|mut pick| { + pick.autoderefs = step.autoderefs; + pick.autoref_or_ptr_adjustment = Some(AutorefOrPtrAdjustment::ToConstPtr); + pick + }) + }) + } + + fn pick_method_with_unstable(&mut self, self_ty: Ty<'tcx>) -> Option> { + debug!("pick_method_with_unstable(self_ty={})", self.ty_to_string(self_ty)); + + let mut possibly_unsatisfied_predicates = Vec::new(); + let mut unstable_candidates = Vec::new(); + + for (kind, candidates) in + &[("inherent", &self.inherent_candidates), ("extension", &self.extension_candidates)] + { + debug!("searching {} candidates", kind); + let res = self.consider_candidates( + self_ty, + candidates.iter(), + &mut possibly_unsatisfied_predicates, + Some(&mut unstable_candidates), + ); + if let Some(pick) = res { + if !self.is_suggestion.0 && !unstable_candidates.is_empty() { + if let Ok(p) = &pick { + // Emit a lint if there are unstable candidates alongside the stable ones. + // + // We suppress warning if we're picking the method only because it is a + // suggestion. 
+ self.emit_unstable_name_collision_hint(p, &unstable_candidates); + } + } + return Some(pick); + } + } + + debug!("searching unstable candidates"); + let res = self.consider_candidates( + self_ty, + unstable_candidates.iter().map(|(c, _)| c), + &mut possibly_unsatisfied_predicates, + None, + ); + if res.is_none() { + self.unsatisfied_predicates.extend(possibly_unsatisfied_predicates); + } + res + } + + fn pick_method( + &mut self, + self_ty: Ty<'tcx>, + mut unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>, + ) -> Option> { + if !self.tcx.sess.opts.unstable_opts.pick_stable_methods_before_any_unstable { + return self.pick_method_with_unstable(self_ty); + } + + debug!("pick_method(self_ty={})", self.ty_to_string(self_ty)); + + let mut possibly_unsatisfied_predicates = Vec::new(); + + for (kind, candidates) in + &[("inherent", &self.inherent_candidates), ("extension", &self.extension_candidates)] + { + debug!("searching {} candidates", kind); + let res = self.consider_candidates( + self_ty, + candidates.iter(), + &mut possibly_unsatisfied_predicates, + unstable_candidates.as_deref_mut(), + ); + if let Some(pick) = res { + return Some(pick); + } + } + + // `pick_method` may be called twice for the same self_ty if no stable methods + // match. Only extend once. + if unstable_candidates.is_some() { + self.unsatisfied_predicates.extend(possibly_unsatisfied_predicates); + } + None + } + + fn consider_candidates<'b, ProbesIter>( + &self, + self_ty: Ty<'tcx>, + probes: ProbesIter, + possibly_unsatisfied_predicates: &mut Vec<( + ty::Predicate<'tcx>, + Option>, + Option>, + )>, + unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>, + ) -> Option> + where + ProbesIter: Iterator> + Clone, + 'tcx: 'b, + { + let mut applicable_candidates: Vec<_> = probes + .clone() + .map(|probe| { + (probe, self.consider_probe(self_ty, probe, possibly_unsatisfied_predicates)) + }) + .filter(|&(_, status)| status != ProbeResult::NoMatch) + .collect(); + + debug!("applicable_candidates: {:?}", applicable_candidates); + + if applicable_candidates.len() > 1 { + if let Some(pick) = + self.collapse_candidates_to_trait_pick(self_ty, &applicable_candidates) + { + return Some(Ok(pick)); + } + } + + if let Some(uc) = unstable_candidates { + applicable_candidates.retain(|&(p, _)| { + if let stability::EvalResult::Deny { feature, .. 
} = + self.tcx.eval_stability(p.item.def_id, None, self.span, None) + { + uc.push((p.clone(), feature)); + return false; + } + true + }); + } + + if applicable_candidates.len() > 1 { + let sources = probes.map(|p| self.candidate_source(p, self_ty)).collect(); + return Some(Err(MethodError::Ambiguity(sources))); + } + + applicable_candidates.pop().map(|(probe, status)| { + if status == ProbeResult::Match { + Ok(probe.to_unadjusted_pick(self_ty)) + } else { + Err(MethodError::BadReturnType) + } + }) + } + + fn emit_unstable_name_collision_hint( + &self, + stable_pick: &Pick<'_>, + unstable_candidates: &[(Candidate<'tcx>, Symbol)], + ) { + self.tcx.struct_span_lint_hir( + lint::builtin::UNSTABLE_NAME_COLLISIONS, + self.scope_expr_id, + self.span, + |lint| { + let def_kind = stable_pick.item.kind.as_def_kind(); + let mut diag = lint.build(&format!( + "{} {} with this name may be added to the standard library in the future", + def_kind.article(), + def_kind.descr(stable_pick.item.def_id), + )); + match (stable_pick.item.kind, stable_pick.item.container) { + (ty::AssocKind::Fn, _) => { + // FIXME: This should be a `span_suggestion` instead of `help` + // However `self.span` only + // highlights the method name, so we can't use it. Also consider reusing + // the code from `report_method_error()`. + diag.help(&format!( + "call with fully qualified syntax `{}(...)` to keep using the current \ + method", + self.tcx.def_path_str(stable_pick.item.def_id), + )); + } + (ty::AssocKind::Const, ty::AssocItemContainer::TraitContainer) => { + let def_id = stable_pick.item.container_id(self.tcx); + diag.span_suggestion( + self.span, + "use the fully qualified path to the associated const", + format!( + "<{} as {}>::{}", + stable_pick.self_ty, + self.tcx.def_path_str(def_id), + stable_pick.item.name + ), + Applicability::MachineApplicable, + ); + } + _ => {} + } + if self.tcx.sess.is_nightly_build() { + for (candidate, feature) in unstable_candidates { + diag.help(&format!( + "add `#![feature({})]` to the crate attributes to enable `{}`", + feature, + self.tcx.def_path_str(candidate.item.def_id), + )); + } + } + + diag.emit(); + }, + ); + } + + fn select_trait_candidate( + &self, + trait_ref: ty::TraitRef<'tcx>, + ) -> traits::SelectionResult<'tcx, traits::Selection<'tcx>> { + let cause = traits::ObligationCause::misc(self.span, self.body_id); + let predicate = ty::Binder::dummy(trait_ref).to_poly_trait_predicate(); + let obligation = traits::Obligation::new(cause, self.param_env, predicate); + traits::SelectionContext::new(self).select(&obligation) + } + + fn candidate_source(&self, candidate: &Candidate<'tcx>, self_ty: Ty<'tcx>) -> CandidateSource { + match candidate.kind { + InherentImplCandidate(..) => { + CandidateSource::Impl(candidate.item.container_id(self.tcx)) + } + ObjectCandidate | WhereClauseCandidate(_) => { + CandidateSource::Trait(candidate.item.container_id(self.tcx)) + } + TraitCandidate(trait_ref) => self.probe(|_| { + let _ = self + .at(&ObligationCause::dummy(), self.param_env) + .define_opaque_types(false) + .sup(candidate.xform_self_ty, self_ty); + match self.select_trait_candidate(trait_ref) { + Ok(Some(traits::ImplSource::UserDefined(ref impl_data))) => { + // If only a single impl matches, make the error message point + // to that impl. 
+ CandidateSource::Impl(impl_data.impl_def_id) + } + _ => CandidateSource::Trait(candidate.item.container_id(self.tcx)), + } + }), + } + } + + fn consider_probe( + &self, + self_ty: Ty<'tcx>, + probe: &Candidate<'tcx>, + possibly_unsatisfied_predicates: &mut Vec<( + ty::Predicate<'tcx>, + Option>, + Option>, + )>, + ) -> ProbeResult { + debug!("consider_probe: self_ty={:?} probe={:?}", self_ty, probe); + + self.probe(|_| { + // First check that the self type can be related. + let sub_obligations = match self + .at(&ObligationCause::dummy(), self.param_env) + .define_opaque_types(false) + .sup(probe.xform_self_ty, self_ty) + { + Ok(InferOk { obligations, value: () }) => obligations, + Err(err) => { + debug!("--> cannot relate self-types {:?}", err); + return ProbeResult::NoMatch; + } + }; + + let mut result = ProbeResult::Match; + let mut xform_ret_ty = probe.xform_ret_ty; + debug!(?xform_ret_ty); + + let selcx = &mut traits::SelectionContext::new(self); + let cause = traits::ObligationCause::misc(self.span, self.body_id); + + let mut parent_pred = None; + + // If so, impls may carry other conditions (e.g., where + // clauses) that must be considered. Make sure that those + // match as well (or at least may match, sometimes we + // don't have enough information to fully evaluate). + match probe.kind { + InherentImplCandidate(ref substs, ref ref_obligations) => { + // `xform_ret_ty` hasn't been normalized yet, only `xform_self_ty`, + // see the reasons mentioned in the comments in `assemble_inherent_impl_probe` + // for why this is necessary + let traits::Normalized { + value: normalized_xform_ret_ty, + obligations: normalization_obligations, + } = traits::normalize(selcx, self.param_env, cause.clone(), probe.xform_ret_ty); + xform_ret_ty = normalized_xform_ret_ty; + debug!("xform_ret_ty after normalization: {:?}", xform_ret_ty); + + // Check whether the impl imposes obligations we have to worry about. + let impl_def_id = probe.item.container_id(self.tcx); + let impl_bounds = self.tcx.predicates_of(impl_def_id); + let impl_bounds = impl_bounds.instantiate(self.tcx, substs); + let traits::Normalized { value: impl_bounds, obligations: norm_obligations } = + traits::normalize(selcx, self.param_env, cause.clone(), impl_bounds); + + // Convert the bounds into obligations. + let impl_obligations = + traits::predicates_for_generics(cause, self.param_env, impl_bounds); + + let candidate_obligations = impl_obligations + .chain(norm_obligations.into_iter()) + .chain(ref_obligations.iter().cloned()) + .chain(normalization_obligations.into_iter()); + + // Evaluate those obligations to see if they might possibly hold. + for o in candidate_obligations { + let o = self.resolve_vars_if_possible(o); + if !self.predicate_may_hold(&o) { + result = ProbeResult::NoMatch; + possibly_unsatisfied_predicates.push(( + o.predicate, + None, + Some(o.cause), + )); + } + } + } + + ObjectCandidate | WhereClauseCandidate(..) => { + // These have no additional conditions to check. + } + + TraitCandidate(trait_ref) => { + if let Some(method_name) = self.method_name { + // Some trait methods are excluded for arrays before 2021. + // (`array.into_iter()` wants a slice iterator for compatibility.) 
+ if self_ty.is_array() && !method_name.span.rust_2021() { + let trait_def = self.tcx.trait_def(trait_ref.def_id); + if trait_def.skip_array_during_method_dispatch { + return ProbeResult::NoMatch; + } + } + } + let predicate = + ty::Binder::dummy(trait_ref).without_const().to_predicate(self.tcx); + parent_pred = Some(predicate); + let obligation = traits::Obligation::new(cause, self.param_env, predicate); + if !self.predicate_may_hold(&obligation) { + result = ProbeResult::NoMatch; + if self.probe(|_| { + match self.select_trait_candidate(trait_ref) { + Err(_) => return true, + Ok(Some(impl_source)) + if !impl_source.borrow_nested_obligations().is_empty() => + { + for obligation in impl_source.borrow_nested_obligations() { + // Determine exactly which obligation wasn't met, so + // that we can give more context in the error. + if !self.predicate_may_hold(obligation) { + let nested_predicate = + self.resolve_vars_if_possible(obligation.predicate); + let predicate = + self.resolve_vars_if_possible(predicate); + let p = if predicate == nested_predicate { + // Avoid "`MyStruct: Foo` which is required by + // `MyStruct: Foo`" in E0599. + None + } else { + Some(predicate) + }; + possibly_unsatisfied_predicates.push(( + nested_predicate, + p, + Some(obligation.cause.clone()), + )); + } + } + } + _ => { + // Some nested subobligation of this predicate + // failed. + let predicate = self.resolve_vars_if_possible(predicate); + possibly_unsatisfied_predicates.push((predicate, None, None)); + } + } + false + }) { + // This candidate's primary obligation doesn't even + // select - don't bother registering anything in + // `potentially_unsatisfied_predicates`. + return ProbeResult::NoMatch; + } + } + } + } + + // Evaluate those obligations to see if they might possibly hold. + for o in sub_obligations { + let o = self.resolve_vars_if_possible(o); + if !self.predicate_may_hold(&o) { + result = ProbeResult::NoMatch; + possibly_unsatisfied_predicates.push((o.predicate, parent_pred, Some(o.cause))); + } + } + + if let ProbeResult::Match = result { + if let (Some(return_ty), Some(xform_ret_ty)) = (self.return_type, xform_ret_ty) { + let xform_ret_ty = self.resolve_vars_if_possible(xform_ret_ty); + debug!( + "comparing return_ty {:?} with xform ret ty {:?}", + return_ty, probe.xform_ret_ty + ); + if self + .at(&ObligationCause::dummy(), self.param_env) + .define_opaque_types(false) + .sup(return_ty, xform_ret_ty) + .is_err() + { + return ProbeResult::BadReturnType; + } + } + } + + result + }) + } + + /// Sometimes we get in a situation where we have multiple probes that are all impls of the + /// same trait, but we don't know which impl to use. In this case, since in all cases the + /// external interface of the method can be determined from the trait, it's ok not to decide. + /// We can basically just collapse all of the probes for various impls into one where-clause + /// probe. This will result in a pending obligation so when more type-info is available we can + /// make the final decision. + /// + /// Example (`src/test/ui/method-two-trait-defer-resolution-1.rs`): + /// + /// ```ignore (illustrative) + /// trait Foo { ... } + /// impl Foo for Vec { ... } + /// impl Foo for Vec { ... } + /// ``` + /// + /// Now imagine the receiver is `Vec<_>`. It doesn't really matter at this time which impl we + /// use, so it's ok to just commit to "using the method from the trait Foo". 
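A runnable variant of the example in the comment above (concrete element types instead of `A`/`B`), showing the user-visible effect: the call type-checks before the element type is known, and the impl is chosen once inference pins it down.

```rust
// Mirrors the doc comment's example; the trait and impls are invented.
trait Foo {
    fn foo(&self) -> &'static str;
}

impl Foo for Vec<u32> {
    fn foo(&self) -> &'static str { "u32" }
}

impl Foo for Vec<bool> {
    fn foo(&self) -> &'static str { "bool" }
}

fn demo() {
    let mut v = Vec::new();
    // Here the receiver is still `Vec<_>`; the probe commits to "the method
    // from trait Foo" and leaves a pending `Vec<_>: Foo` obligation behind.
    let s = v.foo();
    // Pushing a `u32` pins the element type, which resolves that obligation
    // to the `Vec<u32>` impl.
    v.push(1u32);
    assert_eq!(s, "u32");
}
```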
+    fn collapse_candidates_to_trait_pick(
+        &self,
+        self_ty: Ty<'tcx>,
+        probes: &[(&Candidate<'tcx>, ProbeResult)],
+    ) -> Option<Pick<'tcx>> {
+        // Do all probes correspond to the same trait?
+        let container = probes[0].0.item.trait_container(self.tcx)?;
+        for (p, _) in &probes[1..] {
+            let p_container = p.item.trait_container(self.tcx)?;
+            if p_container != container {
+                return None;
+            }
+        }
+
+        // FIXME: check the return type here somehow.
+        // If so, just use this trait and call it a day.
+        Some(Pick {
+            item: probes[0].0.item,
+            kind: TraitPick,
+            import_ids: probes[0].0.import_ids.clone(),
+            autoderefs: 0,
+            autoref_or_ptr_adjustment: None,
+            self_ty,
+        })
+    }
+
+    /// Similarly to `probe_for_return_type`, this method attempts to find the best matching
+    /// candidate method where the method name may have been misspelled. Similarly to other
+    /// Levenshtein-based suggestions, we provide at most one such suggestion.
+    fn probe_for_lev_candidate(&mut self) -> Result<Option<ty::AssocItem>, MethodError<'tcx>> {
+        debug!("probing for method names similar to {:?}", self.method_name);
+
+        let steps = self.steps.clone();
+        self.probe(|_| {
+            let mut pcx = ProbeContext::new(
+                self.fcx,
+                self.span,
+                self.mode,
+                self.method_name,
+                self.return_type,
+                self.orig_steps_var_values.clone(),
+                steps,
+                IsSuggestion(true),
+                self.scope_expr_id,
+            );
+            pcx.allow_similar_names = true;
+            pcx.assemble_inherent_candidates();
+
+            let method_names = pcx.candidate_method_names();
+            pcx.allow_similar_names = false;
+            let applicable_close_candidates: Vec<ty::AssocItem> = method_names
+                .iter()
+                .filter_map(|&method_name| {
+                    pcx.reset();
+                    pcx.method_name = Some(method_name);
+                    pcx.assemble_inherent_candidates();
+                    pcx.pick_core().and_then(|pick| pick.ok()).map(|pick| pick.item)
+                })
+                .collect();
+
+            if applicable_close_candidates.is_empty() {
+                Ok(None)
+            } else {
+                let best_name = {
+                    let names = applicable_close_candidates
+                        .iter()
+                        .map(|cand| cand.name)
+                        .collect::<Vec<Symbol>>();
+                    find_best_match_for_name_with_substrings(
+                        &names,
+                        self.method_name.unwrap().name,
+                        None,
+                    )
+                }
+                .unwrap();
+                Ok(applicable_close_candidates.into_iter().find(|method| method.name == best_name))
+            }
+        })
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+    // MISCELLANY
+    fn has_applicable_self(&self, item: &ty::AssocItem) -> bool {
+        // "Fast track" -- check for usage of sugar when in method call
+        // mode.
+        //
+        // In Path mode (i.e., resolving a value like `T::next`), consider any
+        // associated value (i.e., methods, constants) but not types.
+        match self.mode {
+            Mode::MethodCall => item.fn_has_self_parameter,
+            Mode::Path => match item.kind {
+                ty::AssocKind::Type => false,
+                ty::AssocKind::Fn | ty::AssocKind::Const => true,
+            },
+        }
+        // FIXME -- check for types that deref to `Self`,
+        // like `Rc<Self>` and so on.
+        //
+        // Note also that the current code will break if this type
+        // includes any of the type parameters defined on the method
+        // -- but this could be overcome.
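+        // Illustrative sketch (editorial note): in `Mode::Path` a value path such as
+        // `<Vec<u8> as IntoIterator>::into_iter` resolves to an associated fn even
+        // without method-call sugar, while `<Vec<u8> as IntoIterator>::IntoIter`
+        // (an associated type) is rejected by the `AssocKind::Type => false` arm above.
+        //
+        //     let f = <Vec<u8> as IntoIterator>::into_iter;
+        //     let it = f(vec![1u8, 2, 3]);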
+    }
+
+    fn record_static_candidate(&mut self, source: CandidateSource) {
+        self.static_candidates.push(source);
+    }
+
+    #[instrument(level = "debug", skip(self))]
+    fn xform_self_ty(
+        &self,
+        item: &ty::AssocItem,
+        impl_ty: Ty<'tcx>,
+        substs: SubstsRef<'tcx>,
+    ) -> (Ty<'tcx>, Option<Ty<'tcx>>) {
+        if item.kind == ty::AssocKind::Fn && self.mode == Mode::MethodCall {
+            let sig = self.xform_method_sig(item.def_id, substs);
+            (sig.inputs()[0], Some(sig.output()))
+        } else {
+            (impl_ty, None)
+        }
+    }
+
+    #[instrument(level = "debug", skip(self))]
+    fn xform_method_sig(&self, method: DefId, substs: SubstsRef<'tcx>) -> ty::FnSig<'tcx> {
+        let fn_sig = self.tcx.bound_fn_sig(method);
+        debug!(?fn_sig);
+
+        assert!(!substs.has_escaping_bound_vars());
+
+        // It is possible for type parameters or early-bound lifetimes
+        // to appear in the signature of `self`. The substitutions we
+        // are given do not include type/lifetime parameters for the
+        // method yet. So create fresh variables here for those too,
+        // if there are any.
+        let generics = self.tcx.generics_of(method);
+        assert_eq!(substs.len(), generics.parent_count as usize);
+
+        let xform_fn_sig = if generics.params.is_empty() {
+            fn_sig.subst(self.tcx, substs)
+        } else {
+            let substs = InternalSubsts::for_item(self.tcx, method, |param, _| {
+                let i = param.index as usize;
+                if i < substs.len() {
+                    substs[i]
+                } else {
+                    match param.kind {
+                        GenericParamDefKind::Lifetime => {
+                            // In general, during probe we erase regions.
+                            self.tcx.lifetimes.re_erased.into()
+                        }
+                        GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => {
+                            self.var_for_def(self.span, param)
+                        }
+                    }
+                }
+            });
+            fn_sig.subst(self.tcx, substs)
+        };
+
+        self.erase_late_bound_regions(xform_fn_sig)
+    }
+
+    /// Gets the type of an impl and generates substitutions with inference vars.
+    fn impl_ty_and_substs(
+        &self,
+        impl_def_id: DefId,
+    ) -> (ty::EarlyBinder<Ty<'tcx>>, SubstsRef<'tcx>) {
+        (self.tcx.bound_type_of(impl_def_id), self.fresh_item_substs(impl_def_id))
+    }
+
+    fn fresh_item_substs(&self, def_id: DefId) -> SubstsRef<'tcx> {
+        InternalSubsts::for_item(self.tcx, def_id, |param, _| match param.kind {
+            GenericParamDefKind::Lifetime => self.tcx.lifetimes.re_erased.into(),
+            GenericParamDefKind::Type { .. } => self
+                .next_ty_var(TypeVariableOrigin {
+                    kind: TypeVariableOriginKind::SubstitutionPlaceholder,
+                    span: self.tcx.def_span(def_id),
+                })
+                .into(),
+            GenericParamDefKind::Const { .. } => {
+                let span = self.tcx.def_span(def_id);
+                let origin = ConstVariableOrigin {
+                    kind: ConstVariableOriginKind::SubstitutionPlaceholder,
+                    span,
+                };
+                self.next_const_var(self.tcx.type_of(param.def_id), origin).into()
+            }
+        })
+    }
+
+    /// Replaces late-bound-regions bound by `value` with `'static` using
+    /// `ty::erase_late_bound_regions`.
+    ///
+    /// This is only a reasonable thing to do during the *probe* phase, not the *confirm* phase, of
+    /// method matching. It is reasonable during the probe phase because we don't consider region
+    /// relationships at all. Therefore, we can just replace all the region variables with 'static
+    /// rather than creating fresh region variables. This is nice for two reasons:
+    ///
+    /// 1. Because the numbers of the region variables would otherwise be fairly unique to this
+    ///    particular method call, it winds up creating fewer types overall, which helps for memory
+    ///    usage. (Admittedly, this is a rather small effect, though measurable.)
+    ///
+    /// 2. It makes it easier to deal with higher-ranked trait bounds, because we can replace any
+    ///    late-bound regions with 'static. Otherwise, if we were going to replace late-bound
+    ///    regions with actual region variables as is proper, we'd have to ensure that the same
+    ///    region got replaced with the same variable, which requires a bit more coordination
+    ///    and/or tracking the substitution and so forth.
+    fn erase_late_bound_regions<T>(&self, value: ty::Binder<'tcx, T>) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        self.tcx.erase_late_bound_regions(value)
+    }
+
+    /// Finds the method with the appropriate name (or return type, as the case may be). If
+    /// `allow_similar_names` is set, find methods with close-matching names.
+    // The length of the returned iterator is nearly always 0 or 1 and this
+    // method is fairly hot.
+    fn impl_or_trait_item(&self, def_id: DefId) -> SmallVec<[ty::AssocItem; 1]> {
+        if let Some(name) = self.method_name {
+            if self.allow_similar_names {
+                let max_dist = max(name.as_str().len(), 3) / 3;
+                self.tcx
+                    .associated_items(def_id)
+                    .in_definition_order()
+                    .filter(|x| {
+                        if x.kind.namespace() != Namespace::ValueNS {
+                            return false;
+                        }
+                        match lev_distance_with_substrings(name.as_str(), x.name.as_str(), max_dist)
+                        {
+                            Some(d) => d > 0,
+                            None => false,
+                        }
+                    })
+                    .copied()
+                    .collect()
+            } else {
+                self.fcx
+                    .associated_value(def_id, name)
+                    .map_or_else(SmallVec::new, |x| SmallVec::from_buf([x]))
+            }
+        } else {
+            self.tcx.associated_items(def_id).in_definition_order().copied().collect()
+        }
+    }
+}
+
+impl<'tcx> Candidate<'tcx> {
+    fn to_unadjusted_pick(&self, self_ty: Ty<'tcx>) -> Pick<'tcx> {
+        Pick {
+            item: self.item,
+            kind: match self.kind {
+                InherentImplCandidate(..) => InherentImplPick,
+                ObjectCandidate => ObjectPick,
+                TraitCandidate(_) => TraitPick,
+                WhereClauseCandidate(ref trait_ref) => {
+                    // Only traits derived from where-clauses should
+                    // appear here, so they should not contain any
+                    // inference variables or other artifacts. This
+                    // means they are safe to put into the
+                    // `WhereClausePick`.
+                    assert!(
+                        !trait_ref.skip_binder().substs.needs_infer()
+                            && !trait_ref.skip_binder().substs.has_placeholders()
+                    );
+
+                    WhereClausePick(*trait_ref)
+                }
+            },
+            import_ids: self.import_ids.clone(),
+            autoderefs: 0,
+            autoref_or_ptr_adjustment: None,
+            self_ty,
+        }
+    }
+}
diff --git a/compiler/rustc_typeck/src/check/method/suggest.rs b/compiler/rustc_typeck/src/check/method/suggest.rs
new file mode 100644
index 000000000..c92b93cbc
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/method/suggest.rs
@@ -0,0 +1,2286 @@
+//! Give useful errors and suggestions to users when an item can't be
+//! found or is otherwise invalid.
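+//!
+//! Editorial illustration (not upstream text): the canonical input for this
+//! module is a resolution failure such as E0599, e.g.
+//!
+//! ```ignore (illustrative)
+//! let x = 1u32;
+//! x.does_not_exist(); // error[E0599]: no method named `does_not_exist` found for `u32`
+//! ```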
+ +use crate::check::FnCtxt; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_errors::{ + pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, + MultiSpan, +}; +use rustc_hir as hir; +use rustc_hir::def::DefKind; +use rustc_hir::def_id::DefId; +use rustc_hir::lang_items::LangItem; +use rustc_hir::{ExprKind, Node, QPath}; +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_middle::traits::util::supertraits; +use rustc_middle::ty::fast_reject::{simplify_type, TreatParams}; +use rustc_middle::ty::print::with_crate_prefix; +use rustc_middle::ty::ToPolyTraitRef; +use rustc_middle::ty::{self, DefIdTree, ToPredicate, Ty, TyCtxt, TypeVisitable}; +use rustc_span::symbol::{kw, sym, Ident}; +use rustc_span::Symbol; +use rustc_span::{lev_distance, source_map, ExpnKind, FileName, MacroKind, Span}; +use rustc_trait_selection::traits::error_reporting::on_unimplemented::InferCtxtExt as _; +use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _; +use rustc_trait_selection::traits::{ + FulfillmentError, Obligation, ObligationCause, ObligationCauseCode, OnUnimplementedNote, +}; + +use std::cmp::Ordering; +use std::iter; + +use super::probe::{Mode, ProbeScope}; +use super::{super::suggest_call_constructor, CandidateSource, MethodError, NoMatchData}; + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + fn is_fn_ty(&self, ty: Ty<'tcx>, span: Span) -> bool { + let tcx = self.tcx; + match ty.kind() { + // Not all of these (e.g., unsafe fns) implement `FnOnce`, + // so we look for these beforehand. + ty::Closure(..) | ty::FnDef(..) | ty::FnPtr(_) => true, + // If it's not a simple function, look for things which implement `FnOnce`. + _ => { + let Some(fn_once) = tcx.lang_items().fn_once_trait() else { + return false; + }; + + // This conditional prevents us from asking to call errors and unresolved types. + // It might seem that we can use `predicate_must_hold_modulo_regions`, + // but since a Dummy binder is used to fill in the FnOnce trait's arguments, + // type resolution always gives a "maybe" here. + if self.autoderef(span, ty).any(|(ty, _)| { + info!("check deref {:?} error", ty); + matches!(ty.kind(), ty::Error(_) | ty::Infer(_)) + }) { + return false; + } + + self.autoderef(span, ty).any(|(ty, _)| { + info!("check deref {:?} impl FnOnce", ty); + self.probe(|_| { + let fn_once_substs = tcx.mk_substs_trait( + ty, + &[self + .next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::MiscVariable, + span, + }) + .into()], + ); + let trait_ref = ty::TraitRef::new(fn_once, fn_once_substs); + let poly_trait_ref = ty::Binder::dummy(trait_ref); + let obligation = Obligation::misc( + span, + self.body_id, + self.param_env, + poly_trait_ref.without_const().to_predicate(tcx), + ); + self.predicate_may_hold(&obligation) + }) + }) + } + } + } + + fn is_slice_ty(&self, ty: Ty<'tcx>, span: Span) -> bool { + self.autoderef(span, ty).any(|(ty, _)| matches!(ty.kind(), ty::Slice(..) | ty::Array(..))) + } + + pub fn report_method_error( + &self, + mut span: Span, + rcvr_ty: Ty<'tcx>, + item_name: Ident, + source: SelfSource<'tcx>, + error: MethodError<'tcx>, + args: Option<&'tcx [hir::Expr<'tcx>]>, + ) -> Option> { + // Avoid suggestions when we don't know what's going on. 
+        if rcvr_ty.references_error() {
+            return None;
+        }
+
+        let report_candidates = |span: Span,
+                                 err: &mut Diagnostic,
+                                 mut sources: Vec<CandidateSource>,
+                                 sugg_span: Span| {
+            sources.sort();
+            sources.dedup();
+            // Dynamic limit to avoid hiding just one candidate, which is silly.
+            let limit = if sources.len() == 5 { 5 } else { 4 };
+
+            for (idx, source) in sources.iter().take(limit).enumerate() {
+                match *source {
+                    CandidateSource::Impl(impl_did) => {
+                        // Provide the best span we can. Use the item, if local to crate, else
+                        // the impl, if local to crate (item may be defaulted), else nothing.
+                        let Some(item) = self.associated_value(impl_did, item_name).or_else(|| {
+                            let impl_trait_ref = self.tcx.impl_trait_ref(impl_did)?;
+                            self.associated_value(impl_trait_ref.def_id, item_name)
+                        }) else {
+                            continue;
+                        };
+
+                        let note_span = if item.def_id.is_local() {
+                            Some(self.tcx.def_span(item.def_id))
+                        } else if impl_did.is_local() {
+                            Some(self.tcx.def_span(impl_did))
+                        } else {
+                            None
+                        };
+
+                        let impl_ty = self.tcx.at(span).type_of(impl_did);
+
+                        let insertion = match self.tcx.impl_trait_ref(impl_did) {
+                            None => String::new(),
+                            Some(trait_ref) => format!(
+                                " of the trait `{}`",
+                                self.tcx.def_path_str(trait_ref.def_id)
+                            ),
+                        };
+
+                        let (note_str, idx) = if sources.len() > 1 {
+                            (
+                                format!(
+                                    "candidate #{} is defined in an impl{} for the type `{}`",
+                                    idx + 1,
+                                    insertion,
+                                    impl_ty,
+                                ),
+                                Some(idx + 1),
+                            )
+                        } else {
+                            (
+                                format!(
+                                    "the candidate is defined in an impl{} for the type `{}`",
+                                    insertion, impl_ty,
+                                ),
+                                None,
+                            )
+                        };
+                        if let Some(note_span) = note_span {
+                            // We have a span pointing to the method. Show note with snippet.
+                            err.span_note(note_span, &note_str);
+                        } else {
+                            err.note(&note_str);
+                        }
+                        if let Some(trait_ref) = self.tcx.impl_trait_ref(impl_did) {
+                            let path = self.tcx.def_path_str(trait_ref.def_id);
+
+                            let ty = match item.kind {
+                                ty::AssocKind::Const | ty::AssocKind::Type => rcvr_ty,
+                                ty::AssocKind::Fn => self
+                                    .tcx
+                                    .fn_sig(item.def_id)
+                                    .inputs()
+                                    .skip_binder()
+                                    .get(0)
+                                    .filter(|ty| ty.is_region_ptr() && !rcvr_ty.is_region_ptr())
+                                    .copied()
+                                    .unwrap_or(rcvr_ty),
+                            };
+                            print_disambiguation_help(
+                                item_name,
+                                args,
+                                err,
+                                path,
+                                ty,
+                                item.kind,
+                                item.def_id,
+                                sugg_span,
+                                idx,
+                                self.tcx.sess.source_map(),
+                                item.fn_has_self_parameter,
+                            );
+                        }
+                    }
+                    CandidateSource::Trait(trait_did) => {
+                        let Some(item) = self.associated_value(trait_did, item_name) else { continue };
+                        let item_span = self.tcx.def_span(item.def_id);
+                        let idx = if sources.len() > 1 {
+                            let msg = &format!(
+                                "candidate #{} is defined in the trait `{}`",
+                                idx + 1,
+                                self.tcx.def_path_str(trait_did)
+                            );
+                            err.span_note(item_span, msg);
+                            Some(idx + 1)
+                        } else {
+                            let msg = &format!(
+                                "the candidate is defined in the trait `{}`",
+                                self.tcx.def_path_str(trait_did)
+                            );
+                            err.span_note(item_span, msg);
+                            None
+                        };
+                        let path = self.tcx.def_path_str(trait_did);
+                        print_disambiguation_help(
+                            item_name,
+                            args,
+                            err,
+                            path,
+                            rcvr_ty,
+                            item.kind,
+                            item.def_id,
+                            sugg_span,
+                            idx,
+                            self.tcx.sess.source_map(),
+                            item.fn_has_self_parameter,
+                        );
+                    }
+                }
+            }
+            if sources.len() > limit {
+                err.note(&format!("and {} others", sources.len() - limit));
+            }
+        };
+
+        let sugg_span = if let SelfSource::MethodCall(expr) = source {
+            // Given `foo.bar(baz)`, `expr` is `bar`, but we want to point to the whole thing.
+ self.tcx.hir().expect_expr(self.tcx.hir().get_parent_node(expr.hir_id)).span + } else { + span + }; + + match error { + MethodError::NoMatch(NoMatchData { + static_candidates: static_sources, + unsatisfied_predicates, + out_of_scope_traits, + lev_candidate, + mode, + }) => { + let tcx = self.tcx; + + let actual = self.resolve_vars_if_possible(rcvr_ty); + let ty_str = self.ty_to_string(actual); + let is_method = mode == Mode::MethodCall; + let item_kind = if is_method { + "method" + } else if actual.is_enum() { + "variant or associated item" + } else { + match (item_name.as_str().chars().next(), actual.is_fresh_ty()) { + (Some(name), false) if name.is_lowercase() => "function or associated item", + (Some(_), false) => "associated item", + (Some(_), true) | (None, false) => "variant or associated item", + (None, true) => "variant", + } + }; + + if self.suggest_constraining_numerical_ty( + tcx, actual, source, span, item_kind, item_name, &ty_str, + ) { + return None; + } + + span = item_name.span; + + // Don't show generic arguments when the method can't be found in any implementation (#81576). + let mut ty_str_reported = ty_str.clone(); + if let ty::Adt(_, generics) = actual.kind() { + if generics.len() > 0 { + let mut autoderef = self.autoderef(span, actual); + let candidate_found = autoderef.any(|(ty, _)| { + if let ty::Adt(adt_deref, _) = ty.kind() { + self.tcx + .inherent_impls(adt_deref.did()) + .iter() + .filter_map(|def_id| self.associated_value(*def_id, item_name)) + .count() + >= 1 + } else { + false + } + }); + let has_deref = autoderef.step_count() > 0; + if !candidate_found && !has_deref && unsatisfied_predicates.is_empty() { + if let Some((path_string, _)) = ty_str.split_once('<') { + ty_str_reported = path_string.to_string(); + } + } + } + } + + let mut err = struct_span_err!( + tcx.sess, + span, + E0599, + "no {} named `{}` found for {} `{}` in the current scope", + item_kind, + item_name, + actual.prefix_string(self.tcx), + ty_str_reported, + ); + if actual.references_error() { + err.downgrade_to_delayed_bug(); + } + + if let Mode::MethodCall = mode && let SelfSource::MethodCall(cal) = source { + self.suggest_await_before_method( + &mut err, item_name, actual, cal, span, + ); + } + if let Some(span) = tcx.resolutions(()).confused_type_with_std_module.get(&span) { + err.span_suggestion( + span.shrink_to_lo(), + "you are looking for the module in `std`, not the primitive type", + "std::", + Applicability::MachineApplicable, + ); + } + if let ty::RawPtr(_) = &actual.kind() { + err.note( + "try using `<*const T>::as_ref()` to get a reference to the \ + type behind the pointer: https://doc.rust-lang.org/std/\ + primitive.pointer.html#method.as_ref", + ); + err.note( + "using `<*const T>::as_ref()` on a pointer which is unaligned or points \ + to invalid or uninitialized memory is undefined behavior", + ); + } + + let ty_span = match actual.kind() { + ty::Param(param_type) => { + let generics = self.tcx.generics_of(self.body_id.owner.to_def_id()); + let type_param = generics.type_param(param_type, self.tcx); + Some(self.tcx.def_span(type_param.def_id)) + } + ty::Adt(def, _) if def.did().is_local() => Some(tcx.def_span(def.did())), + _ => None, + }; + + if let Some(span) = ty_span { + err.span_label( + span, + format!( + "{item_kind} `{item_name}` not found for this {}", + actual.prefix_string(self.tcx) + ), + ); + } + + if self.is_fn_ty(rcvr_ty, span) { + if let SelfSource::MethodCall(expr) = source { + let suggest = if let ty::FnDef(def_id, _) = rcvr_ty.kind() { + if let 
Some(local_id) = def_id.as_local() { + let hir_id = tcx.hir().local_def_id_to_hir_id(local_id); + let node = tcx.hir().get(hir_id); + let fields = node.tuple_fields(); + if let Some(fields) = fields + && let Some(DefKind::Ctor(of, _)) = self.tcx.opt_def_kind(local_id) { + Some((fields.len(), of)) + } else { + None + } + } else { + // The logic here isn't smart but `associated_item_def_ids` + // doesn't work nicely on local. + if let DefKind::Ctor(of, _) = tcx.def_kind(def_id) { + let parent_def_id = tcx.parent(*def_id); + Some((tcx.associated_item_def_ids(parent_def_id).len(), of)) + } else { + None + } + } + } else { + None + }; + + // If the function is a tuple constructor, we recommend that they call it + if let Some((fields, kind)) = suggest { + suggest_call_constructor(expr.span, kind, fields, &mut err); + } else { + // General case + err.span_label( + expr.span, + "this is a function, perhaps you wish to call it", + ); + } + } + } + + let mut custom_span_label = false; + + if !static_sources.is_empty() { + err.note( + "found the following associated functions; to be used as methods, \ + functions must have a `self` parameter", + ); + err.span_label(span, "this is an associated function, not a method"); + custom_span_label = true; + } + if static_sources.len() == 1 { + let ty_str = + if let Some(CandidateSource::Impl(impl_did)) = static_sources.get(0) { + // When the "method" is resolved through dereferencing, we really want the + // original type that has the associated function for accurate suggestions. + // (#61411) + let ty = tcx.at(span).type_of(*impl_did); + match (&ty.peel_refs().kind(), &actual.peel_refs().kind()) { + (ty::Adt(def, _), ty::Adt(def_actual, _)) if def == def_actual => { + // Use `actual` as it will have more `substs` filled in. + self.ty_to_value_string(actual.peel_refs()) + } + _ => self.ty_to_value_string(ty.peel_refs()), + } + } else { + self.ty_to_value_string(actual.peel_refs()) + }; + if let SelfSource::MethodCall(expr) = source { + err.span_suggestion( + expr.span.to(span), + "use associated function syntax instead", + format!("{}::{}", ty_str, item_name), + Applicability::MachineApplicable, + ); + } else { + err.help(&format!("try with `{}::{}`", ty_str, item_name,)); + } + + report_candidates(span, &mut err, static_sources, sugg_span); + } else if static_sources.len() > 1 { + report_candidates(span, &mut err, static_sources, sugg_span); + } + + let mut bound_spans = vec![]; + let mut restrict_type_params = false; + let mut unsatisfied_bounds = false; + if item_name.name == sym::count && self.is_slice_ty(actual, span) { + let msg = "consider using `len` instead"; + if let SelfSource::MethodCall(_expr) = source { + err.span_suggestion_short( + span, + msg, + "len", + Applicability::MachineApplicable, + ); + } else { + err.span_label(span, msg); + } + if let Some(iterator_trait) = self.tcx.get_diagnostic_item(sym::Iterator) { + let iterator_trait = self.tcx.def_path_str(iterator_trait); + err.note(&format!("`count` is defined on `{iterator_trait}`, which `{actual}` does not implement")); + } + } else if !unsatisfied_predicates.is_empty() { + let mut type_params = FxHashMap::default(); + + // Pick out the list of unimplemented traits on the receiver. + // This is used for custom error messages with the `#[rustc_on_unimplemented]` attribute. 
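+ // Illustrative sketch (editorial note): `#[rustc_on_unimplemented]` is the
+ // internal attribute these custom messages come from; standard-library traits
+ // use it roughly like this (wording abbreviated, not a verbatim std excerpt):
+ //
+ //     #[rustc_on_unimplemented(
+ //         message = "`{Self}` is not an iterator",
+ //         label = "`{Self}` is not an iterator"
+ //     )]
+ //     pub trait Iterator { /* ... */ }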
+ let mut unimplemented_traits = FxHashMap::default(); + let mut unimplemented_traits_only = true; + for (predicate, _parent_pred, cause) in &unsatisfied_predicates { + if let (ty::PredicateKind::Trait(p), Some(cause)) = + (predicate.kind().skip_binder(), cause.as_ref()) + { + if p.trait_ref.self_ty() != rcvr_ty { + // This is necessary, not just to keep the errors clean, but also + // because our derived obligations can wind up with a trait ref that + // requires a different param_env to be correctly compared. + continue; + } + unimplemented_traits.entry(p.trait_ref.def_id).or_insert(( + predicate.kind().rebind(p.trait_ref), + Obligation { + cause: cause.clone(), + param_env: self.param_env, + predicate: *predicate, + recursion_depth: 0, + }, + )); + } + } + + // Make sure that, if any traits other than the found ones were involved, + // we don't don't report an unimplemented trait. + // We don't want to say that `iter::Cloned` is not an iterator, just + // because of some non-Clone item being iterated over. + for (predicate, _parent_pred, _cause) in &unsatisfied_predicates { + match predicate.kind().skip_binder() { + ty::PredicateKind::Trait(p) + if unimplemented_traits.contains_key(&p.trait_ref.def_id) => {} + _ => { + unimplemented_traits_only = false; + break; + } + } + } + + let mut collect_type_param_suggestions = + |self_ty: Ty<'tcx>, parent_pred: ty::Predicate<'tcx>, obligation: &str| { + // We don't care about regions here, so it's fine to skip the binder here. + if let (ty::Param(_), ty::PredicateKind::Trait(p)) = + (self_ty.kind(), parent_pred.kind().skip_binder()) + { + let node = match p.trait_ref.self_ty().kind() { + ty::Param(_) => { + // Account for `fn` items like in `issue-35677.rs` to + // suggest restricting its type params. + let did = self.tcx.hir().body_owner_def_id(hir::BodyId { + hir_id: self.body_id, + }); + Some( + self.tcx + .hir() + .get(self.tcx.hir().local_def_id_to_hir_id(did)), + ) + } + ty::Adt(def, _) => def.did().as_local().map(|def_id| { + self.tcx + .hir() + .get(self.tcx.hir().local_def_id_to_hir_id(def_id)) + }), + _ => None, + }; + if let Some(hir::Node::Item(hir::Item { kind, .. })) = node { + if let Some(g) = kind.generics() { + let key = ( + g.tail_span_for_predicate_suggestion(), + g.add_where_or_trailing_comma(), + ); + type_params + .entry(key) + .or_insert_with(FxHashSet::default) + .insert(obligation.to_owned()); + } + } + } + }; + let mut bound_span_label = |self_ty: Ty<'_>, obligation: &str, quiet: &str| { + let msg = format!( + "doesn't satisfy `{}`", + if obligation.len() > 50 { quiet } else { obligation } + ); + match &self_ty.kind() { + // Point at the type that couldn't satisfy the bound. + ty::Adt(def, _) => { + bound_spans.push((self.tcx.def_span(def.did()), msg)) + } + // Point at the trait object that couldn't satisfy the bound. + ty::Dynamic(preds, _) => { + for pred in preds.iter() { + match pred.skip_binder() { + ty::ExistentialPredicate::Trait(tr) => bound_spans + .push((self.tcx.def_span(tr.def_id), msg.clone())), + ty::ExistentialPredicate::Projection(_) + | ty::ExistentialPredicate::AutoTrait(_) => {} + } + } + } + // Point at the closure that couldn't satisfy the bound. 
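+ // Illustrative sketch (editorial note; `NotClone`/`nc`/`f` are hypothetical):
+ // closure types print verbosely (`[closure@src/lib.rs:3:13]`), so only the
+ // `quiet` form of the bound is used when the label points at the closure
+ // definition, e.g. for
+ //
+ //     struct NotClone;
+ //     let nc = NotClone;
+ //     let f = move || { let _ = &nc; }; // captures `NotClone`, so not `Clone`
+ //
+ // a "doesn't satisfy `_: Clone`" label lands on the closure's definition span.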
+ ty::Closure(def_id, _) => bound_spans.push(( + tcx.def_span(*def_id), + format!("doesn't satisfy `{}`", quiet), + )), + _ => {} + } + }; + let mut format_pred = |pred: ty::Predicate<'tcx>| { + let bound_predicate = pred.kind(); + match bound_predicate.skip_binder() { + ty::PredicateKind::Projection(pred) => { + let pred = bound_predicate.rebind(pred); + // `::Item = String`. + let projection_ty = pred.skip_binder().projection_ty; + + let substs_with_infer_self = tcx.mk_substs( + iter::once(tcx.mk_ty_var(ty::TyVid::from_u32(0)).into()) + .chain(projection_ty.substs.iter().skip(1)), + ); + + let quiet_projection_ty = ty::ProjectionTy { + substs: substs_with_infer_self, + item_def_id: projection_ty.item_def_id, + }; + + let term = pred.skip_binder().term; + + let obligation = format!("{} = {}", projection_ty, term); + let quiet = format!("{} = {}", quiet_projection_ty, term); + + bound_span_label(projection_ty.self_ty(), &obligation, &quiet); + Some((obligation, projection_ty.self_ty())) + } + ty::PredicateKind::Trait(poly_trait_ref) => { + let p = poly_trait_ref.trait_ref; + let self_ty = p.self_ty(); + let path = p.print_only_trait_path(); + let obligation = format!("{}: {}", self_ty, path); + let quiet = format!("_: {}", path); + bound_span_label(self_ty, &obligation, &quiet); + Some((obligation, self_ty)) + } + _ => None, + } + }; + + // Find all the requirements that come from a local `impl` block. + let mut skip_list: FxHashSet<_> = Default::default(); + let mut spanned_predicates: FxHashMap = Default::default(); + for (data, p, parent_p, impl_def_id, cause) in unsatisfied_predicates + .iter() + .filter_map(|(p, parent, c)| c.as_ref().map(|c| (p, parent, c))) + .filter_map(|(p, parent, c)| match c.code() { + ObligationCauseCode::ImplDerivedObligation(ref data) => { + Some((&data.derived, p, parent, data.impl_def_id, data)) + } + _ => None, + }) + { + let parent_trait_ref = data.parent_trait_pred; + let path = parent_trait_ref.print_modifiers_and_trait_path(); + let tr_self_ty = parent_trait_ref.skip_binder().self_ty(); + let unsatisfied_msg = "unsatisfied trait bound introduced here"; + let derive_msg = + "unsatisfied trait bound introduced in this `derive` macro"; + match self.tcx.hir().get_if_local(impl_def_id) { + // Unmet obligation comes from a `derive` macro, point at it once to + // avoid multiple span labels pointing at the same place. + Some(Node::Item(hir::Item { + kind: hir::ItemKind::Trait(..), + ident, + .. + })) if matches!( + ident.span.ctxt().outer_expn_data().kind, + ExpnKind::Macro(MacroKind::Derive, _) + ) => + { + let span = ident.span.ctxt().outer_expn_data().call_site; + let mut spans: MultiSpan = span.into(); + spans.push_span_label(span, derive_msg); + let entry = spanned_predicates.entry(spans); + entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p); + } + + Some(Node::Item(hir::Item { + kind: hir::ItemKind::Impl(hir::Impl { of_trait, self_ty, .. }), + .. + })) if matches!( + self_ty.span.ctxt().outer_expn_data().kind, + ExpnKind::Macro(MacroKind::Derive, _) + ) || matches!( + of_trait.as_ref().map(|t| t + .path + .span + .ctxt() + .outer_expn_data() + .kind), + Some(ExpnKind::Macro(MacroKind::Derive, _)) + ) => + { + let span = self_ty.span.ctxt().outer_expn_data().call_site; + let mut spans: MultiSpan = span.into(); + spans.push_span_label(span, derive_msg); + let entry = spanned_predicates.entry(spans); + entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p); + } + + // Unmet obligation coming from a `trait`. 
+ Some(Node::Item(hir::Item { + kind: hir::ItemKind::Trait(..), + ident, + span: item_span, + .. + })) if !matches!( + ident.span.ctxt().outer_expn_data().kind, + ExpnKind::Macro(MacroKind::Derive, _) + ) => + { + if let Some(pred) = parent_p { + // Done to add the "doesn't satisfy" `span_label`. + let _ = format_pred(*pred); + } + skip_list.insert(p); + let mut spans = if cause.span != *item_span { + let mut spans: MultiSpan = cause.span.into(); + spans.push_span_label(cause.span, unsatisfied_msg); + spans + } else { + ident.span.into() + }; + spans.push_span_label(ident.span, "in this trait"); + let entry = spanned_predicates.entry(spans); + entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p); + } + + // Unmet obligation coming from an `impl`. + Some(Node::Item(hir::Item { + kind: + hir::ItemKind::Impl(hir::Impl { + of_trait, self_ty, generics, .. + }), + span: item_span, + .. + })) if !matches!( + self_ty.span.ctxt().outer_expn_data().kind, + ExpnKind::Macro(MacroKind::Derive, _) + ) && !matches!( + of_trait.as_ref().map(|t| t + .path + .span + .ctxt() + .outer_expn_data() + .kind), + Some(ExpnKind::Macro(MacroKind::Derive, _)) + ) => + { + let sized_pred = + unsatisfied_predicates.iter().any(|(pred, _, _)| { + match pred.kind().skip_binder() { + ty::PredicateKind::Trait(pred) => { + Some(pred.def_id()) + == self.tcx.lang_items().sized_trait() + && pred.polarity == ty::ImplPolarity::Positive + } + _ => false, + } + }); + for param in generics.params { + if param.span == cause.span && sized_pred { + let (sp, sugg) = match param.colon_span { + Some(sp) => (sp.shrink_to_hi(), " ?Sized +"), + None => (param.span.shrink_to_hi(), ": ?Sized"), + }; + err.span_suggestion_verbose( + sp, + "consider relaxing the type parameter's implicit \ + `Sized` bound", + sugg, + Applicability::MachineApplicable, + ); + } + } + if let Some(pred) = parent_p { + // Done to add the "doesn't satisfy" `span_label`. + let _ = format_pred(*pred); + } + skip_list.insert(p); + let mut spans = if cause.span != *item_span { + let mut spans: MultiSpan = cause.span.into(); + spans.push_span_label(cause.span, unsatisfied_msg); + spans + } else { + let mut spans = Vec::with_capacity(2); + if let Some(trait_ref) = of_trait { + spans.push(trait_ref.path.span); + } + spans.push(self_ty.span); + spans.into() + }; + if let Some(trait_ref) = of_trait { + spans.push_span_label(trait_ref.path.span, ""); + } + spans.push_span_label(self_ty.span, ""); + + let entry = spanned_predicates.entry(spans); + entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p); + } + _ => {} + } + } + let mut spanned_predicates: Vec<_> = spanned_predicates.into_iter().collect(); + spanned_predicates.sort_by_key(|(span, (_, _, _))| span.primary_span()); + for (span, (_path, _self_ty, preds)) in spanned_predicates { + let mut preds: Vec<_> = preds + .into_iter() + .filter_map(|pred| format_pred(*pred)) + .map(|(p, _)| format!("`{}`", p)) + .collect(); + preds.sort(); + preds.dedup(); + let msg = if let [pred] = &preds[..] { + format!("trait bound {} was not satisfied", pred) + } else { + format!( + "the following trait bounds were not satisfied:\n{}", + preds.join("\n"), + ) + }; + err.span_note(span, &msg); + unsatisfied_bounds = true; + } + + // The requirements that didn't have an `impl` span to show. 
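+ // Illustrative sketch (editorial note; `NotClone`/`Wrapper` are hypothetical
+ // names): each entry collected below renders in the final note as a pair of
+ // lines shaped like
+ //
+ //     `NotClone: Clone`
+ //     which is required by `Wrapper<NotClone>: Clone`
+ //
+ // mirroring the `format!("`{}`\nwhich is required by `{}`", ..)` arm below.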
+ let mut bound_list = unsatisfied_predicates + .iter() + .filter_map(|(pred, parent_pred, _cause)| { + format_pred(*pred).map(|(p, self_ty)| { + collect_type_param_suggestions(self_ty, *pred, &p); + ( + match parent_pred { + None => format!("`{}`", &p), + Some(parent_pred) => match format_pred(*parent_pred) { + None => format!("`{}`", &p), + Some((parent_p, _)) => { + collect_type_param_suggestions( + self_ty, + *parent_pred, + &p, + ); + format!( + "`{}`\nwhich is required by `{}`", + p, parent_p + ) + } + }, + }, + *pred, + ) + }) + }) + .filter(|(_, pred)| !skip_list.contains(&pred)) + .map(|(t, _)| t) + .enumerate() + .collect::>(); + + for ((span, add_where_or_comma), obligations) in type_params.into_iter() { + restrict_type_params = true; + // #74886: Sort here so that the output is always the same. + let mut obligations = obligations.into_iter().collect::>(); + obligations.sort(); + err.span_suggestion_verbose( + span, + &format!( + "consider restricting the type parameter{s} to satisfy the \ + trait bound{s}", + s = pluralize!(obligations.len()) + ), + format!("{} {}", add_where_or_comma, obligations.join(", ")), + Applicability::MaybeIncorrect, + ); + } + + bound_list.sort_by(|(_, a), (_, b)| a.cmp(b)); // Sort alphabetically. + bound_list.dedup_by(|(_, a), (_, b)| a == b); // #35677 + bound_list.sort_by_key(|(pos, _)| *pos); // Keep the original predicate order. + + if !bound_list.is_empty() || !skip_list.is_empty() { + let bound_list = bound_list + .into_iter() + .map(|(_, path)| path) + .collect::>() + .join("\n"); + let actual_prefix = actual.prefix_string(self.tcx); + info!("unimplemented_traits.len() == {}", unimplemented_traits.len()); + let (primary_message, label) = + if unimplemented_traits.len() == 1 && unimplemented_traits_only { + unimplemented_traits + .into_iter() + .next() + .map(|(_, (trait_ref, obligation))| { + if trait_ref.self_ty().references_error() + || actual.references_error() + { + // Avoid crashing. + return (None, None); + } + let OnUnimplementedNote { message, label, .. 
} = + self.on_unimplemented_note(trait_ref, &obligation); + (message, label) + }) + .unwrap_or((None, None)) + } else { + (None, None) + }; + let primary_message = primary_message.unwrap_or_else(|| format!( + "the {item_kind} `{item_name}` exists for {actual_prefix} `{ty_str}`, but its trait bounds were not satisfied" + )); + err.set_primary_message(&primary_message); + if let Some(label) = label { + custom_span_label = true; + err.span_label(span, label); + } + if !bound_list.is_empty() { + err.note(&format!( + "the following trait bounds were not satisfied:\n{bound_list}" + )); + } + self.suggest_derive(&mut err, &unsatisfied_predicates); + + unsatisfied_bounds = true; + } + } + + let label_span_not_found = |err: &mut DiagnosticBuilder<'_, _>| { + if unsatisfied_predicates.is_empty() { + err.span_label(span, format!("{item_kind} not found in `{ty_str}`")); + let is_string_or_ref_str = match actual.kind() { + ty::Ref(_, ty, _) => { + ty.is_str() + || matches!( + ty.kind(), + ty::Adt(adt, _) if self.tcx.is_diagnostic_item(sym::String, adt.did()) + ) + } + ty::Adt(adt, _) => self.tcx.is_diagnostic_item(sym::String, adt.did()), + _ => false, + }; + if is_string_or_ref_str && item_name.name == sym::iter { + err.span_suggestion_verbose( + item_name.span, + "because of the in-memory representation of `&str`, to obtain \ + an `Iterator` over each of its codepoint use method `chars`", + "chars", + Applicability::MachineApplicable, + ); + } + if let ty::Adt(adt, _) = rcvr_ty.kind() { + let mut inherent_impls_candidate = self + .tcx + .inherent_impls(adt.did()) + .iter() + .copied() + .filter(|def_id| { + if let Some(assoc) = self.associated_value(*def_id, item_name) { + // Check for both mode is the same so we avoid suggesting + // incorrect associated item. + match (mode, assoc.fn_has_self_parameter, source) { + (Mode::MethodCall, true, SelfSource::MethodCall(_)) => { + // We check that the suggest type is actually + // different from the received one + // So we avoid suggestion method with Box + // for instance + self.tcx.at(span).type_of(*def_id) != actual + && self.tcx.at(span).type_of(*def_id) != rcvr_ty + } + (Mode::Path, false, _) => true, + _ => false, + } + } else { + false + } + }) + .collect::>(); + if !inherent_impls_candidate.is_empty() { + inherent_impls_candidate.sort(); + inherent_impls_candidate.dedup(); + + // number of type to shows at most. + let limit = if inherent_impls_candidate.len() == 5 { 5 } else { 4 }; + let type_candidates = inherent_impls_candidate + .iter() + .take(limit) + .map(|impl_item| { + format!("- `{}`", self.tcx.at(span).type_of(*impl_item)) + }) + .collect::>() + .join("\n"); + let additional_types = if inherent_impls_candidate.len() > limit { + format!( + "\nand {} more types", + inherent_impls_candidate.len() - limit + ) + } else { + "".to_string() + }; + err.note(&format!( + "the {item_kind} was found for\n{}{}", + type_candidates, additional_types + )); + } + } + } else { + err.span_label(span, format!("{item_kind} cannot be called on `{ty_str}` due to unsatisfied trait bounds")); + } + }; + + // If the method name is the name of a field with a function or closure type, + // give a helping note that it has to be called as `(x.f)(...)`. 
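+ // Illustrative sketch (editorial note; `S`/`f` are hypothetical): the situation
+ // this targets is a field with a callable type shadowing a would-be method name:
+ //
+ //     struct S { f: fn() -> i32 }
+ //     let s = S { f: || 42 };
+ //     // s.f();        // error[E0599]: no method named `f` found for struct `S`
+ //     let n = (s.f)();  // OK: parenthesize to call the field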
+ if let SelfSource::MethodCall(expr) = source { + if !self.suggest_field_call(span, rcvr_ty, expr, item_name, &mut err) + && lev_candidate.is_none() + && !custom_span_label + { + label_span_not_found(&mut err); + } + } else if !custom_span_label { + label_span_not_found(&mut err); + } + + self.check_for_field_method(&mut err, source, span, actual, item_name); + + self.check_for_unwrap_self(&mut err, source, span, actual, item_name); + + bound_spans.sort(); + bound_spans.dedup(); + for (span, msg) in bound_spans.into_iter() { + err.span_label(span, &msg); + } + + if actual.is_numeric() && actual.is_fresh() || restrict_type_params { + } else { + self.suggest_traits_to_import( + &mut err, + span, + rcvr_ty, + item_name, + args.map(|args| args.len()), + source, + out_of_scope_traits, + &unsatisfied_predicates, + unsatisfied_bounds, + ); + } + + // Don't emit a suggestion if we found an actual method + // that had unsatisfied trait bounds + if unsatisfied_predicates.is_empty() && actual.is_enum() { + let adt_def = actual.ty_adt_def().expect("enum is not an ADT"); + if let Some(suggestion) = lev_distance::find_best_match_for_name( + &adt_def.variants().iter().map(|s| s.name).collect::>(), + item_name.name, + None, + ) { + err.span_suggestion( + span, + "there is a variant with a similar name", + suggestion, + Applicability::MaybeIncorrect, + ); + } + } + + if item_name.name == sym::as_str && actual.peel_refs().is_str() { + let msg = "remove this method call"; + let mut fallback_span = true; + if let SelfSource::MethodCall(expr) = source { + let call_expr = + self.tcx.hir().expect_expr(self.tcx.hir().get_parent_node(expr.hir_id)); + if let Some(span) = call_expr.span.trim_start(expr.span) { + err.span_suggestion(span, msg, "", Applicability::MachineApplicable); + fallback_span = false; + } + } + if fallback_span { + err.span_label(span, msg); + } + } else if let Some(lev_candidate) = lev_candidate { + // Don't emit a suggestion if we found an actual method + // that had unsatisfied trait bounds + if unsatisfied_predicates.is_empty() { + let def_kind = lev_candidate.kind.as_def_kind(); + err.span_suggestion( + span, + &format!( + "there is {} {} with a similar name", + def_kind.article(), + def_kind.descr(lev_candidate.def_id), + ), + lev_candidate.name, + Applicability::MaybeIncorrect, + ); + } + } + + return Some(err); + } + + MethodError::Ambiguity(sources) => { + let mut err = struct_span_err!( + self.sess(), + item_name.span, + E0034, + "multiple applicable items in scope" + ); + err.span_label(item_name.span, format!("multiple `{}` found", item_name)); + + report_candidates(span, &mut err, sources, sugg_span); + err.emit(); + } + + MethodError::PrivateMatch(kind, def_id, out_of_scope_traits) => { + let kind = kind.descr(def_id); + let mut err = struct_span_err!( + self.tcx.sess, + item_name.span, + E0624, + "{} `{}` is private", + kind, + item_name + ); + err.span_label(item_name.span, &format!("private {}", kind)); + let sp = self + .tcx + .hir() + .span_if_local(def_id) + .unwrap_or_else(|| self.tcx.def_span(def_id)); + err.span_label(sp, &format!("private {} defined here", kind)); + self.suggest_valid_traits(&mut err, out_of_scope_traits); + err.emit(); + } + + MethodError::IllegalSizedBound(candidates, needs_mut, bound_span) => { + let msg = format!("the `{}` method cannot be invoked on a trait object", item_name); + let mut err = self.sess().struct_span_err(span, &msg); + err.span_label(bound_span, "this has a `Sized` requirement"); + if !candidates.is_empty() { + let help = format!( + 
"{an}other candidate{s} {were} found in the following trait{s}, perhaps \ + add a `use` for {one_of_them}:", + an = if candidates.len() == 1 { "an" } else { "" }, + s = pluralize!(candidates.len()), + were = pluralize!("was", candidates.len()), + one_of_them = if candidates.len() == 1 { "it" } else { "one_of_them" }, + ); + self.suggest_use_candidates(&mut err, help, candidates); + } + if let ty::Ref(region, t_type, mutability) = rcvr_ty.kind() { + if needs_mut { + let trait_type = self.tcx.mk_ref( + *region, + ty::TypeAndMut { ty: *t_type, mutbl: mutability.invert() }, + ); + err.note(&format!("you need `{}` instead of `{}`", trait_type, rcvr_ty)); + } + } + err.emit(); + } + + MethodError::BadReturnType => bug!("no return type expectations but got BadReturnType"), + } + None + } + + fn suggest_field_call( + &self, + span: Span, + rcvr_ty: Ty<'tcx>, + expr: &hir::Expr<'_>, + item_name: Ident, + err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>, + ) -> bool { + let tcx = self.tcx; + let field_receiver = self.autoderef(span, rcvr_ty).find_map(|(ty, _)| match ty.kind() { + ty::Adt(def, substs) if !def.is_enum() => { + let variant = &def.non_enum_variant(); + tcx.find_field_index(item_name, variant).map(|index| { + let field = &variant.fields[index]; + let field_ty = field.ty(tcx, substs); + (field, field_ty) + }) + } + _ => None, + }); + if let Some((field, field_ty)) = field_receiver { + let scope = tcx.parent_module(self.body_id).to_def_id(); + let is_accessible = field.vis.is_accessible_from(scope, tcx); + + if is_accessible { + if self.is_fn_ty(field_ty, span) { + let expr_span = expr.span.to(item_name.span); + err.multipart_suggestion( + &format!( + "to call the function stored in `{}`, \ + surround the field access with parentheses", + item_name, + ), + vec![ + (expr_span.shrink_to_lo(), '('.to_string()), + (expr_span.shrink_to_hi(), ')'.to_string()), + ], + Applicability::MachineApplicable, + ); + } else { + let call_expr = tcx.hir().expect_expr(tcx.hir().get_parent_node(expr.hir_id)); + + if let Some(span) = call_expr.span.trim_start(item_name.span) { + err.span_suggestion( + span, + "remove the arguments", + "", + Applicability::MaybeIncorrect, + ); + } + } + } + + let field_kind = if is_accessible { "field" } else { "private field" }; + err.span_label(item_name.span, format!("{}, not a method", field_kind)); + return true; + } + false + } + + fn suggest_constraining_numerical_ty( + &self, + tcx: TyCtxt<'tcx>, + actual: Ty<'tcx>, + source: SelfSource<'_>, + span: Span, + item_kind: &str, + item_name: Ident, + ty_str: &str, + ) -> bool { + let found_candidate = all_traits(self.tcx) + .into_iter() + .any(|info| self.associated_value(info.def_id, item_name).is_some()); + let found_assoc = |ty: Ty<'tcx>| { + simplify_type(tcx, ty, TreatParams::AsInfer) + .and_then(|simp| { + tcx.incoherent_impls(simp) + .iter() + .find_map(|&id| self.associated_value(id, item_name)) + }) + .is_some() + }; + let found_candidate = found_candidate + || found_assoc(tcx.types.i8) + || found_assoc(tcx.types.i16) + || found_assoc(tcx.types.i32) + || found_assoc(tcx.types.i64) + || found_assoc(tcx.types.i128) + || found_assoc(tcx.types.u8) + || found_assoc(tcx.types.u16) + || found_assoc(tcx.types.u32) + || found_assoc(tcx.types.u64) + || found_assoc(tcx.types.u128) + || found_assoc(tcx.types.f32) + || found_assoc(tcx.types.f32); + if found_candidate + && actual.is_numeric() + && !actual.has_concrete_skeleton() + && let SelfSource::MethodCall(expr) = source + { + let mut err = struct_span_err!( + tcx.sess, + 
span, + E0689, + "can't call {} `{}` on ambiguous numeric type `{}`", + item_kind, + item_name, + ty_str + ); + let concrete_type = if actual.is_integral() { "i32" } else { "f32" }; + match expr.kind { + ExprKind::Lit(ref lit) => { + // numeric literal + let snippet = tcx + .sess + .source_map() + .span_to_snippet(lit.span) + .unwrap_or_else(|_| "".to_owned()); + + // If this is a floating point literal that ends with '.', + // get rid of it to stop this from becoming a member access. + let snippet = snippet.strip_suffix('.').unwrap_or(&snippet); + + err.span_suggestion( + lit.span, + &format!( + "you must specify a concrete type for this numeric value, \ + like `{}`", + concrete_type + ), + format!("{snippet}_{concrete_type}"), + Applicability::MaybeIncorrect, + ); + } + ExprKind::Path(QPath::Resolved(_, path)) => { + // local binding + if let hir::def::Res::Local(hir_id) = path.res { + let span = tcx.hir().span(hir_id); + let snippet = tcx.sess.source_map().span_to_snippet(span); + let filename = tcx.sess.source_map().span_to_filename(span); + + let parent_node = + self.tcx.hir().get(self.tcx.hir().get_parent_node(hir_id)); + let msg = format!( + "you must specify a type for this binding, like `{}`", + concrete_type, + ); + + match (filename, parent_node, snippet) { + ( + FileName::Real(_), + Node::Local(hir::Local { + source: hir::LocalSource::Normal, + ty, + .. + }), + Ok(ref snippet), + ) => { + err.span_suggestion( + // account for `let x: _ = 42;` + // ^^^^ + span.to(ty.as_ref().map(|ty| ty.span).unwrap_or(span)), + &msg, + format!("{}: {}", snippet, concrete_type), + Applicability::MaybeIncorrect, + ); + } + _ => { + err.span_label(span, msg); + } + } + } + } + _ => {} + } + err.emit(); + return true; + } + false + } + + fn check_for_field_method( + &self, + err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>, + source: SelfSource<'tcx>, + span: Span, + actual: Ty<'tcx>, + item_name: Ident, + ) { + if let SelfSource::MethodCall(expr) = source + && let Some((fields, substs)) = self.get_field_candidates(span, actual) + { + let call_expr = self.tcx.hir().expect_expr(self.tcx.hir().get_parent_node(expr.hir_id)); + for candidate_field in fields.iter() { + if let Some(field_path) = self.check_for_nested_field_satisfying( + span, + &|_, field_ty| { + self.lookup_probe( + span, + item_name, + field_ty, + call_expr, + ProbeScope::AllTraits, + ) + .is_ok() + }, + candidate_field, + substs, + vec![], + self.tcx.parent_module(expr.hir_id).to_def_id(), + ) { + let field_path_str = field_path + .iter() + .map(|id| id.name.to_ident_string()) + .collect::>() + .join("."); + debug!("field_path_str: {:?}", field_path_str); + + err.span_suggestion_verbose( + item_name.span.shrink_to_lo(), + "one of the expressions' fields has a method of the same name", + format!("{field_path_str}."), + Applicability::MaybeIncorrect, + ); + } + } + } + } + + fn check_for_unwrap_self( + &self, + err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>, + source: SelfSource<'tcx>, + span: Span, + actual: Ty<'tcx>, + item_name: Ident, + ) { + let tcx = self.tcx; + let SelfSource::MethodCall(expr) = source else { return; }; + let call_expr = tcx.hir().expect_expr(tcx.hir().get_parent_node(expr.hir_id)); + + let ty::Adt(kind, substs) = actual.kind() else { return; }; + if !kind.is_enum() { + return; + } + + let matching_variants: Vec<_> = kind + .variants() + .iter() + .flat_map(|variant| { + let [field] = &variant.fields[..] 
else { return None; }; + let field_ty = field.ty(tcx, substs); + + // Skip `_`, since that'll just lead to ambiguity. + if self.resolve_vars_if_possible(field_ty).is_ty_var() { + return None; + } + + self.lookup_probe(span, item_name, field_ty, call_expr, ProbeScope::AllTraits) + .ok() + .map(|pick| (variant, field, pick)) + }) + .collect(); + + let ret_ty_matches = |diagnostic_item| { + if let Some(ret_ty) = self + .ret_coercion + .as_ref() + .map(|c| self.resolve_vars_if_possible(c.borrow().expected_ty())) + && let ty::Adt(kind, _) = ret_ty.kind() + && tcx.get_diagnostic_item(diagnostic_item) == Some(kind.did()) + { + true + } else { + false + } + }; + + match &matching_variants[..] { + [(_, field, pick)] => { + let self_ty = field.ty(tcx, substs); + err.span_note( + tcx.def_span(pick.item.def_id), + &format!("the method `{item_name}` exists on the type `{self_ty}`"), + ); + let (article, kind, variant, question) = + if Some(kind.did()) == tcx.get_diagnostic_item(sym::Result) { + ("a", "Result", "Err", ret_ty_matches(sym::Result)) + } else if Some(kind.did()) == tcx.get_diagnostic_item(sym::Option) { + ("an", "Option", "None", ret_ty_matches(sym::Option)) + } else { + return; + }; + if question { + err.span_suggestion_verbose( + expr.span.shrink_to_hi(), + format!( + "use the `?` operator to extract the `{self_ty}` value, propagating \ + {article} `{kind}::{variant}` value to the caller" + ), + "?", + Applicability::MachineApplicable, + ); + } else { + err.span_suggestion_verbose( + expr.span.shrink_to_hi(), + format!( + "consider using `{kind}::expect` to unwrap the `{self_ty}` value, \ + panicking if the value is {article} `{kind}::{variant}`" + ), + ".expect(\"REASON\")", + Applicability::HasPlaceholders, + ); + } + } + // FIXME(compiler-errors): Support suggestions for other matching enum variants + _ => {} + } + } + + pub(crate) fn note_unmet_impls_on_type( + &self, + err: &mut Diagnostic, + errors: Vec>, + ) { + let all_local_types_needing_impls = + errors.iter().all(|e| match e.obligation.predicate.kind().skip_binder() { + ty::PredicateKind::Trait(pred) => match pred.self_ty().kind() { + ty::Adt(def, _) => def.did().is_local(), + _ => false, + }, + _ => false, + }); + let mut preds: Vec<_> = errors + .iter() + .filter_map(|e| match e.obligation.predicate.kind().skip_binder() { + ty::PredicateKind::Trait(pred) => Some(pred), + _ => None, + }) + .collect(); + preds.sort_by_key(|pred| (pred.def_id(), pred.self_ty())); + let def_ids = preds + .iter() + .filter_map(|pred| match pred.self_ty().kind() { + ty::Adt(def, _) => Some(def.did()), + _ => None, + }) + .collect::>(); + let mut spans: MultiSpan = def_ids + .iter() + .filter_map(|def_id| { + let span = self.tcx.def_span(*def_id); + if span.is_dummy() { None } else { Some(span) } + }) + .collect::>() + .into(); + + for pred in &preds { + match pred.self_ty().kind() { + ty::Adt(def, _) if def.did().is_local() => { + spans.push_span_label( + self.tcx.def_span(def.did()), + format!("must implement `{}`", pred.trait_ref.print_only_trait_path()), + ); + } + _ => {} + } + } + + if all_local_types_needing_impls && spans.primary_span().is_some() { + let msg = if preds.len() == 1 { + format!( + "an implementation of `{}` might be missing for `{}`", + preds[0].trait_ref.print_only_trait_path(), + preds[0].self_ty() + ) + } else { + format!( + "the following type{} would have to `impl` {} required trait{} for this \ + operation to be valid", + pluralize!(def_ids.len()), + if def_ids.len() == 1 { "its" } else { "their" }, + 
pluralize!(preds.len()),
+                )
+            };
+            err.span_note(spans, &msg);
+        }
+
+        let preds: Vec<_> = errors
+            .iter()
+            .map(|e| (e.obligation.predicate, None, Some(e.obligation.cause.clone())))
+            .collect();
+        self.suggest_derive(err, &preds);
+    }
+
+    fn suggest_derive(
+        &self,
+        err: &mut Diagnostic,
+        unsatisfied_predicates: &[(
+            ty::Predicate<'tcx>,
+            Option<ty::Predicate<'tcx>>,
+            Option<ObligationCause<'tcx>>,
+        )],
+    ) {
+        let mut derives = Vec::<(String, Span, Symbol)>::new();
+        let mut traits = Vec::<Span>::new();
+        for (pred, _, _) in unsatisfied_predicates {
+            let ty::PredicateKind::Trait(trait_pred) = pred.kind().skip_binder() else { continue };
+            let adt = match trait_pred.self_ty().ty_adt_def() {
+                Some(adt) if adt.did().is_local() => adt,
+                _ => continue,
+            };
+            if let Some(diagnostic_name) = self.tcx.get_diagnostic_name(trait_pred.def_id()) {
+                let can_derive = match diagnostic_name {
+                    sym::Default => !adt.is_enum(),
+                    sym::Eq
+                    | sym::PartialEq
+                    | sym::Ord
+                    | sym::PartialOrd
+                    | sym::Clone
+                    | sym::Copy
+                    | sym::Hash
+                    | sym::Debug => true,
+                    _ => false,
+                };
+                if can_derive {
+                    let self_name = trait_pred.self_ty().to_string();
+                    let self_span = self.tcx.def_span(adt.did());
+                    if let Some(poly_trait_ref) = pred.to_opt_poly_trait_pred() {
+                        for super_trait in supertraits(self.tcx, poly_trait_ref.to_poly_trait_ref())
+                        {
+                            if let Some(parent_diagnostic_name) =
+                                self.tcx.get_diagnostic_name(super_trait.def_id())
+                            {
+                                derives.push((
+                                    self_name.clone(),
+                                    self_span,
+                                    parent_diagnostic_name,
+                                ));
+                            }
+                        }
+                    }
+                    derives.push((self_name, self_span, diagnostic_name));
+                } else {
+                    traits.push(self.tcx.def_span(trait_pred.def_id()));
+                }
+            } else {
+                traits.push(self.tcx.def_span(trait_pred.def_id()));
+            }
+        }
+        traits.sort();
+        traits.dedup();
+
+        derives.sort();
+        derives.dedup();
+
+        let mut derives_grouped = Vec::<(String, Span, String)>::new();
+        for (self_name, self_span, trait_name) in derives.into_iter() {
+            if let Some((last_self_name, _, ref mut last_trait_names)) = derives_grouped.last_mut()
+            {
+                if last_self_name == &self_name {
+                    last_trait_names.push_str(format!(", {}", trait_name).as_str());
+                    continue;
+                }
+            }
+            derives_grouped.push((self_name, self_span, trait_name.to_string()));
+        }
+
+        let len = traits.len();
+        if len > 0 {
+            let span: MultiSpan = traits.into();
+            err.span_note(
+                span,
+                &format!("the following trait{} must be implemented", pluralize!(len),),
+            );
+        }
+
+        for (self_name, self_span, traits) in &derives_grouped {
+            err.span_suggestion_verbose(
+                self_span.shrink_to_lo(),
+                &format!("consider annotating `{}` with `#[derive({})]`", self_name, traits),
+                format!("#[derive({})]\n", traits),
+                Applicability::MaybeIncorrect,
+            );
+        }
+    }
+
+    /// Print out the type for use in value namespace.
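+    ///
+    /// Editorial note: the string is used as a path prefix in suggestions, e.g.
+    /// the "use associated function syntax instead" suggestion earlier in this
+    /// file is built as `format!("{}::{}", ty_str, item_name)`, so for ADTs the
+    /// substituted `ty::Instance` printing is used rather than plain type syntax.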
+ fn ty_to_value_string(&self, ty: Ty<'tcx>) -> String { + match ty.kind() { + ty::Adt(def, substs) => format!("{}", ty::Instance::new(def.did(), substs)), + _ => self.ty_to_string(ty), + } + } + + fn suggest_await_before_method( + &self, + err: &mut Diagnostic, + item_name: Ident, + ty: Ty<'tcx>, + call: &hir::Expr<'_>, + span: Span, + ) { + let output_ty = match self.get_impl_future_output_ty(ty) { + Some(output_ty) => self.resolve_vars_if_possible(output_ty).skip_binder(), + _ => return, + }; + let method_exists = self.method_exists(item_name, output_ty, call.hir_id, true); + debug!("suggest_await_before_method: is_method_exist={}", method_exists); + if method_exists { + err.span_suggestion_verbose( + span.shrink_to_lo(), + "consider `await`ing on the `Future` and calling the method on its `Output`", + "await.", + Applicability::MaybeIncorrect, + ); + } + } + + fn suggest_use_candidates(&self, err: &mut Diagnostic, msg: String, candidates: Vec) { + let parent_map = self.tcx.visible_parent_map(()); + + // Separate out candidates that must be imported with a glob, because they are named `_` + // and cannot be referred with their identifier. + let (candidates, globs): (Vec<_>, Vec<_>) = candidates.into_iter().partition(|trait_did| { + if let Some(parent_did) = parent_map.get(trait_did) { + // If the item is re-exported as `_`, we should suggest a glob-import instead. + if *parent_did != self.tcx.parent(*trait_did) + && self + .tcx + .module_children(*parent_did) + .iter() + .filter(|child| child.res.opt_def_id() == Some(*trait_did)) + .all(|child| child.ident.name == kw::Underscore) + { + return false; + } + } + + true + }); + + let module_did = self.tcx.parent_module(self.body_id); + let (module, _, _) = self.tcx.hir().get_module(module_did); + let span = module.spans.inject_use_span; + + let path_strings = candidates.iter().map(|trait_did| { + format!("use {};\n", with_crate_prefix!(self.tcx.def_path_str(*trait_did)),) + }); + + let glob_path_strings = globs.iter().map(|trait_did| { + let parent_did = parent_map.get(trait_did).unwrap(); + format!( + "use {}::*; // trait {}\n", + with_crate_prefix!(self.tcx.def_path_str(*parent_did)), + self.tcx.item_name(*trait_did), + ) + }); + + err.span_suggestions( + span, + &msg, + path_strings.chain(glob_path_strings), + Applicability::MaybeIncorrect, + ); + } + + fn suggest_valid_traits( + &self, + err: &mut Diagnostic, + valid_out_of_scope_traits: Vec, + ) -> bool { + if !valid_out_of_scope_traits.is_empty() { + let mut candidates = valid_out_of_scope_traits; + candidates.sort(); + candidates.dedup(); + + // `TryFrom` and `FromIterator` have no methods + let edition_fix = candidates + .iter() + .find(|did| self.tcx.is_diagnostic_item(sym::TryInto, **did)) + .copied(); + + err.help("items from traits can only be used if the trait is in scope"); + let msg = format!( + "the following {traits_are} implemented but not in scope; \ + perhaps add a `use` for {one_of_them}:", + traits_are = if candidates.len() == 1 { "trait is" } else { "traits are" }, + one_of_them = if candidates.len() == 1 { "it" } else { "one of them" }, + ); + + self.suggest_use_candidates(err, msg, candidates); + if let Some(did) = edition_fix { + err.note(&format!( + "'{}' is included in the prelude starting in Edition 2021", + with_crate_prefix!(self.tcx.def_path_str(did)) + )); + } + + true + } else { + false + } + } + + fn suggest_traits_to_import( + &self, + err: &mut Diagnostic, + span: Span, + rcvr_ty: Ty<'tcx>, + item_name: Ident, + inputs_len: Option, + source: 
SelfSource<'tcx>, + valid_out_of_scope_traits: Vec, + unsatisfied_predicates: &[( + ty::Predicate<'tcx>, + Option>, + Option>, + )], + unsatisfied_bounds: bool, + ) { + let mut alt_rcvr_sugg = false; + if let (SelfSource::MethodCall(rcvr), false) = (source, unsatisfied_bounds) { + debug!(?span, ?item_name, ?rcvr_ty, ?rcvr); + let skippable = [ + self.tcx.lang_items().clone_trait(), + self.tcx.lang_items().deref_trait(), + self.tcx.lang_items().deref_mut_trait(), + self.tcx.lang_items().drop_trait(), + self.tcx.get_diagnostic_item(sym::AsRef), + ]; + // Try alternative arbitrary self types that could fulfill this call. + // FIXME: probe for all types that *could* be arbitrary self-types, not + // just this list. + for (rcvr_ty, post) in &[ + (rcvr_ty, ""), + (self.tcx.mk_mut_ref(self.tcx.lifetimes.re_erased, rcvr_ty), "&mut "), + (self.tcx.mk_imm_ref(self.tcx.lifetimes.re_erased, rcvr_ty), "&"), + ] { + match self.lookup_probe(span, item_name, *rcvr_ty, rcvr, ProbeScope::AllTraits) { + Ok(pick) => { + // If the method is defined for the receiver we have, it likely wasn't `use`d. + // We point at the method, but we just skip the rest of the check for arbitrary + // self types and rely on the suggestion to `use` the trait from + // `suggest_valid_traits`. + let did = Some(pick.item.container_id(self.tcx)); + let skip = skippable.contains(&did); + if pick.autoderefs == 0 && !skip { + err.span_label( + pick.item.ident(self.tcx).span, + &format!("the method is available for `{}` here", rcvr_ty), + ); + } + break; + } + Err(MethodError::Ambiguity(_)) => { + // If the method is defined (but ambiguous) for the receiver we have, it is also + // likely we haven't `use`d it. It may be possible that if we `Box`/`Pin`/etc. + // the receiver, then it might disambiguate this method, but I think these + // suggestions are generally misleading (see #94218). + break; + } + _ => {} + } + + for (rcvr_ty, pre) in &[ + (self.tcx.mk_lang_item(*rcvr_ty, LangItem::OwnedBox), "Box::new"), + (self.tcx.mk_lang_item(*rcvr_ty, LangItem::Pin), "Pin::new"), + (self.tcx.mk_diagnostic_item(*rcvr_ty, sym::Arc), "Arc::new"), + (self.tcx.mk_diagnostic_item(*rcvr_ty, sym::Rc), "Rc::new"), + ] { + if let Some(new_rcvr_t) = *rcvr_ty + && let Ok(pick) = self.lookup_probe( + span, + item_name, + new_rcvr_t, + rcvr, + ProbeScope::AllTraits, + ) + { + debug!("try_alt_rcvr: pick candidate {:?}", pick); + let did = Some(pick.item.container_id(self.tcx)); + // We don't want to suggest a container type when the missing + // method is `.clone()` or `.deref()` otherwise we'd suggest + // `Arc::new(foo).clone()`, which is far from what the user wants. + // Explicitly ignore the `Pin::as_ref()` method as `Pin` does not + // implement the `AsRef` trait. 
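Illustrative sketch of the receiver-wrapping probe above (hypothetical `Widget`/`share` names): a method that is only available on an `Arc<Self>` receiver, for which the emitted suggestion wraps the receiver expression in `Arc::new(..)`.

```rust
use std::sync::Arc;

struct Widget;

impl Widget {
    // Only callable when the receiver is an `Arc<Widget>`.
    fn share(self: Arc<Self>) -> Arc<Self> {
        self
    }
}

fn main() {
    let w = Widget;
    // Writing `w.share()` directly fails; the probe above finds the method for the
    // `Arc`-wrapped receiver, labels "the method is available for `Arc<Widget>` here",
    // and suggests wrapping the receiver expression: `Arc::new(w).share()`.
    let shared = Arc::new(w).share();
    assert_eq!(Arc::strong_count(&shared), 1);
}
```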
+ let skip = skippable.contains(&did) + || (("Pin::new" == *pre) && (sym::as_ref == item_name.name)) + || inputs_len.map_or(false, |inputs_len| pick.item.kind == ty::AssocKind::Fn && self.tcx.fn_sig(pick.item.def_id).skip_binder().inputs().len() != inputs_len); + // Make sure the method is defined for the *actual* receiver: we don't + // want to treat `Box` as a receiver if it only works because of + // an autoderef to `&self` + if pick.autoderefs == 0 && !skip { + err.span_label( + pick.item.ident(self.tcx).span, + &format!("the method is available for `{}` here", new_rcvr_t), + ); + err.multipart_suggestion( + "consider wrapping the receiver expression with the \ + appropriate type", + vec![ + (rcvr.span.shrink_to_lo(), format!("{}({}", pre, post)), + (rcvr.span.shrink_to_hi(), ")".to_string()), + ], + Applicability::MaybeIncorrect, + ); + // We don't care about the other suggestions. + alt_rcvr_sugg = true; + } + } + } + } + } + if self.suggest_valid_traits(err, valid_out_of_scope_traits) { + return; + } + + let type_is_local = self.type_derefs_to_local(span, rcvr_ty, source); + + let mut arbitrary_rcvr = vec![]; + // There are no traits implemented, so lets suggest some traits to + // implement, by finding ones that have the item name, and are + // legal to implement. + let mut candidates = all_traits(self.tcx) + .into_iter() + // Don't issue suggestions for unstable traits since they're + // unlikely to be implementable anyway + .filter(|info| match self.tcx.lookup_stability(info.def_id) { + Some(attr) => attr.level.is_stable(), + None => true, + }) + .filter(|info| { + // We approximate the coherence rules to only suggest + // traits that are legal to implement by requiring that + // either the type or trait is local. Multi-dispatch means + // this isn't perfect (that is, there are cases when + // implementing a trait would be legal but is rejected + // here). + unsatisfied_predicates.iter().all(|(p, _, _)| { + match p.kind().skip_binder() { + // Hide traits if they are present in predicates as they can be fixed without + // having to implement them. + ty::PredicateKind::Trait(t) => t.def_id() == info.def_id, + ty::PredicateKind::Projection(p) => { + p.projection_ty.item_def_id == info.def_id + } + _ => false, + } + }) && (type_is_local || info.def_id.is_local()) + && self + .associated_value(info.def_id, item_name) + .filter(|item| { + if let ty::AssocKind::Fn = item.kind { + let id = item + .def_id + .as_local() + .map(|def_id| self.tcx.hir().local_def_id_to_hir_id(def_id)); + if let Some(hir::Node::TraitItem(hir::TraitItem { + kind: hir::TraitItemKind::Fn(fn_sig, method), + .. + })) = id.map(|id| self.tcx.hir().get(id)) + { + let self_first_arg = match method { + hir::TraitFn::Required([ident, ..]) => { + ident.name == kw::SelfLower + } + hir::TraitFn::Provided(body_id) => { + self.tcx.hir().body(*body_id).params.first().map_or( + false, + |param| { + matches!( + param.pat.kind, + hir::PatKind::Binding(_, _, ident, _) + if ident.name == kw::SelfLower + ) + }, + ) + } + _ => false, + }; + + if !fn_sig.decl.implicit_self.has_implicit_self() + && self_first_arg + { + if let Some(ty) = fn_sig.decl.inputs.get(0) { + arbitrary_rcvr.push(ty.span); + } + return false; + } + } + } + // We only want to suggest public or local traits (#45781). 
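For orientation, a user-level sketch (hypothetical `Describe`/`show` names) of the bound-restriction case assembled just below, where the missing item lives on a trait that the unbounded type parameter could be restricted with:

```rust
trait Describe {
    fn describe(&self) -> String;
}

impl Describe for i32 {
    fn describe(&self) -> String {
        format!("the number {self}")
    }
}

// With the `T: Describe` bound removed, `value.describe()` is not found and the
// machinery below suggests "restrict type parameter `T` with" the `Describe` trait.
fn show<T: Describe>(value: T) -> String {
    value.describe()
}

fn main() {
    assert_eq!(show(7), "the number 7");
}
```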
+ item.visibility(self.tcx).is_public() || info.def_id.is_local() + }) + .is_some() + }) + .collect::>(); + for span in &arbitrary_rcvr { + err.span_label( + *span, + "the method might not be found because of this arbitrary self type", + ); + } + if alt_rcvr_sugg { + return; + } + + if !candidates.is_empty() { + // Sort from most relevant to least relevant. + candidates.sort_by(|a, b| a.cmp(b).reverse()); + candidates.dedup(); + + let param_type = match rcvr_ty.kind() { + ty::Param(param) => Some(param), + ty::Ref(_, ty, _) => match ty.kind() { + ty::Param(param) => Some(param), + _ => None, + }, + _ => None, + }; + err.help(if param_type.is_some() { + "items from traits can only be used if the type parameter is bounded by the trait" + } else { + "items from traits can only be used if the trait is implemented and in scope" + }); + let candidates_len = candidates.len(); + let message = |action| { + format!( + "the following {traits_define} an item `{name}`, perhaps you need to {action} \ + {one_of_them}:", + traits_define = + if candidates_len == 1 { "trait defines" } else { "traits define" }, + action = action, + one_of_them = if candidates_len == 1 { "it" } else { "one of them" }, + name = item_name, + ) + }; + // Obtain the span for `param` and use it for a structured suggestion. + if let Some(param) = param_type { + let generics = self.tcx.generics_of(self.body_id.owner.to_def_id()); + let type_param = generics.type_param(param, self.tcx); + let hir = self.tcx.hir(); + if let Some(def_id) = type_param.def_id.as_local() { + let id = hir.local_def_id_to_hir_id(def_id); + // Get the `hir::Param` to verify whether it already has any bounds. + // We do this to avoid suggesting code that ends up as `T: FooBar`, + // instead we suggest `T: Foo + Bar` in that case. + match hir.get(id) { + Node::GenericParam(param) => { + enum Introducer { + Plus, + Colon, + Nothing, + } + let ast_generics = hir.get_generics(id.owner).unwrap(); + let (sp, mut introducer) = if let Some(span) = + ast_generics.bounds_span_for_suggestions(def_id) + { + (span, Introducer::Plus) + } else if let Some(colon_span) = param.colon_span { + (colon_span.shrink_to_hi(), Introducer::Nothing) + } else { + (param.span.shrink_to_hi(), Introducer::Colon) + }; + if matches!( + param.kind, + hir::GenericParamKind::Type { synthetic: true, .. }, + ) { + introducer = Introducer::Plus + } + let trait_def_ids: FxHashSet = ast_generics + .bounds_for_param(def_id) + .flat_map(|bp| bp.bounds.iter()) + .filter_map(|bound| bound.trait_ref()?.trait_def_id()) + .collect(); + if !candidates.iter().any(|t| trait_def_ids.contains(&t.def_id)) { + err.span_suggestions( + sp, + &message(format!( + "restrict type parameter `{}` with", + param.name.ident(), + )), + candidates.iter().map(|t| { + format!( + "{} {}", + match introducer { + Introducer::Plus => " +", + Introducer::Colon => ":", + Introducer::Nothing => "", + }, + self.tcx.def_path_str(t.def_id), + ) + }), + Applicability::MaybeIncorrect, + ); + } + return; + } + Node::Item(hir::Item { + kind: hir::ItemKind::Trait(.., bounds, _), + ident, + .. 
+ }) => { + let (sp, sep, article) = if bounds.is_empty() { + (ident.span.shrink_to_hi(), ":", "a") + } else { + (bounds.last().unwrap().span().shrink_to_hi(), " +", "another") + }; + err.span_suggestions( + sp, + &message(format!("add {} supertrait for", article)), + candidates.iter().map(|t| { + format!("{} {}", sep, self.tcx.def_path_str(t.def_id),) + }), + Applicability::MaybeIncorrect, + ); + return; + } + _ => {} + } + } + } + + let (potential_candidates, explicitly_negative) = if param_type.is_some() { + // FIXME: Even though negative bounds are not implemented, we could maybe handle + // cases where a positive bound implies a negative impl. + (candidates, Vec::new()) + } else if let Some(simp_rcvr_ty) = + simplify_type(self.tcx, rcvr_ty, TreatParams::AsPlaceholder) + { + let mut potential_candidates = Vec::new(); + let mut explicitly_negative = Vec::new(); + for candidate in candidates { + // Check if there's a negative impl of `candidate` for `rcvr_ty` + if self + .tcx + .all_impls(candidate.def_id) + .filter(|imp_did| { + self.tcx.impl_polarity(*imp_did) == ty::ImplPolarity::Negative + }) + .any(|imp_did| { + let imp = self.tcx.impl_trait_ref(imp_did).unwrap(); + let imp_simp = + simplify_type(self.tcx, imp.self_ty(), TreatParams::AsPlaceholder); + imp_simp.map_or(false, |s| s == simp_rcvr_ty) + }) + { + explicitly_negative.push(candidate); + } else { + potential_candidates.push(candidate); + } + } + (potential_candidates, explicitly_negative) + } else { + // We don't know enough about `recv_ty` to make proper suggestions. + (candidates, Vec::new()) + }; + + let action = if let Some(param) = param_type { + format!("restrict type parameter `{}` with", param) + } else { + // FIXME: it might only need to be imported into scope, not implemented. + "implement".to_string() + }; + match &potential_candidates[..] { + [] => {} + [trait_info] if trait_info.def_id.is_local() => { + err.span_note( + self.tcx.def_span(trait_info.def_id), + &format!( + "`{}` defines an item `{}`, perhaps you need to {} it", + self.tcx.def_path_str(trait_info.def_id), + item_name, + action + ), + ); + } + trait_infos => { + let mut msg = message(action); + for (i, trait_info) in trait_infos.iter().enumerate() { + msg.push_str(&format!( + "\ncandidate #{}: `{}`", + i + 1, + self.tcx.def_path_str(trait_info.def_id), + )); + } + err.note(&msg); + } + } + match &explicitly_negative[..] { + [] => {} + [trait_info] => { + let msg = format!( + "the trait `{}` defines an item `{}`, but is explicitly unimplemented", + self.tcx.def_path_str(trait_info.def_id), + item_name + ); + err.note(&msg); + } + trait_infos => { + let mut msg = format!( + "the following traits define an item `{}`, but are explicitly unimplemented:", + item_name + ); + for trait_info in trait_infos { + msg.push_str(&format!("\n{}", self.tcx.def_path_str(trait_info.def_id))); + } + err.note(&msg); + } + } + } + } + + /// Checks whether there is a local type somewhere in the chain of + /// autoderefs of `rcvr_ty`. + fn type_derefs_to_local( + &self, + span: Span, + rcvr_ty: Ty<'tcx>, + source: SelfSource<'tcx>, + ) -> bool { + fn is_local(ty: Ty<'_>) -> bool { + match ty.kind() { + ty::Adt(def, _) => def.did().is_local(), + ty::Foreign(did) => did.is_local(), + ty::Dynamic(tr, ..) => tr.principal().map_or(false, |d| d.def_id().is_local()), + ty::Param(_) => true, + + // Everything else (primitive types, etc.) 
is effectively + // non-local (there are "edge" cases, e.g., `(LocalType,)`, but + // the noise from these sort of types is usually just really + // annoying, rather than any sort of help). + _ => false, + } + } + + // This occurs for UFCS desugaring of `T::method`, where there is no + // receiver expression for the method call, and thus no autoderef. + if let SelfSource::QPath(_) = source { + return is_local(self.resolve_vars_with_obligations(rcvr_ty)); + } + + self.autoderef(span, rcvr_ty).any(|(ty, _)| is_local(ty)) + } +} + +#[derive(Copy, Clone, Debug)] +pub enum SelfSource<'a> { + QPath(&'a hir::Ty<'a>), + MethodCall(&'a hir::Expr<'a> /* rcvr */), +} + +#[derive(Copy, Clone)] +pub struct TraitInfo { + pub def_id: DefId, +} + +impl PartialEq for TraitInfo { + fn eq(&self, other: &TraitInfo) -> bool { + self.cmp(other) == Ordering::Equal + } +} +impl Eq for TraitInfo {} +impl PartialOrd for TraitInfo { + fn partial_cmp(&self, other: &TraitInfo) -> Option { + Some(self.cmp(other)) + } +} +impl Ord for TraitInfo { + fn cmp(&self, other: &TraitInfo) -> Ordering { + // Local crates are more important than remote ones (local: + // `cnum == 0`), and otherwise we throw in the defid for totality. + + let lhs = (other.def_id.krate, other.def_id); + let rhs = (self.def_id.krate, self.def_id); + lhs.cmp(&rhs) + } +} + +/// Retrieves all traits in this crate and any dependent crates, +/// and wraps them into `TraitInfo` for custom sorting. +pub fn all_traits(tcx: TyCtxt<'_>) -> Vec { + tcx.all_traits().map(|def_id| TraitInfo { def_id }).collect() +} + +fn print_disambiguation_help<'tcx>( + item_name: Ident, + args: Option<&'tcx [hir::Expr<'tcx>]>, + err: &mut Diagnostic, + trait_name: String, + rcvr_ty: Ty<'_>, + kind: ty::AssocKind, + def_id: DefId, + span: Span, + candidate: Option, + source_map: &source_map::SourceMap, + fn_has_self_parameter: bool, +) { + let mut applicability = Applicability::MachineApplicable; + let (span, sugg) = if let (ty::AssocKind::Fn, Some(args)) = (kind, args) { + let args = format!( + "({}{})", + if rcvr_ty.is_region_ptr() { + if rcvr_ty.is_mutable_ptr() { "&mut " } else { "&" } + } else { + "" + }, + args.iter() + .map(|arg| source_map.span_to_snippet(arg.span).unwrap_or_else(|_| { + applicability = Applicability::HasPlaceholders; + "_".to_owned() + })) + .collect::>() + .join(", "), + ); + let trait_name = if !fn_has_self_parameter { + format!("<{} as {}>", rcvr_ty, trait_name) + } else { + trait_name + }; + (span, format!("{}::{}{}", trait_name, item_name, args)) + } else { + (span.with_hi(item_name.span.lo()), format!("<{} as {}>::", rcvr_ty, trait_name)) + }; + err.span_suggestion_verbose( + span, + &format!( + "disambiguate the {} for {}", + kind.as_def_kind().descr(def_id), + if let Some(candidate) = candidate { + format!("candidate #{}", candidate) + } else { + "the candidate".to_string() + }, + ), + sugg, + applicability, + ); +} diff --git a/compiler/rustc_typeck/src/check/mod.rs b/compiler/rustc_typeck/src/check/mod.rs new file mode 100644 index 000000000..17c2e4868 --- /dev/null +++ b/compiler/rustc_typeck/src/check/mod.rs @@ -0,0 +1,970 @@ +/*! + +# typeck: check phase + +Within the check phase of type check, we check each item one at a time +(bodies of function expressions are checked as part of the containing +function). Inference is used to supply types wherever they are unknown. + +By far the most complex case is checking the body of a function. 
This +can be broken down into several distinct phases: + +- gather: creates type variables to represent the type of each local + variable and pattern binding. + +- main: the main pass does the lion's share of the work: it + determines the types of all expressions, resolves + methods, checks for most invalid conditions, and so forth. In + some cases, where a type is unknown, it may create a type or region + variable and use that as the type of an expression. + + In the process of checking, various constraints will be placed on + these type variables through the subtyping relationships requested + through the `demand` module. The `infer` module is in charge + of resolving those constraints. + +- regionck: after main is complete, the regionck pass goes over all + types looking for regions and making sure that they did not escape + into places where they are not in scope. This may also influence the + final assignments of the various region variables if there is some + flexibility. + +- writeback: writes the final types within a function body, replacing + type variables with their final inferred types. These final types + are written into the `tcx.node_types` table, which should *never* contain + any reference to a type variable. + +## Intermediate types + +While type checking a function, the intermediate types for the +expressions, blocks, and so forth contained within the function are +stored in `fcx.node_types` and `fcx.node_substs`. These types +may contain unresolved type variables. After type checking is +complete, the functions in the writeback module are used to take the +types from this table, resolve them, and then write them into their +permanent home in the type context `tcx`. + +This means that during inferencing you should use `fcx.write_ty()` +and `fcx.expr_ty()` / `fcx.node_ty()` to write/obtain the types of +nodes within the function. + +The types of top-level items, which never contain unbound type +variables, are stored directly into the `tcx` typeck_results. + +N.B., a type variable is not the same thing as a type parameter. A +type variable is an instance of a type parameter. That is, +given a generic function `fn foo(t: T)`, while checking the +function `foo`, the type `ty_param(0)` refers to the type `T`, which +is treated in abstract. However, when `foo()` is called, `T` will be +substituted for a fresh type variable `N`. This variable will +eventually be resolved to some concrete type (which might itself be +a type parameter). 
+ +*/ + +pub mod _match; +mod autoderef; +mod callee; +pub mod cast; +mod check; +mod closure; +pub mod coercion; +mod compare_method; +pub mod demand; +mod diverges; +pub mod dropck; +mod expectation; +mod expr; +mod fallback; +mod fn_ctxt; +mod gather_locals; +mod generator_interior; +mod inherited; +pub mod intrinsic; +mod intrinsicck; +pub mod method; +mod op; +mod pat; +mod place_op; +mod region; +pub mod regionck; +pub mod rvalue_scopes; +mod upvar; +pub mod wfcheck; +pub mod writeback; + +use check::{check_abi, check_fn, check_mod_item_types}; +pub use diverges::Diverges; +pub use expectation::Expectation; +pub use fn_ctxt::*; +use hir::def::CtorOf; +pub use inherited::{Inherited, InheritedBuilder}; + +use crate::astconv::AstConv; +use crate::check::gather_locals::GatherLocalsVisitor; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_errors::{ + pluralize, struct_span_err, Applicability, DiagnosticBuilder, EmissionGuarantee, MultiSpan, +}; +use rustc_hir as hir; +use rustc_hir::def::Res; +use rustc_hir::def_id::{DefId, LocalDefId}; +use rustc_hir::intravisit::Visitor; +use rustc_hir::{HirIdMap, ImplicitSelfKind, Node}; +use rustc_index::bit_set::BitSet; +use rustc_index::vec::Idx; +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_middle::ty::query::Providers; +use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef}; +use rustc_middle::ty::{self, Ty, TyCtxt, UserType}; +use rustc_session::config; +use rustc_session::parse::feature_err; +use rustc_session::Session; +use rustc_span::source_map::DUMMY_SP; +use rustc_span::symbol::{kw, Ident}; +use rustc_span::{self, BytePos, Span}; +use rustc_target::abi::VariantIdx; +use rustc_target::spec::abi::Abi; +use rustc_trait_selection::traits; +use rustc_trait_selection::traits::error_reporting::recursive_type_with_infinite_size_error; +use rustc_trait_selection::traits::error_reporting::suggestions::ReturnsVisitor; +use std::cell::RefCell; + +use crate::require_c_abi_if_c_variadic; +use crate::util::common::indenter; + +use self::coercion::DynamicCoerceMany; +use self::region::region_scope_tree; +pub use self::Expectation::*; + +#[macro_export] +macro_rules! type_error_struct { + ($session:expr, $span:expr, $typ:expr, $code:ident, $($message:tt)*) => ({ + let mut err = rustc_errors::struct_span_err!($session, $span, $code, $($message)*); + + if $typ.references_error() { + err.downgrade_to_delayed_bug(); + } + + err + }) +} + +/// The type of a local binding, including the revealed type for anon types. +#[derive(Copy, Clone, Debug)] +pub struct LocalTy<'tcx> { + decl_ty: Ty<'tcx>, + revealed_ty: Ty<'tcx>, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum Needs { + MutPlace, + None, +} + +impl Needs { + fn maybe_mut_place(m: hir::Mutability) -> Self { + match m { + hir::Mutability::Mut => Needs::MutPlace, + hir::Mutability::Not => Needs::None, + } + } +} + +#[derive(Copy, Clone)] +pub struct UnsafetyState { + pub def: hir::HirId, + pub unsafety: hir::Unsafety, + from_fn: bool, +} + +impl UnsafetyState { + pub fn function(unsafety: hir::Unsafety, def: hir::HirId) -> UnsafetyState { + UnsafetyState { def, unsafety, from_fn: true } + } + + pub fn recurse(self, blk: &hir::Block<'_>) -> UnsafetyState { + use hir::BlockCheckMode; + match self.unsafety { + // If this unsafe, then if the outer function was already marked as + // unsafe we shouldn't attribute the unsafe'ness to the block. 
This + // way the block can be warned about instead of ignoring this + // extraneous block (functions are never warned about). + hir::Unsafety::Unsafe if self.from_fn => self, + + unsafety => { + let (unsafety, def) = match blk.rules { + BlockCheckMode::UnsafeBlock(..) => (hir::Unsafety::Unsafe, blk.hir_id), + BlockCheckMode::DefaultBlock => (unsafety, self.def), + }; + UnsafetyState { def, unsafety, from_fn: false } + } + } + } +} + +#[derive(Debug, Copy, Clone)] +pub enum PlaceOp { + Deref, + Index, +} + +pub struct BreakableCtxt<'tcx> { + may_break: bool, + + // this is `null` for loops where break with a value is illegal, + // such as `while`, `for`, and `while let` + coerce: Option>, +} + +pub struct EnclosingBreakables<'tcx> { + stack: Vec>, + by_id: HirIdMap, +} + +impl<'tcx> EnclosingBreakables<'tcx> { + fn find_breakable(&mut self, target_id: hir::HirId) -> &mut BreakableCtxt<'tcx> { + self.opt_find_breakable(target_id).unwrap_or_else(|| { + bug!("could not find enclosing breakable with id {}", target_id); + }) + } + + fn opt_find_breakable(&mut self, target_id: hir::HirId) -> Option<&mut BreakableCtxt<'tcx>> { + match self.by_id.get(&target_id) { + Some(ix) => Some(&mut self.stack[*ix]), + None => None, + } + } +} + +pub fn provide(providers: &mut Providers) { + method::provide(providers); + wfcheck::provide(providers); + *providers = Providers { + typeck_item_bodies, + typeck_const_arg, + typeck, + diagnostic_only_typeck, + has_typeck_results, + adt_destructor, + used_trait_imports, + check_mod_item_types, + region_scope_tree, + ..*providers + }; +} + +fn adt_destructor(tcx: TyCtxt<'_>, def_id: DefId) -> Option { + tcx.calculate_dtor(def_id, dropck::check_drop_impl) +} + +/// If this `DefId` is a "primary tables entry", returns +/// `Some((body_id, body_ty, fn_sig))`. Otherwise, returns `None`. +/// +/// If this function returns `Some`, then `typeck_results(def_id)` will +/// succeed; if it returns `None`, then `typeck_results(def_id)` may or +/// may not succeed. In some cases where this function returns `None` +/// (notably closures), `typeck_results(def_id)` would wind up +/// redirecting to the owning function. +fn primary_body_of( + tcx: TyCtxt<'_>, + id: hir::HirId, +) -> Option<(hir::BodyId, Option<&hir::Ty<'_>>, Option<&hir::FnSig<'_>>)> { + match tcx.hir().get(id) { + Node::Item(item) => match item.kind { + hir::ItemKind::Const(ty, body) | hir::ItemKind::Static(ty, _, body) => { + Some((body, Some(ty), None)) + } + hir::ItemKind::Fn(ref sig, .., body) => Some((body, None, Some(sig))), + _ => None, + }, + Node::TraitItem(item) => match item.kind { + hir::TraitItemKind::Const(ty, Some(body)) => Some((body, Some(ty), None)), + hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Provided(body)) => { + Some((body, None, Some(sig))) + } + _ => None, + }, + Node::ImplItem(item) => match item.kind { + hir::ImplItemKind::Const(ty, body) => Some((body, Some(ty), None)), + hir::ImplItemKind::Fn(ref sig, body) => Some((body, None, Some(sig))), + _ => None, + }, + Node::AnonConst(constant) => Some((constant.body, None, None)), + _ => None, + } +} + +fn has_typeck_results(tcx: TyCtxt<'_>, def_id: DefId) -> bool { + // Closures' typeck results come from their outermost function, + // as they are part of the same "inference environment". 
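User-level illustration of the `coerce` field on `BreakableCtxt` above: only `loop` may break with a value, so only there is a coercion target set up for the `break` operands.

```rust
fn main() {
    // Each `break n` operand below is coerced into the loop's result type via the
    // `DynamicCoerceMany` stored in `BreakableCtxt::coerce`; for `while`/`for` loops
    // that field is `None` and `break <value>` is rejected.
    let mut n = 0;
    let answer = loop {
        n += 1;
        if n == 42 {
            break n;
        }
    };
    assert_eq!(answer, 42);
}
```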
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id); + if typeck_root_def_id != def_id { + return tcx.has_typeck_results(typeck_root_def_id); + } + + if let Some(def_id) = def_id.as_local() { + let id = tcx.hir().local_def_id_to_hir_id(def_id); + primary_body_of(tcx, id).is_some() + } else { + false + } +} + +fn used_trait_imports(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &FxHashSet { + &*tcx.typeck(def_id).used_trait_imports +} + +fn typeck_const_arg<'tcx>( + tcx: TyCtxt<'tcx>, + (did, param_did): (LocalDefId, DefId), +) -> &ty::TypeckResults<'tcx> { + let fallback = move || tcx.type_of(param_did); + typeck_with_fallback(tcx, did, fallback) +} + +fn typeck<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &ty::TypeckResults<'tcx> { + if let Some(param_did) = tcx.opt_const_param_of(def_id) { + tcx.typeck_const_arg((def_id, param_did)) + } else { + let fallback = move || tcx.type_of(def_id.to_def_id()); + typeck_with_fallback(tcx, def_id, fallback) + } +} + +/// Used only to get `TypeckResults` for type inference during error recovery. +/// Currently only used for type inference of `static`s and `const`s to avoid type cycle errors. +fn diagnostic_only_typeck<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &ty::TypeckResults<'tcx> { + let fallback = move || { + let span = tcx.hir().span(tcx.hir().local_def_id_to_hir_id(def_id)); + tcx.ty_error_with_message(span, "diagnostic only typeck table used") + }; + typeck_with_fallback(tcx, def_id, fallback) +} + +#[instrument(skip(tcx, fallback))] +fn typeck_with_fallback<'tcx>( + tcx: TyCtxt<'tcx>, + def_id: LocalDefId, + fallback: impl Fn() -> Ty<'tcx> + 'tcx, +) -> &'tcx ty::TypeckResults<'tcx> { + // Closures' typeck results come from their outermost function, + // as they are part of the same "inference environment". + let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id()).expect_local(); + if typeck_root_def_id != def_id { + return tcx.typeck(typeck_root_def_id); + } + + let id = tcx.hir().local_def_id_to_hir_id(def_id); + let span = tcx.hir().span(id); + + // Figure out what primary body this item has. + let (body_id, body_ty, fn_sig) = primary_body_of(tcx, id).unwrap_or_else(|| { + span_bug!(span, "can't type-check body of {:?}", def_id); + }); + let body = tcx.hir().body(body_id); + + let typeck_results = Inherited::build(tcx, def_id).enter(|inh| { + let param_env = tcx.param_env(def_id); + let fcx = if let Some(hir::FnSig { header, decl, .. }) = fn_sig { + let fn_sig = if crate::collect::get_infer_ret_ty(&decl.output).is_some() { + let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id); + >::ty_of_fn(&fcx, id, header.unsafety, header.abi, decl, None, None) + } else { + tcx.fn_sig(def_id) + }; + + check_abi(tcx, id, span, fn_sig.abi()); + + // Compute the function signature from point of view of inside the fn. + let fn_sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), fn_sig); + let fn_sig = inh.normalize_associated_types_in( + body.value.span, + body_id.hir_id, + param_env, + fn_sig, + ); + check_fn(&inh, param_env, fn_sig, decl, id, body, None, true).0 + } else { + let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id); + let expected_type = body_ty + .and_then(|ty| match ty.kind { + hir::TyKind::Infer => Some(>::ast_ty_to_ty(&fcx, ty)), + _ => None, + }) + .unwrap_or_else(|| match tcx.hir().get(id) { + Node::AnonConst(_) => match tcx.hir().get(tcx.hir().get_parent_node(id)) { + Node::Expr(&hir::Expr { + kind: hir::ExprKind::ConstBlock(ref anon_const), + .. 
+ }) if anon_const.hir_id == id => fcx.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::TypeInference, + span, + }), + Node::Ty(&hir::Ty { + kind: hir::TyKind::Typeof(ref anon_const), .. + }) if anon_const.hir_id == id => fcx.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::TypeInference, + span, + }), + Node::Expr(&hir::Expr { kind: hir::ExprKind::InlineAsm(asm), .. }) + | Node::Item(&hir::Item { kind: hir::ItemKind::GlobalAsm(asm), .. }) => { + let operand_ty = asm + .operands + .iter() + .filter_map(|(op, _op_sp)| match op { + hir::InlineAsmOperand::Const { anon_const } + if anon_const.hir_id == id => + { + // Inline assembly constants must be integers. + Some(fcx.next_int_var()) + } + hir::InlineAsmOperand::SymFn { anon_const } + if anon_const.hir_id == id => + { + Some(fcx.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::MiscVariable, + span, + })) + } + _ => None, + }) + .next(); + operand_ty.unwrap_or_else(fallback) + } + _ => fallback(), + }, + _ => fallback(), + }); + + let expected_type = fcx.normalize_associated_types_in(body.value.span, expected_type); + fcx.require_type_is_sized(expected_type, body.value.span, traits::ConstSized); + + // Gather locals in statics (because of block expressions). + GatherLocalsVisitor::new(&fcx).visit_body(body); + + fcx.check_expr_coercable_to_type(&body.value, expected_type, None); + + fcx.write_ty(id, expected_type); + + fcx + }; + + let fallback_has_occurred = fcx.type_inference_fallback(); + + // Even though coercion casts provide type hints, we check casts after fallback for + // backwards compatibility. This makes fallback a stronger type hint than a cast coercion. + fcx.check_casts(); + fcx.select_obligations_where_possible(fallback_has_occurred, |_| {}); + + // Closure and generator analysis may run after fallback + // because they don't constrain other type variables. + fcx.closure_analyze(body); + assert!(fcx.deferred_call_resolutions.borrow().is_empty()); + // Before the generator analysis, temporary scopes shall be marked to provide more + // precise information on types to be captured. + fcx.resolve_rvalue_scopes(def_id.to_def_id()); + fcx.resolve_generator_interiors(def_id.to_def_id()); + + for (ty, span, code) in fcx.deferred_sized_obligations.borrow_mut().drain(..) { + let ty = fcx.normalize_ty(span, ty); + fcx.require_type_is_sized(ty, span, code); + } + + fcx.select_all_obligations_or_error(); + + if !fcx.infcx.is_tainted_by_errors() { + fcx.check_transmutes(); + } + + fcx.check_asms(); + + fcx.infcx.skip_region_resolution(); + + fcx.resolve_type_vars_in_body(body) + }); + + // Consistency check our TypeckResults instance can hold all ItemLocalIds + // it will need to hold. + assert_eq!(typeck_results.hir_owner, id.owner); + + typeck_results +} + +/// When `check_fn` is invoked on a generator (i.e., a body that +/// includes yield), it returns back some information about the yield +/// points. +struct GeneratorTypes<'tcx> { + /// Type of generator argument / values returned by `yield`. + resume_ty: Ty<'tcx>, + + /// Type of value that is yielded. + yield_ty: Ty<'tcx>, + + /// Types that are captured (see `GeneratorInterior` for more). + interior: Ty<'tcx>, + + /// Indicates if the generator is movable or static (immovable). + movability: hir::Movability, +} + +/// Given a `DefId` for an opaque type in return position, find its parent item's return +/// expressions. 
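To make the fallback-versus-cast ordering above concrete (illustrative only): casts are checked after type-inference fallback, so a cast target does not act as an inference hint for an otherwise unconstrained integer literal.

```rust
fn main() {
    // `x` is constrained only by the cast below, so integer fallback picks `i32`
    // first and the deferred cast check then sees an `i32 as u8` cast; the cast
    // target does not steer inference toward `u8`.
    let x = 42;
    let y = x as u8;
    assert_eq!(y, 42u8);
}
```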
+fn get_owner_return_paths<'tcx>( + tcx: TyCtxt<'tcx>, + def_id: LocalDefId, +) -> Option<(LocalDefId, ReturnsVisitor<'tcx>)> { + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id); + let parent_id = tcx.hir().get_parent_item(hir_id); + tcx.hir().find_by_def_id(parent_id).and_then(|node| node.body_id()).map(|body_id| { + let body = tcx.hir().body(body_id); + let mut visitor = ReturnsVisitor::default(); + visitor.visit_body(body); + (parent_id, visitor) + }) +} + +// Forbid defining intrinsics in Rust code, +// as they must always be defined by the compiler. +fn fn_maybe_err(tcx: TyCtxt<'_>, sp: Span, abi: Abi) { + if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = abi { + tcx.sess.span_err(sp, "intrinsic must be in `extern \"rust-intrinsic\" { ... }` block"); + } +} + +fn maybe_check_static_with_link_section(tcx: TyCtxt<'_>, id: LocalDefId) { + // Only restricted on wasm target for now + if !tcx.sess.target.is_like_wasm { + return; + } + + // If `#[link_section]` is missing, then nothing to verify + let attrs = tcx.codegen_fn_attrs(id); + if attrs.link_section.is_none() { + return; + } + + // For the wasm32 target statics with `#[link_section]` are placed into custom + // sections of the final output file, but this isn't link custom sections of + // other executable formats. Namely we can only embed a list of bytes, + // nothing with pointers to anything else or relocations. If any relocation + // show up, reject them here. + // `#[link_section]` may contain arbitrary, or even undefined bytes, but it is + // the consumer's responsibility to ensure all bytes that have been read + // have defined values. + if let Ok(alloc) = tcx.eval_static_initializer(id.to_def_id()) + && alloc.inner().relocations().len() != 0 + { + let msg = "statics with a custom `#[link_section]` must be a \ + simple list of bytes on the wasm target with no \ + extra levels of indirection such as references"; + tcx.sess.span_err(tcx.def_span(id), msg); + } +} + +fn report_forbidden_specialization( + tcx: TyCtxt<'_>, + impl_item: &hir::ImplItemRef, + parent_impl: DefId, +) { + let mut err = struct_span_err!( + tcx.sess, + impl_item.span, + E0520, + "`{}` specializes an item from a parent `impl`, but \ + that item is not marked `default`", + impl_item.ident + ); + err.span_label(impl_item.span, format!("cannot specialize default item `{}`", impl_item.ident)); + + match tcx.span_of_impl(parent_impl) { + Ok(span) => { + err.span_label(span, "parent `impl` is here"); + err.note(&format!( + "to specialize, `{}` in the parent `impl` must be marked `default`", + impl_item.ident + )); + } + Err(cname) => { + err.note(&format!("parent implementation is in crate `{cname}`")); + } + } + + err.emit(); +} + +fn missing_items_err( + tcx: TyCtxt<'_>, + impl_span: Span, + missing_items: &[&ty::AssocItem], + full_impl_span: Span, +) { + let missing_items_msg = missing_items + .iter() + .map(|trait_item| trait_item.name.to_string()) + .collect::>() + .join("`, `"); + + let mut err = struct_span_err!( + tcx.sess, + impl_span, + E0046, + "not all trait items implemented, missing: `{missing_items_msg}`", + ); + err.span_label(impl_span, format!("missing `{missing_items_msg}` in implementation")); + + // `Span` before impl block closing brace. + let hi = full_impl_span.hi() - BytePos(1); + // Point at the place right before the closing brace of the relevant `impl` to suggest + // adding the associated item at the end of its body. 
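Illustrative sketch of the shape `maybe_check_static_with_link_section` above accepts on wasm targets (the section name here is made up): the initializer must be plain bytes, with no relocations.

```rust
// Accepted on wasm: the initializer is a flat list of bytes with no relocations.
#[cfg_attr(target_family = "wasm", link_section = "my_custom_section")]
static BYTES: [u8; 4] = [1, 2, 3, 4];

// A static like the following is rejected on wasm by the check above, because its
// value is a reference and therefore needs a relocation:
//
//     #[link_section = "my_custom_section"]
//     static SLICE: &[u8] = b"data";

fn main() {
    println!("{:?}", BYTES);
}
```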
+ let sugg_sp = full_impl_span.with_lo(hi).with_hi(hi); + // Obtain the level of indentation ending in `sugg_sp`. + let padding = + tcx.sess.source_map().indentation_before(sugg_sp).unwrap_or_else(|| String::new()); + + for trait_item in missing_items { + let snippet = suggestion_signature(trait_item, tcx); + let code = format!("{}{}\n{}", padding, snippet, padding); + let msg = format!("implement the missing item: `{snippet}`"); + let appl = Applicability::HasPlaceholders; + if let Some(span) = tcx.hir().span_if_local(trait_item.def_id) { + err.span_label(span, format!("`{}` from trait", trait_item.name)); + err.tool_only_span_suggestion(sugg_sp, &msg, code, appl); + } else { + err.span_suggestion_hidden(sugg_sp, &msg, code, appl); + } + } + err.emit(); +} + +fn missing_items_must_implement_one_of_err( + tcx: TyCtxt<'_>, + impl_span: Span, + missing_items: &[Ident], + annotation_span: Option, +) { + let missing_items_msg = + missing_items.iter().map(Ident::to_string).collect::>().join("`, `"); + + let mut err = struct_span_err!( + tcx.sess, + impl_span, + E0046, + "not all trait items implemented, missing one of: `{missing_items_msg}`", + ); + err.span_label(impl_span, format!("missing one of `{missing_items_msg}` in implementation")); + + if let Some(annotation_span) = annotation_span { + err.span_note(annotation_span, "required because of this annotation"); + } + + err.emit(); +} + +/// Re-sugar `ty::GenericPredicates` in a way suitable to be used in structured suggestions. +fn bounds_from_generic_predicates<'tcx>( + tcx: TyCtxt<'tcx>, + predicates: ty::GenericPredicates<'tcx>, +) -> (String, String) { + let mut types: FxHashMap, Vec> = FxHashMap::default(); + let mut projections = vec![]; + for (predicate, _) in predicates.predicates { + debug!("predicate {:?}", predicate); + let bound_predicate = predicate.kind(); + match bound_predicate.skip_binder() { + ty::PredicateKind::Trait(trait_predicate) => { + let entry = types.entry(trait_predicate.self_ty()).or_default(); + let def_id = trait_predicate.def_id(); + if Some(def_id) != tcx.lang_items().sized_trait() { + // Type params are `Sized` by default, do not add that restriction to the list + // if it is a positive requirement. 
+ entry.push(trait_predicate.def_id()); + } + } + ty::PredicateKind::Projection(projection_pred) => { + projections.push(bound_predicate.rebind(projection_pred)); + } + _ => {} + } + } + let generics = if types.is_empty() { + "".to_string() + } else { + format!( + "<{}>", + types + .keys() + .filter_map(|t| match t.kind() { + ty::Param(_) => Some(t.to_string()), + // Avoid suggesting the following: + // fn foo::Bar>(_: T) where T: Trait, ::Bar: Other {} + _ => None, + }) + .collect::>() + .join(", ") + ) + }; + let mut where_clauses = vec![]; + for (ty, bounds) in types { + where_clauses + .extend(bounds.into_iter().map(|bound| format!("{}: {}", ty, tcx.def_path_str(bound)))); + } + for projection in &projections { + let p = projection.skip_binder(); + // FIXME: this is not currently supported syntax, we should be looking at the `types` and + // insert the associated types where they correspond, but for now let's be "lazy" and + // propose this instead of the following valid resugaring: + // `T: Trait, Trait::Assoc = K` → `T: Trait` + where_clauses.push(format!( + "{} = {}", + tcx.def_path_str(p.projection_ty.item_def_id), + p.term, + )); + } + let where_clauses = if where_clauses.is_empty() { + String::new() + } else { + format!(" where {}", where_clauses.join(", ")) + }; + (generics, where_clauses) +} + +/// Return placeholder code for the given function. +fn fn_sig_suggestion<'tcx>( + tcx: TyCtxt<'tcx>, + sig: ty::FnSig<'tcx>, + ident: Ident, + predicates: ty::GenericPredicates<'tcx>, + assoc: &ty::AssocItem, +) -> String { + let args = sig + .inputs() + .iter() + .enumerate() + .map(|(i, ty)| { + Some(match ty.kind() { + ty::Param(_) if assoc.fn_has_self_parameter && i == 0 => "self".to_string(), + ty::Ref(reg, ref_ty, mutability) if i == 0 => { + let reg = format!("{reg} "); + let reg = match ®[..] { + "'_ " | " " => "", + reg => reg, + }; + if assoc.fn_has_self_parameter { + match ref_ty.kind() { + ty::Param(param) if param.name == kw::SelfUpper => { + format!("&{}{}self", reg, mutability.prefix_str()) + } + + _ => format!("self: {ty}"), + } + } else { + format!("_: {ty}") + } + } + _ => { + if assoc.fn_has_self_parameter && i == 0 { + format!("self: {ty}") + } else { + format!("_: {ty}") + } + } + }) + }) + .chain(std::iter::once(if sig.c_variadic { Some("...".to_string()) } else { None })) + .flatten() + .collect::>() + .join(", "); + let output = sig.output(); + let output = if !output.is_unit() { format!(" -> {output}") } else { String::new() }; + + let unsafety = sig.unsafety.prefix_str(); + let (generics, where_clauses) = bounds_from_generic_predicates(tcx, predicates); + + // FIXME: this is not entirely correct, as the lifetimes from borrowed params will + // not be present in the `fn` definition, not will we account for renamed + // lifetimes between the `impl` and the `trait`, but this should be good enough to + // fill in a significant portion of the missing code, and other subsequent + // suggestions can help the user fix the code. + format!("{unsafety}fn {ident}{generics}({args}){output}{where_clauses} {{ todo!() }}") +} + +/// Return placeholder code for the given associated item. +/// Similar to `ty::AssocItem::suggestion`, but appropriate for use as the code snippet of a +/// structured suggestion. 
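Putting `bounds_from_generic_predicates` and `fn_sig_suggestion` above together, an illustrative sketch with hypothetical `Storage`/`MemStore` names: for an `impl` missing a trait method, the E0046 suggestion inserts a stub whose generics and `where` clause are re-sugared from the trait's predicates and whose body is `todo!()`.

```rust
use std::fmt::Debug;

trait Storage {
    fn store<T: Debug>(&mut self, value: T);
    fn len(&self) -> usize;
}

struct MemStore {
    count: usize,
}

impl Storage for MemStore {
    // If this method were missing, E0046 above would report
    // "not all trait items implemented, missing: `store`" and suggest a stub
    // along the lines of `fn store<T>(&mut self, _: T) where T: Debug { todo!() }`.
    fn store<T: Debug>(&mut self, value: T) {
        println!("storing {value:?}");
        self.count += 1;
    }

    fn len(&self) -> usize {
        self.count
    }
}

fn main() {
    let mut store = MemStore { count: 0 };
    store.store("hello");
    assert_eq!(store.len(), 1);
}
```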
+fn suggestion_signature(assoc: &ty::AssocItem, tcx: TyCtxt<'_>) -> String { + match assoc.kind { + ty::AssocKind::Fn => { + // We skip the binder here because the binder would deanonymize all + // late-bound regions, and we don't want method signatures to show up + // `as for<'r> fn(&'r MyType)`. Pretty-printing handles late-bound + // regions just fine, showing `fn(&MyType)`. + fn_sig_suggestion( + tcx, + tcx.fn_sig(assoc.def_id).skip_binder(), + assoc.ident(tcx), + tcx.predicates_of(assoc.def_id), + assoc, + ) + } + ty::AssocKind::Type => format!("type {} = Type;", assoc.name), + ty::AssocKind::Const => { + let ty = tcx.type_of(assoc.def_id); + let val = expr::ty_kind_suggestion(ty).unwrap_or("value"); + format!("const {}: {} = {};", assoc.name, ty, val) + } + } +} + +/// Emit an error when encountering two or more variants in a transparent enum. +fn bad_variant_count<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>, sp: Span, did: DefId) { + let variant_spans: Vec<_> = adt + .variants() + .iter() + .map(|variant| tcx.hir().span_if_local(variant.def_id).unwrap()) + .collect(); + let msg = format!("needs exactly one variant, but has {}", adt.variants().len(),); + let mut err = struct_span_err!(tcx.sess, sp, E0731, "transparent enum {msg}"); + err.span_label(sp, &msg); + if let [start @ .., end] = &*variant_spans { + for variant_span in start { + err.span_label(*variant_span, ""); + } + err.span_label(*end, &format!("too many variants in `{}`", tcx.def_path_str(did))); + } + err.emit(); +} + +/// Emit an error when encountering two or more non-zero-sized fields in a transparent +/// enum. +fn bad_non_zero_sized_fields<'tcx>( + tcx: TyCtxt<'tcx>, + adt: ty::AdtDef<'tcx>, + field_count: usize, + field_spans: impl Iterator, + sp: Span, +) { + let msg = format!("needs at most one non-zero-sized field, but has {field_count}"); + let mut err = struct_span_err!( + tcx.sess, + sp, + E0690, + "{}transparent {} {}", + if adt.is_enum() { "the variant of a " } else { "" }, + adt.descr(), + msg, + ); + err.span_label(sp, &msg); + for sp in field_spans { + err.span_label(sp, "this field is non-zero-sized"); + } + err.emit(); +} + +fn report_unexpected_variant_res(tcx: TyCtxt<'_>, res: Res, qpath: &hir::QPath<'_>, span: Span) { + struct_span_err!( + tcx.sess, + span, + E0533, + "expected unit struct, unit variant or constant, found {} `{}`", + res.descr(), + rustc_hir_pretty::qpath_to_string(qpath), + ) + .emit(); +} + +/// Controls whether the arguments are tupled. This is used for the call +/// operator. +/// +/// Tupling means that all call-side arguments are packed into a tuple and +/// passed as a single parameter. For example, if tupling is enabled, this +/// function: +/// ``` +/// fn f(x: (isize, isize)) {} +/// ``` +/// Can be called as: +/// ```ignore UNSOLVED (can this be done in user code?) +/// # fn f(x: (isize, isize)) {} +/// f(1, 2); +/// ``` +/// Instead of: +/// ``` +/// # fn f(x: (isize, isize)) {} +/// f((1, 2)); +/// ``` +#[derive(Clone, Eq, PartialEq)] +enum TupleArgumentsFlag { + DontTupleArguments, + TupleArguments, +} + +fn typeck_item_bodies(tcx: TyCtxt<'_>, (): ()) { + tcx.hir().par_body_owners(|body_owner_def_id| tcx.ensure().typeck(body_owner_def_id)); +} + +fn fatally_break_rust(sess: &Session) { + let handler = sess.diagnostic(); + handler.span_bug_no_panic( + MultiSpan::new(), + "It looks like you're trying to break rust; would you like some ICE?", + ); + handler.note_without_error("the compiler expectedly panicked. 
this is a feature."); + handler.note_without_error( + "we would appreciate a joke overview: \ + https://github.com/rust-lang/rust/issues/43162#issuecomment-320764675", + ); + handler.note_without_error(&format!( + "rustc {} running on {}", + option_env!("CFG_VERSION").unwrap_or("unknown_version"), + config::host_triple(), + )); +} + +fn potentially_plural_count(count: usize, word: &str) -> String { + format!("{} {}{}", count, word, pluralize!(count)) +} + +fn has_expected_num_generic_args<'tcx>( + tcx: TyCtxt<'tcx>, + trait_did: Option, + expected: usize, +) -> bool { + trait_did.map_or(true, |trait_did| { + let generics = tcx.generics_of(trait_did); + generics.count() == expected + if generics.has_self { 1 } else { 0 } + }) +} + +/// Suggests calling the constructor of a tuple struct or enum variant +/// +/// * `snippet` - The snippet of code that references the constructor +/// * `span` - The span of the snippet +/// * `params` - The number of parameters the constructor accepts +/// * `err` - A mutable diagnostic builder to add the suggestion to +fn suggest_call_constructor( + span: Span, + kind: CtorOf, + params: usize, + err: &mut DiagnosticBuilder<'_, G>, +) { + // Note: tuple-structs don't have named fields, so just use placeholders + let args = vec!["_"; params].join(", "); + let applicable = if params > 0 { + Applicability::HasPlaceholders + } else { + // When n = 0, it's an empty-tuple struct/enum variant + // so we trivially know how to construct it + Applicability::MachineApplicable + }; + let kind = match kind { + CtorOf::Struct => "a struct", + CtorOf::Variant => "an enum variant", + }; + err.span_label(span, &format!("this is the constructor of {kind}")); + err.multipart_suggestion( + "call the constructor", + vec![(span.shrink_to_lo(), "(".to_string()), (span.shrink_to_hi(), format!(")({args})"))], + applicable, + ); +} diff --git a/compiler/rustc_typeck/src/check/op.rs b/compiler/rustc_typeck/src/check/op.rs new file mode 100644 index 000000000..920b3e688 --- /dev/null +++ b/compiler/rustc_typeck/src/check/op.rs @@ -0,0 +1,1076 @@ +//! Code related to processing overloaded binary and unary operators. 
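A user-level reminder of what this module type-checks (illustrative only, hypothetical `Meters` newtype): a binary operator applied to non-builtin types resolves to the matching `std::ops` trait method, here `Add::add`.

```rust
use std::ops::Add;

#[derive(Debug, Clone, Copy, PartialEq)]
struct Meters(f64);

impl Add for Meters {
    type Output = Meters;

    fn add(self, rhs: Meters) -> Meters {
        Meters(self.0 + rhs.0)
    }
}

fn main() {
    // `a + b` is treated as an overloaded operator and resolves to
    // `<Meters as Add>::add(a, b)`.
    let a = Meters(1.5);
    let b = Meters(2.5);
    assert_eq!(a + b, Meters(4.0));
}
```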
+ +use super::method::MethodCallee; +use super::{has_expected_num_generic_args, FnCtxt}; +use crate::check::Expectation; +use rustc_ast as ast; +use rustc_errors::{self, struct_span_err, Applicability, Diagnostic}; +use rustc_hir as hir; +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_infer::traits::ObligationCauseCode; +use rustc_middle::ty::adjustment::{ + Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, +}; +use rustc_middle::ty::{ + self, Ty, TyCtxt, TypeFolder, TypeSuperFoldable, TypeSuperVisitable, TypeVisitable, TypeVisitor, +}; +use rustc_span::source_map::Spanned; +use rustc_span::symbol::{sym, Ident}; +use rustc_span::Span; +use rustc_trait_selection::infer::InferCtxtExt; +use rustc_trait_selection::traits::error_reporting::suggestions::InferCtxtExt as _; +use rustc_trait_selection::traits::{FulfillmentError, TraitEngine, TraitEngineExt}; +use rustc_type_ir::sty::TyKind::*; + +use std::ops::ControlFlow; + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + /// Checks a `a = b` + pub fn check_binop_assign( + &self, + expr: &'tcx hir::Expr<'tcx>, + op: hir::BinOp, + lhs: &'tcx hir::Expr<'tcx>, + rhs: &'tcx hir::Expr<'tcx>, + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + let (lhs_ty, rhs_ty, return_ty) = + self.check_overloaded_binop(expr, lhs, rhs, op, IsAssign::Yes, expected); + + let ty = + if !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() && is_builtin_binop(lhs_ty, rhs_ty, op) { + self.enforce_builtin_binop_types(lhs.span, lhs_ty, rhs.span, rhs_ty, op); + self.tcx.mk_unit() + } else { + return_ty + }; + + self.check_lhs_assignable(lhs, "E0067", op.span, |err| { + if let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty) { + if self + .lookup_op_method( + lhs_deref_ty, + Some(rhs_ty), + Some(rhs), + Op::Binary(op, IsAssign::Yes), + expected, + ) + .is_ok() + { + // Suppress this error, since we already emitted + // a deref suggestion in check_overloaded_binop + err.delay_as_bug(); + } + } + }); + + ty + } + + /// Checks a potentially overloaded binary operator. + pub fn check_binop( + &self, + expr: &'tcx hir::Expr<'tcx>, + op: hir::BinOp, + lhs_expr: &'tcx hir::Expr<'tcx>, + rhs_expr: &'tcx hir::Expr<'tcx>, + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + let tcx = self.tcx; + + debug!( + "check_binop(expr.hir_id={}, expr={:?}, op={:?}, lhs_expr={:?}, rhs_expr={:?})", + expr.hir_id, expr, op, lhs_expr, rhs_expr + ); + + match BinOpCategory::from(op) { + BinOpCategory::Shortcircuit => { + // && and || are a simple case. + self.check_expr_coercable_to_type(lhs_expr, tcx.types.bool, None); + let lhs_diverges = self.diverges.get(); + self.check_expr_coercable_to_type(rhs_expr, tcx.types.bool, None); + + // Depending on the LHS' value, the RHS can never execute. + self.diverges.set(lhs_diverges); + + tcx.types.bool + } + _ => { + // Otherwise, we always treat operators as if they are + // overloaded. This is the way to be most flexible w/r/t + // types that get inferred. + let (lhs_ty, rhs_ty, return_ty) = self.check_overloaded_binop( + expr, + lhs_expr, + rhs_expr, + op, + IsAssign::No, + expected, + ); + + // Supply type inference hints if relevant. Probably these + // hints should be enforced during select as part of the + // `consider_unification_despite_ambiguity` routine, but this + // more convenient for now. + // + // The basic idea is to help type inference by taking + // advantage of things we know about how the impls for + // scalar types are arranged. 
This is important in a + // scenario like `1_u32 << 2`, because it lets us quickly + // deduce that the result type should be `u32`, even + // though we don't know yet what type 2 has and hence + // can't pin this down to a specific impl. + if !lhs_ty.is_ty_var() + && !rhs_ty.is_ty_var() + && is_builtin_binop(lhs_ty, rhs_ty, op) + { + let builtin_return_ty = self.enforce_builtin_binop_types( + lhs_expr.span, + lhs_ty, + rhs_expr.span, + rhs_ty, + op, + ); + self.demand_suptype(expr.span, builtin_return_ty, return_ty); + } + + return_ty + } + } + } + + fn enforce_builtin_binop_types( + &self, + lhs_span: Span, + lhs_ty: Ty<'tcx>, + rhs_span: Span, + rhs_ty: Ty<'tcx>, + op: hir::BinOp, + ) -> Ty<'tcx> { + debug_assert!(is_builtin_binop(lhs_ty, rhs_ty, op)); + + // Special-case a single layer of referencing, so that things like `5.0 + &6.0f32` work. + // (See https://github.com/rust-lang/rust/issues/57447.) + let (lhs_ty, rhs_ty) = (deref_ty_if_possible(lhs_ty), deref_ty_if_possible(rhs_ty)); + + let tcx = self.tcx; + match BinOpCategory::from(op) { + BinOpCategory::Shortcircuit => { + self.demand_suptype(lhs_span, tcx.types.bool, lhs_ty); + self.demand_suptype(rhs_span, tcx.types.bool, rhs_ty); + tcx.types.bool + } + + BinOpCategory::Shift => { + // result type is same as LHS always + lhs_ty + } + + BinOpCategory::Math | BinOpCategory::Bitwise => { + // both LHS and RHS and result will have the same type + self.demand_suptype(rhs_span, lhs_ty, rhs_ty); + lhs_ty + } + + BinOpCategory::Comparison => { + // both LHS and RHS and result will have the same type + self.demand_suptype(rhs_span, lhs_ty, rhs_ty); + tcx.types.bool + } + } + } + + fn check_overloaded_binop( + &self, + expr: &'tcx hir::Expr<'tcx>, + lhs_expr: &'tcx hir::Expr<'tcx>, + rhs_expr: &'tcx hir::Expr<'tcx>, + op: hir::BinOp, + is_assign: IsAssign, + expected: Expectation<'tcx>, + ) -> (Ty<'tcx>, Ty<'tcx>, Ty<'tcx>) { + debug!( + "check_overloaded_binop(expr.hir_id={}, op={:?}, is_assign={:?})", + expr.hir_id, op, is_assign + ); + + let lhs_ty = match is_assign { + IsAssign::No => { + // Find a suitable supertype of the LHS expression's type, by coercing to + // a type variable, to pass as the `Self` to the trait, avoiding invariant + // trait matching creating lifetime constraints that are too strict. + // e.g., adding `&'a T` and `&'b T`, given `&'x T: Add<&'x T>`, will result + // in `&'a T <: &'x T` and `&'b T <: &'x T`, instead of `'a = 'b = 'x`. + let lhs_ty = self.check_expr(lhs_expr); + let fresh_var = self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::MiscVariable, + span: lhs_expr.span, + }); + self.demand_coerce(lhs_expr, lhs_ty, fresh_var, Some(rhs_expr), AllowTwoPhase::No) + } + IsAssign::Yes => { + // rust-lang/rust#52126: We have to use strict + // equivalence on the LHS of an assign-op like `+=`; + // overwritten or mutably-borrowed places cannot be + // coerced to a supertype. + self.check_expr(lhs_expr) + } + }; + let lhs_ty = self.resolve_vars_with_obligations(lhs_ty); + + // N.B., as we have not yet type-checked the RHS, we don't have the + // type at hand. Make a variable to represent it. The whole reason + // for this indirection is so that, below, we can check the expr + // using this variable as the expected type, which sometimes lets + // us do better coercions than we would be able to do otherwise, + // particularly for things like `String + &String`. 
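The `String + &String` case mentioned above, spelled out (illustrative only): checking the RHS against a fresh type variable lets the usual coercions apply, so the `&String` operand coerces to `&str` to match `impl Add<&str> for String`.

```rust
fn main() {
    let hello = String::from("Hello, ");
    let world = String::from("world!");
    // The only `Add` impl with a `String` LHS takes `&str` on the RHS; `&world`
    // (a `&String`) deref-coerces to `&str` when checked against the RHS type
    // variable created just below.
    let greeting = hello + &world;
    assert_eq!(greeting, "Hello, world!");
}
```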
+ let rhs_ty_var = self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::MiscVariable, + span: rhs_expr.span, + }); + + let result = self.lookup_op_method( + lhs_ty, + Some(rhs_ty_var), + Some(rhs_expr), + Op::Binary(op, is_assign), + expected, + ); + + // see `NB` above + let rhs_ty = self.check_expr_coercable_to_type(rhs_expr, rhs_ty_var, Some(lhs_expr)); + let rhs_ty = self.resolve_vars_with_obligations(rhs_ty); + + let return_ty = match result { + Ok(method) => { + let by_ref_binop = !op.node.is_by_value(); + if is_assign == IsAssign::Yes || by_ref_binop { + if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].kind() { + let mutbl = match mutbl { + hir::Mutability::Not => AutoBorrowMutability::Not, + hir::Mutability::Mut => AutoBorrowMutability::Mut { + // Allow two-phase borrows for binops in initial deployment + // since they desugar to methods + allow_two_phase_borrow: AllowTwoPhase::Yes, + }, + }; + let autoref = Adjustment { + kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)), + target: method.sig.inputs()[0], + }; + self.apply_adjustments(lhs_expr, vec![autoref]); + } + } + if by_ref_binop { + if let ty::Ref(region, _, mutbl) = method.sig.inputs()[1].kind() { + let mutbl = match mutbl { + hir::Mutability::Not => AutoBorrowMutability::Not, + hir::Mutability::Mut => AutoBorrowMutability::Mut { + // Allow two-phase borrows for binops in initial deployment + // since they desugar to methods + allow_two_phase_borrow: AllowTwoPhase::Yes, + }, + }; + let autoref = Adjustment { + kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)), + target: method.sig.inputs()[1], + }; + // HACK(eddyb) Bypass checks due to reborrows being in + // some cases applied on the RHS, on top of which we need + // to autoref, which is not allowed by apply_adjustments. 
+ // self.apply_adjustments(rhs_expr, vec![autoref]); + self.typeck_results + .borrow_mut() + .adjustments_mut() + .entry(rhs_expr.hir_id) + .or_default() + .push(autoref); + } + } + self.write_method_call(expr.hir_id, method); + + method.sig.output() + } + // error types are considered "builtin" + Err(_) if lhs_ty.references_error() || rhs_ty.references_error() => self.tcx.ty_error(), + Err(errors) => { + let source_map = self.tcx.sess.source_map(); + let (mut err, missing_trait, use_output) = match is_assign { + IsAssign::Yes => { + let mut err = struct_span_err!( + self.tcx.sess, + expr.span, + E0368, + "binary assignment operation `{}=` cannot be applied to type `{}`", + op.node.as_str(), + lhs_ty, + ); + err.span_label( + lhs_expr.span, + format!("cannot use `{}=` on type `{}`", op.node.as_str(), lhs_ty), + ); + let missing_trait = match op.node { + hir::BinOpKind::Add => Some("std::ops::AddAssign"), + hir::BinOpKind::Sub => Some("std::ops::SubAssign"), + hir::BinOpKind::Mul => Some("std::ops::MulAssign"), + hir::BinOpKind::Div => Some("std::ops::DivAssign"), + hir::BinOpKind::Rem => Some("std::ops::RemAssign"), + hir::BinOpKind::BitAnd => Some("std::ops::BitAndAssign"), + hir::BinOpKind::BitXor => Some("std::ops::BitXorAssign"), + hir::BinOpKind::BitOr => Some("std::ops::BitOrAssign"), + hir::BinOpKind::Shl => Some("std::ops::ShlAssign"), + hir::BinOpKind::Shr => Some("std::ops::ShrAssign"), + _ => None, + }; + self.note_unmet_impls_on_type(&mut err, errors); + (err, missing_trait, false) + } + IsAssign::No => { + let (message, missing_trait, use_output) = match op.node { + hir::BinOpKind::Add => ( + format!("cannot add `{rhs_ty}` to `{lhs_ty}`"), + Some("std::ops::Add"), + true, + ), + hir::BinOpKind::Sub => ( + format!("cannot subtract `{rhs_ty}` from `{lhs_ty}`"), + Some("std::ops::Sub"), + true, + ), + hir::BinOpKind::Mul => ( + format!("cannot multiply `{lhs_ty}` by `{rhs_ty}`"), + Some("std::ops::Mul"), + true, + ), + hir::BinOpKind::Div => ( + format!("cannot divide `{lhs_ty}` by `{rhs_ty}`"), + Some("std::ops::Div"), + true, + ), + hir::BinOpKind::Rem => ( + format!("cannot mod `{lhs_ty}` by `{rhs_ty}`"), + Some("std::ops::Rem"), + true, + ), + hir::BinOpKind::BitAnd => ( + format!("no implementation for `{lhs_ty} & {rhs_ty}`"), + Some("std::ops::BitAnd"), + true, + ), + hir::BinOpKind::BitXor => ( + format!("no implementation for `{lhs_ty} ^ {rhs_ty}`"), + Some("std::ops::BitXor"), + true, + ), + hir::BinOpKind::BitOr => ( + format!("no implementation for `{lhs_ty} | {rhs_ty}`"), + Some("std::ops::BitOr"), + true, + ), + hir::BinOpKind::Shl => ( + format!("no implementation for `{lhs_ty} << {rhs_ty}`"), + Some("std::ops::Shl"), + true, + ), + hir::BinOpKind::Shr => ( + format!("no implementation for `{lhs_ty} >> {rhs_ty}`"), + Some("std::ops::Shr"), + true, + ), + hir::BinOpKind::Eq | hir::BinOpKind::Ne => ( + format!( + "binary operation `{}` cannot be applied to type `{}`", + op.node.as_str(), + lhs_ty + ), + Some("std::cmp::PartialEq"), + false, + ), + hir::BinOpKind::Lt + | hir::BinOpKind::Le + | hir::BinOpKind::Gt + | hir::BinOpKind::Ge => ( + format!( + "binary operation `{}` cannot be applied to type `{}`", + op.node.as_str(), + lhs_ty + ), + Some("std::cmp::PartialOrd"), + false, + ), + _ => ( + format!( + "binary operation `{}` cannot be applied to type `{}`", + op.node.as_str(), + lhs_ty + ), + None, + false, + ), + }; + let mut err = struct_span_err!(self.tcx.sess, op.span, E0369, "{message}"); + if !lhs_expr.span.eq(&rhs_expr.span) { + 
self.add_type_neq_err_label( + &mut err, + lhs_expr.span, + lhs_ty, + rhs_ty, + rhs_expr, + op, + is_assign, + expected, + ); + self.add_type_neq_err_label( + &mut err, + rhs_expr.span, + rhs_ty, + lhs_ty, + lhs_expr, + op, + is_assign, + expected, + ); + } + self.note_unmet_impls_on_type(&mut err, errors); + (err, missing_trait, use_output) + } + }; + + let mut suggest_deref_binop = |lhs_deref_ty: Ty<'tcx>| { + if self + .lookup_op_method( + lhs_deref_ty, + Some(rhs_ty), + Some(rhs_expr), + Op::Binary(op, is_assign), + expected, + ) + .is_ok() + { + if let Ok(lstring) = source_map.span_to_snippet(lhs_expr.span) { + let msg = &format!( + "`{}{}` can be used on `{}`, you can dereference `{}`", + op.node.as_str(), + match is_assign { + IsAssign::Yes => "=", + IsAssign::No => "", + }, + lhs_deref_ty.peel_refs(), + lstring, + ); + err.span_suggestion_verbose( + lhs_expr.span.shrink_to_lo(), + msg, + "*", + rustc_errors::Applicability::MachineApplicable, + ); + } + } + }; + + // We should suggest `a + b` => `*a + b` if `a` is copy, and suggest + // `a += b` => `*a += b` if a is a mut ref. + if is_assign == IsAssign::Yes + && let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty) { + suggest_deref_binop(lhs_deref_ty); + } else if is_assign == IsAssign::No + && let Ref(_, lhs_deref_ty, _) = lhs_ty.kind() { + if self.type_is_copy_modulo_regions(self.param_env, *lhs_deref_ty, lhs_expr.span) { + suggest_deref_binop(*lhs_deref_ty); + } + } + if let Some(missing_trait) = missing_trait { + let mut visitor = TypeParamVisitor(vec![]); + visitor.visit_ty(lhs_ty); + + if op.node == hir::BinOpKind::Add + && self.check_str_addition( + lhs_expr, rhs_expr, lhs_ty, rhs_ty, &mut err, is_assign, op, + ) + { + // This has nothing here because it means we did string + // concatenation (e.g., "Hello " + "World!"). This means + // we don't want the note in the else clause to be emitted + } else if let [ty] = &visitor.0[..] { + // Look for a TraitPredicate in the Fulfillment errors, + // and use it to generate a suggestion. + // + // Note that lookup_op_method must be called again but + // with a specific rhs_ty instead of a placeholder so + // the resulting predicate generates a more specific + // suggestion for the user. + let errors = self + .lookup_op_method( + lhs_ty, + Some(rhs_ty), + Some(rhs_expr), + Op::Binary(op, is_assign), + expected, + ) + .unwrap_err(); + if !errors.is_empty() { + for error in errors { + if let Some(trait_pred) = + error.obligation.predicate.to_opt_poly_trait_pred() + { + let proj_pred = match error.obligation.cause.code() { + ObligationCauseCode::BinOp { + output_pred: Some(output_pred), + .. + } if use_output => { + output_pred.to_opt_poly_projection_pred() + } + _ => None, + }; + + self.suggest_restricting_param_bound( + &mut err, + trait_pred, + proj_pred, + self.body_id, + ); + } + } + } else if *ty != lhs_ty { + // When we know that a missing bound is responsible, we don't show + // this note as it is redundant. + err.note(&format!( + "the trait `{missing_trait}` is not implemented for `{lhs_ty}`" + )); + } + } + } + err.emit(); + self.tcx.ty_error() + } + }; + + (lhs_ty, rhs_ty, return_ty) + } + + /// If one of the types is an uncalled function and calling it would yield the other type, + /// suggest calling the function. Returns `true` if suggestion would apply (even if not given). 
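The `suggest_deref_binop` closure and the `check_str_addition` call above drive suggestions for two common user-level cases; a small sketch (the `add_in_place` helper is invented for illustration):

```rust
// `&mut i32` on the left of `+=` is rejected (E0368), and rustc's suggestion
// is to dereference the left-hand side, which is what `suggest_deref_binop`
// produces.
fn add_in_place(left: &mut i32, right: i32) {
    // left += right;  // error: `+=` cannot be applied to `&mut i32`
    *left += right; // the suggested fix
}

fn main() {
    let mut x = 1;
    add_in_place(&mut x, 2);
    assert_eq!(x, 3);

    // The string special case routed through `check_str_addition`:
    // `"foo" + "bar"` is rejected because `+` needs an owned `String` on
    // the left; `.to_owned()` (or `String::from`) is the suggested fix.
    let greeting = "foo".to_owned() + "bar";
    assert_eq!(greeting, "foobar");
}
```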
+ fn add_type_neq_err_label( + &self, + err: &mut Diagnostic, + span: Span, + ty: Ty<'tcx>, + other_ty: Ty<'tcx>, + other_expr: &'tcx hir::Expr<'tcx>, + op: hir::BinOp, + is_assign: IsAssign, + expected: Expectation<'tcx>, + ) -> bool /* did we suggest to call a function because of missing parentheses? */ { + err.span_label(span, ty.to_string()); + if let FnDef(def_id, _) = *ty.kind() { + if !self.tcx.has_typeck_results(def_id) { + return false; + } + // FIXME: Instead of exiting early when encountering bound vars in + // the function signature, consider keeping the binder here and + // propagating it downwards. + let Some(fn_sig) = self.tcx.fn_sig(def_id).no_bound_vars() else { + return false; + }; + + let other_ty = if let FnDef(def_id, _) = *other_ty.kind() { + if !self.tcx.has_typeck_results(def_id) { + return false; + } + // We're emitting a suggestion, so we can just ignore regions + self.tcx.fn_sig(def_id).skip_binder().output() + } else { + other_ty + }; + + if self + .lookup_op_method( + fn_sig.output(), + Some(other_ty), + Some(other_expr), + Op::Binary(op, is_assign), + expected, + ) + .is_ok() + { + let (variable_snippet, applicability) = if !fn_sig.inputs().is_empty() { + ("( /* arguments */ )", Applicability::HasPlaceholders) + } else { + ("()", Applicability::MaybeIncorrect) + }; + + err.span_suggestion_verbose( + span.shrink_to_hi(), + "you might have forgotten to call this function", + variable_snippet, + applicability, + ); + return true; + } + } + false + } + + /// Provide actionable suggestions when trying to add two strings with incorrect types, + /// like `&str + &str`, `String + String` and `&str + &String`. + /// + /// If this function returns `true` it means a note was printed, so we don't need + /// to print the normal "implementation of `std::ops::Add` might be missing" note + fn check_str_addition( + &self, + lhs_expr: &'tcx hir::Expr<'tcx>, + rhs_expr: &'tcx hir::Expr<'tcx>, + lhs_ty: Ty<'tcx>, + rhs_ty: Ty<'tcx>, + err: &mut Diagnostic, + is_assign: IsAssign, + op: hir::BinOp, + ) -> bool { + let str_concat_note = "string concatenation requires an owned `String` on the left"; + let rm_borrow_msg = "remove the borrow to obtain an owned `String`"; + let to_owned_msg = "create an owned `String` from a string reference"; + + let is_std_string = |ty: Ty<'tcx>| { + ty.ty_adt_def() + .map_or(false, |ty_def| self.tcx.is_diagnostic_item(sym::String, ty_def.did())) + }; + + match (lhs_ty.kind(), rhs_ty.kind()) { + (&Ref(_, l_ty, _), &Ref(_, r_ty, _)) // &str or &String + &str, &String or &&str + if (*l_ty.kind() == Str || is_std_string(l_ty)) + && (*r_ty.kind() == Str + || is_std_string(r_ty) + || matches!( + r_ty.kind(), Ref(_, inner_ty, _) if *inner_ty.kind() == Str + )) => + { + if let IsAssign::No = is_assign { // Do not supply this message if `&str += &str` + err.span_label(op.span, "`+` cannot be used to concatenate two `&str` strings"); + err.note(str_concat_note); + if let hir::ExprKind::AddrOf(_, _, lhs_inner_expr) = lhs_expr.kind { + err.span_suggestion_verbose( + lhs_expr.span.until(lhs_inner_expr.span), + rm_borrow_msg, + "", + Applicability::MachineApplicable + ); + } else { + err.span_suggestion_verbose( + lhs_expr.span.shrink_to_hi(), + to_owned_msg, + ".to_owned()", + Applicability::MachineApplicable + ); + } + } + true + } + (&Ref(_, l_ty, _), &Adt(..)) // Handle `&str` & `&String` + `String` + if (*l_ty.kind() == Str || is_std_string(l_ty)) && is_std_string(rhs_ty) => + { + err.span_label( + op.span, + "`+` cannot be used to concatenate a `&str` with a 
`String`", + ); + match is_assign { + IsAssign::No => { + let sugg_msg; + let lhs_sugg = if let hir::ExprKind::AddrOf(_, _, lhs_inner_expr) = lhs_expr.kind { + sugg_msg = "remove the borrow on the left and add one on the right"; + (lhs_expr.span.until(lhs_inner_expr.span), "".to_owned()) + } else { + sugg_msg = "create an owned `String` on the left and add a borrow on the right"; + (lhs_expr.span.shrink_to_hi(), ".to_owned()".to_owned()) + }; + let suggestions = vec![ + lhs_sugg, + (rhs_expr.span.shrink_to_lo(), "&".to_owned()), + ]; + err.multipart_suggestion_verbose( + sugg_msg, + suggestions, + Applicability::MachineApplicable, + ); + } + IsAssign::Yes => { + err.note(str_concat_note); + } + } + true + } + _ => false, + } + } + + pub fn check_user_unop( + &self, + ex: &'tcx hir::Expr<'tcx>, + operand_ty: Ty<'tcx>, + op: hir::UnOp, + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + assert!(op.is_by_value()); + match self.lookup_op_method(operand_ty, None, None, Op::Unary(op, ex.span), expected) { + Ok(method) => { + self.write_method_call(ex.hir_id, method); + method.sig.output() + } + Err(errors) => { + let actual = self.resolve_vars_if_possible(operand_ty); + if !actual.references_error() { + let mut err = struct_span_err!( + self.tcx.sess, + ex.span, + E0600, + "cannot apply unary operator `{}` to type `{}`", + op.as_str(), + actual + ); + err.span_label( + ex.span, + format!("cannot apply unary operator `{}`", op.as_str()), + ); + + let mut visitor = TypeParamVisitor(vec![]); + visitor.visit_ty(operand_ty); + if let [_] = &visitor.0[..] && let ty::Param(_) = *operand_ty.kind() { + let predicates = errors + .iter() + .filter_map(|error| { + error.obligation.predicate.to_opt_poly_trait_pred() + }); + for pred in predicates { + self.suggest_restricting_param_bound( + &mut err, + pred, + None, + self.body_id, + ); + } + } + + let sp = self.tcx.sess.source_map().start_point(ex.span); + if let Some(sp) = + self.tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp) + { + // If the previous expression was a block expression, suggest parentheses + // (turning this into a binary subtraction operation instead.) + // for example, `{2} - 2` -> `({2}) - 2` (see src\test\ui\parser\expr-as-stmt.rs) + self.tcx.sess.parse_sess.expr_parentheses_needed(&mut err, *sp); + } else { + match actual.kind() { + Uint(_) if op == hir::UnOp::Neg => { + err.note("unsigned values cannot be negated"); + + if let hir::ExprKind::Unary( + _, + hir::Expr { + kind: + hir::ExprKind::Lit(Spanned { + node: ast::LitKind::Int(1, _), + .. + }), + .. 
+ }, + ) = ex.kind + { + err.span_suggestion( + ex.span, + &format!( + "you may have meant the maximum value of `{actual}`", + ), + format!("{actual}::MAX"), + Applicability::MaybeIncorrect, + ); + } + } + Str | Never | Char | Tuple(_) | Array(_, _) => {} + Ref(_, lty, _) if *lty.kind() == Str => {} + _ => { + self.note_unmet_impls_on_type(&mut err, errors); + } + } + } + err.emit(); + } + self.tcx.ty_error() + } + } + } + + fn lookup_op_method( + &self, + lhs_ty: Ty<'tcx>, + other_ty: Option>, + other_ty_expr: Option<&'tcx hir::Expr<'tcx>>, + op: Op, + expected: Expectation<'tcx>, + ) -> Result, Vec>> { + let lang = self.tcx.lang_items(); + + let span = match op { + Op::Binary(op, _) => op.span, + Op::Unary(_, span) => span, + }; + let (opname, trait_did) = if let Op::Binary(op, IsAssign::Yes) = op { + match op.node { + hir::BinOpKind::Add => (sym::add_assign, lang.add_assign_trait()), + hir::BinOpKind::Sub => (sym::sub_assign, lang.sub_assign_trait()), + hir::BinOpKind::Mul => (sym::mul_assign, lang.mul_assign_trait()), + hir::BinOpKind::Div => (sym::div_assign, lang.div_assign_trait()), + hir::BinOpKind::Rem => (sym::rem_assign, lang.rem_assign_trait()), + hir::BinOpKind::BitXor => (sym::bitxor_assign, lang.bitxor_assign_trait()), + hir::BinOpKind::BitAnd => (sym::bitand_assign, lang.bitand_assign_trait()), + hir::BinOpKind::BitOr => (sym::bitor_assign, lang.bitor_assign_trait()), + hir::BinOpKind::Shl => (sym::shl_assign, lang.shl_assign_trait()), + hir::BinOpKind::Shr => (sym::shr_assign, lang.shr_assign_trait()), + hir::BinOpKind::Lt + | hir::BinOpKind::Le + | hir::BinOpKind::Ge + | hir::BinOpKind::Gt + | hir::BinOpKind::Eq + | hir::BinOpKind::Ne + | hir::BinOpKind::And + | hir::BinOpKind::Or => { + span_bug!(span, "impossible assignment operation: {}=", op.node.as_str()) + } + } + } else if let Op::Binary(op, IsAssign::No) = op { + match op.node { + hir::BinOpKind::Add => (sym::add, lang.add_trait()), + hir::BinOpKind::Sub => (sym::sub, lang.sub_trait()), + hir::BinOpKind::Mul => (sym::mul, lang.mul_trait()), + hir::BinOpKind::Div => (sym::div, lang.div_trait()), + hir::BinOpKind::Rem => (sym::rem, lang.rem_trait()), + hir::BinOpKind::BitXor => (sym::bitxor, lang.bitxor_trait()), + hir::BinOpKind::BitAnd => (sym::bitand, lang.bitand_trait()), + hir::BinOpKind::BitOr => (sym::bitor, lang.bitor_trait()), + hir::BinOpKind::Shl => (sym::shl, lang.shl_trait()), + hir::BinOpKind::Shr => (sym::shr, lang.shr_trait()), + hir::BinOpKind::Lt => (sym::lt, lang.partial_ord_trait()), + hir::BinOpKind::Le => (sym::le, lang.partial_ord_trait()), + hir::BinOpKind::Ge => (sym::ge, lang.partial_ord_trait()), + hir::BinOpKind::Gt => (sym::gt, lang.partial_ord_trait()), + hir::BinOpKind::Eq => (sym::eq, lang.eq_trait()), + hir::BinOpKind::Ne => (sym::ne, lang.eq_trait()), + hir::BinOpKind::And | hir::BinOpKind::Or => { + span_bug!(span, "&& and || are not overloadable") + } + } + } else if let Op::Unary(hir::UnOp::Not, _) = op { + (sym::not, lang.not_trait()) + } else if let Op::Unary(hir::UnOp::Neg, _) = op { + (sym::neg, lang.neg_trait()) + } else { + bug!("lookup_op_method: op not supported: {:?}", op) + }; + + debug!( + "lookup_op_method(lhs_ty={:?}, op={:?}, opname={:?}, trait_did={:?})", + lhs_ty, op, opname, trait_did + ); + + // Catches cases like #83893, where a lang item is declared with the + // wrong number of generic arguments. 
Should have yielded an error + // elsewhere by now, but we have to catch it here so that we do not + // index `other_tys` out of bounds (if the lang item has too many + // generic arguments, `other_tys` is too short). + if !has_expected_num_generic_args( + self.tcx, + trait_did, + match op { + // Binary ops have a generic right-hand side, unary ops don't + Op::Binary(..) => 1, + Op::Unary(..) => 0, + }, + ) { + return Err(vec![]); + } + + let opname = Ident::with_dummy_span(opname); + let method = trait_did.and_then(|trait_did| { + self.lookup_op_method_in_trait( + span, + opname, + trait_did, + lhs_ty, + other_ty, + other_ty_expr, + expected, + ) + }); + + match (method, trait_did) { + (Some(ok), _) => { + let method = self.register_infer_ok_obligations(ok); + self.select_obligations_where_possible(false, |_| {}); + Ok(method) + } + (None, None) => Err(vec![]), + (None, Some(trait_did)) => { + let (obligation, _) = self.obligation_for_op_method( + span, + trait_did, + lhs_ty, + other_ty, + other_ty_expr, + expected, + ); + let mut fulfill = >::new(self.tcx); + fulfill.register_predicate_obligation(self, obligation); + Err(fulfill.select_where_possible(&self.infcx)) + } + } + } +} + +// Binary operator categories. These categories summarize the behavior +// with respect to the builtin operations supported. +enum BinOpCategory { + /// &&, || -- cannot be overridden + Shortcircuit, + + /// <<, >> -- when shifting a single integer, rhs can be any + /// integer type. For simd, types must match. + Shift, + + /// +, -, etc -- takes equal types, produces same type as input, + /// applicable to ints/floats/simd + Math, + + /// &, |, ^ -- takes equal types, produces same type as input, + /// applicable to ints/floats/simd/bool + Bitwise, + + /// ==, !=, etc -- takes equal types, produces bools, except for simd, + /// which produce the input type + Comparison, +} + +impl BinOpCategory { + fn from(op: hir::BinOp) -> BinOpCategory { + match op.node { + hir::BinOpKind::Shl | hir::BinOpKind::Shr => BinOpCategory::Shift, + + hir::BinOpKind::Add + | hir::BinOpKind::Sub + | hir::BinOpKind::Mul + | hir::BinOpKind::Div + | hir::BinOpKind::Rem => BinOpCategory::Math, + + hir::BinOpKind::BitXor | hir::BinOpKind::BitAnd | hir::BinOpKind::BitOr => { + BinOpCategory::Bitwise + } + + hir::BinOpKind::Eq + | hir::BinOpKind::Ne + | hir::BinOpKind::Lt + | hir::BinOpKind::Le + | hir::BinOpKind::Ge + | hir::BinOpKind::Gt => BinOpCategory::Comparison, + + hir::BinOpKind::And | hir::BinOpKind::Or => BinOpCategory::Shortcircuit, + } + } +} + +/// Whether the binary operation is an assignment (`a += b`), or not (`a + b`) +#[derive(Clone, Copy, Debug, PartialEq)] +enum IsAssign { + No, + Yes, +} + +#[derive(Clone, Copy, Debug)] +enum Op { + Binary(hir::BinOp, IsAssign), + Unary(hir::UnOp, Span), +} + +/// Dereferences a single level of immutable referencing. +fn deref_ty_if_possible<'tcx>(ty: Ty<'tcx>) -> Ty<'tcx> { + match ty.kind() { + ty::Ref(_, ty, hir::Mutability::Not) => *ty, + _ => ty, + } +} + +/// Returns `true` if this is a built-in arithmetic operation (e.g., u32 +/// + u32, i16x4 == i16x4) and false if these types would have to be +/// overloaded to be legal. There are two reasons that we distinguish +/// builtin operations from overloaded ones (vs trying to drive +/// everything uniformly through the trait system and intrinsics or +/// something like that): +/// +/// 1. Builtin operations can trivially be evaluated in constants. +/// 2. 
For comparison operators applied to SIMD types the result is +/// not of type `bool`. For example, `i16x4 == i16x4` yields a +/// type like `i16x4`. This means that the overloaded trait +/// `PartialEq` is not applicable. +/// +/// Reason #2 is the killer. I tried for a while to always use +/// overloaded logic and just check the types in constants/codegen after +/// the fact, and it worked fine, except for SIMD types. -nmatsakis +fn is_builtin_binop<'tcx>(lhs: Ty<'tcx>, rhs: Ty<'tcx>, op: hir::BinOp) -> bool { + // Special-case a single layer of referencing, so that things like `5.0 + &6.0f32` work. + // (See https://github.com/rust-lang/rust/issues/57447.) + let (lhs, rhs) = (deref_ty_if_possible(lhs), deref_ty_if_possible(rhs)); + + match BinOpCategory::from(op) { + BinOpCategory::Shortcircuit => true, + + BinOpCategory::Shift => { + lhs.references_error() + || rhs.references_error() + || lhs.is_integral() && rhs.is_integral() + } + + BinOpCategory::Math => { + lhs.references_error() + || rhs.references_error() + || lhs.is_integral() && rhs.is_integral() + || lhs.is_floating_point() && rhs.is_floating_point() + } + + BinOpCategory::Bitwise => { + lhs.references_error() + || rhs.references_error() + || lhs.is_integral() && rhs.is_integral() + || lhs.is_floating_point() && rhs.is_floating_point() + || lhs.is_bool() && rhs.is_bool() + } + + BinOpCategory::Comparison => { + lhs.references_error() || rhs.references_error() || lhs.is_scalar() && rhs.is_scalar() + } + } +} + +struct TypeParamVisitor<'tcx>(Vec>); + +impl<'tcx> TypeVisitor<'tcx> for TypeParamVisitor<'tcx> { + fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow { + if let ty::Param(_) = ty.kind() { + self.0.push(ty); + } + ty.super_visit_with(self) + } +} + +struct TypeParamEraser<'a, 'tcx>(&'a FnCtxt<'a, 'tcx>, Span); + +impl<'tcx> TypeFolder<'tcx> for TypeParamEraser<'_, 'tcx> { + fn tcx(&self) -> TyCtxt<'tcx> { + self.0.tcx + } + + fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { + match ty.kind() { + ty::Param(_) => self.0.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::MiscVariable, + span: self.1, + }), + _ => ty.super_fold_with(self), + } + } +} diff --git a/compiler/rustc_typeck/src/check/pat.rs b/compiler/rustc_typeck/src/check/pat.rs new file mode 100644 index 000000000..837c32355 --- /dev/null +++ b/compiler/rustc_typeck/src/check/pat.rs @@ -0,0 +1,2142 @@ +use crate::check::FnCtxt; +use rustc_ast as ast; + +use rustc_data_structures::fx::FxHashMap; +use rustc_errors::{ + pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, + MultiSpan, +}; +use rustc_hir as hir; +use rustc_hir::def::{CtorKind, DefKind, Res}; +use rustc_hir::pat_util::EnumerateAndAdjustIterator; +use rustc_hir::{HirId, Pat, PatKind}; +use rustc_infer::infer; +use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; +use rustc_middle::middle::stability::EvalResult; +use rustc_middle::ty::{self, Adt, BindingMode, Ty, TypeVisitable}; +use rustc_session::lint::builtin::NON_EXHAUSTIVE_OMITTED_PATTERNS; +use rustc_span::hygiene::DesugaringKind; +use rustc_span::lev_distance::find_best_match_for_name; +use rustc_span::source_map::{Span, Spanned}; +use rustc_span::symbol::{kw, sym, Ident}; +use rustc_span::{BytePos, DUMMY_SP}; +use rustc_trait_selection::autoderef::Autoderef; +use rustc_trait_selection::traits::{ObligationCause, Pattern}; +use ty::VariantDef; + +use std::cmp; +use std::collections::hash_map::Entry::{Occupied, Vacant}; + +use 
super::report_unexpected_variant_res; + +const CANNOT_IMPLICITLY_DEREF_POINTER_TRAIT_OBJ: &str = "\ +This error indicates that a pointer to a trait type cannot be implicitly dereferenced by a \ +pattern. Every trait defines a type, but because the size of trait implementors isn't fixed, \ +this type has no compile-time size. Therefore, all accesses to trait types must be through \ +pointers. If you encounter this error you should try to avoid dereferencing the pointer. + +You can read more about trait objects in the Trait Objects section of the Reference: \ +https://doc.rust-lang.org/reference/types.html#trait-objects"; + +/// Information about the expected type at the top level of type checking a pattern. +/// +/// **NOTE:** This is only for use by diagnostics. Do NOT use for type checking logic! +#[derive(Copy, Clone)] +struct TopInfo<'tcx> { + /// The `expected` type at the top level of type checking a pattern. + expected: Ty<'tcx>, + /// Was the origin of the `span` from a scrutinee expression? + /// + /// Otherwise there is no scrutinee and it could be e.g. from the type of a formal parameter. + origin_expr: bool, + /// The span giving rise to the `expected` type, if one could be provided. + /// + /// If `origin_expr` is `true`, then this is the span of the scrutinee as in: + /// + /// - `match scrutinee { ... }` + /// - `let _ = scrutinee;` + /// + /// This is used to point to add context in type errors. + /// In the following example, `span` corresponds to the `a + b` expression: + /// + /// ```text + /// error[E0308]: mismatched types + /// --> src/main.rs:L:C + /// | + /// L | let temp: usize = match a + b { + /// | ----- this expression has type `usize` + /// L | Ok(num) => num, + /// | ^^^^^^^ expected `usize`, found enum `std::result::Result` + /// | + /// = note: expected type `usize` + /// found type `std::result::Result<_, _>` + /// ``` + span: Option, +} + +impl<'tcx> FnCtxt<'_, 'tcx> { + fn pattern_cause(&self, ti: TopInfo<'tcx>, cause_span: Span) -> ObligationCause<'tcx> { + let code = Pattern { span: ti.span, root_ty: ti.expected, origin_expr: ti.origin_expr }; + self.cause(cause_span, code) + } + + fn demand_eqtype_pat_diag( + &self, + cause_span: Span, + expected: Ty<'tcx>, + actual: Ty<'tcx>, + ti: TopInfo<'tcx>, + ) -> Option> { + self.demand_eqtype_with_origin(&self.pattern_cause(ti, cause_span), expected, actual) + } + + fn demand_eqtype_pat( + &self, + cause_span: Span, + expected: Ty<'tcx>, + actual: Ty<'tcx>, + ti: TopInfo<'tcx>, + ) { + if let Some(mut err) = self.demand_eqtype_pat_diag(cause_span, expected, actual, ti) { + err.emit(); + } + } +} + +const INITIAL_BM: BindingMode = BindingMode::BindByValue(hir::Mutability::Not); + +/// Mode for adjusting the expected type and binding mode. +enum AdjustMode { + /// Peel off all immediate reference types. + Peel, + /// Reset binding mode to the initial mode. + Reset, + /// Pass on the input binding mode and expected type. + Pass, +} + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + /// Type check the given top level pattern against the `expected` type. + /// + /// If a `Some(span)` is provided and `origin_expr` holds, + /// then the `span` represents the scrutinee's span. + /// The scrutinee is found in e.g. `match scrutinee { ... }` and `let pat = scrutinee;`. + /// + /// Otherwise, `Some(span)` represents the span of a type expression + /// which originated the `expected` type. 
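A user-level example of the situation described in the `TopInfo` docs above: the scrutinee has type `usize`, so an `Ok(num)` arm pattern is a mismatch, and the diagnostic points at the scrutinee span recorded in `TopInfo`. Names below are invented for illustration.

```rust
fn main() {
    let (a, b) = (1usize, 2usize);

    // let _bad: usize = match a + b {
    //     Ok(num) => num, // error[E0308]: expected `usize`, found `Result<_, _>`
    //     _ => 0,
    // };

    // With a scrutinee that really is a `Result`, the same pattern checks.
    let sum: Result<usize, ()> = Ok(a + b);
    let total = match sum {
        Ok(num) => num,
        Err(()) => 0,
    };
    assert_eq!(total, 3);
}
```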
+ pub fn check_pat_top( + &self, + pat: &'tcx Pat<'tcx>, + expected: Ty<'tcx>, + span: Option, + origin_expr: bool, + ) { + let info = TopInfo { expected, origin_expr, span }; + self.check_pat(pat, expected, INITIAL_BM, info); + } + + /// Type check the given `pat` against the `expected` type + /// with the provided `def_bm` (default binding mode). + /// + /// Outside of this module, `check_pat_top` should always be used. + /// Conversely, inside this module, `check_pat_top` should never be used. + #[instrument(level = "debug", skip(self, ti))] + fn check_pat( + &self, + pat: &'tcx Pat<'tcx>, + expected: Ty<'tcx>, + def_bm: BindingMode, + ti: TopInfo<'tcx>, + ) { + let path_res = match &pat.kind { + PatKind::Path(qpath) => { + Some(self.resolve_ty_and_res_fully_qualified_call(qpath, pat.hir_id, pat.span)) + } + _ => None, + }; + let adjust_mode = self.calc_adjust_mode(pat, path_res.map(|(res, ..)| res)); + let (expected, def_bm) = self.calc_default_binding_mode(pat, expected, def_bm, adjust_mode); + + let ty = match pat.kind { + PatKind::Wild => expected, + PatKind::Lit(lt) => self.check_pat_lit(pat.span, lt, expected, ti), + PatKind::Range(lhs, rhs, _) => self.check_pat_range(pat.span, lhs, rhs, expected, ti), + PatKind::Binding(ba, var_id, _, sub) => { + self.check_pat_ident(pat, ba, var_id, sub, expected, def_bm, ti) + } + PatKind::TupleStruct(ref qpath, subpats, ddpos) => { + self.check_pat_tuple_struct(pat, qpath, subpats, ddpos, expected, def_bm, ti) + } + PatKind::Path(ref qpath) => { + self.check_pat_path(pat, qpath, path_res.unwrap(), expected, ti) + } + PatKind::Struct(ref qpath, fields, has_rest_pat) => { + self.check_pat_struct(pat, qpath, fields, has_rest_pat, expected, def_bm, ti) + } + PatKind::Or(pats) => { + for pat in pats { + self.check_pat(pat, expected, def_bm, ti); + } + expected + } + PatKind::Tuple(elements, ddpos) => { + self.check_pat_tuple(pat.span, elements, ddpos, expected, def_bm, ti) + } + PatKind::Box(inner) => self.check_pat_box(pat.span, inner, expected, def_bm, ti), + PatKind::Ref(inner, mutbl) => { + self.check_pat_ref(pat, inner, mutbl, expected, def_bm, ti) + } + PatKind::Slice(before, slice, after) => { + self.check_pat_slice(pat.span, before, slice, after, expected, def_bm, ti) + } + }; + + self.write_ty(pat.hir_id, ty); + + // (note_1): In most of the cases where (note_1) is referenced + // (literals and constants being the exception), we relate types + // using strict equality, even though subtyping would be sufficient. + // There are a few reasons for this, some of which are fairly subtle + // and which cost me (nmatsakis) an hour or two debugging to remember, + // so I thought I'd write them down this time. + // + // 1. There is no loss of expressiveness here, though it does + // cause some inconvenience. What we are saying is that the type + // of `x` becomes *exactly* what is expected. This can cause unnecessary + // errors in some cases, such as this one: + // + // ``` + // fn foo<'x>(x: &'x i32) { + // let a = 1; + // let mut z = x; + // z = &a; + // } + // ``` + // + // The reason we might get an error is that `z` might be + // assigned a type like `&'x i32`, and then we would have + // a problem when we try to assign `&a` to `z`, because + // the lifetime of `&a` (i.e., the enclosing block) is + // shorter than `'x`. + // + // HOWEVER, this code works fine. The reason is that the + // expected type here is whatever type the user wrote, not + // the initializer's type. 
In this case the user wrote + // nothing, so we are going to create a type variable `Z`. + // Then we will assign the type of the initializer (`&'x i32`) + // as a subtype of `Z`: `&'x i32 <: Z`. And hence we + // will instantiate `Z` as a type `&'0 i32` where `'0` is + // a fresh region variable, with the constraint that `'x : '0`. + // So basically we're all set. + // + // Note that there are two tests to check that this remains true + // (`regions-reassign-{match,let}-bound-pointer.rs`). + // + // 2. Things go horribly wrong if we use subtype. The reason for + // THIS is a fairly subtle case involving bound regions. See the + // `givens` field in `region_constraints`, as well as the test + // `regions-relate-bound-regions-on-closures-to-inference-variables.rs`, + // for details. Short version is that we must sometimes detect + // relationships between specific region variables and regions + // bound in a closure signature, and that detection gets thrown + // off when we substitute fresh region variables here to enable + // subtyping. + } + + /// Compute the new expected type and default binding mode from the old ones + /// as well as the pattern form we are currently checking. + fn calc_default_binding_mode( + &self, + pat: &'tcx Pat<'tcx>, + expected: Ty<'tcx>, + def_bm: BindingMode, + adjust_mode: AdjustMode, + ) -> (Ty<'tcx>, BindingMode) { + match adjust_mode { + AdjustMode::Pass => (expected, def_bm), + AdjustMode::Reset => (expected, INITIAL_BM), + AdjustMode::Peel => self.peel_off_references(pat, expected, def_bm), + } + } + + /// How should the binding mode and expected type be adjusted? + /// + /// When the pattern is a path pattern, `opt_path_res` must be `Some(res)`. + fn calc_adjust_mode(&self, pat: &'tcx Pat<'tcx>, opt_path_res: Option) -> AdjustMode { + // When we perform destructuring assignment, we disable default match bindings, which are + // unintuitive in this context. + if !pat.default_binding_modes { + return AdjustMode::Reset; + } + match &pat.kind { + // Type checking these product-like types successfully always require + // that the expected type be of those types and not reference types. + PatKind::Struct(..) + | PatKind::TupleStruct(..) + | PatKind::Tuple(..) + | PatKind::Box(_) + | PatKind::Range(..) + | PatKind::Slice(..) => AdjustMode::Peel, + // String and byte-string literals result in types `&str` and `&[u8]` respectively. + // All other literals result in non-reference types. + // As a result, we allow `if let 0 = &&0 {}` but not `if let "foo" = &&"foo {}`. + // + // Call `resolve_vars_if_possible` here for inline const blocks. + PatKind::Lit(lt) => match self.resolve_vars_if_possible(self.check_expr(lt)).kind() { + ty::Ref(..) => AdjustMode::Pass, + _ => AdjustMode::Peel, + }, + PatKind::Path(_) => match opt_path_res.unwrap() { + // These constants can be of a reference type, e.g. `const X: &u8 = &0;`. + // Peeling the reference types too early will cause type checking failures. + // Although it would be possible to *also* peel the types of the constants too. + Res::Def(DefKind::Const | DefKind::AssocConst, _) => AdjustMode::Pass, + // In the `ValueNS`, we have `SelfCtor(..) | Ctor(_, Const), _)` remaining which + // could successfully compile. The former being `Self` requires a unit struct. + // In either case, and unlike constants, the pattern itself cannot be + // a reference type wherefore peeling doesn't give up any expressiveness. + _ => AdjustMode::Peel, + }, + // When encountering a `& mut? pat` pattern, reset to "by value". 
+ // This is so that `x` and `y` here are by value, as they appear to be: + // + // ``` + // match &(&22, &44) { + // (&x, &y) => ... + // } + // ``` + // + // See issue #46688. + PatKind::Ref(..) => AdjustMode::Reset, + // A `_` pattern works with any expected type, so there's no need to do anything. + PatKind::Wild + // Bindings also work with whatever the expected type is, + // and moreover if we peel references off, that will give us the wrong binding type. + // Also, we can have a subpattern `binding @ pat`. + // Each side of the `@` should be treated independently (like with OR-patterns). + | PatKind::Binding(..) + // An OR-pattern just propagates to each individual alternative. + // This is maximally flexible, allowing e.g., `Some(mut x) | &Some(mut x)`. + // In that example, `Some(mut x)` results in `Peel` whereas `&Some(mut x)` in `Reset`. + | PatKind::Or(_) => AdjustMode::Pass, + } + } + + /// Peel off as many immediately nested `& mut?` from the expected type as possible + /// and return the new expected type and binding default binding mode. + /// The adjustments vector, if non-empty is stored in a table. + fn peel_off_references( + &self, + pat: &'tcx Pat<'tcx>, + expected: Ty<'tcx>, + mut def_bm: BindingMode, + ) -> (Ty<'tcx>, BindingMode) { + let mut expected = self.resolve_vars_with_obligations(expected); + + // Peel off as many `&` or `&mut` from the scrutinee type as possible. For example, + // for `match &&&mut Some(5)` the loop runs three times, aborting when it reaches + // the `Some(5)` which is not of type Ref. + // + // For each ampersand peeled off, update the binding mode and push the original + // type into the adjustments vector. + // + // See the examples in `ui/match-defbm*.rs`. + let mut pat_adjustments = vec![]; + while let ty::Ref(_, inner_ty, inner_mutability) = *expected.kind() { + debug!("inspecting {:?}", expected); + + debug!("current discriminant is Ref, inserting implicit deref"); + // Preserve the reference type. We'll need it later during THIR lowering. + pat_adjustments.push(expected); + + expected = inner_ty; + def_bm = ty::BindByReference(match def_bm { + // If default binding mode is by value, make it `ref` or `ref mut` + // (depending on whether we observe `&` or `&mut`). + ty::BindByValue(_) | + // When `ref mut`, stay a `ref mut` (on `&mut`) or downgrade to `ref` (on `&`). + ty::BindByReference(hir::Mutability::Mut) => inner_mutability, + // Once a `ref`, always a `ref`. + // This is because a `& &mut` cannot mutate the underlying value. + ty::BindByReference(m @ hir::Mutability::Not) => m, + }); + } + + if !pat_adjustments.is_empty() { + debug!("default binding mode is now {:?}", def_bm); + self.inh + .typeck_results + .borrow_mut() + .pat_adjustments_mut() + .insert(pat.hir_id, pat_adjustments); + } + + (expected, def_bm) + } + + fn check_pat_lit( + &self, + span: Span, + lt: &hir::Expr<'tcx>, + expected: Ty<'tcx>, + ti: TopInfo<'tcx>, + ) -> Ty<'tcx> { + // We've already computed the type above (when checking for a non-ref pat), + // so avoid computing it again. + let ty = self.node_ty(lt.hir_id); + + // Byte string patterns behave the same way as array patterns + // They can denote both statically and dynamically-sized byte arrays. + let mut pat_ty = ty; + if let hir::ExprKind::Lit(Spanned { node: ast::LitKind::ByteStr(_), .. 
}) = lt.kind { + let expected = self.structurally_resolved_type(span, expected); + if let ty::Ref(_, inner_ty, _) = expected.kind() + && matches!(inner_ty.kind(), ty::Slice(_)) + { + let tcx = self.tcx; + trace!(?lt.hir_id.local_id, "polymorphic byte string lit"); + self.typeck_results + .borrow_mut() + .treat_byte_string_as_slice + .insert(lt.hir_id.local_id); + pat_ty = tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_slice(tcx.types.u8)); + } + } + + // Somewhat surprising: in this case, the subtyping relation goes the + // opposite way as the other cases. Actually what we really want is not + // a subtyping relation at all but rather that there exists a LUB + // (so that they can be compared). However, in practice, constants are + // always scalars or strings. For scalars subtyping is irrelevant, + // and for strings `ty` is type is `&'static str`, so if we say that + // + // &'static str <: expected + // + // then that's equivalent to there existing a LUB. + let cause = self.pattern_cause(ti, span); + if let Some(mut err) = self.demand_suptype_with_origin(&cause, expected, pat_ty) { + err.emit_unless( + ti.span + .filter(|&s| { + // In the case of `if`- and `while`-expressions we've already checked + // that `scrutinee: bool`. We know that the pattern is `true`, + // so an error here would be a duplicate and from the wrong POV. + s.is_desugaring(DesugaringKind::CondTemporary) + }) + .is_some(), + ); + } + + pat_ty + } + + fn check_pat_range( + &self, + span: Span, + lhs: Option<&'tcx hir::Expr<'tcx>>, + rhs: Option<&'tcx hir::Expr<'tcx>>, + expected: Ty<'tcx>, + ti: TopInfo<'tcx>, + ) -> Ty<'tcx> { + let calc_side = |opt_expr: Option<&'tcx hir::Expr<'tcx>>| match opt_expr { + None => None, + Some(expr) => { + let ty = self.check_expr(expr); + // Check that the end-point is possibly of numeric or char type. + // The early check here is not for correctness, but rather better + // diagnostics (e.g. when `&str` is being matched, `expected` will + // be peeled to `str` while ty here is still `&str`, if we don't + // err early here, a rather confusing unification error will be + // emitted instead). + let fail = + !(ty.is_numeric() || ty.is_char() || ty.is_ty_var() || ty.references_error()); + Some((fail, ty, expr.span)) + } + }; + let mut lhs = calc_side(lhs); + let mut rhs = calc_side(rhs); + + if let (Some((true, ..)), _) | (_, Some((true, ..))) = (lhs, rhs) { + // There exists a side that didn't meet our criteria that the end-point + // be of a numeric or char type, as checked in `calc_side` above. + self.emit_err_pat_range(span, lhs, rhs); + return self.tcx.ty_error(); + } + + // Unify each side with `expected`. + // Subtyping doesn't matter here, as the value is some kind of scalar. + let demand_eqtype = |x: &mut _, y| { + if let Some((ref mut fail, x_ty, x_span)) = *x + && let Some(mut err) = self.demand_eqtype_pat_diag(x_span, expected, x_ty, ti) + { + if let Some((_, y_ty, y_span)) = y { + self.endpoint_has_type(&mut err, y_span, y_ty); + } + err.emit(); + *fail = true; + } + }; + demand_eqtype(&mut lhs, rhs); + demand_eqtype(&mut rhs, lhs); + + if let (Some((true, ..)), _) | (_, Some((true, ..))) = (lhs, rhs) { + return self.tcx.ty_error(); + } + + // Find the unified type and check if it's of numeric or char type again. + // This check is needed if both sides are inference variables. + // We require types to be resolved here so that we emit inference failure + // rather than "_ is not a char or numeric". 
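For reference, a user-level sketch of the range-pattern rules enforced here: both endpoints must be `char` or numeric and agree with the scrutinee type. The `classify` function is invented for illustration.

```rust
// Byte/char/numeric endpoints are accepted; anything else is reported with
// E0029 by `emit_err_pat_range`.
fn classify(b: u8) -> &'static str {
    match b {
        b'0'..=b'9' => "digit",
        b'a'..=b'z' | b'A'..=b'Z' => "letter",
        _ => "other",
    }
}

fn main() {
    assert_eq!(classify(b'7'), "digit");
    assert_eq!(classify(b'x'), "letter");

    // A `&str` endpoint is rejected:
    // match "x" {
    //     "a"..="z" => {} // error[E0029]: only `char` and numeric types
    //     _ => {}         // are allowed in range patterns
    // }
}
```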
+ let ty = self.structurally_resolved_type(span, expected); + if !(ty.is_numeric() || ty.is_char() || ty.references_error()) { + if let Some((ref mut fail, _, _)) = lhs { + *fail = true; + } + if let Some((ref mut fail, _, _)) = rhs { + *fail = true; + } + self.emit_err_pat_range(span, lhs, rhs); + return self.tcx.ty_error(); + } + ty + } + + fn endpoint_has_type(&self, err: &mut Diagnostic, span: Span, ty: Ty<'_>) { + if !ty.references_error() { + err.span_label(span, &format!("this is of type `{}`", ty)); + } + } + + fn emit_err_pat_range( + &self, + span: Span, + lhs: Option<(bool, Ty<'tcx>, Span)>, + rhs: Option<(bool, Ty<'tcx>, Span)>, + ) { + let span = match (lhs, rhs) { + (Some((true, ..)), Some((true, ..))) => span, + (Some((true, _, sp)), _) => sp, + (_, Some((true, _, sp))) => sp, + _ => span_bug!(span, "emit_err_pat_range: no side failed or exists but still error?"), + }; + let mut err = struct_span_err!( + self.tcx.sess, + span, + E0029, + "only `char` and numeric types are allowed in range patterns" + ); + let msg = |ty| { + let ty = self.resolve_vars_if_possible(ty); + format!("this is of type `{}` but it should be `char` or numeric", ty) + }; + let mut one_side_err = |first_span, first_ty, second: Option<(bool, Ty<'tcx>, Span)>| { + err.span_label(first_span, &msg(first_ty)); + if let Some((_, ty, sp)) = second { + let ty = self.resolve_vars_if_possible(ty); + self.endpoint_has_type(&mut err, sp, ty); + } + }; + match (lhs, rhs) { + (Some((true, lhs_ty, lhs_sp)), Some((true, rhs_ty, rhs_sp))) => { + err.span_label(lhs_sp, &msg(lhs_ty)); + err.span_label(rhs_sp, &msg(rhs_ty)); + } + (Some((true, lhs_ty, lhs_sp)), rhs) => one_side_err(lhs_sp, lhs_ty, rhs), + (lhs, Some((true, rhs_ty, rhs_sp))) => one_side_err(rhs_sp, rhs_ty, lhs), + _ => span_bug!(span, "Impossible, verified above."), + } + if self.tcx.sess.teach(&err.get_code().unwrap()) { + err.note( + "In a match expression, only numbers and characters can be matched \ + against a range. This is because the compiler checks that the range \ + is non-empty at compile-time, and is unable to evaluate arbitrary \ + comparison functions. If you want to capture values of an orderable \ + type between two end-points, you can use a guard.", + ); + } + err.emit(); + } + + fn check_pat_ident( + &self, + pat: &'tcx Pat<'tcx>, + ba: hir::BindingAnnotation, + var_id: HirId, + sub: Option<&'tcx Pat<'tcx>>, + expected: Ty<'tcx>, + def_bm: BindingMode, + ti: TopInfo<'tcx>, + ) -> Ty<'tcx> { + // Determine the binding mode... + let bm = match ba { + hir::BindingAnnotation::Unannotated => def_bm, + _ => BindingMode::convert(ba), + }; + // ...and store it in a side table: + self.inh.typeck_results.borrow_mut().pat_binding_modes_mut().insert(pat.hir_id, bm); + + debug!("check_pat_ident: pat.hir_id={:?} bm={:?}", pat.hir_id, bm); + + let local_ty = self.local_ty(pat.span, pat.hir_id).decl_ty; + let eq_ty = match bm { + ty::BindByReference(mutbl) => { + // If the binding is like `ref x | ref mut x`, + // then `x` is assigned a value of type `&M T` where M is the + // mutability and T is the expected type. + // + // `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)` + // is required. However, we use equality, which is stronger. + // See (note_1) for an explanation. + self.new_ref_ty(pat.span, mutbl, expected) + } + // Otherwise, the type of x is the expected type `T`. + ty::BindByValue(_) => { + // As above, `T <: typeof(x)` is required, but we use equality, see (note_1). 
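A user-level sketch of the binding-mode typing handled here (names invented for illustration): `ref` bindings get reference types, by-value bindings get the expected type itself, and a binding repeated across or-pattern alternatives must keep a single type.

```rust
fn main() {
    let pair = (String::from("hi"), 2u32);

    // `ref s` binds by reference (`s: &String`); `n` binds by value (`u32`).
    let (ref s, n) = pair;
    let _: &String = s;
    let _: u32 = n;

    // Every or-pattern alternative must give `x` the same type; this is the
    // "binding must have the same type in all alternatives" check.
    let res: Result<u32, u32> = Ok(7);
    let (Ok(x) | Err(x)) = res;
    assert_eq!(x, 7);
}
```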
+ expected + } + }; + self.demand_eqtype_pat(pat.span, eq_ty, local_ty, ti); + + // If there are multiple arms, make sure they all agree on + // what the type of the binding `x` ought to be. + if var_id != pat.hir_id { + self.check_binding_alt_eq_ty(pat.span, var_id, local_ty, ti); + } + + if let Some(p) = sub { + self.check_pat(p, expected, def_bm, ti); + } + + local_ty + } + + fn check_binding_alt_eq_ty(&self, span: Span, var_id: HirId, ty: Ty<'tcx>, ti: TopInfo<'tcx>) { + let var_ty = self.local_ty(span, var_id).decl_ty; + if let Some(mut err) = self.demand_eqtype_pat_diag(span, var_ty, ty, ti) { + let hir = self.tcx.hir(); + let var_ty = self.resolve_vars_with_obligations(var_ty); + let msg = format!("first introduced with type `{var_ty}` here"); + err.span_label(hir.span(var_id), msg); + let in_match = hir.parent_iter(var_id).any(|(_, n)| { + matches!( + n, + hir::Node::Expr(hir::Expr { + kind: hir::ExprKind::Match(.., hir::MatchSource::Normal), + .. + }) + ) + }); + let pre = if in_match { "in the same arm, " } else { "" }; + err.note(&format!("{}a binding must have the same type in all alternatives", pre)); + // FIXME: check if `var_ty` and `ty` can be made the same type by adding or removing + // `ref` or `&` to the pattern. + err.emit(); + } + } + + // Precondition: pat is a Ref(_) pattern + fn borrow_pat_suggestion(&self, err: &mut Diagnostic, pat: &Pat<'_>) { + let tcx = self.tcx; + if let PatKind::Ref(inner, mutbl) = pat.kind + && let PatKind::Binding(_, _, binding, ..) = inner.kind { + let binding_parent_id = tcx.hir().get_parent_node(pat.hir_id); + let binding_parent = tcx.hir().get(binding_parent_id); + debug!(?inner, ?pat, ?binding_parent); + + let mutability = match mutbl { + ast::Mutability::Mut => "mut", + ast::Mutability::Not => "", + }; + + let mut_var_suggestion = 'block: { + if !matches!(mutbl, ast::Mutability::Mut) { + break 'block None; + } + + let ident_kind = match binding_parent { + hir::Node::Param(_) => "parameter", + hir::Node::Local(_) => "variable", + hir::Node::Arm(_) => "binding", + + // Provide diagnostics only if the parent pattern is struct-like, + // i.e. where `mut binding` makes sense + hir::Node::Pat(Pat { kind, .. }) => match kind { + PatKind::Struct(..) + | PatKind::TupleStruct(..) + | PatKind::Or(..) + | PatKind::Tuple(..) + | PatKind::Slice(..) => "binding", + + PatKind::Wild + | PatKind::Binding(..) + | PatKind::Path(..) + | PatKind::Box(..) + | PatKind::Ref(..) + | PatKind::Lit(..) + | PatKind::Range(..) => break 'block None, + }, + + // Don't provide suggestions in other cases + _ => break 'block None, + }; + + Some(( + pat.span, + format!("to declare a mutable {ident_kind} use"), + format!("mut {binding}"), + )) + + }; + + match binding_parent { + // Check that there is explicit type (ie this is not a closure param with inferred type) + // so we don't suggest moving something to the type that does not exist + hir::Node::Param(hir::Param { ty_span, .. 
}) if binding.span != *ty_span => { + err.multipart_suggestion_verbose( + format!("to take parameter `{binding}` by reference, move `&{mutability}` to the type"), + vec![ + (pat.span.until(inner.span), "".to_owned()), + (ty_span.shrink_to_lo(), format!("&{}", mutbl.prefix_str())), + ], + Applicability::MachineApplicable + ); + + if let Some((sp, msg, sugg)) = mut_var_suggestion { + err.span_note(sp, format!("{msg}: `{sugg}`")); + } + } + hir::Node::Param(_) | hir::Node::Arm(_) | hir::Node::Pat(_) => { + // rely on match ergonomics or it might be nested `&&pat` + err.span_suggestion_verbose( + pat.span.until(inner.span), + format!("consider removing `&{mutability}` from the pattern"), + "", + Applicability::MaybeIncorrect, + ); + + if let Some((sp, msg, sugg)) = mut_var_suggestion { + err.span_note(sp, format!("{msg}: `{sugg}`")); + } + } + _ if let Some((sp, msg, sugg)) = mut_var_suggestion => { + err.span_suggestion(sp, msg, sugg, Applicability::MachineApplicable); + } + _ => {} // don't provide suggestions in other cases #55175 + } + } + } + + pub fn check_dereferenceable(&self, span: Span, expected: Ty<'tcx>, inner: &Pat<'_>) -> bool { + if let PatKind::Binding(..) = inner.kind + && let Some(mt) = self.shallow_resolve(expected).builtin_deref(true) + && let ty::Dynamic(..) = mt.ty.kind() + { + // This is "x = SomeTrait" being reduced from + // "let &x = &SomeTrait" or "let box x = Box", an error. + let type_str = self.ty_to_string(expected); + let mut err = struct_span_err!( + self.tcx.sess, + span, + E0033, + "type `{}` cannot be dereferenced", + type_str + ); + err.span_label(span, format!("type `{type_str}` cannot be dereferenced")); + if self.tcx.sess.teach(&err.get_code().unwrap()) { + err.note(CANNOT_IMPLICITLY_DEREF_POINTER_TRAIT_OBJ); + } + err.emit(); + return false; + } + true + } + + fn check_pat_struct( + &self, + pat: &'tcx Pat<'tcx>, + qpath: &hir::QPath<'_>, + fields: &'tcx [hir::PatField<'tcx>], + has_rest_pat: bool, + expected: Ty<'tcx>, + def_bm: BindingMode, + ti: TopInfo<'tcx>, + ) -> Ty<'tcx> { + // Resolve the path and check the definition for errors. + let Some((variant, pat_ty)) = self.check_struct_path(qpath, pat.hir_id) else { + let err = self.tcx.ty_error(); + for field in fields { + let ti = ti; + self.check_pat(field.pat, err, def_bm, ti); + } + return err; + }; + + // Type-check the path. + self.demand_eqtype_pat(pat.span, expected, pat_ty, ti); + + // Type-check subpatterns. + if self.check_struct_pat_fields(pat_ty, &pat, variant, fields, has_rest_pat, def_bm, ti) { + pat_ty + } else { + self.tcx.ty_error() + } + } + + fn check_pat_path( + &self, + pat: &Pat<'tcx>, + qpath: &hir::QPath<'_>, + path_resolution: (Res, Option>, &'tcx [hir::PathSegment<'tcx>]), + expected: Ty<'tcx>, + ti: TopInfo<'tcx>, + ) -> Ty<'tcx> { + let tcx = self.tcx; + + // We have already resolved the path. + let (res, opt_ty, segments) = path_resolution; + match res { + Res::Err => { + self.set_tainted_by_errors(); + return tcx.ty_error(); + } + Res::Def(DefKind::AssocFn | DefKind::Ctor(_, CtorKind::Fictive | CtorKind::Fn), _) => { + report_unexpected_variant_res(tcx, res, qpath, pat.span); + return tcx.ty_error(); + } + Res::SelfCtor(..) + | Res::Def( + DefKind::Ctor(_, CtorKind::Const) + | DefKind::Const + | DefKind::AssocConst + | DefKind::ConstParam, + _, + ) => {} // OK + _ => bug!("unexpected pattern resolution: {:?}", res), + } + + // Type-check the path. 
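A user-level sketch of the path patterns this code accepts and rejects (the `Mode` enum and `describe` function are invented for illustration): unit variants and constants are fine, while a bare tuple-variant constructor is reported as an unexpected resolution.

```rust
const LIMIT: u32 = 10;

enum Mode {
    Off,
    Level(u32),
}

fn describe(mode: &Mode, n: u32) -> &'static str {
    match (mode, n) {
        (Mode::Off, _) => "off",      // unit variant as a path pattern
        (_, LIMIT) => "at the limit", // constant as a path pattern
        // (Mode::Level, _) => "on",  // rejected: tuple variant named without
        //                            // its fields; write `Mode::Level(_)`
        (Mode::Level(_), _) => "on",
    }
}

fn main() {
    assert_eq!(describe(&Mode::Off, 3), "off");
    assert_eq!(describe(&Mode::Level(1), LIMIT), "at the limit");
}
```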
+ let (pat_ty, pat_res) = + self.instantiate_value_path(segments, opt_ty, res, pat.span, pat.hir_id); + if let Some(err) = + self.demand_suptype_with_origin(&self.pattern_cause(ti, pat.span), expected, pat_ty) + { + self.emit_bad_pat_path(err, pat, res, pat_res, pat_ty, segments); + } + pat_ty + } + + fn maybe_suggest_range_literal( + &self, + e: &mut Diagnostic, + opt_def_id: Option, + ident: Ident, + ) -> bool { + match opt_def_id { + Some(def_id) => match self.tcx.hir().get_if_local(def_id) { + Some(hir::Node::Item(hir::Item { + kind: hir::ItemKind::Const(_, body_id), .. + })) => match self.tcx.hir().get(body_id.hir_id) { + hir::Node::Expr(expr) => { + if hir::is_range_literal(expr) { + let span = self.tcx.hir().span(body_id.hir_id); + if let Ok(snip) = self.tcx.sess.source_map().span_to_snippet(span) { + e.span_suggestion_verbose( + ident.span, + "you may want to move the range into the match block", + snip, + Applicability::MachineApplicable, + ); + return true; + } + } + } + _ => (), + }, + _ => (), + }, + _ => (), + } + false + } + + fn emit_bad_pat_path( + &self, + mut e: DiagnosticBuilder<'_, ErrorGuaranteed>, + pat: &hir::Pat<'tcx>, + res: Res, + pat_res: Res, + pat_ty: Ty<'tcx>, + segments: &'tcx [hir::PathSegment<'tcx>], + ) { + let pat_span = pat.span; + if let Some(span) = self.tcx.hir().res_span(pat_res) { + e.span_label(span, &format!("{} defined here", res.descr())); + if let [hir::PathSegment { ident, .. }] = &*segments { + e.span_label( + pat_span, + &format!( + "`{}` is interpreted as {} {}, not a new binding", + ident, + res.article(), + res.descr(), + ), + ); + match self.tcx.hir().get(self.tcx.hir().get_parent_node(pat.hir_id)) { + hir::Node::Pat(Pat { kind: hir::PatKind::Struct(..), .. }) => { + e.span_suggestion_verbose( + ident.span.shrink_to_hi(), + "bind the struct field to a different name instead", + format!(": other_{}", ident.as_str().to_lowercase()), + Applicability::HasPlaceholders, + ); + } + _ => { + let (type_def_id, item_def_id) = match pat_ty.kind() { + Adt(def, _) => match res { + Res::Def(DefKind::Const, def_id) => (Some(def.did()), Some(def_id)), + _ => (None, None), + }, + _ => (None, None), + }; + + let ranges = &[ + self.tcx.lang_items().range_struct(), + self.tcx.lang_items().range_from_struct(), + self.tcx.lang_items().range_to_struct(), + self.tcx.lang_items().range_full_struct(), + self.tcx.lang_items().range_inclusive_struct(), + self.tcx.lang_items().range_to_inclusive_struct(), + ]; + if type_def_id != None && ranges.contains(&type_def_id) { + if !self.maybe_suggest_range_literal(&mut e, item_def_id, *ident) { + let msg = "constants only support matching by type, \ + if you meant to match against a range of values, \ + consider using a range pattern like `min ..= max` in the match block"; + e.note(msg); + } + } else { + let msg = "introduce a new binding instead"; + let sugg = format!("other_{}", ident.as_str().to_lowercase()); + e.span_suggestion( + ident.span, + msg, + sugg, + Applicability::HasPlaceholders, + ); + } + } + }; + } + } + e.emit(); + } + + fn check_pat_tuple_struct( + &self, + pat: &'tcx Pat<'tcx>, + qpath: &'tcx hir::QPath<'tcx>, + subpats: &'tcx [Pat<'tcx>], + ddpos: Option, + expected: Ty<'tcx>, + def_bm: BindingMode, + ti: TopInfo<'tcx>, + ) -> Ty<'tcx> { + let tcx = self.tcx; + let on_error = || { + for pat in subpats { + self.check_pat(pat, tcx.ty_error(), def_bm, ti); + } + }; + let report_unexpected_res = |res: Res| { + let sm = tcx.sess.source_map(); + let path_str = sm + 
.span_to_snippet(sm.span_until_char(pat.span, '(')) + .map_or_else(|_| String::new(), |s| format!(" `{}`", s.trim_end())); + let msg = format!( + "expected tuple struct or tuple variant, found {}{}", + res.descr(), + path_str + ); + + let mut err = struct_span_err!(tcx.sess, pat.span, E0164, "{msg}"); + match res { + Res::Def(DefKind::Fn | DefKind::AssocFn, _) => { + err.span_label(pat.span, "`fn` calls are not allowed in patterns"); + err.help( + "for more information, visit \ + https://doc.rust-lang.org/book/ch18-00-patterns.html", + ); + } + _ => { + err.span_label(pat.span, "not a tuple variant or struct"); + } + } + err.emit(); + on_error(); + }; + + // Resolve the path and check the definition for errors. + let (res, opt_ty, segments) = + self.resolve_ty_and_res_fully_qualified_call(qpath, pat.hir_id, pat.span); + if res == Res::Err { + self.set_tainted_by_errors(); + on_error(); + return self.tcx.ty_error(); + } + + // Type-check the path. + let (pat_ty, res) = + self.instantiate_value_path(segments, opt_ty, res, pat.span, pat.hir_id); + if !pat_ty.is_fn() { + report_unexpected_res(res); + return tcx.ty_error(); + } + + let variant = match res { + Res::Err => { + self.set_tainted_by_errors(); + on_error(); + return tcx.ty_error(); + } + Res::Def(DefKind::AssocConst | DefKind::AssocFn, _) => { + report_unexpected_res(res); + return tcx.ty_error(); + } + Res::Def(DefKind::Ctor(_, CtorKind::Fn), _) => tcx.expect_variant_res(res), + _ => bug!("unexpected pattern resolution: {:?}", res), + }; + + // Replace constructor type with constructed type for tuple struct patterns. + let pat_ty = pat_ty.fn_sig(tcx).output(); + let pat_ty = pat_ty.no_bound_vars().expect("expected fn type"); + + // Type-check the tuple struct pattern against the expected type. + let diag = self.demand_eqtype_pat_diag(pat.span, expected, pat_ty, ti); + let had_err = if let Some(mut err) = diag { + err.emit(); + true + } else { + false + }; + + // Type-check subpatterns. + if subpats.len() == variant.fields.len() + || subpats.len() < variant.fields.len() && ddpos.is_some() + { + let ty::Adt(_, substs) = pat_ty.kind() else { + bug!("unexpected pattern type {:?}", pat_ty); + }; + for (i, subpat) in subpats.iter().enumerate_and_adjust(variant.fields.len(), ddpos) { + let field_ty = self.field_ty(subpat.span, &variant.fields[i], substs); + self.check_pat(subpat, field_ty, def_bm, ti); + + self.tcx.check_stability( + variant.fields[i].did, + Some(pat.hir_id), + subpat.span, + None, + ); + } + } else { + // Pattern has wrong number of fields. 
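A user-level trigger for the arity check above (the `Rgb` struct is invented for illustration): a tuple-struct pattern must account for every field, either by name, with `_` placeholders, or with `..`.

```rust
// The E0023 arity check: mention each field, use `_`, or use `..`.
struct Rgb(u8, u8, u8);

fn main() {
    let c = Rgb(10, 20, 30);

    // let Rgb(r, g) = c;    // error[E0023]: this pattern has 2 fields, but
    //                       // the corresponding tuple struct has 3 fields
    let Rgb(r, g, _) = c; // ok: every field accounted for
    let Rgb(red, ..) = Rgb(1, 2, 3); // ok: `..` ignores the rest

    assert_eq!((r, g, red), (10, 20, 1));
}
```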
+ self.e0023(pat.span, res, qpath, subpats, &variant.fields, expected, had_err); + on_error(); + return tcx.ty_error(); + } + pat_ty + } + + fn e0023( + &self, + pat_span: Span, + res: Res, + qpath: &hir::QPath<'_>, + subpats: &'tcx [Pat<'tcx>], + fields: &'tcx [ty::FieldDef], + expected: Ty<'tcx>, + had_err: bool, + ) { + let subpats_ending = pluralize!(subpats.len()); + let fields_ending = pluralize!(fields.len()); + + let subpat_spans = if subpats.is_empty() { + vec![pat_span] + } else { + subpats.iter().map(|p| p.span).collect() + }; + let last_subpat_span = *subpat_spans.last().unwrap(); + let res_span = self.tcx.def_span(res.def_id()); + let def_ident_span = self.tcx.def_ident_span(res.def_id()).unwrap_or(res_span); + let field_def_spans = if fields.is_empty() { + vec![res_span] + } else { + fields.iter().map(|f| f.ident(self.tcx).span).collect() + }; + let last_field_def_span = *field_def_spans.last().unwrap(); + + let mut err = struct_span_err!( + self.tcx.sess, + MultiSpan::from_spans(subpat_spans), + E0023, + "this pattern has {} field{}, but the corresponding {} has {} field{}", + subpats.len(), + subpats_ending, + res.descr(), + fields.len(), + fields_ending, + ); + err.span_label( + last_subpat_span, + &format!("expected {} field{}, found {}", fields.len(), fields_ending, subpats.len()), + ); + if self.tcx.sess.source_map().is_multiline(qpath.span().between(last_subpat_span)) { + err.span_label(qpath.span(), ""); + } + if self.tcx.sess.source_map().is_multiline(def_ident_span.between(last_field_def_span)) { + err.span_label(def_ident_span, format!("{} defined here", res.descr())); + } + for span in &field_def_spans[..field_def_spans.len() - 1] { + err.span_label(*span, ""); + } + err.span_label( + last_field_def_span, + &format!("{} has {} field{}", res.descr(), fields.len(), fields_ending), + ); + + // Identify the case `Some(x, y)` where the expected type is e.g. `Option<(T, U)>`. + // More generally, the expected type wants a tuple variant with one field of an + // N-arity-tuple, e.g., `V_i((p_0, .., p_N))`. Meanwhile, the user supplied a pattern + // with the subpatterns directly in the tuple variant pattern, e.g., `V_i(p_0, .., p_N)`. + let missing_parentheses = match (&expected.kind(), fields, had_err) { + // #67037: only do this if we could successfully type-check the expected type against + // the tuple struct pattern. Otherwise the substs could get out of range on e.g., + // `let P() = U;` where `P != U` with `struct P(T);`. + (ty::Adt(_, substs), [field], false) => { + let field_ty = self.field_ty(pat_span, field, substs); + match field_ty.kind() { + ty::Tuple(fields) => fields.len() == subpats.len(), + _ => false, + } + } + _ => false, + }; + if missing_parentheses { + let (left, right) = match subpats { + // This is the zero case; we aim to get the "hi" part of the `QPath`'s + // span as the "lo" and then the "hi" part of the pattern's span as the "hi". + // This looks like: + // + // help: missing parentheses + // | + // L | let A(()) = A(()); + // | ^ ^ + [] => (qpath.span().shrink_to_hi(), pat_span), + // Easy case. Just take the "lo" of the first sub-pattern and the "hi" of the + // last sub-pattern. In the case of `A(x)` the first and last may coincide. + // This looks like: + // + // help: missing parentheses + // | + // L | let A((x, y)) = A((1, 2)); + // | ^ ^ + [first, ..] 
=> (first.span.shrink_to_lo(), subpats.last().unwrap().span), + }; + err.multipart_suggestion( + "missing parentheses", + vec![(left, "(".to_string()), (right.shrink_to_hi(), ")".to_string())], + Applicability::MachineApplicable, + ); + } else if fields.len() > subpats.len() && pat_span != DUMMY_SP { + let after_fields_span = pat_span.with_hi(pat_span.hi() - BytePos(1)).shrink_to_hi(); + let all_fields_span = match subpats { + [] => after_fields_span, + [field] => field.span, + [first, .., last] => first.span.to(last.span), + }; + + // Check if all the fields in the pattern are wildcards. + let all_wildcards = subpats.iter().all(|pat| matches!(pat.kind, PatKind::Wild)); + let first_tail_wildcard = + subpats.iter().enumerate().fold(None, |acc, (pos, pat)| match (acc, &pat.kind) { + (None, PatKind::Wild) => Some(pos), + (Some(_), PatKind::Wild) => acc, + _ => None, + }); + let tail_span = match first_tail_wildcard { + None => after_fields_span, + Some(0) => subpats[0].span.to(after_fields_span), + Some(pos) => subpats[pos - 1].span.shrink_to_hi().to(after_fields_span), + }; + + // FIXME: heuristic-based suggestion to check current types for where to add `_`. + let mut wildcard_sugg = vec!["_"; fields.len() - subpats.len()].join(", "); + if !subpats.is_empty() { + wildcard_sugg = String::from(", ") + &wildcard_sugg; + } + + err.span_suggestion_verbose( + after_fields_span, + "use `_` to explicitly ignore each field", + wildcard_sugg, + Applicability::MaybeIncorrect, + ); + + // Only suggest `..` if more than one field is missing + // or the pattern consists of all wildcards. + if fields.len() - subpats.len() > 1 || all_wildcards { + if subpats.is_empty() || all_wildcards { + err.span_suggestion_verbose( + all_fields_span, + "use `..` to ignore all fields", + "..", + Applicability::MaybeIncorrect, + ); + } else { + err.span_suggestion_verbose( + tail_span, + "use `..` to ignore the rest of the fields", + ", ..", + Applicability::MaybeIncorrect, + ); + } + } + } + + err.emit(); + } + + fn check_pat_tuple( + &self, + span: Span, + elements: &'tcx [Pat<'tcx>], + ddpos: Option, + expected: Ty<'tcx>, + def_bm: BindingMode, + ti: TopInfo<'tcx>, + ) -> Ty<'tcx> { + let tcx = self.tcx; + let mut expected_len = elements.len(); + if ddpos.is_some() { + // Require known type only when `..` is present. + if let ty::Tuple(tys) = self.structurally_resolved_type(span, expected).kind() { + expected_len = tys.len(); + } + } + let max_len = cmp::max(expected_len, elements.len()); + + let element_tys_iter = (0..max_len).map(|_| { + self.next_ty_var( + // FIXME: `MiscVariable` for now -- obtaining the span and name information + // from all tuple elements isn't trivial. + TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span }, + ) + }); + let element_tys = tcx.mk_type_list(element_tys_iter); + let pat_ty = tcx.mk_ty(ty::Tuple(element_tys)); + if let Some(mut err) = self.demand_eqtype_pat_diag(span, expected, pat_ty, ti) { + err.emit(); + // Walk subpatterns with an expected type of `err` in this case to silence + // further errors being emitted when using the bindings. 
#50333 + let element_tys_iter = (0..max_len).map(|_| tcx.ty_error()); + for (_, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) { + self.check_pat(elem, tcx.ty_error(), def_bm, ti); + } + tcx.mk_tup(element_tys_iter) + } else { + for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) { + self.check_pat(elem, element_tys[i], def_bm, ti); + } + pat_ty + } + } + + fn check_struct_pat_fields( + &self, + adt_ty: Ty<'tcx>, + pat: &'tcx Pat<'tcx>, + variant: &'tcx ty::VariantDef, + fields: &'tcx [hir::PatField<'tcx>], + has_rest_pat: bool, + def_bm: BindingMode, + ti: TopInfo<'tcx>, + ) -> bool { + let tcx = self.tcx; + + let ty::Adt(adt, substs) = adt_ty.kind() else { + span_bug!(pat.span, "struct pattern is not an ADT"); + }; + + // Index the struct fields' types. + let field_map = variant + .fields + .iter() + .enumerate() + .map(|(i, field)| (field.ident(self.tcx).normalize_to_macros_2_0(), (i, field))) + .collect::>(); + + // Keep track of which fields have already appeared in the pattern. + let mut used_fields = FxHashMap::default(); + let mut no_field_errors = true; + + let mut inexistent_fields = vec![]; + // Typecheck each field. + for field in fields { + let span = field.span; + let ident = tcx.adjust_ident(field.ident, variant.def_id); + let field_ty = match used_fields.entry(ident) { + Occupied(occupied) => { + self.error_field_already_bound(span, field.ident, *occupied.get()); + no_field_errors = false; + tcx.ty_error() + } + Vacant(vacant) => { + vacant.insert(span); + field_map + .get(&ident) + .map(|(i, f)| { + self.write_field_index(field.hir_id, *i); + self.tcx.check_stability(f.did, Some(pat.hir_id), span, None); + self.field_ty(span, f, substs) + }) + .unwrap_or_else(|| { + inexistent_fields.push(field); + no_field_errors = false; + tcx.ty_error() + }) + } + }; + + self.check_pat(field.pat, field_ty, def_bm, ti); + } + + let mut unmentioned_fields = variant + .fields + .iter() + .map(|field| (field, field.ident(self.tcx).normalize_to_macros_2_0())) + .filter(|(_, ident)| !used_fields.contains_key(ident)) + .collect::>(); + + let inexistent_fields_err = if !(inexistent_fields.is_empty() || variant.is_recovered()) + && !inexistent_fields.iter().any(|field| field.ident.name == kw::Underscore) + { + Some(self.error_inexistent_fields( + adt.variant_descr(), + &inexistent_fields, + &mut unmentioned_fields, + variant, + substs, + )) + } else { + None + }; + + // Require `..` if struct has non_exhaustive attribute. + let non_exhaustive = variant.is_field_list_non_exhaustive() && !adt.did().is_local(); + if non_exhaustive && !has_rest_pat { + self.error_foreign_non_exhaustive_spat(pat, adt.variant_descr(), fields.is_empty()); + } + + let mut unmentioned_err = None; + // Report an error if an incorrect number of fields was specified. + if adt.is_union() { + if fields.len() != 1 { + tcx.sess + .struct_span_err(pat.span, "union patterns should have exactly one field") + .emit(); + } + if has_rest_pat { + tcx.sess.struct_span_err(pat.span, "`..` cannot be used in union patterns").emit(); + } + } else if !unmentioned_fields.is_empty() { + let accessible_unmentioned_fields: Vec<_> = unmentioned_fields + .iter() + .copied() + .filter(|(field, _)| { + field.vis.is_accessible_from(tcx.parent_module(pat.hir_id).to_def_id(), tcx) + && !matches!( + tcx.eval_stability(field.did, None, DUMMY_SP, None), + EvalResult::Deny { .. 
} + ) + // We only want to report the error if it is hidden and not local + && !(tcx.is_doc_hidden(field.did) && !field.did.is_local()) + }) + .collect(); + + if !has_rest_pat { + if accessible_unmentioned_fields.is_empty() { + unmentioned_err = Some(self.error_no_accessible_fields(pat, fields)); + } else { + unmentioned_err = Some(self.error_unmentioned_fields( + pat, + &accessible_unmentioned_fields, + accessible_unmentioned_fields.len() != unmentioned_fields.len(), + fields, + )); + } + } else if non_exhaustive && !accessible_unmentioned_fields.is_empty() { + self.lint_non_exhaustive_omitted_patterns( + pat, + &accessible_unmentioned_fields, + adt_ty, + ) + } + } + match (inexistent_fields_err, unmentioned_err) { + (Some(mut i), Some(mut u)) => { + if let Some(mut e) = self.error_tuple_variant_as_struct_pat(pat, fields, variant) { + // We don't want to show the nonexistent fields error when this was + // `Foo { a, b }` when it should have been `Foo(a, b)`. + i.delay_as_bug(); + u.delay_as_bug(); + e.emit(); + } else { + i.emit(); + u.emit(); + } + } + (None, Some(mut u)) => { + if let Some(mut e) = self.error_tuple_variant_as_struct_pat(pat, fields, variant) { + u.delay_as_bug(); + e.emit(); + } else { + u.emit(); + } + } + (Some(mut err), None) => { + err.emit(); + } + (None, None) if let Some(mut err) = + self.error_tuple_variant_index_shorthand(variant, pat, fields) => + { + err.emit(); + } + (None, None) => {} + } + no_field_errors + } + + fn error_tuple_variant_index_shorthand( + &self, + variant: &VariantDef, + pat: &'_ Pat<'_>, + fields: &[hir::PatField<'_>], + ) -> Option> { + // if this is a tuple struct, then all field names will be numbers + // so if any fields in a struct pattern use shorthand syntax, they will + // be invalid identifiers (for example, Foo { 0, 1 }). + if let (CtorKind::Fn, PatKind::Struct(qpath, field_patterns, ..)) = + (variant.ctor_kind, &pat.kind) + { + let has_shorthand_field_name = field_patterns.iter().any(|field| field.is_shorthand); + if has_shorthand_field_name { + let path = rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| { + s.print_qpath(qpath, false) + }); + let mut err = struct_span_err!( + self.tcx.sess, + pat.span, + E0769, + "tuple variant `{path}` written as struct variant", + ); + err.span_suggestion_verbose( + qpath.span().shrink_to_hi().to(pat.span.shrink_to_hi()), + "use the tuple variant pattern syntax instead", + format!("({})", self.get_suggested_tuple_struct_pattern(fields, variant)), + Applicability::MaybeIncorrect, + ); + return Some(err); + } + } + None + } + + fn error_foreign_non_exhaustive_spat(&self, pat: &Pat<'_>, descr: &str, no_fields: bool) { + let sess = self.tcx.sess; + let sm = sess.source_map(); + let sp_brace = sm.end_point(pat.span); + let sp_comma = sm.end_point(pat.span.with_hi(sp_brace.hi())); + let sugg = if no_fields || sp_brace != sp_comma { ".. }" } else { ", .. 
}" }; + + let mut err = struct_span_err!( + sess, + pat.span, + E0638, + "`..` required with {descr} marked as non-exhaustive", + ); + err.span_suggestion_verbose( + sp_comma, + "add `..` at the end of the field list to ignore all other fields", + sugg, + Applicability::MachineApplicable, + ); + err.emit(); + } + + fn error_field_already_bound(&self, span: Span, ident: Ident, other_field: Span) { + struct_span_err!( + self.tcx.sess, + span, + E0025, + "field `{}` bound multiple times in the pattern", + ident + ) + .span_label(span, format!("multiple uses of `{ident}` in pattern")) + .span_label(other_field, format!("first use of `{ident}`")) + .emit(); + } + + fn error_inexistent_fields( + &self, + kind_name: &str, + inexistent_fields: &[&hir::PatField<'tcx>], + unmentioned_fields: &mut Vec<(&'tcx ty::FieldDef, Ident)>, + variant: &ty::VariantDef, + substs: &'tcx ty::List>, + ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + let tcx = self.tcx; + let (field_names, t, plural) = if inexistent_fields.len() == 1 { + (format!("a field named `{}`", inexistent_fields[0].ident), "this", "") + } else { + ( + format!( + "fields named {}", + inexistent_fields + .iter() + .map(|field| format!("`{}`", field.ident)) + .collect::>() + .join(", ") + ), + "these", + "s", + ) + }; + let spans = inexistent_fields.iter().map(|field| field.ident.span).collect::>(); + let mut err = struct_span_err!( + tcx.sess, + spans, + E0026, + "{} `{}` does not have {}", + kind_name, + tcx.def_path_str(variant.def_id), + field_names + ); + if let Some(pat_field) = inexistent_fields.last() { + err.span_label( + pat_field.ident.span, + format!( + "{} `{}` does not have {} field{}", + kind_name, + tcx.def_path_str(variant.def_id), + t, + plural + ), + ); + + if unmentioned_fields.len() == 1 { + let input = + unmentioned_fields.iter().map(|(_, field)| field.name).collect::>(); + let suggested_name = find_best_match_for_name(&input, pat_field.ident.name, None); + if let Some(suggested_name) = suggested_name { + err.span_suggestion( + pat_field.ident.span, + "a field with a similar name exists", + suggested_name, + Applicability::MaybeIncorrect, + ); + + // When we have a tuple struct used with struct we don't want to suggest using + // the (valid) struct syntax with numeric field names. Instead we want to + // suggest the expected syntax. We infer that this is the case by parsing the + // `Ident` into an unsized integer. The suggestion will be emitted elsewhere in + // `smart_resolve_context_dependent_help`. + if suggested_name.to_ident_string().parse::().is_err() { + // We don't want to throw `E0027` in case we have thrown `E0026` for them. + unmentioned_fields.retain(|&(_, x)| x.name != suggested_name); + } + } else if inexistent_fields.len() == 1 { + match pat_field.pat.kind { + PatKind::Lit(expr) + if !self.can_coerce( + self.typeck_results.borrow().expr_ty(expr), + self.field_ty( + unmentioned_fields[0].1.span, + unmentioned_fields[0].0, + substs, + ), + ) => {} + _ => { + let unmentioned_field = unmentioned_fields[0].1.name; + err.span_suggestion_short( + pat_field.ident.span, + &format!( + "`{}` has a field named `{}`", + tcx.def_path_str(variant.def_id), + unmentioned_field + ), + unmentioned_field.to_string(), + Applicability::MaybeIncorrect, + ); + } + } + } + } + } + if tcx.sess.teach(&err.get_code().unwrap()) { + err.note( + "This error indicates that a struct pattern attempted to \ + extract a non-existent field from a struct. 
Struct fields \ + are identified by the name used before the colon : so struct \ + patterns should resemble the declaration of the struct type \ + being matched.\n\n\ + If you are using shorthand field patterns but want to refer \ + to the struct field by a different name, you should rename \ + it explicitly.", + ); + } + err + } + + fn error_tuple_variant_as_struct_pat( + &self, + pat: &Pat<'_>, + fields: &'tcx [hir::PatField<'tcx>], + variant: &ty::VariantDef, + ) -> Option> { + if let (CtorKind::Fn, PatKind::Struct(qpath, ..)) = (variant.ctor_kind, &pat.kind) { + let path = rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| { + s.print_qpath(qpath, false) + }); + let mut err = struct_span_err!( + self.tcx.sess, + pat.span, + E0769, + "tuple variant `{}` written as struct variant", + path + ); + let (sugg, appl) = if fields.len() == variant.fields.len() { + ( + self.get_suggested_tuple_struct_pattern(fields, variant), + Applicability::MachineApplicable, + ) + } else { + ( + variant.fields.iter().map(|_| "_").collect::>().join(", "), + Applicability::MaybeIncorrect, + ) + }; + err.span_suggestion_verbose( + qpath.span().shrink_to_hi().to(pat.span.shrink_to_hi()), + "use the tuple variant pattern syntax instead", + format!("({})", sugg), + appl, + ); + return Some(err); + } + None + } + + fn get_suggested_tuple_struct_pattern( + &self, + fields: &[hir::PatField<'_>], + variant: &VariantDef, + ) -> String { + let variant_field_idents = + variant.fields.iter().map(|f| f.ident(self.tcx)).collect::>(); + fields + .iter() + .map(|field| { + match self.tcx.sess.source_map().span_to_snippet(field.pat.span) { + Ok(f) => { + // Field names are numbers, but numbers + // are not valid identifiers + if variant_field_idents.contains(&field.ident) { + String::from("_") + } else { + f + } + } + Err(_) => rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| { + s.print_pat(field.pat) + }), + } + }) + .collect::>() + .join(", ") + } + + /// Returns a diagnostic reporting a struct pattern which is missing an `..` due to + /// inaccessible fields. + /// + /// ```text + /// error: pattern requires `..` due to inaccessible fields + /// --> src/main.rs:10:9 + /// | + /// LL | let foo::Foo {} = foo::Foo::default(); + /// | ^^^^^^^^^^^ + /// | + /// help: add a `..` + /// | + /// LL | let foo::Foo { .. } = foo::Foo::default(); + /// | ^^^^^^ + /// ``` + fn error_no_accessible_fields( + &self, + pat: &Pat<'_>, + fields: &'tcx [hir::PatField<'tcx>], + ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + let mut err = self + .tcx + .sess + .struct_span_err(pat.span, "pattern requires `..` due to inaccessible fields"); + + if let Some(field) = fields.last() { + err.span_suggestion_verbose( + field.span.shrink_to_hi(), + "ignore the inaccessible and unused fields", + ", ..", + Applicability::MachineApplicable, + ); + } else { + let qpath_span = if let PatKind::Struct(qpath, ..) = &pat.kind { + qpath.span() + } else { + bug!("`error_no_accessible_fields` called on non-struct pattern"); + }; + + // Shrink the span to exclude the `foo:Foo` in `foo::Foo { }`. + let span = pat.span.with_lo(qpath_span.shrink_to_hi().hi()); + err.span_suggestion_verbose( + span, + "ignore the inaccessible and unused fields", + " { .. }", + Applicability::MachineApplicable, + ); + } + err + } + + /// Report that a pattern for a `#[non_exhaustive]` struct marked with `non_exhaustive_omitted_patterns` + /// is not exhaustive enough. 
+ /// + /// Nb: the partner lint for enums lives in `compiler/rustc_mir_build/src/thir/pattern/usefulness.rs`. + fn lint_non_exhaustive_omitted_patterns( + &self, + pat: &Pat<'_>, + unmentioned_fields: &[(&ty::FieldDef, Ident)], + ty: Ty<'tcx>, + ) { + fn joined_uncovered_patterns(witnesses: &[&Ident]) -> String { + const LIMIT: usize = 3; + match witnesses { + [] => bug!(), + [witness] => format!("`{}`", witness), + [head @ .., tail] if head.len() < LIMIT => { + let head: Vec<_> = head.iter().map(<_>::to_string).collect(); + format!("`{}` and `{}`", head.join("`, `"), tail) + } + _ => { + let (head, tail) = witnesses.split_at(LIMIT); + let head: Vec<_> = head.iter().map(<_>::to_string).collect(); + format!("`{}` and {} more", head.join("`, `"), tail.len()) + } + } + } + let joined_patterns = joined_uncovered_patterns( + &unmentioned_fields.iter().map(|(_, i)| i).collect::>(), + ); + + self.tcx.struct_span_lint_hir(NON_EXHAUSTIVE_OMITTED_PATTERNS, pat.hir_id, pat.span, |build| { + let mut lint = build.build("some fields are not explicitly listed"); + lint.span_label(pat.span, format!("field{} {} not listed", rustc_errors::pluralize!(unmentioned_fields.len()), joined_patterns)); + + lint.help( + "ensure that all fields are mentioned explicitly by adding the suggested fields", + ); + lint.note(&format!( + "the pattern is of type `{}` and the `non_exhaustive_omitted_patterns` attribute was found", + ty, + )); + lint.emit(); + }); + } + + /// Returns a diagnostic reporting a struct pattern which does not mention some fields. + /// + /// ```text + /// error[E0027]: pattern does not mention field `bar` + /// --> src/main.rs:15:9 + /// | + /// LL | let foo::Foo {} = foo::Foo::new(); + /// | ^^^^^^^^^^^ missing field `bar` + /// ``` + fn error_unmentioned_fields( + &self, + pat: &Pat<'_>, + unmentioned_fields: &[(&ty::FieldDef, Ident)], + have_inaccessible_fields: bool, + fields: &'tcx [hir::PatField<'tcx>], + ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + let inaccessible = if have_inaccessible_fields { " and inaccessible fields" } else { "" }; + let field_names = if unmentioned_fields.len() == 1 { + format!("field `{}`{}", unmentioned_fields[0].1, inaccessible) + } else { + let fields = unmentioned_fields + .iter() + .map(|(_, name)| format!("`{}`", name)) + .collect::>() + .join(", "); + format!("fields {}{}", fields, inaccessible) + }; + let mut err = struct_span_err!( + self.tcx.sess, + pat.span, + E0027, + "pattern does not mention {}", + field_names + ); + err.span_label(pat.span, format!("missing {}", field_names)); + let len = unmentioned_fields.len(); + let (prefix, postfix, sp) = match fields { + [] => match &pat.kind { + PatKind::Struct(path, [], false) => { + (" { ", " }", path.span().shrink_to_hi().until(pat.span.shrink_to_hi())) + } + _ => return err, + }, + [.., field] => { + // Account for last field having a trailing comma or parse recovery at the tail of + // the pattern to avoid invalid suggestion (#78511). + let tail = field.span.shrink_to_hi().with_hi(pat.span.hi()); + match &pat.kind { + PatKind::Struct(..) => (", ", " }", tail), + _ => return err, + } + } + }; + err.span_suggestion( + sp, + &format!( + "include the missing field{} in the pattern{}", + pluralize!(len), + if have_inaccessible_fields { " and ignore the inaccessible fields" } else { "" } + ), + format!( + "{}{}{}{}", + prefix, + unmentioned_fields + .iter() + .map(|(_, name)| name.to_string()) + .collect::>() + .join(", "), + if have_inaccessible_fields { ", .." 
} else { "" }, + postfix, + ), + Applicability::MachineApplicable, + ); + err.span_suggestion( + sp, + &format!( + "if you don't care about {these} missing field{s}, you can explicitly ignore {them}", + these = pluralize!("this", len), + s = pluralize!(len), + them = if len == 1 { "it" } else { "them" }, + ), + format!("{}..{}", prefix, postfix), + Applicability::MachineApplicable, + ); + err + } + + fn check_pat_box( + &self, + span: Span, + inner: &'tcx Pat<'tcx>, + expected: Ty<'tcx>, + def_bm: BindingMode, + ti: TopInfo<'tcx>, + ) -> Ty<'tcx> { + let tcx = self.tcx; + let (box_ty, inner_ty) = if self.check_dereferenceable(span, expected, inner) { + // Here, `demand::subtype` is good enough, but I don't + // think any errors can be introduced by using `demand::eqtype`. + let inner_ty = self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::TypeInference, + span: inner.span, + }); + let box_ty = tcx.mk_box(inner_ty); + self.demand_eqtype_pat(span, expected, box_ty, ti); + (box_ty, inner_ty) + } else { + let err = tcx.ty_error(); + (err, err) + }; + self.check_pat(inner, inner_ty, def_bm, ti); + box_ty + } + + // Precondition: Pat is Ref(inner) + fn check_pat_ref( + &self, + pat: &'tcx Pat<'tcx>, + inner: &'tcx Pat<'tcx>, + mutbl: hir::Mutability, + expected: Ty<'tcx>, + def_bm: BindingMode, + ti: TopInfo<'tcx>, + ) -> Ty<'tcx> { + let tcx = self.tcx; + let expected = self.shallow_resolve(expected); + let (rptr_ty, inner_ty) = if self.check_dereferenceable(pat.span, expected, inner) { + // `demand::subtype` would be good enough, but using `eqtype` turns + // out to be equally general. See (note_1) for details. + + // Take region, inner-type from expected type if we can, + // to avoid creating needless variables. This also helps with + // the bad interactions of the given hack detailed in (note_1). + debug!("check_pat_ref: expected={:?}", expected); + match *expected.kind() { + ty::Ref(_, r_ty, r_mutbl) if r_mutbl == mutbl => (expected, r_ty), + _ => { + let inner_ty = self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::TypeInference, + span: inner.span, + }); + let rptr_ty = self.new_ref_ty(pat.span, mutbl, inner_ty); + debug!("check_pat_ref: demanding {:?} = {:?}", expected, rptr_ty); + let err = self.demand_eqtype_pat_diag(pat.span, expected, rptr_ty, ti); + + // Look for a case like `fn foo(&foo: u32)` and suggest + // `fn foo(foo: &u32)` + if let Some(mut err) = err { + self.borrow_pat_suggestion(&mut err, pat); + err.emit(); + } + (rptr_ty, inner_ty) + } + } + } else { + let err = tcx.ty_error(); + (err, err) + }; + self.check_pat(inner, inner_ty, def_bm, ti); + rptr_ty + } + + /// Create a reference type with a fresh region variable. + fn new_ref_ty(&self, span: Span, mutbl: hir::Mutability, ty: Ty<'tcx>) -> Ty<'tcx> { + let region = self.next_region_var(infer::PatternRegion(span)); + let mt = ty::TypeAndMut { ty, mutbl }; + self.tcx.mk_ref(region, mt) + } + + /// Type check a slice pattern. + /// + /// Syntactically, these look like `[pat_0, ..., pat_n]`. + /// Semantically, we are type checking a pattern with structure: + /// ```ignore (not-rust) + /// [before_0, ..., before_n, (slice, after_0, ... after_n)?] + /// ``` + /// The type of `slice`, if it is present, depends on the `expected` type. + /// If `slice` is missing, then so is `after_i`. + /// If `slice` is present, it can still represent 0 elements. 
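+    ///
+    /// For instance (an illustrative sketch of the split described above, not
+    /// upstream text):
+    /// ```ignore (illustrative)
+    /// let [a, b, rest @ .., z] = [0u8; 5];
+    /// // before = [a, b], slice = Some(rest @ ..), after = [z];
+    /// // `rest` is checked against `[u8; 2]` here, and against `[u8]` when the
+    /// // scrutinee is a slice rather than an array.
+    /// ```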
+ fn check_pat_slice( + &self, + span: Span, + before: &'tcx [Pat<'tcx>], + slice: Option<&'tcx Pat<'tcx>>, + after: &'tcx [Pat<'tcx>], + expected: Ty<'tcx>, + def_bm: BindingMode, + ti: TopInfo<'tcx>, + ) -> Ty<'tcx> { + let expected = self.structurally_resolved_type(span, expected); + let (element_ty, opt_slice_ty, inferred) = match *expected.kind() { + // An array, so we might have something like `let [a, b, c] = [0, 1, 2];`. + ty::Array(element_ty, len) => { + let min = before.len() as u64 + after.len() as u64; + let (opt_slice_ty, expected) = + self.check_array_pat_len(span, element_ty, expected, slice, len, min); + // `opt_slice_ty.is_none()` => `slice.is_none()`. + // Note, though, that opt_slice_ty could be `Some(error_ty)`. + assert!(opt_slice_ty.is_some() || slice.is_none()); + (element_ty, opt_slice_ty, expected) + } + ty::Slice(element_ty) => (element_ty, Some(expected), expected), + // The expected type must be an array or slice, but was neither, so error. + _ => { + if !expected.references_error() { + self.error_expected_array_or_slice(span, expected, ti); + } + let err = self.tcx.ty_error(); + (err, Some(err), err) + } + }; + + // Type check all the patterns before `slice`. + for elt in before { + self.check_pat(elt, element_ty, def_bm, ti); + } + // Type check the `slice`, if present, against its expected type. + if let Some(slice) = slice { + self.check_pat(slice, opt_slice_ty.unwrap(), def_bm, ti); + } + // Type check the elements after `slice`, if present. + for elt in after { + self.check_pat(elt, element_ty, def_bm, ti); + } + inferred + } + + /// Type check the length of an array pattern. + /// + /// Returns both the type of the variable length pattern (or `None`), and the potentially + /// inferred array type. We only return `None` for the slice type if `slice.is_none()`. + fn check_array_pat_len( + &self, + span: Span, + element_ty: Ty<'tcx>, + arr_ty: Ty<'tcx>, + slice: Option<&'tcx Pat<'tcx>>, + len: ty::Const<'tcx>, + min_len: u64, + ) -> (Option>, Ty<'tcx>) { + if let Some(len) = len.try_eval_usize(self.tcx, self.param_env) { + // Now we know the length... + if slice.is_none() { + // ...and since there is no variable-length pattern, + // we require an exact match between the number of elements + // in the array pattern and as provided by the matched type. + if min_len == len { + return (None, arr_ty); + } + + self.error_scrutinee_inconsistent_length(span, min_len, len); + } else if let Some(pat_len) = len.checked_sub(min_len) { + // The variable-length pattern was there, + // so it has an array type with the remaining elements left as its size... + return (Some(self.tcx.mk_array(element_ty, pat_len)), arr_ty); + } else { + // ...however, in this case, there were no remaining elements. + // That is, the slice pattern requires more than the array type offers. + self.error_scrutinee_with_rest_inconsistent_length(span, min_len, len); + } + } else if slice.is_none() { + // We have a pattern with a fixed length, + // which we can use to infer the length of the array. + let updated_arr_ty = self.tcx.mk_array(element_ty, min_len); + self.demand_eqtype(span, updated_arr_ty, arr_ty); + return (None, updated_arr_ty); + } else { + // We have a variable-length pattern and don't know the array length. + // This happens if we have e.g., + // `let [a, b, ..] = arr` where `arr: [T; N]` where `const N: usize`. + self.error_scrutinee_unfixed_length(span); + } + + // If we get here, we must have emitted an error. 
+ (Some(self.tcx.ty_error()), arr_ty) + } + + fn error_scrutinee_inconsistent_length(&self, span: Span, min_len: u64, size: u64) { + struct_span_err!( + self.tcx.sess, + span, + E0527, + "pattern requires {} element{} but array has {}", + min_len, + pluralize!(min_len), + size, + ) + .span_label(span, format!("expected {} element{}", size, pluralize!(size))) + .emit(); + } + + fn error_scrutinee_with_rest_inconsistent_length(&self, span: Span, min_len: u64, size: u64) { + struct_span_err!( + self.tcx.sess, + span, + E0528, + "pattern requires at least {} element{} but array has {}", + min_len, + pluralize!(min_len), + size, + ) + .span_label( + span, + format!("pattern cannot match array of {} element{}", size, pluralize!(size),), + ) + .emit(); + } + + fn error_scrutinee_unfixed_length(&self, span: Span) { + struct_span_err!( + self.tcx.sess, + span, + E0730, + "cannot pattern-match on an array without a fixed length", + ) + .emit(); + } + + fn error_expected_array_or_slice(&self, span: Span, expected_ty: Ty<'tcx>, ti: TopInfo<'tcx>) { + let mut err = struct_span_err!( + self.tcx.sess, + span, + E0529, + "expected an array or slice, found `{expected_ty}`" + ); + if let ty::Ref(_, ty, _) = expected_ty.kind() + && let ty::Array(..) | ty::Slice(..) = ty.kind() + { + err.help("the semantics of slice patterns changed recently; see issue #62254"); + } else if Autoderef::new(&self.infcx, self.param_env, self.body_id, span, expected_ty, span) + .any(|(ty, _)| matches!(ty.kind(), ty::Slice(..) | ty::Array(..))) + && let (Some(span), true) = (ti.span, ti.origin_expr) + && let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) + { + let ty = self.resolve_vars_if_possible(ti.expected); + let is_slice_or_array_or_vector = self.is_slice_or_array_or_vector(&mut err, snippet.clone(), ty); + match is_slice_or_array_or_vector.1.kind() { + ty::Adt(adt_def, _) + if self.tcx.is_diagnostic_item(sym::Option, adt_def.did()) + || self.tcx.is_diagnostic_item(sym::Result, adt_def.did()) => + { + // Slicing won't work here, but `.as_deref()` might (issue #91328). + err.span_suggestion( + span, + "consider using `as_deref` here", + format!("{snippet}.as_deref()"), + Applicability::MaybeIncorrect, + ); + } + _ => () + } + if is_slice_or_array_or_vector.0 { + err.span_suggestion( + span, + "consider slicing here", + format!("{snippet}[..]"), + Applicability::MachineApplicable, + ); + } + } + err.span_label(span, format!("pattern cannot match with input type `{expected_ty}`")); + err.emit(); + } + + fn is_slice_or_array_or_vector( + &self, + err: &mut Diagnostic, + snippet: String, + ty: Ty<'tcx>, + ) -> (bool, Ty<'tcx>) { + match ty.kind() { + ty::Adt(adt_def, _) if self.tcx.is_diagnostic_item(sym::Vec, adt_def.did()) => { + (true, ty) + } + ty::Ref(_, ty, _) => self.is_slice_or_array_or_vector(err, snippet, *ty), + ty::Slice(..) | ty::Array(..) 
=> (true, ty),
+            _ => (false, ty),
+        }
+    }
+}
diff --git a/compiler/rustc_typeck/src/check/place_op.rs b/compiler/rustc_typeck/src/check/place_op.rs
new file mode 100644
index 000000000..2e0f37eba
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/place_op.rs
@@ -0,0 +1,451 @@
+use crate::check::method::MethodCallee;
+use crate::check::{has_expected_num_generic_args, FnCtxt, PlaceOp};
+use rustc_ast as ast;
+use rustc_errors::Applicability;
+use rustc_hir as hir;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::InferOk;
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, OverloadedDeref, PointerCast};
+use rustc_middle::ty::adjustment::{AllowTwoPhase, AutoBorrow, AutoBorrowMutability};
+use rustc_middle::ty::{self, Ty};
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+use rustc_trait_selection::autoderef::Autoderef;
+use std::slice;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+    /// Type-check `*oprnd_expr` with `oprnd_expr` type-checked already.
+    pub(super) fn lookup_derefing(
+        &self,
+        expr: &hir::Expr<'_>,
+        oprnd_expr: &'tcx hir::Expr<'tcx>,
+        oprnd_ty: Ty<'tcx>,
+    ) -> Option<Ty<'tcx>> {
+        if let Some(mt) = oprnd_ty.builtin_deref(true) {
+            return Some(mt.ty);
+        }
+
+        let ok = self.try_overloaded_deref(expr.span, oprnd_ty)?;
+        let method = self.register_infer_ok_obligations(ok);
+        if let ty::Ref(region, _, hir::Mutability::Not) = method.sig.inputs()[0].kind() {
+            self.apply_adjustments(
+                oprnd_expr,
+                vec![Adjustment {
+                    kind: Adjust::Borrow(AutoBorrow::Ref(*region, AutoBorrowMutability::Not)),
+                    target: method.sig.inputs()[0],
+                }],
+            );
+        } else {
+            span_bug!(expr.span, "input to deref is not a ref?");
+        }
+        let ty = self.make_overloaded_place_return_type(method).ty;
+        self.write_method_call(expr.hir_id, method);
+        Some(ty)
+    }
+
+    /// Type-check `*base_expr[index_expr]` with `base_expr` and `index_expr` type-checked already.
+    pub(super) fn lookup_indexing(
+        &self,
+        expr: &hir::Expr<'_>,
+        base_expr: &'tcx hir::Expr<'tcx>,
+        base_ty: Ty<'tcx>,
+        index_expr: &'tcx hir::Expr<'tcx>,
+        idx_ty: Ty<'tcx>,
+    ) -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)> {
+        // FIXME(#18741) -- this is almost but not quite the same as the
+        // autoderef that normal method probing does. They could likely be
+        // consolidated.
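+        //
+        // Illustrative sketch (not an upstream comment): for `boxed[0]` with
+        // `boxed: Box<[u8]>`, the loop below finds no indexing on `Box<[u8]>`
+        // itself, autoderefs once to `[u8]`, and succeeds on that step via the
+        // `Index` impl for slices.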
+ + let mut autoderef = self.autoderef(base_expr.span, base_ty); + let mut result = None; + while result.is_none() && autoderef.next().is_some() { + result = self.try_index_step(expr, base_expr, &autoderef, idx_ty, index_expr); + } + self.register_predicates(autoderef.into_obligations()); + result + } + + fn negative_index( + &self, + ty: Ty<'tcx>, + span: Span, + base_expr: &hir::Expr<'_>, + ) -> Option<(Ty<'tcx>, Ty<'tcx>)> { + let ty = self.resolve_vars_if_possible(ty); + let mut err = self.tcx.sess.struct_span_err( + span, + &format!("negative integers cannot be used to index on a `{ty}`"), + ); + err.span_label(span, &format!("cannot use a negative integer for indexing on `{ty}`")); + if let (hir::ExprKind::Path(..), Ok(snippet)) = + (&base_expr.kind, self.tcx.sess.source_map().span_to_snippet(base_expr.span)) + { + // `foo[-1]` to `foo[foo.len() - 1]` + err.span_suggestion_verbose( + span.shrink_to_lo(), + &format!( + "to access an element starting from the end of the `{ty}`, compute the index", + ), + format!("{snippet}.len() "), + Applicability::MachineApplicable, + ); + } + err.emit(); + Some((self.tcx.ty_error(), self.tcx.ty_error())) + } + + /// To type-check `base_expr[index_expr]`, we progressively autoderef + /// (and otherwise adjust) `base_expr`, looking for a type which either + /// supports builtin indexing or overloaded indexing. + /// This loop implements one step in that search; the autoderef loop + /// is implemented by `lookup_indexing`. + fn try_index_step( + &self, + expr: &hir::Expr<'_>, + base_expr: &hir::Expr<'_>, + autoderef: &Autoderef<'a, 'tcx>, + index_ty: Ty<'tcx>, + index_expr: &hir::Expr<'_>, + ) -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)> { + let adjusted_ty = + self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false)); + debug!( + "try_index_step(expr={:?}, base_expr={:?}, adjusted_ty={:?}, \ + index_ty={:?})", + expr, base_expr, adjusted_ty, index_ty + ); + + if let hir::ExprKind::Unary( + hir::UnOp::Neg, + hir::Expr { + kind: hir::ExprKind::Lit(hir::Lit { node: ast::LitKind::Int(..), .. }), + .. + }, + ) = index_expr.kind + { + match adjusted_ty.kind() { + ty::Adt(def, _) if self.tcx.is_diagnostic_item(sym::Vec, def.did()) => { + return self.negative_index(adjusted_ty, index_expr.span, base_expr); + } + ty::Slice(_) | ty::Array(_, _) => { + return self.negative_index(adjusted_ty, index_expr.span, base_expr); + } + _ => {} + } + } + + for unsize in [false, true] { + let mut self_ty = adjusted_ty; + if unsize { + // We only unsize arrays here. + if let ty::Array(element_ty, _) = adjusted_ty.kind() { + self_ty = self.tcx.mk_slice(*element_ty); + } else { + continue; + } + } + + // If some lookup succeeds, write callee into table and extract index/element + // type from the method signature. 
+ // If some lookup succeeded, install method in table + let input_ty = self.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::AutoDeref, + span: base_expr.span, + }); + let method = + self.try_overloaded_place_op(expr.span, self_ty, &[input_ty], PlaceOp::Index); + + if let Some(result) = method { + debug!("try_index_step: success, using overloaded indexing"); + let method = self.register_infer_ok_obligations(result); + + let mut adjustments = self.adjust_steps(autoderef); + if let ty::Ref(region, _, hir::Mutability::Not) = method.sig.inputs()[0].kind() { + adjustments.push(Adjustment { + kind: Adjust::Borrow(AutoBorrow::Ref(*region, AutoBorrowMutability::Not)), + target: self.tcx.mk_ref( + *region, + ty::TypeAndMut { mutbl: hir::Mutability::Not, ty: adjusted_ty }, + ), + }); + } else { + span_bug!(expr.span, "input to index is not a ref?"); + } + if unsize { + adjustments.push(Adjustment { + kind: Adjust::Pointer(PointerCast::Unsize), + target: method.sig.inputs()[0], + }); + } + self.apply_adjustments(base_expr, adjustments); + + self.write_method_call(expr.hir_id, method); + + return Some((input_ty, self.make_overloaded_place_return_type(method).ty)); + } + } + + None + } + + /// Try to resolve an overloaded place op. We only deal with the immutable + /// variant here (Deref/Index). In some contexts we would need the mutable + /// variant (DerefMut/IndexMut); those would be later converted by + /// `convert_place_derefs_to_mutable`. + pub(super) fn try_overloaded_place_op( + &self, + span: Span, + base_ty: Ty<'tcx>, + arg_tys: &[Ty<'tcx>], + op: PlaceOp, + ) -> Option>> { + debug!("try_overloaded_place_op({:?},{:?},{:?})", span, base_ty, op); + + let (imm_tr, imm_op) = match op { + PlaceOp::Deref => (self.tcx.lang_items().deref_trait(), sym::deref), + PlaceOp::Index => (self.tcx.lang_items().index_trait(), sym::index), + }; + + // If the lang item was declared incorrectly, stop here so that we don't + // run into an ICE (#83893). The error is reported where the lang item is + // declared. + if !has_expected_num_generic_args( + self.tcx, + imm_tr, + match op { + PlaceOp::Deref => 0, + PlaceOp::Index => 1, + }, + ) { + return None; + } + + imm_tr.and_then(|trait_did| { + self.lookup_method_in_trait( + span, + Ident::with_dummy_span(imm_op), + trait_did, + base_ty, + Some(arg_tys), + ) + }) + } + + fn try_mutable_overloaded_place_op( + &self, + span: Span, + base_ty: Ty<'tcx>, + arg_tys: &[Ty<'tcx>], + op: PlaceOp, + ) -> Option>> { + debug!("try_mutable_overloaded_place_op({:?},{:?},{:?})", span, base_ty, op); + + let (mut_tr, mut_op) = match op { + PlaceOp::Deref => (self.tcx.lang_items().deref_mut_trait(), sym::deref_mut), + PlaceOp::Index => (self.tcx.lang_items().index_mut_trait(), sym::index_mut), + }; + + // If the lang item was declared incorrectly, stop here so that we don't + // run into an ICE (#83893). The error is reported where the lang item is + // declared. + if !has_expected_num_generic_args( + self.tcx, + mut_tr, + match op { + PlaceOp::Deref => 0, + PlaceOp::Index => 1, + }, + ) { + return None; + } + + mut_tr.and_then(|trait_did| { + self.lookup_method_in_trait( + span, + Ident::with_dummy_span(mut_op), + trait_did, + base_ty, + Some(arg_tys), + ) + }) + } + + /// Convert auto-derefs, indices, etc of an expression from `Deref` and `Index` + /// into `DerefMut` and `IndexMut` respectively. + /// + /// This is a second pass of typechecking derefs/indices. 
We need this because we do not + /// always know whether a place needs to be mutable or not in the first pass. + /// This happens whether there is an implicit mutable reborrow, e.g. when the type + /// is used as the receiver of a method call. + pub fn convert_place_derefs_to_mutable(&self, expr: &hir::Expr<'_>) { + // Gather up expressions we want to munge. + let mut exprs = vec![expr]; + + while let hir::ExprKind::Field(ref expr, _) + | hir::ExprKind::Index(ref expr, _) + | hir::ExprKind::Unary(hir::UnOp::Deref, ref expr) = exprs.last().unwrap().kind + { + exprs.push(expr); + } + + debug!("convert_place_derefs_to_mutable: exprs={:?}", exprs); + + // Fix up autoderefs and derefs. + let mut inside_union = false; + for (i, &expr) in exprs.iter().rev().enumerate() { + debug!("convert_place_derefs_to_mutable: i={} expr={:?}", i, expr); + + let mut source = self.node_ty(expr.hir_id); + if matches!(expr.kind, hir::ExprKind::Unary(hir::UnOp::Deref, _)) { + // Clear previous flag; after a pointer indirection it does not apply any more. + inside_union = false; + } + if source.is_union() { + inside_union = true; + } + // Fix up the autoderefs. Autorefs can only occur immediately preceding + // overloaded place ops, and will be fixed by them in order to get + // the correct region. + // Do not mutate adjustments in place, but rather take them, + // and replace them after mutating them, to avoid having the + // typeck results borrowed during (`deref_mut`) method resolution. + let previous_adjustments = + self.typeck_results.borrow_mut().adjustments_mut().remove(expr.hir_id); + if let Some(mut adjustments) = previous_adjustments { + for adjustment in &mut adjustments { + if let Adjust::Deref(Some(ref mut deref)) = adjustment.kind + && let Some(ok) = self.try_mutable_overloaded_place_op( + expr.span, + source, + &[], + PlaceOp::Deref, + ) + { + let method = self.register_infer_ok_obligations(ok); + if let ty::Ref(region, _, mutbl) = *method.sig.output().kind() { + *deref = OverloadedDeref { region, mutbl, span: deref.span }; + } + // If this is a union field, also throw an error for `DerefMut` of `ManuallyDrop` (see RFC 2514). + // This helps avoid accidental drops. + if inside_union + && source.ty_adt_def().map_or(false, |adt| adt.is_manually_drop()) + { + let mut err = self.tcx.sess.struct_span_err( + expr.span, + "not automatically applying `DerefMut` on `ManuallyDrop` union field", + ); + err.help( + "writing to this reference calls the destructor for the old value", + ); + err.help("add an explicit `*` if that is desired, or call `ptr::write` to not run the destructor"); + err.emit(); + } + } + source = adjustment.target; + } + self.typeck_results.borrow_mut().adjustments_mut().insert(expr.hir_id, adjustments); + } + + match expr.kind { + hir::ExprKind::Index(base_expr, ..) => { + self.convert_place_op_to_mutable(PlaceOp::Index, expr, base_expr); + } + hir::ExprKind::Unary(hir::UnOp::Deref, base_expr) => { + self.convert_place_op_to_mutable(PlaceOp::Deref, expr, base_expr); + } + _ => {} + } + } + } + + fn convert_place_op_to_mutable( + &self, + op: PlaceOp, + expr: &hir::Expr<'_>, + base_expr: &hir::Expr<'_>, + ) { + debug!("convert_place_op_to_mutable({:?}, {:?}, {:?})", op, expr, base_expr); + if !self.typeck_results.borrow().is_method_call(expr) { + debug!("convert_place_op_to_mutable - builtin, nothing to do"); + return; + } + + // Need to deref because overloaded place ops take self by-reference. 
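+        //
+        // (Illustrative, not upstream text: for `v[i] = x` with `v: Vec<u8>`,
+        // the first pass left `v` adjusted to `&Vec<u8>`, so we peel that
+        // reference off to get the `Vec<u8>` receiver for the `IndexMut`
+        // lookup below.)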
+ let base_ty = self + .typeck_results + .borrow() + .expr_ty_adjusted(base_expr) + .builtin_deref(false) + .expect("place op takes something that is not a ref") + .ty; + + let arg_ty = match op { + PlaceOp::Deref => None, + PlaceOp::Index => { + // We would need to recover the `T` used when we resolve `<_ as Index>::index` + // in try_index_step. This is the subst at index 1. + // + // Note: we should *not* use `expr_ty` of index_expr here because autoderef + // during coercions can cause type of index_expr to differ from `T` (#72002). + // We also could not use `expr_ty_adjusted` of index_expr because reborrowing + // during coercions can also cause type of index_expr to differ from `T`, + // which can potentially cause regionck failure (#74933). + Some(self.typeck_results.borrow().node_substs(expr.hir_id).type_at(1)) + } + }; + let arg_tys = match arg_ty { + None => &[], + Some(ref ty) => slice::from_ref(ty), + }; + + let method = self.try_mutable_overloaded_place_op(expr.span, base_ty, arg_tys, op); + let method = match method { + Some(ok) => self.register_infer_ok_obligations(ok), + // Couldn't find the mutable variant of the place op, keep the + // current, immutable version. + None => return, + }; + debug!("convert_place_op_to_mutable: method={:?}", method); + self.write_method_call(expr.hir_id, method); + + let ty::Ref(region, _, hir::Mutability::Mut) = method.sig.inputs()[0].kind() else { + span_bug!(expr.span, "input to mutable place op is not a mut ref?"); + }; + + // Convert the autoref in the base expr to mutable with the correct + // region and mutability. + let base_expr_ty = self.node_ty(base_expr.hir_id); + if let Some(adjustments) = + self.typeck_results.borrow_mut().adjustments_mut().get_mut(base_expr.hir_id) + { + let mut source = base_expr_ty; + for adjustment in &mut adjustments[..] { + if let Adjust::Borrow(AutoBorrow::Ref(..)) = adjustment.kind { + debug!("convert_place_op_to_mutable: converting autoref {:?}", adjustment); + let mutbl = AutoBorrowMutability::Mut { + // Deref/indexing can be desugared to a method call, + // so maybe we could use two-phase here. + // See the documentation of AllowTwoPhase for why that's + // not the case today. + allow_two_phase_borrow: AllowTwoPhase::No, + }; + adjustment.kind = Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)); + adjustment.target = self + .tcx + .mk_ref(*region, ty::TypeAndMut { ty: source, mutbl: mutbl.into() }); + } + source = adjustment.target; + } + + // If we have an autoref followed by unsizing at the end, fix the unsize target. + if let [ + .., + Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(..)), .. }, + Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), ref mut target }, + ] = adjustments[..] + { + *target = method.sig.inputs()[0]; + } + } + } +} diff --git a/compiler/rustc_typeck/src/check/region.rs b/compiler/rustc_typeck/src/check/region.rs new file mode 100644 index 000000000..0081e9049 --- /dev/null +++ b/compiler/rustc_typeck/src/check/region.rs @@ -0,0 +1,837 @@ +//! This file builds up the `ScopeTree`, which describes +//! the parent links in the region hierarchy. +//! +//! For more information about how MIR-based region-checking works, +//! see the [rustc dev guide]. +//! +//! 
[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/borrow_check.html
+
+use rustc_ast::walk_list;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{Arm, Block, Expr, Local, Pat, PatKind, Stmt};
+use rustc_index::vec::Idx;
+use rustc_middle::middle::region::*;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::source_map;
+use rustc_span::Span;
+
+use std::mem;
+
+#[derive(Debug, Copy, Clone)]
+pub struct Context {
+    /// The scope that contains any new variables declared, plus its depth in
+    /// the scope tree.
+    var_parent: Option<(Scope, ScopeDepth)>,
+
+    /// Region parent of expressions, etc., plus its depth in the scope tree.
+    parent: Option<(Scope, ScopeDepth)>,
+}
+
+struct RegionResolutionVisitor<'tcx> {
+    tcx: TyCtxt<'tcx>,
+
+    // The number of expressions and patterns visited in the current body.
+    expr_and_pat_count: usize,
+    // When this is `true`, we record the `Scopes` we encounter
+    // when processing a Yield expression. This allows us to fix
+    // up their indices.
+    pessimistic_yield: bool,
+    // Stores scopes when `pessimistic_yield` is `true`.
+    fixup_scopes: Vec<Scope>,
+    // The generated scope tree.
+    scope_tree: ScopeTree,
+
+    cx: Context,
+
+    /// `terminating_scopes` is a set containing the ids of each
+    /// statement, or conditional/repeating expression. These scopes
+    /// are called "terminating scopes" because, when attempting to
+    /// find the scope of a temporary, by default we search up the
+    /// enclosing scopes until we encounter the terminating scope. A
+    /// conditional/repeating expression is one which is not
+    /// guaranteed to execute exactly once upon entering the parent
+    /// scope. This could be because the expression only executes
+    /// conditionally, such as the expression `b` in `a && b`, or
+    /// because the expression may execute many times, such as a loop
+    /// body. The reason that we distinguish such expressions is that,
+    /// upon exiting the parent scope, we cannot statically know how
+    /// many times the expression executed, and thus if the expression
+    /// creates temporaries we cannot know statically how many such
+    /// temporaries we would have to clean up. Therefore, we ensure that
+    /// the temporaries never outlast the conditional/repeating
+    /// expression, preventing the need for dynamic checks and/or
+    /// arbitrary amounts of stack space. Terminating scopes end
+    /// up being contained in a DestructionScope that contains the
+    /// destructor's execution.
+    terminating_scopes: FxHashSet<hir::ItemLocalId>,
+}
+
+/// Records the lifetime of a local variable as `cx.var_parent`
+fn record_var_lifetime(
+    visitor: &mut RegionResolutionVisitor<'_>,
+    var_id: hir::ItemLocalId,
+    _sp: Span,
+) {
+    match visitor.cx.var_parent {
+        None => {
+            // this can happen in extern fn declarations like
+            //
+            //     extern fn isalnum(c: c_int) -> c_int
+        }
+        Some((parent_scope, _)) => visitor.scope_tree.record_var_scope(var_id, parent_scope),
+    }
+}
+
+fn resolve_block<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, blk: &'tcx hir::Block<'tcx>) {
+    debug!("resolve_block(blk.hir_id={:?})", blk.hir_id);
+
+    let prev_cx = visitor.cx;
+
+    // We treat the tail expression in the block (if any) somewhat
+    // differently from the statements. The issue has to do with
+    // temporary lifetimes. Consider the following:
+    //
+    //    quux({
+    //        let inner = ... (&bar()) ...;
+    //
+    //        (... (&foo()) ...)
// (the tail expression) + // }, other_argument()); + // + // Each of the statements within the block is a terminating + // scope, and thus a temporary (e.g., the result of calling + // `bar()` in the initializer expression for `let inner = ...;`) + // will be cleaned up immediately after its corresponding + // statement (i.e., `let inner = ...;`) executes. + // + // On the other hand, temporaries associated with evaluating the + // tail expression for the block are assigned lifetimes so that + // they will be cleaned up as part of the terminating scope + // *surrounding* the block expression. Here, the terminating + // scope for the block expression is the `quux(..)` call; so + // those temporaries will only be cleaned up *after* both + // `other_argument()` has run and also the call to `quux(..)` + // itself has returned. + + visitor.enter_node_scope_with_dtor(blk.hir_id.local_id); + visitor.cx.var_parent = visitor.cx.parent; + + { + // This block should be kept approximately in sync with + // `intravisit::walk_block`. (We manually walk the block, rather + // than call `walk_block`, in order to maintain precise + // index information.) + + for (i, statement) in blk.stmts.iter().enumerate() { + match statement.kind { + hir::StmtKind::Local(..) | hir::StmtKind::Item(..) => { + // Each declaration introduces a subscope for bindings + // introduced by the declaration; this subscope covers a + // suffix of the block. Each subscope in a block has the + // previous subscope in the block as a parent, except for + // the first such subscope, which has the block itself as a + // parent. + visitor.enter_scope(Scope { + id: blk.hir_id.local_id, + data: ScopeData::Remainder(FirstStatementIndex::new(i)), + }); + visitor.cx.var_parent = visitor.cx.parent; + } + hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {} + } + visitor.visit_stmt(statement) + } + walk_list!(visitor, visit_expr, &blk.expr); + } + + visitor.cx = prev_cx; +} + +fn resolve_arm<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, arm: &'tcx hir::Arm<'tcx>) { + let prev_cx = visitor.cx; + + visitor.enter_scope(Scope { id: arm.hir_id.local_id, data: ScopeData::Node }); + visitor.cx.var_parent = visitor.cx.parent; + + visitor.terminating_scopes.insert(arm.body.hir_id.local_id); + + if let Some(hir::Guard::If(ref expr)) = arm.guard { + visitor.terminating_scopes.insert(expr.hir_id.local_id); + } + + intravisit::walk_arm(visitor, arm); + + visitor.cx = prev_cx; +} + +fn resolve_pat<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, pat: &'tcx hir::Pat<'tcx>) { + visitor.record_child_scope(Scope { id: pat.hir_id.local_id, data: ScopeData::Node }); + + // If this is a binding then record the lifetime of that binding. + if let PatKind::Binding(..) = pat.kind { + record_var_lifetime(visitor, pat.hir_id.local_id, pat.span); + } + + debug!("resolve_pat - pre-increment {} pat = {:?}", visitor.expr_and_pat_count, pat); + + intravisit::walk_pat(visitor, pat); + + visitor.expr_and_pat_count += 1; + + debug!("resolve_pat - post-increment {} pat = {:?}", visitor.expr_and_pat_count, pat); +} + +fn resolve_stmt<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, stmt: &'tcx hir::Stmt<'tcx>) { + let stmt_id = stmt.hir_id.local_id; + debug!("resolve_stmt(stmt.id={:?})", stmt_id); + + // Every statement will clean up the temporaries created during + // execution of that statement. 
Therefore each statement has an + // associated destruction scope that represents the scope of the + // statement plus its destructors, and thus the scope for which + // regions referenced by the destructors need to survive. + visitor.terminating_scopes.insert(stmt_id); + + let prev_parent = visitor.cx.parent; + visitor.enter_node_scope_with_dtor(stmt_id); + + intravisit::walk_stmt(visitor, stmt); + + visitor.cx.parent = prev_parent; +} + +fn resolve_expr<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, expr: &'tcx hir::Expr<'tcx>) { + debug!("resolve_expr - pre-increment {} expr = {:?}", visitor.expr_and_pat_count, expr); + + let prev_cx = visitor.cx; + visitor.enter_node_scope_with_dtor(expr.hir_id.local_id); + + { + let terminating_scopes = &mut visitor.terminating_scopes; + let mut terminating = |id: hir::ItemLocalId| { + terminating_scopes.insert(id); + }; + match expr.kind { + // Conditional or repeating scopes are always terminating + // scopes, meaning that temporaries cannot outlive them. + // This ensures fixed size stacks. + hir::ExprKind::Binary( + source_map::Spanned { node: hir::BinOpKind::And, .. }, + _, + ref r, + ) + | hir::ExprKind::Binary( + source_map::Spanned { node: hir::BinOpKind::Or, .. }, + _, + ref r, + ) => { + // For shortcircuiting operators, mark the RHS as a terminating + // scope since it only executes conditionally. + terminating(r.hir_id.local_id); + } + + hir::ExprKind::If(_, ref then, Some(ref otherwise)) => { + terminating(then.hir_id.local_id); + terminating(otherwise.hir_id.local_id); + } + + hir::ExprKind::If(_, ref then, None) => { + terminating(then.hir_id.local_id); + } + + hir::ExprKind::Loop(ref body, _, _, _) => { + terminating(body.hir_id.local_id); + } + + hir::ExprKind::DropTemps(ref expr) => { + // `DropTemps(expr)` does not denote a conditional scope. + // Rather, we want to achieve the same behavior as `{ let _t = expr; _t }`. + terminating(expr.hir_id.local_id); + } + + hir::ExprKind::AssignOp(..) + | hir::ExprKind::Index(..) + | hir::ExprKind::Unary(..) + | hir::ExprKind::Call(..) + | hir::ExprKind::MethodCall(..) => { + // FIXME(https://github.com/rust-lang/rfcs/issues/811) Nested method calls + // + // The lifetimes for a call or method call look as follows: + // + // call.id + // - arg0.id + // - ... + // - argN.id + // - call.callee_id + // + // The idea is that call.callee_id represents *the time when + // the invoked function is actually running* and call.id + // represents *the time to prepare the arguments and make the + // call*. See the section "Borrows in Calls" borrowck/README.md + // for an extended explanation of why this distinction is + // important. + // + // record_superlifetime(new_cx, expr.callee_id); + } + + _ => {} + } + } + + let prev_pessimistic = visitor.pessimistic_yield; + + // Ordinarily, we can rely on the visit order of HIR intravisit + // to correspond to the actual execution order of statements. + // However, there's a weird corner case with compound assignment + // operators (e.g. `a += b`). The evaluation order depends on whether + // or not the operator is overloaded (e.g. whether or not a trait + // like AddAssign is implemented). + + // For primitive types (which, despite having a trait impl, don't actually + // end up calling it), the evaluation order is right-to-left. For example, + // the following code snippet: + // + // let y = &mut 0; + // *{println!("LHS!"); y} += {println!("RHS!"); 1}; + // + // will print: + // + // RHS! + // LHS! 
+ // + // However, if the operator is used on a non-primitive type, + // the evaluation order will be left-to-right, since the operator + // actually get desugared to a method call. For example, this + // nearly identical code snippet: + // + // let y = &mut String::new(); + // *{println!("LHS String"); y} += {println!("RHS String"); "hi"}; + // + // will print: + // LHS String + // RHS String + // + // To determine the actual execution order, we need to perform + // trait resolution. Unfortunately, we need to be able to compute + // yield_in_scope before type checking is even done, as it gets + // used by AST borrowcheck. + // + // Fortunately, we don't need to know the actual execution order. + // It suffices to know the 'worst case' order with respect to yields. + // Specifically, we need to know the highest 'expr_and_pat_count' + // that we could assign to the yield expression. To do this, + // we pick the greater of the two values from the left-hand + // and right-hand expressions. This makes us overly conservative + // about what types could possibly live across yield points, + // but we will never fail to detect that a type does actually + // live across a yield point. The latter part is critical - + // we're already overly conservative about what types will live + // across yield points, as the generated MIR will determine + // when things are actually live. However, for typecheck to work + // properly, we can't miss any types. + + match expr.kind { + // Manually recurse over closures and inline consts, because they are the only + // case of nested bodies that share the parent environment. + hir::ExprKind::Closure(&hir::Closure { body, .. }) + | hir::ExprKind::ConstBlock(hir::AnonConst { body, .. }) => { + let body = visitor.tcx.hir().body(body); + visitor.visit_body(body); + } + hir::ExprKind::AssignOp(_, ref left_expr, ref right_expr) => { + debug!( + "resolve_expr - enabling pessimistic_yield, was previously {}", + prev_pessimistic + ); + + let start_point = visitor.fixup_scopes.len(); + visitor.pessimistic_yield = true; + + // If the actual execution order turns out to be right-to-left, + // then we're fine. However, if the actual execution order is left-to-right, + // then we'll assign too low a count to any `yield` expressions + // we encounter in 'right_expression' - they should really occur after all of the + // expressions in 'left_expression'. + visitor.visit_expr(&right_expr); + visitor.pessimistic_yield = prev_pessimistic; + + debug!("resolve_expr - restoring pessimistic_yield to {}", prev_pessimistic); + visitor.visit_expr(&left_expr); + debug!("resolve_expr - fixing up counts to {}", visitor.expr_and_pat_count); + + // Remove and process any scopes pushed by the visitor + let target_scopes = visitor.fixup_scopes.drain(start_point..); + + for scope in target_scopes { + let mut yield_data = + visitor.scope_tree.yield_in_scope.get_mut(&scope).unwrap().last_mut().unwrap(); + let count = yield_data.expr_and_pat_count; + let span = yield_data.span; + + // expr_and_pat_count never decreases. Since we recorded counts in yield_in_scope + // before walking the left-hand side, it should be impossible for the recorded + // count to be greater than the left-hand side count. 
+ if count > visitor.expr_and_pat_count { + bug!( + "Encountered greater count {} at span {:?} - expected no greater than {}", + count, + span, + visitor.expr_and_pat_count + ); + } + let new_count = visitor.expr_and_pat_count; + debug!( + "resolve_expr - increasing count for scope {:?} from {} to {} at span {:?}", + scope, count, new_count, span + ); + + yield_data.expr_and_pat_count = new_count; + } + } + + hir::ExprKind::If(ref cond, ref then, Some(ref otherwise)) => { + let expr_cx = visitor.cx; + visitor.enter_scope(Scope { id: then.hir_id.local_id, data: ScopeData::IfThen }); + visitor.cx.var_parent = visitor.cx.parent; + visitor.visit_expr(cond); + visitor.visit_expr(then); + visitor.cx = expr_cx; + visitor.visit_expr(otherwise); + } + + hir::ExprKind::If(ref cond, ref then, None) => { + let expr_cx = visitor.cx; + visitor.enter_scope(Scope { id: then.hir_id.local_id, data: ScopeData::IfThen }); + visitor.cx.var_parent = visitor.cx.parent; + visitor.visit_expr(cond); + visitor.visit_expr(then); + visitor.cx = expr_cx; + } + + _ => intravisit::walk_expr(visitor, expr), + } + + visitor.expr_and_pat_count += 1; + + debug!("resolve_expr post-increment {}, expr = {:?}", visitor.expr_and_pat_count, expr); + + if let hir::ExprKind::Yield(_, source) = &expr.kind { + // Mark this expr's scope and all parent scopes as containing `yield`. + let mut scope = Scope { id: expr.hir_id.local_id, data: ScopeData::Node }; + loop { + let span = match expr.kind { + hir::ExprKind::Yield(expr, hir::YieldSource::Await { .. }) => { + expr.span.shrink_to_hi().to(expr.span) + } + _ => expr.span, + }; + let data = + YieldData { span, expr_and_pat_count: visitor.expr_and_pat_count, source: *source }; + match visitor.scope_tree.yield_in_scope.get_mut(&scope) { + Some(yields) => yields.push(data), + None => { + visitor.scope_tree.yield_in_scope.insert(scope, vec![data]); + } + } + + if visitor.pessimistic_yield { + debug!("resolve_expr in pessimistic_yield - marking scope {:?} for fixup", scope); + visitor.fixup_scopes.push(scope); + } + + // Keep traversing up while we can. + match visitor.scope_tree.parent_map.get(&scope) { + // Don't cross from closure bodies to their parent. + Some(&(superscope, _)) => match superscope.data { + ScopeData::CallSite => break, + _ => scope = superscope, + }, + None => break, + } + } + } + + visitor.cx = prev_cx; +} + +fn resolve_local<'tcx>( + visitor: &mut RegionResolutionVisitor<'tcx>, + pat: Option<&'tcx hir::Pat<'tcx>>, + init: Option<&'tcx hir::Expr<'tcx>>, + els: Option<&'tcx hir::Block<'tcx>>, +) { + debug!("resolve_local(pat={:?}, init={:?})", pat, init); + + let blk_scope = visitor.cx.var_parent.map(|(p, _)| p); + + // As an exception to the normal rules governing temporary + // lifetimes, initializers in a let have a temporary lifetime + // of the enclosing block. This means that e.g., a program + // like the following is legal: + // + // let ref x = HashMap::new(); + // + // Because the hash map will be freed in the enclosing block. + // + // We express the rules more formally based on 3 grammars (defined + // fully in the helpers below that implement them): + // + // 1. `E&`, which matches expressions like `&` that + // own a pointer into the stack. + // + // 2. `P&`, which matches patterns like `ref x` or `(ref x, ref + // y)` that produce ref bindings into the value they are + // matched against or something (at least partially) owned by + // the value they are matched against. 
(By partially owned,
+ // I mean that creating a binding into a ref-counted or managed value
+ // would still count.)
+ //
+ // 3. `ET`, which matches both rvalues like `foo()` as well as places
+ // based on rvalues like `foo().x[2].y`.
+ //
+ // A subexpression `<rvalue>` that appears in a let initializer
+ // `let pat [: ty] = expr` has an extended temporary lifetime if
+ // any of the following conditions are met:
+ //
+ // A. `pat` matches `P&` and `expr` matches `ET`
+ // (covers cases where `pat` creates ref bindings into an rvalue
+ // produced by `expr`)
+ // B. `ty` is a borrowed pointer and `expr` matches `ET`
+ // (covers cases where coercion creates a borrow)
+ // C. `expr` matches `E&`
+ // (covers cases where `expr` borrows an rvalue that is then assigned
+ // to memory (at least partially) owned by the binding)
+ //
+ // Here are some examples hopefully giving an intuition where each
+ // rule comes into play and why:
+ //
+ // Rule A. `let (ref x, ref y) = (foo().x, 44)`. The rvalue `(foo().x, 44)`
+ // would have an extended lifetime, but not `foo()`.
+ //
+ // Rule B. `let x = &foo().x`. The rvalue `foo()` would have an extended
+ // lifetime.
+ //
+ // In some cases, multiple rules may apply (though not to the same
+ // rvalue). For example:
+ //
+ // let ref x = [&a(), &b()];
+ //
+ // Here, the expression `[...]` has an extended lifetime due to rule
+ // A, but the inner rvalues `a()` and `b()` have an extended lifetime
+ // due to rule C.
+
+ if let Some(expr) = init {
+ record_rvalue_scope_if_borrow_expr(visitor, &expr, blk_scope);
+
+ if let Some(pat) = pat {
+ if is_binding_pat(pat) {
+ visitor.scope_tree.record_rvalue_candidate(
+ expr.hir_id,
+ RvalueCandidateType::Pattern {
+ target: expr.hir_id.local_id,
+ lifetime: blk_scope,
+ },
+ );
+ }
+ }
+ }
+
+ // Make sure we visit the initializer first, so expr_and_pat_count remains correct.
+ // The correct order, as shared between generator_interior, drop_ranges and intravisitor,
+ // is to walk initializer, followed by pattern bindings, finally followed by the `else` block.
+ if let Some(expr) = init {
+ visitor.visit_expr(expr);
+ }
+ if let Some(pat) = pat {
+ visitor.visit_pat(pat);
+ }
+ if let Some(els) = els {
+ visitor.visit_block(els);
+ }
+
+ /// Returns `true` if `pat` matches the `P&` non-terminal.
+ ///
+ /// ```text
+ /// P& = ref X
+ /// | StructName { ..., P&, ... }
+ /// | VariantName(..., P&, ...)
+ /// | [ ..., P&, ... ]
+ /// | ( ..., P&, ... )
+ /// | ... "|" P& "|" ...
+ /// | box P&
+ /// ```
+ fn is_binding_pat(pat: &hir::Pat<'_>) -> bool {
+ // Note that the code below looks for *explicit* refs only, that is, it won't
+ // know about *implicit* refs as introduced in #42640.
+ //
+ // This is not a problem. For example, consider
+ //
+ // let (ref x, ref y) = (Foo { .. }, Bar { .. });
+ //
+ // Due to the explicit refs on the left hand side, the below code would signal
+ // that the temporary value on the right hand side should live until the end of
+ // the enclosing block (as opposed to being dropped after the let is complete).
+ //
+ // To create an implicit ref, however, you must have a borrowed value on the RHS
+ // already, as in this example (which won't compile before #42640):
+ //
+ // let Foo { x, .. } = &Foo { x: ..., ... };
+ //
+ // in place of
+ //
+ // let Foo { ref x, .. } = Foo { ... };
+ //
+ // In the former case (the implicit ref version), the temporary is created by the
+ // & expression, and its lifetime would be extended to the end of the block (due
+ // to a different rule, not the below code).
+ match pat.kind {
+ PatKind::Binding(hir::BindingAnnotation::Ref, ..)
+ | PatKind::Binding(hir::BindingAnnotation::RefMut, ..) => true,
+
+ PatKind::Struct(_, ref field_pats, _) => {
+ field_pats.iter().any(|fp| is_binding_pat(&fp.pat))
+ }
+
+ PatKind::Slice(ref pats1, ref pats2, ref pats3) => {
+ pats1.iter().any(|p| is_binding_pat(&p))
+ || pats2.iter().any(|p| is_binding_pat(&p))
+ || pats3.iter().any(|p| is_binding_pat(&p))
+ }
+
+ PatKind::Or(ref subpats)
+ | PatKind::TupleStruct(_, ref subpats, _)
+ | PatKind::Tuple(ref subpats, _) => subpats.iter().any(|p| is_binding_pat(&p)),
+
+ PatKind::Box(ref subpat) => is_binding_pat(&subpat),
+
+ PatKind::Ref(_, _)
+ | PatKind::Binding(
+ hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Mutable,
+ ..,
+ )
+ | PatKind::Wild
+ | PatKind::Path(_)
+ | PatKind::Lit(_)
+ | PatKind::Range(_, _, _) => false,
+ }
+ }
+
+ /// If `expr` matches the `E&` grammar, then records an extended rvalue scope as appropriate:
+ ///
+ /// ```text
+ /// E& = & ET
+ /// | StructName { ..., f: E&, ... }
+ /// | [ ..., E&, ... ]
+ /// | ( ..., E&, ... )
+ /// | {...; E&}
+ /// | box E&
+ /// | E& as ...
+ /// | ( E& )
+ /// ```
+ fn record_rvalue_scope_if_borrow_expr<'tcx>(
+ visitor: &mut RegionResolutionVisitor<'tcx>,
+ expr: &hir::Expr<'_>,
+ blk_id: Option<Scope>,
+ ) {
+ match expr.kind {
+ hir::ExprKind::AddrOf(_, _, subexpr) => {
+ record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id);
+ visitor.scope_tree.record_rvalue_candidate(
+ subexpr.hir_id,
+ RvalueCandidateType::Borrow {
+ target: subexpr.hir_id.local_id,
+ lifetime: blk_id,
+ },
+ );
+ }
+ hir::ExprKind::Struct(_, fields, _) => {
+ for field in fields {
+ record_rvalue_scope_if_borrow_expr(visitor, &field.expr, blk_id);
+ }
+ }
+ hir::ExprKind::Array(subexprs) | hir::ExprKind::Tup(subexprs) => {
+ for subexpr in subexprs {
+ record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id);
+ }
+ }
+ hir::ExprKind::Cast(ref subexpr, _) => {
+ record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id)
+ }
+ hir::ExprKind::Block(ref block, _) => {
+ if let Some(ref subexpr) = block.expr {
+ record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id);
+ }
+ }
+ hir::ExprKind::Call(..) | hir::ExprKind::MethodCall(..) => {
+ // FIXME(@dingxiangfei2009): choose call arguments here
+ // for candidacy for extended parameter rule application
+ }
+ hir::ExprKind::Index(..) => {
+ // FIXME(@dingxiangfei2009): select the indices
+ // as candidate for rvalue scope rules
+ }
+ _ => {}
+ }
+ }
+}
+
+impl<'tcx> RegionResolutionVisitor<'tcx> {
+ /// Records the current parent (if any) as the parent of `child_scope`.
+ /// Returns the depth of `child_scope`.
+ fn record_child_scope(&mut self, child_scope: Scope) -> ScopeDepth {
+ let parent = self.cx.parent;
+ self.scope_tree.record_scope_parent(child_scope, parent);
+ // If `child_scope` has no parent, it must be the root node, and so has
+ // a depth of 1. Otherwise, its depth is one more than its parent's.
+ parent.map_or(1, |(_p, d)| d + 1)
+ }
+
+ /// Records the current parent (if any) as the parent of `child_scope`,
+ /// and sets `child_scope` as the new current parent.
+ fn enter_scope(&mut self, child_scope: Scope) {
+ let child_depth = self.record_child_scope(child_scope);
+ self.cx.parent = Some((child_scope, child_depth));
+ }
+
+ fn enter_node_scope_with_dtor(&mut self, id: hir::ItemLocalId) {
+ // If node was previously marked as a terminating scope during the
+ // recursive visit of its parent node in the AST, then we need to
+ // account for the destruction scope representing the scope of
+ // the destructors that run immediately after it completes.
+ if self.terminating_scopes.contains(&id) {
+ self.enter_scope(Scope { id, data: ScopeData::Destruction });
+ }
+ self.enter_scope(Scope { id, data: ScopeData::Node });
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for RegionResolutionVisitor<'tcx> {
+ fn visit_block(&mut self, b: &'tcx Block<'tcx>) {
+ resolve_block(self, b);
+ }
+
+ fn visit_body(&mut self, body: &'tcx hir::Body<'tcx>) {
+ let body_id = body.id();
+ let owner_id = self.tcx.hir().body_owner_def_id(body_id);
+
+ debug!(
+ "visit_body(id={:?}, span={:?}, body.id={:?}, cx.parent={:?})",
+ owner_id,
+ self.tcx.sess.source_map().span_to_diagnostic_string(body.value.span),
+ body_id,
+ self.cx.parent
+ );
+
+ // Save all state that is specific to the outer function
+ // body. These will be restored once down below, once we've
+ // visited the body.
+ let outer_ec = mem::replace(&mut self.expr_and_pat_count, 0);
+ let outer_cx = self.cx;
+ let outer_ts = mem::take(&mut self.terminating_scopes);
+ // The 'pessimistic yield' flag is set to true when we are
+ // processing a `+=` statement and have to make pessimistic
+ // control flow assumptions. This doesn't apply to nested
+ // bodies within the `+=` statements. See #69307.
+ let outer_pessimistic_yield = mem::replace(&mut self.pessimistic_yield, false);
+ self.terminating_scopes.insert(body.value.hir_id.local_id);
+
+ self.enter_scope(Scope { id: body.value.hir_id.local_id, data: ScopeData::CallSite });
+ self.enter_scope(Scope { id: body.value.hir_id.local_id, data: ScopeData::Arguments });
+
+ // The arguments and `self` are parented to the fn.
+ self.cx.var_parent = self.cx.parent.take();
+ for param in body.params {
+ self.visit_pat(&param.pat);
+ }
+
+ // The body of every fn is a root scope.
+ self.cx.parent = self.cx.var_parent;
+ if self.tcx.hir().body_owner_kind(owner_id).is_fn_or_closure() {
+ self.visit_expr(&body.value)
+ } else {
+ // Only functions have an outer terminating (drop) scope, while
+ // temporaries in constant initializers may be 'static, but only
+ // according to rvalue lifetime semantics, using the same
+ // syntactical rules used for let initializers.
+ //
+ // e.g., in `let x = &f();`, the temporary holding the result from
+ // the `f()` call lives for the entirety of the surrounding block.
+ //
+ // Similarly, `const X: ... = &f();` would have the result of `f()`
+ // live for `'static`, implying (if Drop restrictions on constants
+ // ever get lifted) that the value *could* have a destructor, but
+ // it'd get leaked instead of the destructor running during the
+ // evaluation of `X` (if at all allowed by CTFE).
+ //
+ // However, `const Y: ... = g(&f());`, like `let y = g(&f());`,
+ // would *not* let the `f()` temporary escape into an outer scope
+ // (i.e., `'static`), which means that after `g` returns, it drops,
+ // and all the associated destruction scope rules apply.
+ self.cx.var_parent = None;
+ resolve_local(self, None, Some(&body.value), None);
+ }
+
+ if body.generator_kind.is_some() {
+ self.scope_tree.body_expr_count.insert(body_id, self.expr_and_pat_count);
+ }
+
+ // Restore context we had at the start.
+ self.expr_and_pat_count = outer_ec;
+ self.cx = outer_cx;
+ self.terminating_scopes = outer_ts;
+ self.pessimistic_yield = outer_pessimistic_yield;
+ }
+
+ fn visit_arm(&mut self, a: &'tcx Arm<'tcx>) {
+ resolve_arm(self, a);
+ }
+ fn visit_pat(&mut self, p: &'tcx Pat<'tcx>) {
+ resolve_pat(self, p);
+ }
+ fn visit_stmt(&mut self, s: &'tcx Stmt<'tcx>) {
+ resolve_stmt(self, s);
+ }
+ fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
+ resolve_expr(self, ex);
+ }
+ fn visit_local(&mut self, l: &'tcx Local<'tcx>) {
+ resolve_local(self, Some(&l.pat), l.init, l.els)
+ }
+}
+
+/// Per-body `region::ScopeTree`. The `DefId` should be the owner `DefId` for the body;
+/// in the case of closures, this will be redirected to the enclosing function.
+///
+/// Performance: This is a query rather than a simple function to enable
+/// re-use in incremental scenarios. We may sometimes need to rerun the
+/// type checker even when the HIR hasn't changed, and in those cases
+/// we can avoid reconstructing the region scope tree.
+pub fn region_scope_tree(tcx: TyCtxt<'_>, def_id: DefId) -> &ScopeTree {
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id);
+ if typeck_root_def_id != def_id {
+ return tcx.region_scope_tree(typeck_root_def_id);
+ }
+
+ let scope_tree = if let Some(body_id) = tcx.hir().maybe_body_owned_by(def_id.expect_local()) {
+ let mut visitor = RegionResolutionVisitor {
+ tcx,
+ scope_tree: ScopeTree::default(),
+ expr_and_pat_count: 0,
+ cx: Context { parent: None, var_parent: None },
+ terminating_scopes: Default::default(),
+ pessimistic_yield: false,
+ fixup_scopes: vec![],
+ };
+
+ let body = tcx.hir().body(body_id);
+ visitor.scope_tree.root_body = Some(body.value.hir_id);
+ visitor.visit_body(body);
+ visitor.scope_tree
+ } else {
+ ScopeTree::default()
+ };
+
+ tcx.arena.alloc(scope_tree)
+}
diff --git a/compiler/rustc_typeck/src/check/regionck.rs b/compiler/rustc_typeck/src/check/regionck.rs
new file mode 100644
index 000000000..d49a6138f
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/regionck.rs
@@ -0,0 +1,47 @@
+use crate::outlives::outlives_bounds::InferCtxtExt as _;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_infer::infer::outlives::env::OutlivesEnvironment;
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::ty::Ty;
+
+pub(crate) trait OutlivesEnvironmentExt<'tcx> {
+ fn add_implied_bounds(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ fn_sig_tys: FxHashSet<Ty<'tcx>>,
+ body_id: hir::HirId,
+ );
+}
+
+impl<'tcx> OutlivesEnvironmentExt<'tcx> for OutlivesEnvironment<'tcx> {
+ /// This method adds "implied bounds" into the outlives environment.
+ /// Implied bounds are outlives relationships that we can deduce
+ /// on the basis that certain types must be well-formed -- these are
+ /// either the types that appear in the function signature or else
+ /// the input types to an impl. For example, if you have a function
+ /// like
+ ///
+ /// ```
+ /// fn foo<'a, 'b, T>(x: &'a &'b [T]) { }
+ /// ```
+ ///
+ /// we can assume in the caller's body that `'b: 'a` and that `T:
+ /// 'b` (and hence, transitively, that `T: 'a`). This method would
+ /// add those assumptions into the outlives-environment.
+ ///
+ /// Tests: `src/test/ui/regions/regions-free-region-ordering-*.rs`
+ #[instrument(level = "debug", skip(self, infcx))]
+ fn add_implied_bounds<'a>(
+ &mut self,
+ infcx: &InferCtxt<'a, 'tcx>,
+ fn_sig_tys: FxHashSet<Ty<'tcx>>,
+ body_id: hir::HirId,
+ ) {
+ for ty in fn_sig_tys {
+ let ty = infcx.resolve_vars_if_possible(ty);
+ let implied_bounds = infcx.implied_outlives_bounds(self.param_env, body_id, ty);
+ self.add_outlives_bounds(Some(infcx), implied_bounds)
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/rvalue_scopes.rs b/compiler/rustc_typeck/src/check/rvalue_scopes.rs
new file mode 100644
index 000000000..22c9e7961
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/rvalue_scopes.rs
@@ -0,0 +1,83 @@
+use super::FnCtxt;
+use hir::def_id::DefId;
+use hir::Node;
+use rustc_hir as hir;
+use rustc_middle::middle::region::{RvalueCandidateType, Scope, ScopeTree};
+use rustc_middle::ty::RvalueScopes;
+
+/// Applied to an expression `expr` if `expr` -- or something owned or partially owned by
+/// `expr` -- is going to be indirectly referenced by a variable in a let statement. In that
+/// case, the "temporary lifetime" of `expr` is extended to be the block enclosing the `let`
+/// statement.
+///
+/// More formally, if `expr` matches the grammar `ET`, record the rvalue scope of the matching
+/// `<rvalue>` as `blk_id`:
+///
+/// ```text
+/// ET = *ET
+/// | ET[...]
+/// | ET.f
+/// | (ET)
+/// | <rvalue>
+/// ```
+///
+/// Note: ET is intended to match "rvalues or places based on rvalues".
+fn record_rvalue_scope_rec(
+ rvalue_scopes: &mut RvalueScopes,
+ mut expr: &hir::Expr<'_>,
+ lifetime: Option<Scope>,
+) {
+ loop {
+ // Note: give all the expressions matching `ET` with the
+ // extended temporary lifetime, not just the innermost rvalue,
+ // because in codegen if we must compile e.g., `*rvalue()`
+ // into a temporary, we request the temporary scope of the
+ // outer expression.
+
+ rvalue_scopes.record_rvalue_scope(expr.hir_id.local_id, lifetime);
+
+ match expr.kind {
+ hir::ExprKind::AddrOf(_, _, subexpr)
+ | hir::ExprKind::Unary(hir::UnOp::Deref, subexpr)
+ | hir::ExprKind::Field(subexpr, _)
+ | hir::ExprKind::Index(subexpr, _) => {
+ expr = subexpr;
+ }
+ _ => {
+ return;
+ }
+ }
+ }
+}
+fn record_rvalue_scope(
+ rvalue_scopes: &mut RvalueScopes,
+ expr: &hir::Expr<'_>,
+ candidate: &RvalueCandidateType,
+) {
+ debug!("resolve_rvalue_scope(expr={expr:?}, candidate={candidate:?})");
+ match candidate {
+ RvalueCandidateType::Borrow { lifetime, .. }
+ | RvalueCandidateType::Pattern { lifetime, .. } => {
+ record_rvalue_scope_rec(rvalue_scopes, expr, *lifetime)
+ } // FIXME(@dingxiangfei2009): handle the candidates in the function call arguments
+ }
+}
+
+pub fn resolve_rvalue_scopes<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ scope_tree: &'a ScopeTree,
+ def_id: DefId,
+) -> RvalueScopes {
+ let tcx = &fcx.tcx;
+ let hir_map = tcx.hir();
+ let mut rvalue_scopes = RvalueScopes::new();
+ debug!("start resolving rvalue scopes, def_id={def_id:?}");
+ debug!("rvalue_scope: rvalue_candidates={:?}", scope_tree.rvalue_candidates);
+ for (&hir_id, candidate) in &scope_tree.rvalue_candidates {
+ let Some(Node::Expr(expr)) = hir_map.find(hir_id) else {
+ bug!("hir node does not exist")
+ };
+ record_rvalue_scope(&mut rvalue_scopes, expr, candidate);
+ }
+ rvalue_scopes
+}
diff --git a/compiler/rustc_typeck/src/check/upvar.rs b/compiler/rustc_typeck/src/check/upvar.rs
new file mode 100644
index 000000000..dd8f943b9
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/upvar.rs
@@ -0,0 +1,2272 @@
+//!
### Inferring borrow kinds for upvars +//! +//! Whenever there is a closure expression, we need to determine how each +//! upvar is used. We do this by initially assigning each upvar an +//! immutable "borrow kind" (see `ty::BorrowKind` for details) and then +//! "escalating" the kind as needed. The borrow kind proceeds according to +//! the following lattice: +//! ```ignore (not-rust) +//! ty::ImmBorrow -> ty::UniqueImmBorrow -> ty::MutBorrow +//! ``` +//! So, for example, if we see an assignment `x = 5` to an upvar `x`, we +//! will promote its borrow kind to mutable borrow. If we see an `&mut x` +//! we'll do the same. Naturally, this applies not just to the upvar, but +//! to everything owned by `x`, so the result is the same for something +//! like `x.f = 5` and so on (presuming `x` is not a borrowed pointer to a +//! struct). These adjustments are performed in +//! `adjust_upvar_borrow_kind()` (you can trace backwards through the code +//! from there). +//! +//! The fact that we are inferring borrow kinds as we go results in a +//! semi-hacky interaction with mem-categorization. In particular, +//! mem-categorization will query the current borrow kind as it +//! categorizes, and we'll return the *current* value, but this may get +//! adjusted later. Therefore, in this module, we generally ignore the +//! borrow kind (and derived mutabilities) that are returned from +//! mem-categorization, since they may be inaccurate. (Another option +//! would be to use a unification scheme, where instead of returning a +//! concrete borrow kind like `ty::ImmBorrow`, we return a +//! `ty::InferBorrow(upvar_id)` or something like that, but this would +//! then mean that all later passes would have to check for these figments +//! and report an error, and it just seems like more mess in the end.) + +use super::FnCtxt; + +use crate::expr_use_visitor as euv; +use rustc_errors::{Applicability, MultiSpan}; +use rustc_hir as hir; +use rustc_hir::def_id::LocalDefId; +use rustc_hir::intravisit::{self, Visitor}; +use rustc_infer::infer::UpvarRegion; +use rustc_middle::hir::place::{Place, PlaceBase, PlaceWithHirId, Projection, ProjectionKind}; +use rustc_middle::mir::FakeReadCause; +use rustc_middle::ty::{ + self, ClosureSizeProfileData, Ty, TyCtxt, TypeckResults, UpvarCapture, UpvarSubsts, +}; +use rustc_session::lint; +use rustc_span::sym; +use rustc_span::{BytePos, Pos, Span, Symbol}; +use rustc_trait_selection::infer::InferCtxtExt; + +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_index::vec::Idx; +use rustc_target::abi::VariantIdx; + +use std::iter; + +/// Describe the relationship between the paths of two places +/// eg: +/// - `foo` is ancestor of `foo.bar.baz` +/// - `foo.bar.baz` is an descendant of `foo.bar` +/// - `foo.bar` and `foo.baz` are divergent +enum PlaceAncestryRelation { + Ancestor, + Descendant, + SamePlace, + Divergent, +} + +/// Intermediate format to store a captured `Place` and associated `ty::CaptureInfo` +/// during capture analysis. Information in this map feeds into the minimum capture +/// analysis pass. +type InferredCaptureInformation<'tcx> = Vec<(Place<'tcx>, ty::CaptureInfo)>; + +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + pub fn closure_analyze(&self, body: &'tcx hir::Body<'tcx>) { + InferBorrowKindVisitor { fcx: self }.visit_body(body); + + // it's our job to process these. 
+ assert!(self.deferred_call_resolutions.borrow().is_empty());
+ }
+}
+
+/// Intermediate format to store the hir_id pointing to the use that resulted in the
+/// corresponding place being captured and a String which contains the captured value's
+/// name (i.e., a.b.c)
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+enum UpvarMigrationInfo {
+ /// We previously captured all of `x`, but now we capture some sub-path.
+ CapturingPrecise { source_expr: Option<hir::HirId>, var_name: String },
+ CapturingNothing {
+ // where the variable appears in the closure (but is not captured)
+ use_span: Span,
+ },
+}
+
+/// Reasons that we might issue a migration warning.
+#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
+struct MigrationWarningReason {
+ /// When we used to capture `x` in its entirety, we implemented the auto-trait(s)
+ /// in this vec, but now we don't.
+ auto_traits: Vec<&'static str>,
+
+ /// When we used to capture `x` in its entirety, we would execute some destructors
+ /// at a different time.
+ drop_order: bool,
+}
+
+impl MigrationWarningReason {
+ fn migration_message(&self) -> String {
+ let base = "changes to closure capture in Rust 2021 will affect";
+ if !self.auto_traits.is_empty() && self.drop_order {
+ format!("{} drop order and which traits the closure implements", base)
+ } else if self.drop_order {
+ format!("{} drop order", base)
+ } else {
+ format!("{} which traits the closure implements", base)
+ }
+ }
+}
+
+/// Intermediate format to store information needed to generate a note in the migration lint.
+struct MigrationLintNote {
+ captures_info: UpvarMigrationInfo,
+
+ /// reasons why migration is needed for this capture
+ reason: MigrationWarningReason,
+}
+
+/// Intermediate format to store the hir id of the root variable and a HashSet containing
+/// information on why the root variable should be fully captured
+struct NeededMigration {
+ var_hir_id: hir::HirId,
+ diagnostics_info: Vec<MigrationLintNote>,
+}
+
+struct InferBorrowKindVisitor<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for InferBorrowKindVisitor<'a, 'tcx> {
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ match expr.kind {
+ hir::ExprKind::Closure(&hir::Closure { capture_clause, body: body_id, .. }) => {
+ let body = self.fcx.tcx.hir().body(body_id);
+ self.visit_body(body);
+ self.fcx.analyze_closure(expr.hir_id, expr.span, body_id, body, capture_clause);
+ }
+ hir::ExprKind::ConstBlock(anon_const) => {
+ let body = self.fcx.tcx.hir().body(anon_const.body);
+ self.visit_body(body);
+ }
+ _ => {}
+ }
+
+ intravisit::walk_expr(self, expr);
+ }
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Analysis starting point.
+ #[instrument(skip(self, body), level = "debug")]
+ fn analyze_closure(
+ &self,
+ closure_hir_id: hir::HirId,
+ span: Span,
+ body_id: hir::BodyId,
+ body: &'tcx hir::Body<'tcx>,
+ capture_clause: hir::CaptureBy,
+ ) {
+ // Extract the type of the closure.
+ let ty = self.node_ty(closure_hir_id); + let (closure_def_id, substs) = match *ty.kind() { + ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)), + ty::Generator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)), + ty::Error(_) => { + // #51714: skip analysis when we have already encountered type errors + return; + } + _ => { + span_bug!( + span, + "type of closure expr {:?} is not a closure {:?}", + closure_hir_id, + ty + ); + } + }; + let closure_def_id = closure_def_id.expect_local(); + + let infer_kind = if let UpvarSubsts::Closure(closure_substs) = substs { + self.closure_kind(closure_substs).is_none().then_some(closure_substs) + } else { + None + }; + + assert_eq!(self.tcx.hir().body_owner_def_id(body.id()), closure_def_id); + let mut delegate = InferBorrowKind { + fcx: self, + closure_def_id, + capture_information: Default::default(), + fake_reads: Default::default(), + }; + euv::ExprUseVisitor::new( + &mut delegate, + &self.infcx, + closure_def_id, + self.param_env, + &self.typeck_results.borrow(), + ) + .consume_body(body); + + debug!( + "For closure={:?}, capture_information={:#?}", + closure_def_id, delegate.capture_information + ); + + self.log_capture_analysis_first_pass(closure_def_id, &delegate.capture_information, span); + + let (capture_information, closure_kind, origin) = self + .process_collected_capture_information(capture_clause, delegate.capture_information); + + self.compute_min_captures(closure_def_id, capture_information, span); + + let closure_hir_id = self.tcx.hir().local_def_id_to_hir_id(closure_def_id); + + if should_do_rust_2021_incompatible_closure_captures_analysis(self.tcx, closure_hir_id) { + self.perform_2229_migration_anaysis(closure_def_id, body_id, capture_clause, span); + } + + let after_feature_tys = self.final_upvar_tys(closure_def_id); + + // We now fake capture information for all variables that are mentioned within the closure + // We do this after handling migrations so that min_captures computes before + if !enable_precise_capture(self.tcx, span) { + let mut capture_information: InferredCaptureInformation<'tcx> = Default::default(); + + if let Some(upvars) = self.tcx.upvars_mentioned(closure_def_id) { + for var_hir_id in upvars.keys() { + let place = self.place_for_root_variable(closure_def_id, *var_hir_id); + + debug!("seed place {:?}", place); + + let capture_kind = self.init_capture_kind_for_place(&place, capture_clause); + let fake_info = ty::CaptureInfo { + capture_kind_expr_id: None, + path_expr_id: None, + capture_kind, + }; + + capture_information.push((place, fake_info)); + } + } + + // This will update the min captures based on this new fake information. + self.compute_min_captures(closure_def_id, capture_information, span); + } + + let before_feature_tys = self.final_upvar_tys(closure_def_id); + + if let Some(closure_substs) = infer_kind { + // Unify the (as yet unbound) type variable in the closure + // substs with the kind we inferred. + let closure_kind_ty = closure_substs.as_closure().kind_ty(); + self.demand_eqtype(span, closure_kind.to_ty(self.tcx), closure_kind_ty); + + // If we have an origin, store it. 
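+ // (Illustrative, hypothetical example: for `let mut n = 0; let c = || n += 1;`,
+ // the origin would be the span of `n += 1` together with the place `n`, i.e. the
+ // use that pushed the inferred closure kind up to `FnMut`.)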
+ if let Some(origin) = origin { + let origin = if enable_precise_capture(self.tcx, span) { + (origin.0, origin.1) + } else { + (origin.0, Place { projections: vec![], ..origin.1 }) + }; + + self.typeck_results + .borrow_mut() + .closure_kind_origins_mut() + .insert(closure_hir_id, origin); + } + } + + self.log_closure_min_capture_info(closure_def_id, span); + + // Now that we've analyzed the closure, we know how each + // variable is borrowed, and we know what traits the closure + // implements (Fn vs FnMut etc). We now have some updates to do + // with that information. + // + // Note that no closure type C may have an upvar of type C + // (though it may reference itself via a trait object). This + // results from the desugaring of closures to a struct like + // `Foo<..., UV0...UVn>`. If one of those upvars referenced + // C, then the type would have infinite size (and the + // inference algorithm will reject it). + + // Equate the type variables for the upvars with the actual types. + let final_upvar_tys = self.final_upvar_tys(closure_def_id); + debug!( + "analyze_closure: id={:?} substs={:?} final_upvar_tys={:?}", + closure_hir_id, substs, final_upvar_tys + ); + + // Build a tuple (U0..Un) of the final upvar types U0..Un + // and unify the upvar tuple type in the closure with it: + let final_tupled_upvars_type = self.tcx.mk_tup(final_upvar_tys.iter()); + self.demand_suptype(span, substs.tupled_upvars_ty(), final_tupled_upvars_type); + + let fake_reads = delegate + .fake_reads + .into_iter() + .map(|(place, cause, hir_id)| (place, cause, hir_id)) + .collect(); + self.typeck_results.borrow_mut().closure_fake_reads.insert(closure_def_id, fake_reads); + + if self.tcx.sess.opts.unstable_opts.profile_closures { + self.typeck_results.borrow_mut().closure_size_eval.insert( + closure_def_id, + ClosureSizeProfileData { + before_feature_tys: self.tcx.mk_tup(before_feature_tys.into_iter()), + after_feature_tys: self.tcx.mk_tup(after_feature_tys.into_iter()), + }, + ); + } + + // If we are also inferred the closure kind here, + // process any deferred resolutions. + let deferred_call_resolutions = self.remove_deferred_call_resolutions(closure_def_id); + for deferred_call_resolution in deferred_call_resolutions { + deferred_call_resolution.resolve(self); + } + } + + // Returns a list of `Ty`s for each upvar. + fn final_upvar_tys(&self, closure_id: LocalDefId) -> Vec> { + self.typeck_results + .borrow() + .closure_min_captures_flattened(closure_id) + .map(|captured_place| { + let upvar_ty = captured_place.place.ty(); + let capture = captured_place.info.capture_kind; + + debug!( + "final_upvar_tys: place={:?} upvar_ty={:?} capture={:?}, mutability={:?}", + captured_place.place, upvar_ty, capture, captured_place.mutability, + ); + + apply_capture_kind_on_capture_ty(self.tcx, upvar_ty, capture, captured_place.region) + }) + .collect() + } + + /// Adjusts the closure capture information to ensure that the operations aren't unsafe, + /// and that the path can be captured with required capture kind (depending on use in closure, + /// move closure etc.) + /// + /// Returns the set of of adjusted information along with the inferred closure kind and span + /// associated with the closure kind inference. + /// + /// Note that we *always* infer a minimal kind, even if + /// we don't always *use* that in the final result (i.e., sometimes + /// we've taken the closure kind from the expectations instead, and + /// for generators we don't even implement the closure traits + /// really). 
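+ ///
+ /// Illustrative, hypothetical snippet (not part of the original docs):
+ /// ```ignore (illustrative)
+ /// let mut s = String::new();
+ /// let c = || s.push_str("x"); // mutable-borrow capture => closure kind FnMut
+ /// // let c = || drop(s);      // by-value capture        => closure kind FnOnce
+ /// ```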
+ /// + /// If we inferred that the closure needs to be FnMut/FnOnce, last element of the returned tuple + /// contains a `Some()` with the `Place` that caused us to do so. + fn process_collected_capture_information( + &self, + capture_clause: hir::CaptureBy, + capture_information: InferredCaptureInformation<'tcx>, + ) -> (InferredCaptureInformation<'tcx>, ty::ClosureKind, Option<(Span, Place<'tcx>)>) { + let mut closure_kind = ty::ClosureKind::LATTICE_BOTTOM; + let mut origin: Option<(Span, Place<'tcx>)> = None; + + let processed = capture_information + .into_iter() + .map(|(place, mut capture_info)| { + // Apply rules for safety before inferring closure kind + let (place, capture_kind) = + restrict_capture_precision(place, capture_info.capture_kind); + + let (place, capture_kind) = truncate_capture_for_optimization(place, capture_kind); + + let usage_span = if let Some(usage_expr) = capture_info.path_expr_id { + self.tcx.hir().span(usage_expr) + } else { + unreachable!() + }; + + let updated = match capture_kind { + ty::UpvarCapture::ByValue => match closure_kind { + ty::ClosureKind::Fn | ty::ClosureKind::FnMut => { + (ty::ClosureKind::FnOnce, Some((usage_span, place.clone()))) + } + // If closure is already FnOnce, don't update + ty::ClosureKind::FnOnce => (closure_kind, origin.take()), + }, + + ty::UpvarCapture::ByRef( + ty::BorrowKind::MutBorrow | ty::BorrowKind::UniqueImmBorrow, + ) => { + match closure_kind { + ty::ClosureKind::Fn => { + (ty::ClosureKind::FnMut, Some((usage_span, place.clone()))) + } + // Don't update the origin + ty::ClosureKind::FnMut | ty::ClosureKind::FnOnce => { + (closure_kind, origin.take()) + } + } + } + + _ => (closure_kind, origin.take()), + }; + + closure_kind = updated.0; + origin = updated.1; + + let (place, capture_kind) = match capture_clause { + hir::CaptureBy::Value => adjust_for_move_closure(place, capture_kind), + hir::CaptureBy::Ref => adjust_for_non_move_closure(place, capture_kind), + }; + + // This restriction needs to be applied after we have handled adjustments for `move` + // closures. We want to make sure any adjustment that might make us move the place into + // the closure gets handled. + let (place, capture_kind) = + restrict_precision_for_drop_types(self, place, capture_kind, usage_span); + + capture_info.capture_kind = capture_kind; + (place, capture_info) + }) + .collect(); + + (processed, closure_kind, origin) + } + + /// Analyzes the information collected by `InferBorrowKind` to compute the min number of + /// Places (and corresponding capture kind) that we need to keep track of to support all + /// the required captured paths. + /// + /// + /// Note: If this function is called multiple times for the same closure, it will update + /// the existing min_capture map that is stored in TypeckResults. + /// + /// Eg: + /// ``` + /// #[derive(Debug)] + /// struct Point { x: i32, y: i32 } + /// + /// let s = String::from("s"); // hir_id_s + /// let mut p = Point { x: 2, y: -2 }; // his_id_p + /// let c = || { + /// println!("{s:?}"); // L1 + /// p.x += 10; // L2 + /// println!("{}" , p.y); // L3 + /// println!("{p:?}"); // L4 + /// drop(s); // L5 + /// }; + /// ``` + /// and let hir_id_L1..5 be the expressions pointing to use of a captured variable on + /// the lines L1..5 respectively. + /// + /// InferBorrowKind results in a structure like this: + /// + /// ```ignore (illustrative) + /// { + /// Place(base: hir_id_s, projections: [], ....) 
-> { + /// capture_kind_expr: hir_id_L5, + /// path_expr_id: hir_id_L5, + /// capture_kind: ByValue + /// }, + /// Place(base: hir_id_p, projections: [Field(0, 0)], ...) -> { + /// capture_kind_expr: hir_id_L2, + /// path_expr_id: hir_id_L2, + /// capture_kind: ByValue + /// }, + /// Place(base: hir_id_p, projections: [Field(1, 0)], ...) -> { + /// capture_kind_expr: hir_id_L3, + /// path_expr_id: hir_id_L3, + /// capture_kind: ByValue + /// }, + /// Place(base: hir_id_p, projections: [], ...) -> { + /// capture_kind_expr: hir_id_L4, + /// path_expr_id: hir_id_L4, + /// capture_kind: ByValue + /// }, + /// } + /// ``` + /// + /// After the min capture analysis, we get: + /// ```ignore (illustrative) + /// { + /// hir_id_s -> [ + /// Place(base: hir_id_s, projections: [], ....) -> { + /// capture_kind_expr: hir_id_L5, + /// path_expr_id: hir_id_L5, + /// capture_kind: ByValue + /// }, + /// ], + /// hir_id_p -> [ + /// Place(base: hir_id_p, projections: [], ...) -> { + /// capture_kind_expr: hir_id_L2, + /// path_expr_id: hir_id_L4, + /// capture_kind: ByValue + /// }, + /// ], + /// } + /// ``` + fn compute_min_captures( + &self, + closure_def_id: LocalDefId, + capture_information: InferredCaptureInformation<'tcx>, + closure_span: Span, + ) { + if capture_information.is_empty() { + return; + } + + let mut typeck_results = self.typeck_results.borrow_mut(); + + let mut root_var_min_capture_list = + typeck_results.closure_min_captures.remove(&closure_def_id).unwrap_or_default(); + + for (mut place, capture_info) in capture_information.into_iter() { + let var_hir_id = match place.base { + PlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id, + base => bug!("Expected upvar, found={:?}", base), + }; + + let Some(min_cap_list) = root_var_min_capture_list.get_mut(&var_hir_id) else { + let mutability = self.determine_capture_mutability(&typeck_results, &place); + let min_cap_list = vec![ty::CapturedPlace { + place, + info: capture_info, + mutability, + region: None, + }]; + root_var_min_capture_list.insert(var_hir_id, min_cap_list); + continue; + }; + + // Go through each entry in the current list of min_captures + // - if ancestor is found, update it's capture kind to account for current place's + // capture information. + // + // - if descendant is found, remove it from the list, and update the current place's + // capture information to account for the descendant's capture kind. + // + // We can never be in a case where the list contains both an ancestor and a descendant + // Also there can only be ancestor but in case of descendants there might be + // multiple. + + let mut descendant_found = false; + let mut updated_capture_info = capture_info; + min_cap_list.retain(|possible_descendant| { + match determine_place_ancestry_relation(&place, &possible_descendant.place) { + // current place is ancestor of possible_descendant + PlaceAncestryRelation::Ancestor => { + descendant_found = true; + + let mut possible_descendant = possible_descendant.clone(); + let backup_path_expr_id = updated_capture_info.path_expr_id; + + // Truncate the descendant (already in min_captures) to be same as the ancestor to handle any + // possible change in capture mode. 
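+ // (Illustrative, hypothetical case: if `p.x.y` is already recorded and the
+ // current place is `p.x`, the recorded entry is truncated to `p.x` here and
+ // the two capture kinds are then merged via `determine_capture_info` below.)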
+ truncate_place_to_len_and_update_capture_kind( + &mut possible_descendant.place, + &mut possible_descendant.info.capture_kind, + place.projections.len(), + ); + + updated_capture_info = + determine_capture_info(updated_capture_info, possible_descendant.info); + + // we need to keep the ancestor's `path_expr_id` + updated_capture_info.path_expr_id = backup_path_expr_id; + false + } + + _ => true, + } + }); + + let mut ancestor_found = false; + if !descendant_found { + for possible_ancestor in min_cap_list.iter_mut() { + match determine_place_ancestry_relation(&place, &possible_ancestor.place) { + PlaceAncestryRelation::SamePlace => { + ancestor_found = true; + possible_ancestor.info = determine_capture_info( + possible_ancestor.info, + updated_capture_info, + ); + + // Only one related place will be in the list. + break; + } + // current place is descendant of possible_ancestor + PlaceAncestryRelation::Descendant => { + ancestor_found = true; + let backup_path_expr_id = possible_ancestor.info.path_expr_id; + + // Truncate the descendant (current place) to be same as the ancestor to handle any + // possible change in capture mode. + truncate_place_to_len_and_update_capture_kind( + &mut place, + &mut updated_capture_info.capture_kind, + possible_ancestor.place.projections.len(), + ); + + possible_ancestor.info = determine_capture_info( + possible_ancestor.info, + updated_capture_info, + ); + + // we need to keep the ancestor's `path_expr_id` + possible_ancestor.info.path_expr_id = backup_path_expr_id; + + // Only one related place will be in the list. + break; + } + _ => {} + } + } + } + + // Only need to insert when we don't have an ancestor in the existing min capture list + if !ancestor_found { + let mutability = self.determine_capture_mutability(&typeck_results, &place); + let captured_place = ty::CapturedPlace { + place, + info: updated_capture_info, + mutability, + region: None, + }; + min_cap_list.push(captured_place); + } + } + + // For each capture that is determined to be captured by ref, add region info. + for (_, captures) in &mut root_var_min_capture_list { + for capture in captures { + match capture.info.capture_kind { + ty::UpvarCapture::ByRef(_) => { + let PlaceBase::Upvar(upvar_id) = capture.place.base else { bug!("expected upvar") }; + let origin = UpvarRegion(upvar_id, closure_span); + let upvar_region = self.next_region_var(origin); + capture.region = Some(upvar_region); + } + _ => (), + } + } + } + + debug!( + "For closure={:?}, min_captures before sorting={:?}", + closure_def_id, root_var_min_capture_list + ); + + // Now that we have the minimized list of captures, sort the captures by field id. + // This causes the closure to capture the upvars in the same order as the fields are + // declared which is also the drop order. Thus, in situations where we capture all the + // fields of some type, the observable drop order will remain the same as it previously + // was even though we're dropping each capture individually. + // See https://github.com/rust-lang/project-rfc-2229/issues/42 and + // `src/test/ui/closures/2229_closure_analysis/preserve_field_drop_order.rs`. 
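+ // Illustrative example with a hypothetical `Point { x, y }`: if the closure
+ // body uses `p.y` before `p.x`, sorting the two captures by field index
+ // stores `p.x` first, so the captured fields still drop in declaration
+ // order (`x`, then `y`), just as dropping all of `p` would have.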
+ for (_, captures) in &mut root_var_min_capture_list { + captures.sort_by(|capture1, capture2| { + for (p1, p2) in capture1.place.projections.iter().zip(&capture2.place.projections) { + // We do not need to look at the `Projection.ty` fields here because at each + // step of the iteration, the projections will either be the same and therefore + // the types must be as well or the current projection will be different and + // we will return the result of comparing the field indexes. + match (p1.kind, p2.kind) { + // Paths are the same, continue to next loop. + (ProjectionKind::Deref, ProjectionKind::Deref) => {} + (ProjectionKind::Field(i1, _), ProjectionKind::Field(i2, _)) + if i1 == i2 => {} + + // Fields are different, compare them. + (ProjectionKind::Field(i1, _), ProjectionKind::Field(i2, _)) => { + return i1.cmp(&i2); + } + + // We should have either a pair of `Deref`s or a pair of `Field`s. + // Anything else is a bug. + ( + l @ (ProjectionKind::Deref | ProjectionKind::Field(..)), + r @ (ProjectionKind::Deref | ProjectionKind::Field(..)), + ) => bug!( + "ProjectionKinds Deref and Field were mismatched: ({:?}, {:?})", + l, + r + ), + ( + l @ (ProjectionKind::Index + | ProjectionKind::Subslice + | ProjectionKind::Deref + | ProjectionKind::Field(..)), + r @ (ProjectionKind::Index + | ProjectionKind::Subslice + | ProjectionKind::Deref + | ProjectionKind::Field(..)), + ) => bug!( + "ProjectionKinds Index or Subslice were unexpected: ({:?}, {:?})", + l, + r + ), + } + } + + unreachable!( + "we captured two identical projections: capture1 = {:?}, capture2 = {:?}", + capture1, capture2 + ); + }); + } + + debug!( + "For closure={:?}, min_captures after sorting={:#?}", + closure_def_id, root_var_min_capture_list + ); + typeck_results.closure_min_captures.insert(closure_def_id, root_var_min_capture_list); + } + + /// Perform the migration analysis for RFC 2229, and emit lint + /// `disjoint_capture_drop_reorder` if needed. 
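+ ///
+ /// Illustrative, hypothetical case:
+ /// ```ignore (illustrative)
+ /// let t = (String::from("a"), String::from("b"));
+ /// let c = move || { let _s = t.0; };
+ /// // Rust 2018 moves all of `t` into the closure; Rust 2021 moves only `t.0`,
+ /// // so `t.1` is dropped at a different time. The lint suggests a dummy use
+ /// // such as `let _ = &t;` inside the closure body.
+ /// ```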
+ fn perform_2229_migration_anaysis( + &self, + closure_def_id: LocalDefId, + body_id: hir::BodyId, + capture_clause: hir::CaptureBy, + span: Span, + ) { + let (need_migrations, reasons) = self.compute_2229_migrations( + closure_def_id, + span, + capture_clause, + self.typeck_results.borrow().closure_min_captures.get(&closure_def_id), + ); + + if !need_migrations.is_empty() { + let (migration_string, migrated_variables_concat) = + migration_suggestion_for_2229(self.tcx, &need_migrations); + + let closure_hir_id = self.tcx.hir().local_def_id_to_hir_id(closure_def_id); + let closure_head_span = self.tcx.def_span(closure_def_id); + self.tcx.struct_span_lint_hir( + lint::builtin::RUST_2021_INCOMPATIBLE_CLOSURE_CAPTURES, + closure_hir_id, + closure_head_span, + |lint| { + let mut diagnostics_builder = lint.build( + &reasons.migration_message(), + ); + for NeededMigration { var_hir_id, diagnostics_info } in &need_migrations { + // Labels all the usage of the captured variable and why they are responsible + // for migration being needed + for lint_note in diagnostics_info.iter() { + match &lint_note.captures_info { + UpvarMigrationInfo::CapturingPrecise { source_expr: Some(capture_expr_id), var_name: captured_name } => { + let cause_span = self.tcx.hir().span(*capture_expr_id); + diagnostics_builder.span_label(cause_span, format!("in Rust 2018, this closure captures all of `{}`, but in Rust 2021, it will only capture `{}`", + self.tcx.hir().name(*var_hir_id), + captured_name, + )); + } + UpvarMigrationInfo::CapturingNothing { use_span } => { + diagnostics_builder.span_label(*use_span, format!("in Rust 2018, this causes the closure to capture `{}`, but in Rust 2021, it has no effect", + self.tcx.hir().name(*var_hir_id), + )); + } + + _ => { } + } + + // Add a label pointing to where a captured variable affected by drop order + // is dropped + if lint_note.reason.drop_order { + let drop_location_span = drop_location_span(self.tcx, closure_hir_id); + + match &lint_note.captures_info { + UpvarMigrationInfo::CapturingPrecise { var_name: captured_name, .. } => { + diagnostics_builder.span_label(drop_location_span, format!("in Rust 2018, `{}` is dropped here, but in Rust 2021, only `{}` will be dropped here as part of the closure", + self.tcx.hir().name(*var_hir_id), + captured_name, + )); + } + UpvarMigrationInfo::CapturingNothing { use_span: _ } => { + diagnostics_builder.span_label(drop_location_span, format!("in Rust 2018, `{v}` is dropped here along with the closure, but in Rust 2021 `{v}` is not part of the closure", + v = self.tcx.hir().name(*var_hir_id), + )); + } + } + } + + // Add a label explaining why a closure no longer implements a trait + for &missing_trait in &lint_note.reason.auto_traits { + // not capturing something anymore cannot cause a trait to fail to be implemented: + match &lint_note.captures_info { + UpvarMigrationInfo::CapturingPrecise { var_name: captured_name, .. 
} => { + let var_name = self.tcx.hir().name(*var_hir_id); + diagnostics_builder.span_label(closure_head_span, format!("\ + in Rust 2018, this closure implements {missing_trait} \ + as `{var_name}` implements {missing_trait}, but in Rust 2021, \ + this closure will no longer implement {missing_trait} \ + because `{var_name}` is not fully captured \ + and `{captured_name}` does not implement {missing_trait}")); + } + + // Cannot happen: if we don't capture a variable, we impl strictly more traits + UpvarMigrationInfo::CapturingNothing { use_span } => span_bug!(*use_span, "missing trait from not capturing something"), + } + } + } + } + diagnostics_builder.note("for more information, see "); + + let diagnostic_msg = format!( + "add a dummy let to cause {} to be fully captured", + migrated_variables_concat + ); + + let closure_span = self.tcx.hir().span_with_body(closure_hir_id); + let mut closure_body_span = { + // If the body was entirely expanded from a macro + // invocation, i.e. the body is not contained inside the + // closure span, then we walk up the expansion until we + // find the span before the expansion. + let s = self.tcx.hir().span_with_body(body_id.hir_id); + s.find_ancestor_inside(closure_span).unwrap_or(s) + }; + + if let Ok(mut s) = self.tcx.sess.source_map().span_to_snippet(closure_body_span) { + if s.starts_with('$') { + // Looks like a macro fragment. Try to find the real block. + if let Some(hir::Node::Expr(&hir::Expr { + kind: hir::ExprKind::Block(block, ..), .. + })) = self.tcx.hir().find(body_id.hir_id) { + // If the body is a block (with `{..}`), we use the span of that block. + // E.g. with a `|| $body` expanded from a `m!({ .. })`, we use `{ .. }`, and not `$body`. + // Since we know it's a block, we know we can insert the `let _ = ..` without + // breaking the macro syntax. + if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(block.span) { + closure_body_span = block.span; + s = snippet; + } + } + } + + let mut lines = s.lines(); + let line1 = lines.next().unwrap_or_default(); + + if line1.trim_end() == "{" { + // This is a multi-line closure with just a `{` on the first line, + // so we put the `let` on its own line. + // We take the indentation from the next non-empty line. + let line2 = lines.find(|line| !line.is_empty()).unwrap_or_default(); + let indent = line2.split_once(|c: char| !c.is_whitespace()).unwrap_or_default().0; + diagnostics_builder.span_suggestion( + closure_body_span.with_lo(closure_body_span.lo() + BytePos::from_usize(line1.len())).shrink_to_lo(), + &diagnostic_msg, + format!("\n{indent}{migration_string};"), + Applicability::MachineApplicable, + ); + } else if line1.starts_with('{') { + // This is a closure with its body wrapped in + // braces, but with more than just the opening + // brace on the first line. We put the `let` + // directly after the `{`. + diagnostics_builder.span_suggestion( + closure_body_span.with_lo(closure_body_span.lo() + BytePos(1)).shrink_to_lo(), + &diagnostic_msg, + format!(" {migration_string};"), + Applicability::MachineApplicable, + ); + } else { + // This is a closure without braces around the body. + // We add braces to add the `let` before the body. 
+ diagnostics_builder.multipart_suggestion(
+ &diagnostic_msg,
+ vec![
+ (closure_body_span.shrink_to_lo(), format!("{{ {migration_string}; ")),
+ (closure_body_span.shrink_to_hi(), " }".to_string()),
+ ],
+ Applicability::MachineApplicable
+ );
+ }
+ } else {
+ diagnostics_builder.span_suggestion(
+ closure_span,
+ &diagnostic_msg,
+ migration_string,
+ Applicability::HasPlaceholders
+ );
+ }
+
+ diagnostics_builder.emit();
+ },
+ );
+ }
+ }
+
+ /// Combines all the reasons for 2229 migrations
+ fn compute_2229_migrations_reasons(
+ &self,
+ auto_trait_reasons: FxHashSet<&'static str>,
+ drop_order: bool,
+ ) -> MigrationWarningReason {
+ let mut reasons = MigrationWarningReason::default();
+
+ reasons.auto_traits.extend(auto_trait_reasons);
+ reasons.drop_order = drop_order;
+
+ // `auto_trait_reasons` are in hashset order, so sort them to put the
+ // diagnostics we emit later in a cross-platform-consistent order.
+ reasons.auto_traits.sort_unstable();
+
+ reasons
+ }
+
+ /// Figures out the list of root variables (and their types) that aren't completely
+ /// captured by the closure when `capture_disjoint_fields` is enabled and auto-traits
+ /// differ between the root variable and the captured paths.
+ ///
+ /// Returns a tuple containing a HashMap of CapturesInfo that maps to a HashSet of trait names
+ /// if migration is needed for traits for the provided var_hir_id, otherwise returns None
+ fn compute_2229_migrations_for_trait(
+ &self,
+ min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
+ var_hir_id: hir::HirId,
+ closure_clause: hir::CaptureBy,
+ ) -> Option<FxHashMap<UpvarMigrationInfo, FxHashSet<&'static str>>> {
+ let auto_traits_def_id = vec![
+ self.tcx.lang_items().clone_trait(),
+ self.tcx.lang_items().sync_trait(),
+ self.tcx.get_diagnostic_item(sym::Send),
+ self.tcx.lang_items().unpin_trait(),
+ self.tcx.get_diagnostic_item(sym::unwind_safe_trait),
+ self.tcx.get_diagnostic_item(sym::ref_unwind_safe_trait),
+ ];
+ const AUTO_TRAITS: [&str; 6] =
+ ["`Clone`", "`Sync`", "`Send`", "`Unpin`", "`UnwindSafe`", "`RefUnwindSafe`"];
+
+ let root_var_min_capture_list = min_captures.and_then(|m| m.get(&var_hir_id))?;
+
+ let ty = self.resolve_vars_if_possible(self.node_ty(var_hir_id));
+
+ let ty = match closure_clause {
+ hir::CaptureBy::Value => ty, // For move closure the capture kind should be by value
+ hir::CaptureBy::Ref => {
+ // For non move closure the capture kind is the max capture kind of all captures
+ // according to the ordering ImmBorrow < UniqueImmBorrow < MutBorrow < ByValue
+ let mut max_capture_info = root_var_min_capture_list.first().unwrap().info;
+ for capture in root_var_min_capture_list.iter() {
+ max_capture_info = determine_capture_info(max_capture_info, capture.info);
+ }
+
+ apply_capture_kind_on_capture_ty(
+ self.tcx,
+ ty,
+ max_capture_info.capture_kind,
+ Some(self.tcx.lifetimes.re_erased),
+ )
+ }
+ };
+
+ let mut obligations_should_hold = Vec::new();
+ // Checks if a root variable implements any of the auto traits
+ for check_trait in auto_traits_def_id.iter() {
+ obligations_should_hold.push(
+ check_trait
+ .map(|check_trait| {
+ self.infcx
+ .type_implements_trait(
+ check_trait,
+ ty,
+ self.tcx.mk_substs_trait(ty, &[]),
+ self.param_env,
+ )
+ .must_apply_modulo_regions()
+ })
+ .unwrap_or(false),
+ );
+ }
+
+ let mut problematic_captures = FxHashMap::default();
+ // Check whether captured fields also implement the trait
+ for capture in root_var_min_capture_list.iter() {
+ let ty = apply_capture_kind_on_capture_ty(
+ self.tcx,
+ capture.place.ty(),
+ capture.info.capture_kind,
+ Some(self.tcx.lifetimes.re_erased),
+ );
+
+ // Checks if a capture implements any of the auto traits
+ let mut obligations_holds_for_capture = Vec::new();
+ for check_trait in auto_traits_def_id.iter() {
+ obligations_holds_for_capture.push(
+ check_trait
+ .map(|check_trait| {
+ self.infcx
+ .type_implements_trait(
+ check_trait,
+ ty,
+ self.tcx.mk_substs_trait(ty, &[]),
+ self.param_env,
+ )
+ .must_apply_modulo_regions()
+ })
+ .unwrap_or(false),
+ );
+ }
+
+ let mut capture_problems = FxHashSet::default();
+
+ // Checks if for any of the auto traits, one or more trait is implemented
+ // by the root variable but not by the capture
+ for (idx, _) in obligations_should_hold.iter().enumerate() {
+ if !obligations_holds_for_capture[idx] && obligations_should_hold[idx] {
+ capture_problems.insert(AUTO_TRAITS[idx]);
+ }
+ }
+
+ if !capture_problems.is_empty() {
+ problematic_captures.insert(
+ UpvarMigrationInfo::CapturingPrecise {
+ source_expr: capture.info.path_expr_id,
+ var_name: capture.to_string(self.tcx),
+ },
+ capture_problems,
+ );
+ }
+ }
+ if !problematic_captures.is_empty() {
+ return Some(problematic_captures);
+ }
+ None
+ }
+
+ /// Figures out the list of root variables (and their types) that aren't completely
+ /// captured by the closure when `capture_disjoint_fields` is enabled and drop order of
+ /// some path starting at that root variable **might** be affected.
+ ///
+ /// The output list would include a root variable if:
+ /// - It would have been moved into the closure when `capture_disjoint_fields` wasn't
+ /// enabled, **and**
+ /// - It wasn't completely captured by the closure, **and**
+ /// - One of the paths starting at this root variable, that is not captured needs Drop.
+ ///
+ /// This function only returns a HashSet of CapturesInfo for significant drops. If there
+ /// are no significant drops then None is returned
+ #[instrument(level = "debug", skip(self))]
+ fn compute_2229_migrations_for_drop(
+ &self,
+ closure_def_id: LocalDefId,
+ closure_span: Span,
+ min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
+ closure_clause: hir::CaptureBy,
+ var_hir_id: hir::HirId,
+ ) -> Option<FxHashSet<UpvarMigrationInfo>> {
+ let ty = self.resolve_vars_if_possible(self.node_ty(var_hir_id));
+
+ if !ty.has_significant_drop(self.tcx, self.tcx.param_env(closure_def_id)) {
+ debug!("does not have significant drop");
+ return None;
+ }
+
+ let Some(root_var_min_capture_list) = min_captures.and_then(|m| m.get(&var_hir_id)) else {
+ // The upvar is mentioned within the closure but no path starting from it is
+ // used. This occurs when you have (e.g.)
+ // + // ``` + // let x = move || { + // let _ = y; + // }); + // ``` + debug!("no path starting from it is used"); + + + match closure_clause { + // Only migrate if closure is a move closure + hir::CaptureBy::Value => { + let mut diagnostics_info = FxHashSet::default(); + let upvars = self.tcx.upvars_mentioned(closure_def_id).expect("must be an upvar"); + let upvar = upvars[&var_hir_id]; + diagnostics_info.insert(UpvarMigrationInfo::CapturingNothing { use_span: upvar.span }); + return Some(diagnostics_info); + } + hir::CaptureBy::Ref => {} + } + + return None; + }; + debug!(?root_var_min_capture_list); + + let mut projections_list = Vec::new(); + let mut diagnostics_info = FxHashSet::default(); + + for captured_place in root_var_min_capture_list.iter() { + match captured_place.info.capture_kind { + // Only care about captures that are moved into the closure + ty::UpvarCapture::ByValue => { + projections_list.push(captured_place.place.projections.as_slice()); + diagnostics_info.insert(UpvarMigrationInfo::CapturingPrecise { + source_expr: captured_place.info.path_expr_id, + var_name: captured_place.to_string(self.tcx), + }); + } + ty::UpvarCapture::ByRef(..) => {} + } + } + + debug!(?projections_list); + debug!(?diagnostics_info); + + let is_moved = !projections_list.is_empty(); + debug!(?is_moved); + + let is_not_completely_captured = + root_var_min_capture_list.iter().any(|capture| !capture.place.projections.is_empty()); + debug!(?is_not_completely_captured); + + if is_moved + && is_not_completely_captured + && self.has_significant_drop_outside_of_captures( + closure_def_id, + closure_span, + ty, + projections_list, + ) + { + return Some(diagnostics_info); + } + + None + } + + /// Figures out the list of root variables (and their types) that aren't completely + /// captured by the closure when `capture_disjoint_fields` is enabled and either drop + /// order of some path starting at that root variable **might** be affected or auto-traits + /// differ between the root variable and the captured paths. + /// + /// The output list would include a root variable if: + /// - It would have been moved into the closure when `capture_disjoint_fields` wasn't + /// enabled, **and** + /// - It wasn't completely captured by the closure, **and** + /// - One of the paths starting at this root variable, that is not captured needs Drop **or** + /// - One of the paths captured does not implement all the auto-traits its root variable + /// implements. 
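+ ///
+ /// Illustrative auto-trait case with a hypothetical wrapper type:
+ /// ```ignore (illustrative)
+ /// struct Wrapper(*mut i32);
+ /// unsafe impl Send for Wrapper {}
+ /// let w = Wrapper(std::ptr::null_mut());
+ /// let c = move || unsafe { *w.0 = 1 };
+ /// // 2021 precise capture takes only `w.0: *mut i32`, which is not `Send`,
+ /// // so the closure loses `Send` even though capturing all of `w` kept it.
+ /// ```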
+ ///
+ /// Returns a tuple containing a vector of MigrationDiagnosticInfo, as well as a String
+ /// containing the reason why root variables whose HirId is contained in the vector should
+ /// be captured
+ #[instrument(level = "debug", skip(self))]
+ fn compute_2229_migrations(
+ &self,
+ closure_def_id: LocalDefId,
+ closure_span: Span,
+ closure_clause: hir::CaptureBy,
+ min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
+ ) -> (Vec<NeededMigration>, MigrationWarningReason) {
+ let Some(upvars) = self.tcx.upvars_mentioned(closure_def_id) else {
+ return (Vec::new(), MigrationWarningReason::default());
+ };
+
+ let mut need_migrations = Vec::new();
+ let mut auto_trait_migration_reasons = FxHashSet::default();
+ let mut drop_migration_needed = false;
+
+ // Perform auto-trait analysis
+ for (&var_hir_id, _) in upvars.iter() {
+ let mut diagnostics_info = Vec::new();
+
+ let auto_trait_diagnostic = if let Some(diagnostics_info) =
+ self.compute_2229_migrations_for_trait(min_captures, var_hir_id, closure_clause)
+ {
+ diagnostics_info
+ } else {
+ FxHashMap::default()
+ };
+
+ let drop_reorder_diagnostic = if let Some(diagnostics_info) = self
+ .compute_2229_migrations_for_drop(
+ closure_def_id,
+ closure_span,
+ min_captures,
+ closure_clause,
+ var_hir_id,
+ ) {
+ drop_migration_needed = true;
+ diagnostics_info
+ } else {
+ FxHashSet::default()
+ };
+
+ // Combine all the captures responsible for needing migrations into one HashSet
+ let mut capture_diagnostic = drop_reorder_diagnostic.clone();
+ for key in auto_trait_diagnostic.keys() {
+ capture_diagnostic.insert(key.clone());
+ }
+
+ let mut capture_diagnostic = capture_diagnostic.into_iter().collect::<Vec<_>>();
+ capture_diagnostic.sort();
+ for captures_info in capture_diagnostic {
+ // Get the auto trait reasons of why migration is needed because of that capture, if there are any
+ let capture_trait_reasons =
+ if let Some(reasons) = auto_trait_diagnostic.get(&captures_info) {
+ reasons.clone()
+ } else {
+ FxHashSet::default()
+ };
+
+ // Check if migration is needed because of drop reorder as a result of that capture
+ let capture_drop_reorder_reason = drop_reorder_diagnostic.contains(&captures_info);
+
+ // Combine all the reasons of why the root variable should be captured as a result of
+ // auto trait implementation issues
+ auto_trait_migration_reasons.extend(capture_trait_reasons.clone());
+
+ diagnostics_info.push(MigrationLintNote {
+ captures_info,
+ reason: self.compute_2229_migrations_reasons(
+ capture_trait_reasons,
+ capture_drop_reorder_reason,
+ ),
+ });
+ }
+
+ if !diagnostics_info.is_empty() {
+ need_migrations.push(NeededMigration { var_hir_id, diagnostics_info });
+ }
+ }
+ (
+ need_migrations,
+ self.compute_2229_migrations_reasons(
+ auto_trait_migration_reasons,
+ drop_migration_needed,
+ ),
+ )
+ }
+
+ /// This is a helper function to `compute_2229_migrations_precise_pass`. Provided the type
+ /// of a root variable and a list of captured paths starting at this root variable (expressed
+ /// using list of `Projection` slices), it returns true if there is a path that is not
+ /// captured starting at this root variable that implements Drop.
+ ///
+ /// The way this function works is at a given call it looks at type `base_path_ty` of some base
+ /// path say P and then list of projection slices which represent the different captures moved
+ /// into the closure starting off of P.
+ /// + /// This will make more sense with an example: + /// + /// ```rust + /// #![feature(capture_disjoint_fields)] + /// + /// struct FancyInteger(i32); // This implements Drop + /// + /// struct Point { x: FancyInteger, y: FancyInteger } + /// struct Color; + /// + /// struct Wrapper { p: Point, c: Color } + /// + /// fn f(w: Wrapper) { + /// let c = || { + /// // Closure captures w.p.x and w.c by move. + /// }; + /// + /// c(); + /// } + /// ``` + /// + /// If `capture_disjoint_fields` wasn't enabled the closure would've moved `w` instead of the + /// precise paths. If we look closely `w.p.y` isn't captured which implements Drop and + /// therefore Drop ordering would change and we want this function to return true. + /// + /// Call stack to figure out if we need to migrate for `w` would look as follows: + /// + /// Our initial base path is just `w`, and the paths captured from it are `w[p, x]` and + /// `w[c]`. + /// Notation: + /// - Ty(place): Type of place + /// - `(a, b)`: Represents the function parameters `base_path_ty` and `captured_by_move_projs` + /// respectively. + /// ```ignore (illustrative) + /// (Ty(w), [ &[p, x], &[c] ]) + /// // | + /// // ---------------------------- + /// // | | + /// // v v + /// (Ty(w.p), [ &[x] ]) (Ty(w.c), [ &[] ]) // I(1) + /// // | | + /// // v v + /// (Ty(w.p), [ &[x] ]) false + /// // | + /// // | + /// // ------------------------------- + /// // | | + /// // v v + /// (Ty((w.p).x), [ &[] ]) (Ty((w.p).y), []) // IMP 2 + /// // | | + /// // v v + /// false NeedsSignificantDrop(Ty(w.p.y)) + /// // | + /// // v + /// true + /// ``` + /// + /// IMP 1 `(Ty(w.c), [ &[] ])`: Notice the single empty slice inside `captured_projs`. + /// This implies that the `w.c` is completely captured by the closure. + /// Since drop for this path will be called when the closure is + /// dropped we don't need to migrate for it. + /// + /// IMP 2 `(Ty((w.p).y), [])`: Notice that `captured_projs` is empty. This implies that this + /// path wasn't captured by the closure. Also note that even + /// though we didn't capture this path, the function visits it, + /// which is kind of the point of this function. We then return + /// if the type of `w.p.y` implements Drop, which in this case is + /// true. + /// + /// Consider another example: + /// + /// ```ignore (pseudo-rust) + /// struct X; + /// impl Drop for X {} + /// + /// struct Y(X); + /// impl Drop for Y {} + /// + /// fn foo() { + /// let y = Y(X); + /// let c = || move(y.0); + /// } + /// ``` + /// + /// Note that `y.0` is captured by the closure. When this function is called for `y`, it will + /// return true, because even though all paths starting at `y` are captured, `y` itself + /// implements Drop which will be affected since `y` isn't completely captured. 
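The doc comment above walks through the `Wrapper`/`Point`/`FancyInteger` call tree abstractly. As a standalone illustration (not part of this patch; the types mirror the example above and an edition-2021 compiler is assumed), this is roughly what the drop-order difference and the lint's suggested `let _ = &w;` migration look like from the user's side:

```rust
// Illustrative sketch only, not part of this patch. It mirrors the
// `Wrapper`/`Point`/`FancyInteger` example in the comment above and assumes an
// edition-2021 compiler, where closures capture disjoint fields precisely.
struct FancyInteger(i32);
impl Drop for FancyInteger {
    fn drop(&mut self) {
        println!("dropping FancyInteger({})", self.0);
    }
}
struct Point { x: FancyInteger, y: FancyInteger }
struct Color;
struct Wrapper { p: Point, c: Color }

fn f(w: Wrapper) {
    let c = move || {
        // Edition 2021 captures only `w.p.x` and `w.c`, so `w.p.y` stays behind
        // in `f` and is dropped when `f` returns, not when the closure is dropped.
        // Uncommenting the next line is the migration the lint suggests: it forces
        // the whole of `w` into the closure, restoring the pre-2021 drop timing.
        // let _ = &w;
        let _x = w.p.x;
        let _c = w.c;
    };
    c();
}

fn main() {
    f(Wrapper { p: Point { x: FancyInteger(1), y: FancyInteger(2) }, c: Color });
}
```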
+ fn has_significant_drop_outside_of_captures( + &self, + closure_def_id: LocalDefId, + closure_span: Span, + base_path_ty: Ty<'tcx>, + captured_by_move_projs: Vec<&[Projection<'tcx>]>, + ) -> bool { + let needs_drop = + |ty: Ty<'tcx>| ty.has_significant_drop(self.tcx, self.tcx.param_env(closure_def_id)); + + let is_drop_defined_for_ty = |ty: Ty<'tcx>| { + let drop_trait = self.tcx.require_lang_item(hir::LangItem::Drop, Some(closure_span)); + let ty_params = self.tcx.mk_substs_trait(base_path_ty, &[]); + self.infcx + .type_implements_trait( + drop_trait, + ty, + ty_params, + self.tcx.param_env(closure_def_id), + ) + .must_apply_modulo_regions() + }; + + let is_drop_defined_for_ty = is_drop_defined_for_ty(base_path_ty); + + // If there is a case where no projection is applied on top of current place + // then there must be exactly one capture corresponding to such a case. Note that this + // represents the case of the path being completely captured by the variable. + // + // eg. If `a.b` is captured and we are processing `a.b`, then we can't have the closure also + // capture `a.b.c`, because that violates min capture. + let is_completely_captured = captured_by_move_projs.iter().any(|projs| projs.is_empty()); + + assert!(!is_completely_captured || (captured_by_move_projs.len() == 1)); + + if is_completely_captured { + // The place is captured entirely, so doesn't matter if needs dtor, it will be drop + // when the closure is dropped. + return false; + } + + if captured_by_move_projs.is_empty() { + return needs_drop(base_path_ty); + } + + if is_drop_defined_for_ty { + // If drop is implemented for this type then we need it to be fully captured, + // and we know it is not completely captured because of the previous checks. + + // Note that this is a bug in the user code that will be reported by the + // borrow checker, since we can't move out of drop types. + + // The bug exists in the user's code pre-migration, and we don't migrate here. + return false; + } + + match base_path_ty.kind() { + // Observations: + // - `captured_by_move_projs` is not empty. Therefore we can call + // `captured_by_move_projs.first().unwrap()` safely. + // - All entries in `captured_by_move_projs` have at least one projection. + // Therefore we can call `captured_by_move_projs.first().unwrap().first().unwrap()` safely. + + // We don't capture derefs in case of move captures, which would have be applied to + // access any further paths. + ty::Adt(def, _) if def.is_box() => unreachable!(), + ty::Ref(..) => unreachable!(), + ty::RawPtr(..) => unreachable!(), + + ty::Adt(def, substs) => { + // Multi-variant enums are captured in entirety, + // which would've been handled in the case of single empty slice in `captured_by_move_projs`. + assert_eq!(def.variants().len(), 1); + + // Only Field projections can be applied to a non-box Adt. + assert!( + captured_by_move_projs.iter().all(|projs| matches!( + projs.first().unwrap().kind, + ProjectionKind::Field(..) 
+ )) + ); + def.variants().get(VariantIdx::new(0)).unwrap().fields.iter().enumerate().any( + |(i, field)| { + let paths_using_field = captured_by_move_projs + .iter() + .filter_map(|projs| { + if let ProjectionKind::Field(field_idx, _) = + projs.first().unwrap().kind + { + if (field_idx as usize) == i { Some(&projs[1..]) } else { None } + } else { + unreachable!(); + } + }) + .collect(); + + let after_field_ty = field.ty(self.tcx, substs); + self.has_significant_drop_outside_of_captures( + closure_def_id, + closure_span, + after_field_ty, + paths_using_field, + ) + }, + ) + } + + ty::Tuple(fields) => { + // Only Field projections can be applied to a tuple. + assert!( + captured_by_move_projs.iter().all(|projs| matches!( + projs.first().unwrap().kind, + ProjectionKind::Field(..) + )) + ); + + fields.iter().enumerate().any(|(i, element_ty)| { + let paths_using_field = captured_by_move_projs + .iter() + .filter_map(|projs| { + if let ProjectionKind::Field(field_idx, _) = projs.first().unwrap().kind + { + if (field_idx as usize) == i { Some(&projs[1..]) } else { None } + } else { + unreachable!(); + } + }) + .collect(); + + self.has_significant_drop_outside_of_captures( + closure_def_id, + closure_span, + element_ty, + paths_using_field, + ) + }) + } + + // Anything else would be completely captured and therefore handled already. + _ => unreachable!(), + } + } + + fn init_capture_kind_for_place( + &self, + place: &Place<'tcx>, + capture_clause: hir::CaptureBy, + ) -> ty::UpvarCapture { + match capture_clause { + // In case of a move closure if the data is accessed through a reference we + // want to capture by ref to allow precise capture using reborrows. + // + // If the data will be moved out of this place, then the place will be truncated + // at the first Deref in `adjust_upvar_borrow_kind_for_consume` and then moved into + // the closure. 
+ hir::CaptureBy::Value if !place.deref_tys().any(Ty::is_ref) => { + ty::UpvarCapture::ByValue + } + hir::CaptureBy::Value | hir::CaptureBy::Ref => ty::UpvarCapture::ByRef(ty::ImmBorrow), + } + } + + fn place_for_root_variable( + &self, + closure_def_id: LocalDefId, + var_hir_id: hir::HirId, + ) -> Place<'tcx> { + let upvar_id = ty::UpvarId::new(var_hir_id, closure_def_id); + + Place { + base_ty: self.node_ty(var_hir_id), + base: PlaceBase::Upvar(upvar_id), + projections: Default::default(), + } + } + + fn should_log_capture_analysis(&self, closure_def_id: LocalDefId) -> bool { + self.tcx.has_attr(closure_def_id.to_def_id(), sym::rustc_capture_analysis) + } + + fn log_capture_analysis_first_pass( + &self, + closure_def_id: LocalDefId, + capture_information: &InferredCaptureInformation<'tcx>, + closure_span: Span, + ) { + if self.should_log_capture_analysis(closure_def_id) { + let mut diag = + self.tcx.sess.struct_span_err(closure_span, "First Pass analysis includes:"); + for (place, capture_info) in capture_information { + let capture_str = construct_capture_info_string(self.tcx, place, capture_info); + let output_str = format!("Capturing {capture_str}"); + + let span = + capture_info.path_expr_id.map_or(closure_span, |e| self.tcx.hir().span(e)); + diag.span_note(span, &output_str); + } + diag.emit(); + } + } + + fn log_closure_min_capture_info(&self, closure_def_id: LocalDefId, closure_span: Span) { + if self.should_log_capture_analysis(closure_def_id) { + if let Some(min_captures) = + self.typeck_results.borrow().closure_min_captures.get(&closure_def_id) + { + let mut diag = + self.tcx.sess.struct_span_err(closure_span, "Min Capture analysis includes:"); + + for (_, min_captures_for_var) in min_captures { + for capture in min_captures_for_var { + let place = &capture.place; + let capture_info = &capture.info; + + let capture_str = + construct_capture_info_string(self.tcx, place, capture_info); + let output_str = format!("Min Capture {capture_str}"); + + if capture.info.path_expr_id != capture.info.capture_kind_expr_id { + let path_span = capture_info + .path_expr_id + .map_or(closure_span, |e| self.tcx.hir().span(e)); + let capture_kind_span = capture_info + .capture_kind_expr_id + .map_or(closure_span, |e| self.tcx.hir().span(e)); + + let mut multi_span: MultiSpan = + MultiSpan::from_spans(vec![path_span, capture_kind_span]); + + let capture_kind_label = + construct_capture_kind_reason_string(self.tcx, place, capture_info); + let path_label = construct_path_string(self.tcx, place); + + multi_span.push_span_label(path_span, path_label); + multi_span.push_span_label(capture_kind_span, capture_kind_label); + + diag.span_note(multi_span, &output_str); + } else { + let span = capture_info + .path_expr_id + .map_or(closure_span, |e| self.tcx.hir().span(e)); + + diag.span_note(span, &output_str); + }; + } + } + diag.emit(); + } + } + } + + /// A captured place is mutable if + /// 1. Projections don't include a Deref of an immut-borrow, **and** + /// 2. PlaceBase is mut or projections include a Deref of a mut-borrow. 
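As a user-facing sketch of the two mutability rules above (illustrative only, not part of this patch; edition-2021 capture rules assumed):

```rust
// Illustrative sketch only, not part of this patch.
fn main() {
    let mut s = String::from("hello");
    // Note `r` is not declared `mut`: the captured place `*r` is still mutable
    // because its projections include a Deref of a `&mut` (rule 2).
    let r: &mut String = &mut s;
    let mut push_bang = || r.push('!');
    push_bang();

    let t = String::from("world");
    let shared: &String = &t;
    // Every way to reach the data goes through a Deref of an immutable borrow,
    // so the captured place is not considered mutable (rule 1); a closure that
    // tried to write through `shared` would be rejected.
    let print_len = || println!("{}", shared.len());
    print_len();
}
```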
+ fn determine_capture_mutability( + &self, + typeck_results: &'a TypeckResults<'tcx>, + place: &Place<'tcx>, + ) -> hir::Mutability { + let var_hir_id = match place.base { + PlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id, + _ => unreachable!(), + }; + + let bm = *typeck_results.pat_binding_modes().get(var_hir_id).expect("missing binding mode"); + + let mut is_mutbl = match bm { + ty::BindByValue(mutability) => mutability, + ty::BindByReference(_) => hir::Mutability::Not, + }; + + for pointer_ty in place.deref_tys() { + match pointer_ty.kind() { + // We don't capture derefs of raw ptrs + ty::RawPtr(_) => unreachable!(), + + // Dereferencing a mut-ref allows us to mut the Place if we don't deref + // an immut-ref after on top of this. + ty::Ref(.., hir::Mutability::Mut) => is_mutbl = hir::Mutability::Mut, + + // The place isn't mutable once we dereference an immutable reference. + ty::Ref(.., hir::Mutability::Not) => return hir::Mutability::Not, + + // Dereferencing a box doesn't change mutability + ty::Adt(def, ..) if def.is_box() => {} + + unexpected_ty => bug!("deref of unexpected pointer type {:?}", unexpected_ty), + } + } + + is_mutbl + } +} + +/// Truncate the capture so that the place being borrowed is in accordance with RFC 1240, +/// which states that it's unsafe to take a reference into a struct marked `repr(packed)`. +fn restrict_repr_packed_field_ref_capture<'tcx>( + tcx: TyCtxt<'tcx>, + param_env: ty::ParamEnv<'tcx>, + mut place: Place<'tcx>, + mut curr_borrow_kind: ty::UpvarCapture, +) -> (Place<'tcx>, ty::UpvarCapture) { + let pos = place.projections.iter().enumerate().position(|(i, p)| { + let ty = place.ty_before_projection(i); + + // Return true for fields of packed structs, unless those fields have alignment 1. + match p.kind { + ProjectionKind::Field(..) => match ty.kind() { + ty::Adt(def, _) if def.repr().packed() => { + // We erase regions here because they cannot be hashed + match tcx.layout_of(param_env.and(tcx.erase_regions(p.ty))) { + Ok(layout) if layout.align.abi.bytes() == 1 => { + // if the alignment is 1, the type can't be further + // disaligned. 
+ debug!( + "restrict_repr_packed_field_ref_capture: ({:?}) - align = 1", + place + ); + false + } + _ => { + debug!("restrict_repr_packed_field_ref_capture: ({:?}) - true", place); + true + } + } + } + + _ => false, + }, + _ => false, + } + }); + + if let Some(pos) = pos { + truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_borrow_kind, pos); + } + + (place, curr_borrow_kind) +} + +/// Returns a Ty that applies the specified capture kind on the provided capture Ty +fn apply_capture_kind_on_capture_ty<'tcx>( + tcx: TyCtxt<'tcx>, + ty: Ty<'tcx>, + capture_kind: UpvarCapture, + region: Option>, +) -> Ty<'tcx> { + match capture_kind { + ty::UpvarCapture::ByValue => ty, + ty::UpvarCapture::ByRef(kind) => { + tcx.mk_ref(region.unwrap(), ty::TypeAndMut { ty: ty, mutbl: kind.to_mutbl_lossy() }) + } + } +} + +/// Returns the Span of where the value with the provided HirId would be dropped +fn drop_location_span<'tcx>(tcx: TyCtxt<'tcx>, hir_id: hir::HirId) -> Span { + let owner_id = tcx.hir().get_enclosing_scope(hir_id).unwrap(); + + let owner_node = tcx.hir().get(owner_id); + let owner_span = match owner_node { + hir::Node::Item(item) => match item.kind { + hir::ItemKind::Fn(_, _, owner_id) => tcx.hir().span(owner_id.hir_id), + _ => { + bug!("Drop location span error: need to handle more ItemKind '{:?}'", item.kind); + } + }, + hir::Node::Block(block) => tcx.hir().span(block.hir_id), + hir::Node::TraitItem(item) => tcx.hir().span(item.hir_id()), + hir::Node::ImplItem(item) => tcx.hir().span(item.hir_id()), + _ => { + bug!("Drop location span error: need to handle more Node '{:?}'", owner_node); + } + }; + tcx.sess.source_map().end_point(owner_span) +} + +struct InferBorrowKind<'a, 'tcx> { + fcx: &'a FnCtxt<'a, 'tcx>, + + // The def-id of the closure whose kind and upvar accesses are being inferred. + closure_def_id: LocalDefId, + + /// For each Place that is captured by the closure, we track the minimal kind of + /// access we need (ref, ref mut, move, etc) and the expression that resulted in such access. + /// + /// Consider closure where s.str1 is captured via an ImmutableBorrow and + /// s.str2 via a MutableBorrow + /// + /// ```rust,no_run + /// struct SomeStruct { str1: String, str2: String }; + /// + /// // Assume that the HirId for the variable definition is `V1` + /// let mut s = SomeStruct { str1: format!("s1"), str2: format!("s2") }; + /// + /// let fix_s = |new_s2| { + /// // Assume that the HirId for the expression `s.str1` is `E1` + /// println!("Updating SomeStruct with str1={0}", s.str1); + /// // Assume that the HirId for the expression `*s.str2` is `E2` + /// s.str2 = new_s2; + /// }; + /// ``` + /// + /// For closure `fix_s`, (at a high level) the map contains + /// + /// ```ignore (illustrative) + /// Place { V1, [ProjectionKind::Field(Index=0, Variant=0)] } : CaptureKind { E1, ImmutableBorrow } + /// Place { V1, [ProjectionKind::Field(Index=1, Variant=0)] } : CaptureKind { E2, MutableBorrow } + /// ``` + capture_information: InferredCaptureInformation<'tcx>, + fake_reads: Vec<(Place<'tcx>, FakeReadCause, hir::HirId)>, +} + +impl<'a, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'tcx> { + fn fake_read( + &mut self, + place: &PlaceWithHirId<'tcx>, + cause: FakeReadCause, + diag_expr_id: hir::HirId, + ) { + let PlaceBase::Upvar(_) = place.place.base else { return }; + + // We need to restrict Fake Read precision to avoid fake reading unsafe code, + // such as deref of a raw pointer. 
+ let dummy_capture_kind = ty::UpvarCapture::ByRef(ty::BorrowKind::ImmBorrow); + + let (place, _) = restrict_capture_precision(place.place.clone(), dummy_capture_kind); + + let (place, _) = restrict_repr_packed_field_ref_capture( + self.fcx.tcx, + self.fcx.param_env, + place, + dummy_capture_kind, + ); + self.fake_reads.push((place, cause, diag_expr_id)); + } + + #[instrument(skip(self), level = "debug")] + fn consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) { + let PlaceBase::Upvar(upvar_id) = place_with_id.place.base else { return }; + assert_eq!(self.closure_def_id, upvar_id.closure_expr_id); + + self.capture_information.push(( + place_with_id.place.clone(), + ty::CaptureInfo { + capture_kind_expr_id: Some(diag_expr_id), + path_expr_id: Some(diag_expr_id), + capture_kind: ty::UpvarCapture::ByValue, + }, + )); + } + + #[instrument(skip(self), level = "debug")] + fn borrow( + &mut self, + place_with_id: &PlaceWithHirId<'tcx>, + diag_expr_id: hir::HirId, + bk: ty::BorrowKind, + ) { + let PlaceBase::Upvar(upvar_id) = place_with_id.place.base else { return }; + assert_eq!(self.closure_def_id, upvar_id.closure_expr_id); + + // The region here will get discarded/ignored + let capture_kind = ty::UpvarCapture::ByRef(bk); + + // We only want repr packed restriction to be applied to reading references into a packed + // struct, and not when the data is being moved. Therefore we call this method here instead + // of in `restrict_capture_precision`. + let (place, mut capture_kind) = restrict_repr_packed_field_ref_capture( + self.fcx.tcx, + self.fcx.param_env, + place_with_id.place.clone(), + capture_kind, + ); + + // Raw pointers don't inherit mutability + if place_with_id.place.deref_tys().any(Ty::is_unsafe_ptr) { + capture_kind = ty::UpvarCapture::ByRef(ty::BorrowKind::ImmBorrow); + } + + self.capture_information.push(( + place, + ty::CaptureInfo { + capture_kind_expr_id: Some(diag_expr_id), + path_expr_id: Some(diag_expr_id), + capture_kind, + }, + )); + } + + #[instrument(skip(self), level = "debug")] + fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) { + self.borrow(assignee_place, diag_expr_id, ty::BorrowKind::MutBorrow); + } +} + +/// Rust doesn't permit moving fields out of a type that implements drop +fn restrict_precision_for_drop_types<'a, 'tcx>( + fcx: &'a FnCtxt<'a, 'tcx>, + mut place: Place<'tcx>, + mut curr_mode: ty::UpvarCapture, + span: Span, +) -> (Place<'tcx>, ty::UpvarCapture) { + let is_copy_type = fcx.infcx.type_is_copy_modulo_regions(fcx.param_env, place.ty(), span); + + if let (false, UpvarCapture::ByValue) = (is_copy_type, curr_mode) { + for i in 0..place.projections.len() { + match place.ty_before_projection(i).kind() { + ty::Adt(def, _) if def.destructor(fcx.tcx).is_some() => { + truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, i); + break; + } + _ => {} + } + } + } + + (place, curr_mode) +} + +/// Truncate `place` so that an `unsafe` block isn't required to capture it. +/// - No projections are applied to raw pointers, since these require unsafe blocks. We capture +/// them completely. +/// - No projections are applied on top of Union ADTs, since these require unsafe blocks. 
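A user-level sketch of why the truncation described below exists (illustrative only, not part of this patch): reading a union field or dereferencing a raw pointer requires `unsafe`, so precise capture stops at the union or the pointer itself rather than at `u.f1` or `*p`.

```rust
// Illustrative sketch only, not part of this patch.
#[allow(dead_code)]
union U {
    f1: u32,
    f2: f32,
}

fn main() {
    let u = U { f1: 42 };
    // The closure captures all of `u`, not the precise path `u.f1`, because
    // reading a union field needs an `unsafe` block.
    let read_union = move || unsafe { println!("{}", u.f1) };
    read_union();

    let x = 7i32;
    let p: *const i32 = &x;
    // Likewise the closure captures `p` itself, never the place `*p`.
    let read_ptr = move || unsafe { println!("{}", *p) };
    read_ptr();
}
```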
+fn restrict_precision_for_unsafe<'tcx>( + mut place: Place<'tcx>, + mut curr_mode: ty::UpvarCapture, +) -> (Place<'tcx>, ty::UpvarCapture) { + if place.base_ty.is_unsafe_ptr() { + truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, 0); + } + + if place.base_ty.is_union() { + truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, 0); + } + + for (i, proj) in place.projections.iter().enumerate() { + if proj.ty.is_unsafe_ptr() { + // Don't apply any projections on top of an unsafe ptr. + truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, i + 1); + break; + } + + if proj.ty.is_union() { + // Don't capture precise fields of a union. + truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, i + 1); + break; + } + } + + (place, curr_mode) +} + +/// Truncate projections so that following rules are obeyed by the captured `place`: +/// - No Index projections are captured, since arrays are captured completely. +/// - No unsafe block is required to capture `place` +/// Returns the truncated place and updated capture mode. +fn restrict_capture_precision<'tcx>( + place: Place<'tcx>, + curr_mode: ty::UpvarCapture, +) -> (Place<'tcx>, ty::UpvarCapture) { + let (mut place, mut curr_mode) = restrict_precision_for_unsafe(place, curr_mode); + + if place.projections.is_empty() { + // Nothing to do here + return (place, curr_mode); + } + + for (i, proj) in place.projections.iter().enumerate() { + match proj.kind { + ProjectionKind::Index => { + // Arrays are completely captured, so we drop Index projections + truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, i); + return (place, curr_mode); + } + ProjectionKind::Deref => {} + ProjectionKind::Field(..) => {} // ignore + ProjectionKind::Subslice => {} // We never capture this + } + } + + (place, curr_mode) +} + +/// Truncate deref of any reference. +fn adjust_for_move_closure<'tcx>( + mut place: Place<'tcx>, + mut kind: ty::UpvarCapture, +) -> (Place<'tcx>, ty::UpvarCapture) { + let first_deref = place.projections.iter().position(|proj| proj.kind == ProjectionKind::Deref); + + if let Some(idx) = first_deref { + truncate_place_to_len_and_update_capture_kind(&mut place, &mut kind, idx); + } + + (place, ty::UpvarCapture::ByValue) +} + +/// Adjust closure capture just that if taking ownership of data, only move data +/// from enclosing stack frame. +fn adjust_for_non_move_closure<'tcx>( + mut place: Place<'tcx>, + mut kind: ty::UpvarCapture, +) -> (Place<'tcx>, ty::UpvarCapture) { + let contains_deref = + place.projections.iter().position(|proj| proj.kind == ProjectionKind::Deref); + + match kind { + ty::UpvarCapture::ByValue => { + if let Some(idx) = contains_deref { + truncate_place_to_len_and_update_capture_kind(&mut place, &mut kind, idx); + } + } + + ty::UpvarCapture::ByRef(..) 
=> {} + } + + (place, kind) +} + +fn construct_place_string<'tcx>(tcx: TyCtxt<'_>, place: &Place<'tcx>) -> String { + let variable_name = match place.base { + PlaceBase::Upvar(upvar_id) => var_name(tcx, upvar_id.var_path.hir_id).to_string(), + _ => bug!("Capture_information should only contain upvars"), + }; + + let mut projections_str = String::new(); + for (i, item) in place.projections.iter().enumerate() { + let proj = match item.kind { + ProjectionKind::Field(a, b) => format!("({:?}, {:?})", a, b), + ProjectionKind::Deref => String::from("Deref"), + ProjectionKind::Index => String::from("Index"), + ProjectionKind::Subslice => String::from("Subslice"), + }; + if i != 0 { + projections_str.push(','); + } + projections_str.push_str(proj.as_str()); + } + + format!("{variable_name}[{projections_str}]") +} + +fn construct_capture_kind_reason_string<'tcx>( + tcx: TyCtxt<'_>, + place: &Place<'tcx>, + capture_info: &ty::CaptureInfo, +) -> String { + let place_str = construct_place_string(tcx, place); + + let capture_kind_str = match capture_info.capture_kind { + ty::UpvarCapture::ByValue => "ByValue".into(), + ty::UpvarCapture::ByRef(kind) => format!("{:?}", kind), + }; + + format!("{place_str} captured as {capture_kind_str} here") +} + +fn construct_path_string<'tcx>(tcx: TyCtxt<'_>, place: &Place<'tcx>) -> String { + let place_str = construct_place_string(tcx, place); + + format!("{place_str} used here") +} + +fn construct_capture_info_string<'tcx>( + tcx: TyCtxt<'_>, + place: &Place<'tcx>, + capture_info: &ty::CaptureInfo, +) -> String { + let place_str = construct_place_string(tcx, place); + + let capture_kind_str = match capture_info.capture_kind { + ty::UpvarCapture::ByValue => "ByValue".into(), + ty::UpvarCapture::ByRef(kind) => format!("{:?}", kind), + }; + format!("{place_str} -> {capture_kind_str}") +} + +fn var_name(tcx: TyCtxt<'_>, var_hir_id: hir::HirId) -> Symbol { + tcx.hir().name(var_hir_id) +} + +#[instrument(level = "debug", skip(tcx))] +fn should_do_rust_2021_incompatible_closure_captures_analysis( + tcx: TyCtxt<'_>, + closure_id: hir::HirId, +) -> bool { + let (level, _) = + tcx.lint_level_at_node(lint::builtin::RUST_2021_INCOMPATIBLE_CLOSURE_CAPTURES, closure_id); + + !matches!(level, lint::Level::Allow) +} + +/// Return a two string tuple (s1, s2) +/// - s1: Line of code that is needed for the migration: eg: `let _ = (&x, ...)`. +/// - s2: Comma separated names of the variables being migrated. +fn migration_suggestion_for_2229( + tcx: TyCtxt<'_>, + need_migrations: &[NeededMigration], +) -> (String, String) { + let need_migrations_variables = need_migrations + .iter() + .map(|NeededMigration { var_hir_id: v, .. }| var_name(tcx, *v)) + .collect::>(); + + let migration_ref_concat = + need_migrations_variables.iter().map(|v| format!("&{v}")).collect::>().join(", "); + + let migration_string = if 1 == need_migrations.len() { + format!("let _ = {migration_ref_concat}") + } else { + format!("let _ = ({migration_ref_concat})") + }; + + let migrated_variables_concat = + need_migrations_variables.iter().map(|v| format!("`{v}`")).collect::>().join(", "); + + (migration_string, migrated_variables_concat) +} + +/// Helper function to determine if we need to escalate CaptureKind from +/// CaptureInfo A to B and returns the escalated CaptureInfo. 
+/// (Note: CaptureInfo contains CaptureKind and an expression that led to capture it in that way) +/// +/// If both `CaptureKind`s are considered equivalent, then the CaptureInfo is selected based +/// on the `CaptureInfo` containing an associated `capture_kind_expr_id`. +/// +/// It is the caller's duty to figure out which path_expr_id to use. +/// +/// If both the CaptureKind and Expression are considered to be equivalent, +/// then `CaptureInfo` A is preferred. This can be useful in cases where we want to prioritize +/// expressions reported back to the user as part of diagnostics based on which appears earlier +/// in the closure. This can be achieved simply by calling +/// `determine_capture_info(existing_info, current_info)`. This works out because the +/// expressions that occur earlier in the closure body than the current expression are processed before. +/// Consider the following example +/// ```rust,no_run +/// struct Point { x: i32, y: i32 } +/// let mut p = Point { x: 10, y: 10 }; +/// +/// let c = || { +/// p.x += 10; +/// // ^ E1 ^ +/// // ... +/// // More code +/// // ... +/// p.x += 10; // E2 +/// // ^ E2 ^ +/// }; +/// ``` +/// `CaptureKind` associated with both `E1` and `E2` will be ByRef(MutBorrow), +/// and both have an expression associated, however for diagnostics we prefer reporting +/// `E1` since it appears earlier in the closure body. When `E2` is being processed we +/// would've already handled `E1`, and have an existing capture_information for it. +/// Calling `determine_capture_info(existing_info_e1, current_info_e2)` will return +/// `existing_info_e1` in this case, allowing us to point to `E1` in case of diagnostics. +fn determine_capture_info( + capture_info_a: ty::CaptureInfo, + capture_info_b: ty::CaptureInfo, +) -> ty::CaptureInfo { + // If the capture kind is equivalent then, we don't need to escalate and can compare the + // expressions. + let eq_capture_kind = match (capture_info_a.capture_kind, capture_info_b.capture_kind) { + (ty::UpvarCapture::ByValue, ty::UpvarCapture::ByValue) => true, + (ty::UpvarCapture::ByRef(ref_a), ty::UpvarCapture::ByRef(ref_b)) => ref_a == ref_b, + (ty::UpvarCapture::ByValue, _) | (ty::UpvarCapture::ByRef(_), _) => false, + }; + + if eq_capture_kind { + match (capture_info_a.capture_kind_expr_id, capture_info_b.capture_kind_expr_id) { + (Some(_), _) | (None, None) => capture_info_a, + (None, Some(_)) => capture_info_b, + } + } else { + // We select the CaptureKind which ranks higher based the following priority order: + // ByValue > MutBorrow > UniqueImmBorrow > ImmBorrow + match (capture_info_a.capture_kind, capture_info_b.capture_kind) { + (ty::UpvarCapture::ByValue, _) => capture_info_a, + (_, ty::UpvarCapture::ByValue) => capture_info_b, + (ty::UpvarCapture::ByRef(ref_a), ty::UpvarCapture::ByRef(ref_b)) => { + match (ref_a, ref_b) { + // Take LHS: + (ty::UniqueImmBorrow | ty::MutBorrow, ty::ImmBorrow) + | (ty::MutBorrow, ty::UniqueImmBorrow) => capture_info_a, + + // Take RHS: + (ty::ImmBorrow, ty::UniqueImmBorrow | ty::MutBorrow) + | (ty::UniqueImmBorrow, ty::MutBorrow) => capture_info_b, + + (ty::ImmBorrow, ty::ImmBorrow) + | (ty::UniqueImmBorrow, ty::UniqueImmBorrow) + | (ty::MutBorrow, ty::MutBorrow) => { + bug!("Expected unequal capture kinds"); + } + } + } + } + } +} + +/// Truncates `place` to have up to `len` projections. +/// `curr_mode` is the current required capture kind for the place. +/// Returns the truncated `place` and the updated required capture kind. 
+/// +/// Note: Capture kind changes from `MutBorrow` to `UniqueImmBorrow` if the truncated part of the `place` +/// contained `Deref` of `&mut`. +fn truncate_place_to_len_and_update_capture_kind<'tcx>( + place: &mut Place<'tcx>, + curr_mode: &mut ty::UpvarCapture, + len: usize, +) { + let is_mut_ref = |ty: Ty<'_>| matches!(ty.kind(), ty::Ref(.., hir::Mutability::Mut)); + + // If the truncated part of the place contains `Deref` of a `&mut` then convert MutBorrow -> + // UniqueImmBorrow + // Note that if the place contained Deref of a raw pointer it would've not been MutBorrow, so + // we don't need to worry about that case here. + match curr_mode { + ty::UpvarCapture::ByRef(ty::BorrowKind::MutBorrow) => { + for i in len..place.projections.len() { + if place.projections[i].kind == ProjectionKind::Deref + && is_mut_ref(place.ty_before_projection(i)) + { + *curr_mode = ty::UpvarCapture::ByRef(ty::BorrowKind::UniqueImmBorrow); + break; + } + } + } + + ty::UpvarCapture::ByRef(..) => {} + ty::UpvarCapture::ByValue => {} + } + + place.projections.truncate(len); +} + +/// Determines the Ancestry relationship of Place A relative to Place B +/// +/// `PlaceAncestryRelation::Ancestor` implies Place A is ancestor of Place B +/// `PlaceAncestryRelation::Descendant` implies Place A is descendant of Place B +/// `PlaceAncestryRelation::Divergent` implies neither of them is the ancestor of the other. +fn determine_place_ancestry_relation<'tcx>( + place_a: &Place<'tcx>, + place_b: &Place<'tcx>, +) -> PlaceAncestryRelation { + // If Place A and Place B, don't start off from the same root variable, they are divergent. + if place_a.base != place_b.base { + return PlaceAncestryRelation::Divergent; + } + + // Assume of length of projections_a = n + let projections_a = &place_a.projections; + + // Assume of length of projections_b = m + let projections_b = &place_b.projections; + + let same_initial_projections = + iter::zip(projections_a, projections_b).all(|(proj_a, proj_b)| proj_a.kind == proj_b.kind); + + if same_initial_projections { + use std::cmp::Ordering; + + // First min(n, m) projections are the same + // Select Ancestor/Descendant + match projections_b.len().cmp(&projections_a.len()) { + Ordering::Greater => PlaceAncestryRelation::Ancestor, + Ordering::Equal => PlaceAncestryRelation::SamePlace, + Ordering::Less => PlaceAncestryRelation::Descendant, + } + } else { + PlaceAncestryRelation::Divergent + } +} + +/// Reduces the precision of the captured place when the precision doesn't yield any benefit from +/// borrow checking perspective, allowing us to save us on the size of the capture. +/// +/// +/// Fields that are read through a shared reference will always be read via a shared ref or a copy, +/// and therefore capturing precise paths yields no benefit. This optimization truncates the +/// rightmost deref of the capture if the deref is applied to a shared ref. 
+/// +/// The reason we only drop the last deref is the following edge case: +/// +/// ``` +/// # struct A { field_of_a: Box<i32> } +/// # struct B {} +/// # struct C<'a>(&'a i32); +/// struct MyStruct<'a> { +/// a: &'static A, +/// b: B, +/// c: C<'a>, +/// } +/// +/// fn foo<'a, 'b>(m: &'a MyStruct<'b>) -> impl FnMut() + 'static { +/// || drop(&*m.a.field_of_a) +/// // Here we really do want to capture `*m.a` because that outlives `'static` +/// +/// // If we capture `m`, then the closure no longer outlives `'static`; +/// // it is constrained to `'a` +/// } +/// ``` +fn truncate_capture_for_optimization<'tcx>( + mut place: Place<'tcx>, + mut curr_mode: ty::UpvarCapture, +) -> (Place<'tcx>, ty::UpvarCapture) { + let is_shared_ref = |ty: Ty<'_>| matches!(ty.kind(), ty::Ref(.., hir::Mutability::Not)); + + // Find the right-most deref (if any). All the projections that come after this + // are fields or other "in-place pointer adjustments"; these refer therefore to + // data owned by whatever pointer is being dereferenced here. + let idx = place.projections.iter().rposition(|proj| ProjectionKind::Deref == proj.kind); + + match idx { + // If that pointer is a shared reference, then we don't need those fields. + Some(idx) if is_shared_ref(place.ty_before_projection(idx)) => { + truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, idx + 1) + } + None | Some(_) => {} + } + + (place, curr_mode) +} + +/// Precise capture is enabled if the feature gate `capture_disjoint_fields` is enabled or if the +/// user is using Rust Edition 2021 or higher. +/// +/// `span` is the span of the closure. +fn enable_precise_capture(tcx: TyCtxt<'_>, span: Span) -> bool { + // We use the closure's span here so that, if the closure was generated by a macro from a + // different edition, the macro's edition determines the behavior.
+ tcx.features().capture_disjoint_fields || span.rust_2021() +} diff --git a/compiler/rustc_typeck/src/check/wfcheck.rs b/compiler/rustc_typeck/src/check/wfcheck.rs new file mode 100644 index 000000000..d0334cd0d --- /dev/null +++ b/compiler/rustc_typeck/src/check/wfcheck.rs @@ -0,0 +1,1973 @@ +use crate::check::regionck::OutlivesEnvironmentExt; +use crate::constrained_generic_params::{identify_constrained_generic_params, Parameter}; +use rustc_ast as ast; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed}; +use rustc_hir as hir; +use rustc_hir::def_id::{DefId, LocalDefId}; +use rustc_hir::lang_items::LangItem; +use rustc_hir::ItemKind; +use rustc_infer::infer::outlives::env::{OutlivesEnvironment, RegionBoundPairs}; +use rustc_infer::infer::outlives::obligations::TypeOutlives; +use rustc_infer::infer::{self, InferCtxt, TyCtxtInferExt}; +use rustc_infer::traits::Normalized; +use rustc_middle::ty::query::Providers; +use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts, Subst}; +use rustc_middle::ty::trait_def::TraitSpecializationKind; +use rustc_middle::ty::{ + self, AdtKind, DefIdTree, GenericParamDefKind, ToPredicate, Ty, TyCtxt, TypeFoldable, + TypeSuperVisitable, TypeVisitable, TypeVisitor, +}; +use rustc_session::parse::feature_err; +use rustc_span::symbol::{sym, Ident, Symbol}; +use rustc_span::{Span, DUMMY_SP}; +use rustc_trait_selection::autoderef::Autoderef; +use rustc_trait_selection::traits::error_reporting::InferCtxtExt; +use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _; +use rustc_trait_selection::traits::query::normalize::AtExt; +use rustc_trait_selection::traits::query::NoSolution; +use rustc_trait_selection::traits::{ + self, ObligationCause, ObligationCauseCode, ObligationCtxt, WellFormedLoc, +}; + +use std::cell::LazyCell; +use std::convert::TryInto; +use std::iter; +use std::ops::{ControlFlow, Deref}; + +pub(super) struct WfCheckingCtxt<'a, 'tcx> { + pub(super) ocx: ObligationCtxt<'a, 'tcx>, + span: Span, + body_id: hir::HirId, + param_env: ty::ParamEnv<'tcx>, +} +impl<'a, 'tcx> Deref for WfCheckingCtxt<'a, 'tcx> { + type Target = ObligationCtxt<'a, 'tcx>; + fn deref(&self) -> &Self::Target { + &self.ocx + } +} + +impl<'tcx> WfCheckingCtxt<'_, 'tcx> { + fn tcx(&self) -> TyCtxt<'tcx> { + self.ocx.infcx.tcx + } + + fn normalize<T>(&self, span: Span, loc: Option<WellFormedLoc>, value: T) -> T + where + T: TypeFoldable<'tcx>, + { + self.ocx.normalize( + ObligationCause::new(span, self.body_id, ObligationCauseCode::WellFormed(loc)), + self.param_env, + value, + ) + } + + fn register_wf_obligation( + &self, + span: Span, + loc: Option<WellFormedLoc>, + arg: ty::GenericArg<'tcx>, + ) { + let cause = + traits::ObligationCause::new(span, self.body_id, ObligationCauseCode::WellFormed(loc)); + self.ocx.register_obligation(traits::Obligation::new( + cause, + self.param_env, + ty::Binder::dummy(ty::PredicateKind::WellFormed(arg)).to_predicate(self.tcx()), + )); + } +} + +pub(super) fn enter_wf_checking_ctxt<'tcx, F>( + tcx: TyCtxt<'tcx>, + span: Span, + body_def_id: LocalDefId, + f: F, +) where + F: for<'a> FnOnce(&WfCheckingCtxt<'a, 'tcx>) -> FxHashSet<Ty<'tcx>>, +{ + let param_env = tcx.param_env(body_def_id); + let body_id = tcx.hir().local_def_id_to_hir_id(body_def_id); + tcx.infer_ctxt().enter(|ref infcx| { + let ocx = ObligationCtxt::new(infcx); + let mut wfcx = WfCheckingCtxt { ocx, span, body_id, param_env }; + + if !tcx.features().trivial_bounds {
wfcx.check_false_global_bounds() + } + let wf_tys = f(&mut wfcx); + let errors = wfcx.select_all_or_error(); + if !errors.is_empty() { + infcx.report_fulfillment_errors(&errors, None, false); + return; + } + + let mut outlives_environment = OutlivesEnvironment::new(param_env); + outlives_environment.add_implied_bounds(infcx, wf_tys, body_id); + infcx.check_region_obligations_and_report_errors(body_def_id, &outlives_environment); + }) +} + +fn check_well_formed(tcx: TyCtxt<'_>, def_id: LocalDefId) { + let node = tcx.hir().expect_owner(def_id); + match node { + hir::OwnerNode::Crate(_) => {} + hir::OwnerNode::Item(item) => check_item(tcx, item), + hir::OwnerNode::TraitItem(item) => check_trait_item(tcx, item), + hir::OwnerNode::ImplItem(item) => check_impl_item(tcx, item), + hir::OwnerNode::ForeignItem(item) => check_foreign_item(tcx, item), + } + + if let Some(generics) = node.generics() { + for param in generics.params { + check_param_wf(tcx, param) + } + } +} + +/// Checks that the field types (in a struct def'n) or argument types (in an enum def'n) are +/// well-formed, meaning that they do not require any constraints not declared in the struct +/// definition itself. For example, this definition would be illegal: +/// +/// ```rust +/// struct Ref<'a, T> { x: &'a T } +/// ``` +/// +/// because the type did not declare that `T:'a`. +/// +/// We do this check as a pre-pass before checking fn bodies because if these constraints are +/// not included it frequently leads to confusing errors in fn bodies. So it's better to check +/// the types first. +#[instrument(skip(tcx), level = "debug")] +fn check_item<'tcx>(tcx: TyCtxt<'tcx>, item: &'tcx hir::Item<'tcx>) { + let def_id = item.def_id; + + debug!( + ?item.def_id, + item.name = ? tcx.def_path_str(def_id.to_def_id()) + ); + + match item.kind { + // Right now we check that every default trait implementation + // has an implementation of itself. Basically, a case like: + // + // impl Trait for T {} + // + // has a requirement of `T: Trait` which was required for default + // method implementations. Although this could be improved now that + // there's a better infrastructure in place for this, it's being left + // for a follow-up work. + // + // Since there's such a requirement, we need to check *just* positive + // implementations, otherwise things like: + // + // impl !Send for T {} + // + // won't be allowed unless there's an *explicit* implementation of `Send` + // for `T` + hir::ItemKind::Impl(ref impl_) => { + let is_auto = tcx + .impl_trait_ref(item.def_id) + .map_or(false, |trait_ref| tcx.trait_is_auto(trait_ref.def_id)); + if let (hir::Defaultness::Default { .. }, true) = (impl_.defaultness, is_auto) { + let sp = impl_.of_trait.as_ref().map_or(item.span, |t| t.path.span); + let mut err = + tcx.sess.struct_span_err(sp, "impls of auto traits cannot be default"); + err.span_labels(impl_.defaultness_span, "default because of this"); + err.span_label(sp, "auto trait"); + err.emit(); + } + // We match on both `ty::ImplPolarity` and `ast::ImplPolarity` just to get the `!` span. + match (tcx.impl_polarity(def_id), impl_.polarity) { + (ty::ImplPolarity::Positive, _) => { + check_impl(tcx, item, impl_.self_ty, &impl_.of_trait, impl_.constness); + } + (ty::ImplPolarity::Negative, ast::ImplPolarity::Negative(span)) => { + // FIXME(#27579): what amount of WF checking do we need for neg impls? + if let hir::Defaultness::Default { .. 
} = impl_.defaultness { + let mut spans = vec![span]; + spans.extend(impl_.defaultness_span); + struct_span_err!( + tcx.sess, + spans, + E0750, + "negative impls cannot be default impls" + ) + .emit(); + } + } + (ty::ImplPolarity::Reservation, _) => { + // FIXME: what amount of WF checking do we need for reservation impls? + } + _ => unreachable!(), + } + } + hir::ItemKind::Fn(ref sig, ..) => { + check_item_fn(tcx, item.def_id, item.ident, item.span, sig.decl); + } + hir::ItemKind::Static(ty, ..) => { + check_item_type(tcx, item.def_id, ty.span, false); + } + hir::ItemKind::Const(ty, ..) => { + check_item_type(tcx, item.def_id, ty.span, false); + } + hir::ItemKind::Struct(ref struct_def, ref ast_generics) => { + check_type_defn(tcx, item, false, |wfcx| vec![wfcx.non_enum_variant(struct_def)]); + + check_variances_for_type_defn(tcx, item, ast_generics); + } + hir::ItemKind::Union(ref struct_def, ref ast_generics) => { + check_type_defn(tcx, item, true, |wfcx| vec![wfcx.non_enum_variant(struct_def)]); + + check_variances_for_type_defn(tcx, item, ast_generics); + } + hir::ItemKind::Enum(ref enum_def, ref ast_generics) => { + check_type_defn(tcx, item, true, |wfcx| wfcx.enum_variants(enum_def)); + + check_variances_for_type_defn(tcx, item, ast_generics); + } + hir::ItemKind::Trait(..) => { + check_trait(tcx, item); + } + hir::ItemKind::TraitAlias(..) => { + check_trait(tcx, item); + } + // `ForeignItem`s are handled separately. + hir::ItemKind::ForeignMod { .. } => {} + _ => {} + } +} + +fn check_foreign_item(tcx: TyCtxt<'_>, item: &hir::ForeignItem<'_>) { + let def_id = item.def_id; + + debug!( + ?item.def_id, + item.name = ? tcx.def_path_str(def_id.to_def_id()) + ); + + match item.kind { + hir::ForeignItemKind::Fn(decl, ..) => { + check_item_fn(tcx, item.def_id, item.ident, item.span, decl) + } + hir::ForeignItemKind::Static(ty, ..) => check_item_type(tcx, item.def_id, ty.span, true), + hir::ForeignItemKind::Type => (), + } +} + +fn check_trait_item(tcx: TyCtxt<'_>, trait_item: &hir::TraitItem<'_>) { + let def_id = trait_item.def_id; + + let (method_sig, span) = match trait_item.kind { + hir::TraitItemKind::Fn(ref sig, _) => (Some(sig), trait_item.span), + hir::TraitItemKind::Type(_bounds, Some(ty)) => (None, ty.span), + _ => (None, trait_item.span), + }; + check_object_unsafe_self_trait_by_name(tcx, trait_item); + check_associated_item(tcx, trait_item.def_id, span, method_sig); + + let encl_trait_def_id = tcx.local_parent(def_id); + let encl_trait = tcx.hir().expect_item(encl_trait_def_id); + let encl_trait_def_id = encl_trait.def_id.to_def_id(); + let fn_lang_item_name = if Some(encl_trait_def_id) == tcx.lang_items().fn_trait() { + Some("fn") + } else if Some(encl_trait_def_id) == tcx.lang_items().fn_mut_trait() { + Some("fn_mut") + } else { + None + }; + + if let (Some(fn_lang_item_name), "call") = + (fn_lang_item_name, trait_item.ident.name.to_ident_string().as_str()) + { + // We are looking at the `call` function of the `fn` or `fn_mut` lang item. + // Do some rudimentary sanity checking to avoid an ICE later (issue #83471). + if let Some(hir::FnSig { decl, span, .. 
}) = method_sig { + if let [self_ty, _] = decl.inputs { + if !matches!(self_ty.kind, hir::TyKind::Rptr(_, _)) { + tcx.sess + .struct_span_err( + self_ty.span, + &format!( + "first argument of `call` in `{fn_lang_item_name}` lang item must be a reference", + ), + ) + .emit(); + } + } else { + tcx.sess + .struct_span_err( + *span, + &format!( + "`call` function in `{fn_lang_item_name}` lang item takes exactly two arguments", + ), + ) + .emit(); + } + } else { + tcx.sess + .struct_span_err( + trait_item.span, + &format!( + "`call` trait item in `{fn_lang_item_name}` lang item must be a function", + ), + ) + .emit(); + } + } +} + +/// Require that the user writes where clauses on GATs for the implicit +/// outlives bounds involving trait parameters in trait functions and +/// lifetimes passed as GAT substs. See `self-outlives-lint` test. +/// +/// We use the following trait as an example throughout this function: +/// ```rust,ignore (this code fails due to this lint) +/// trait IntoIter { +/// type Iter<'a>: Iterator>; +/// type Item<'a>; +/// fn into_iter<'a>(&'a self) -> Self::Iter<'a>; +/// } +/// ``` +fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRef]) { + // Associates every GAT's def_id to a list of possibly missing bounds detected by this lint. + let mut required_bounds_by_item = FxHashMap::default(); + + // Loop over all GATs together, because if this lint suggests adding a where-clause bound + // to one GAT, it might then require us to an additional bound on another GAT. + // In our `IntoIter` example, we discover a missing `Self: 'a` bound on `Iter<'a>`, which + // then in a second loop adds a `Self: 'a` bound to `Item` due to the relationship between + // those GATs. + loop { + let mut should_continue = false; + for gat_item in associated_items { + let gat_def_id = gat_item.id.def_id; + let gat_item = tcx.associated_item(gat_def_id); + // If this item is not an assoc ty, or has no substs, then it's not a GAT + if gat_item.kind != ty::AssocKind::Type { + continue; + } + let gat_generics = tcx.generics_of(gat_def_id); + // FIXME(jackh726): we can also warn in the more general case + if gat_generics.params.is_empty() { + continue; + } + + // Gather the bounds with which all other items inside of this trait constrain the GAT. + // This is calculated by taking the intersection of the bounds that each item + // constrains the GAT with individually. + let mut new_required_bounds: Option>> = None; + for item in associated_items { + let item_def_id = item.id.def_id; + // Skip our own GAT, since it does not constrain itself at all. + if item_def_id == gat_def_id { + continue; + } + + let item_hir_id = item.id.hir_id(); + let param_env = tcx.param_env(item_def_id); + + let item_required_bounds = match item.kind { + // In our example, this corresponds to `into_iter` method + hir::AssocItemKind::Fn { .. } => { + // For methods, we check the function signature's return type for any GATs + // to constrain. In the `into_iter` case, we see that the return type + // `Self::Iter<'a>` is a GAT we want to gather any potential missing bounds from. + let sig: ty::FnSig<'_> = tcx.liberate_late_bound_regions( + item_def_id.to_def_id(), + tcx.fn_sig(item_def_id), + ); + gather_gat_bounds( + tcx, + param_env, + item_hir_id, + sig.output(), + // We also assume that all of the function signature's parameter types + // are well formed. 
+ &sig.inputs().iter().copied().collect(), + gat_def_id, + gat_generics, + ) + } + // In our example, this corresponds to the `Iter` and `Item` associated types + hir::AssocItemKind::Type => { + // If our associated item is a GAT with missing bounds, add them to + // the param-env here. This allows this GAT to propagate missing bounds + // to other GATs. + let param_env = augment_param_env( + tcx, + param_env, + required_bounds_by_item.get(&item_def_id), + ); + gather_gat_bounds( + tcx, + param_env, + item_hir_id, + tcx.explicit_item_bounds(item_def_id) + .iter() + .copied() + .collect::<Vec<_>>(), + &FxHashSet::default(), + gat_def_id, + gat_generics, + ) + } + hir::AssocItemKind::Const => None, + }; + + if let Some(item_required_bounds) = item_required_bounds { + // Take the intersection of the required bounds for this GAT, and + // the item_required_bounds which are the ones implied by just + // this item alone. + // This is why we use an Option<_>, since we need to distinguish + // the empty set of bounds from the _uninitialized_ set of bounds. + if let Some(new_required_bounds) = &mut new_required_bounds { + new_required_bounds.retain(|b| item_required_bounds.contains(b)); + } else { + new_required_bounds = Some(item_required_bounds); + } + } + } + + if let Some(new_required_bounds) = new_required_bounds { + let required_bounds = required_bounds_by_item.entry(gat_def_id).or_default(); + if new_required_bounds.into_iter().any(|p| required_bounds.insert(p)) { + // Iterate until our required_bounds no longer change + // Since they changed here, we should continue the loop + should_continue = true; + } + } + } + // We know that this loop will eventually halt, since we only set `should_continue` if the + // `required_bounds` for this item grows. Since we are not creating any new region or type + // variables, the set of all region and type bounds that we could ever insert is limited + // by the number of unique types and regions we observe in a given item.
+ if !should_continue { + break; + } + } + + for (gat_def_id, required_bounds) in required_bounds_by_item { + let gat_item_hir = tcx.hir().expect_trait_item(gat_def_id); + debug!(?required_bounds); + let param_env = tcx.param_env(gat_def_id); + let gat_hir = gat_item_hir.hir_id(); + + let mut unsatisfied_bounds: Vec<_> = required_bounds + .into_iter() + .filter(|clause| match clause.kind().skip_binder() { + ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(a, b)) => { + !region_known_to_outlive(tcx, gat_hir, param_env, &FxHashSet::default(), a, b) + } + ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(a, b)) => { + !ty_known_to_outlive(tcx, gat_hir, param_env, &FxHashSet::default(), a, b) + } + _ => bug!("Unexpected PredicateKind"), + }) + .map(|clause| clause.to_string()) + .collect(); + + // We sort so that order is predictable + unsatisfied_bounds.sort(); + + if !unsatisfied_bounds.is_empty() { + let plural = pluralize!(unsatisfied_bounds.len()); + let mut err = tcx.sess.struct_span_err( + gat_item_hir.span, + &format!("missing required bound{} on `{}`", plural, gat_item_hir.ident), + ); + + let suggestion = format!( + "{} {}", + gat_item_hir.generics.add_where_or_trailing_comma(), + unsatisfied_bounds.join(", "), + ); + err.span_suggestion( + gat_item_hir.generics.tail_span_for_predicate_suggestion(), + &format!("add the required where clause{plural}"), + suggestion, + Applicability::MachineApplicable, + ); + + let bound = + if unsatisfied_bounds.len() > 1 { "these bounds are" } else { "this bound is" }; + err.note(&format!( + "{} currently required to ensure that impls have maximum flexibility", + bound + )); + err.note( + "we are soliciting feedback, see issue #87479 \ + \ + for more information", + ); + + err.emit(); + } + } +} + +/// Add a new set of predicates to the caller_bounds of an existing param_env. +fn augment_param_env<'tcx>( + tcx: TyCtxt<'tcx>, + param_env: ty::ParamEnv<'tcx>, + new_predicates: Option<&FxHashSet>>, +) -> ty::ParamEnv<'tcx> { + let Some(new_predicates) = new_predicates else { + return param_env; + }; + + if new_predicates.is_empty() { + return param_env; + } + + let bounds = + tcx.mk_predicates(param_env.caller_bounds().iter().chain(new_predicates.iter().cloned())); + // FIXME(compiler-errors): Perhaps there is a case where we need to normalize this + // i.e. traits::normalize_param_env_or_error + ty::ParamEnv::new(bounds, param_env.reveal(), param_env.constness()) +} + +/// We use the following trait as an example throughout this function. +/// Specifically, let's assume that `to_check` here is the return type +/// of `into_iter`, and the GAT we are checking this for is `Iter`. 
+/// ```rust,ignore (this code fails due to this lint) +/// trait IntoIter { +/// type Iter<'a>: Iterator<Item = Self::Item<'a>>; +/// type Item<'a>; +/// fn into_iter<'a>(&'a self) -> Self::Iter<'a>; +/// } +/// ``` +fn gather_gat_bounds<'tcx, T: TypeFoldable<'tcx>>( + tcx: TyCtxt<'tcx>, + param_env: ty::ParamEnv<'tcx>, + item_hir: hir::HirId, + to_check: T, + wf_tys: &FxHashSet<Ty<'tcx>>, + gat_def_id: LocalDefId, + gat_generics: &'tcx ty::Generics, +) -> Option<FxHashSet<ty::Predicate<'tcx>>> { + // The bounds that we would require from `to_check` + let mut bounds = FxHashSet::default(); + + let (regions, types) = GATSubstCollector::visit(gat_def_id.to_def_id(), to_check); + + // If both regions and types are empty, then this GAT isn't in the + // set of types we are checking, and we shouldn't try to do clause analysis + // (particularly, doing so would end up with an empty set of clauses, + // since the current method would require none, and we take the + // intersection of requirements of all methods) + if types.is_empty() && regions.is_empty() { + return None; + } + + for (region_a, region_a_idx) in &regions { + // Ignore `'static` lifetimes for the purpose of this lint: we know it + // outlives everything and so it doesn't give meaningful + // clues + if let ty::ReStatic = **region_a { + continue; + } + // For each region argument (e.g., `'a` in our example), check for a + // relationship to the type arguments (e.g., `Self`). If there is an + // outlives relationship (`Self: 'a`), then we want to ensure that is + // reflected in a where clause on the GAT itself. + for (ty, ty_idx) in &types { + // In our example, requires that `Self: 'a` + if ty_known_to_outlive(tcx, item_hir, param_env, &wf_tys, *ty, *region_a) { + debug!(?ty_idx, ?region_a_idx); + debug!("required clause: {ty} must outlive {region_a}"); + // Translate into the generic parameters of the GAT. In + // our example, the type was `Self`, which will also be + // `Self` in the GAT. + let ty_param = gat_generics.param_at(*ty_idx, tcx); + let ty_param = tcx + .mk_ty(ty::Param(ty::ParamTy { index: ty_param.index, name: ty_param.name })); + // Same for the region. In our example, 'a corresponds + // to the 'me parameter. + let region_param = gat_generics.param_at(*region_a_idx, tcx); + let region_param = + tcx.mk_region(ty::RegionKind::ReEarlyBound(ty::EarlyBoundRegion { + def_id: region_param.def_id, + index: region_param.index, + name: region_param.name, + })); + // The predicate we expect to see. (In our example, + // `Self: 'me`.) + let clause = + ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_param, region_param)); + let clause = tcx.mk_predicate(ty::Binder::dummy(clause)); + bounds.insert(clause); + } + } + + // For each region argument (e.g., `'a` in our example), also check for a + // relationship to the other region arguments. If there is an outlives + // relationship, then we want to ensure that is reflected in the where clause + // on the GAT itself. + for (region_b, region_b_idx) in &regions { + // Again, skip `'static` because it outlives everything. Also, we trivially + // know that a region outlives itself. + if ty::ReStatic == **region_b || region_a == region_b { + continue; + } + if region_known_to_outlive(tcx, item_hir, param_env, &wf_tys, *region_a, *region_b) { + debug!(?region_a_idx, ?region_b_idx); + debug!("required clause: {region_a} must outlive {region_b}"); + // Translate into the generic parameters of the GAT.
+ let region_a_param = gat_generics.param_at(*region_a_idx, tcx); + let region_a_param = + tcx.mk_region(ty::RegionKind::ReEarlyBound(ty::EarlyBoundRegion { + def_id: region_a_param.def_id, + index: region_a_param.index, + name: region_a_param.name, + })); + // Same for the region. + let region_b_param = gat_generics.param_at(*region_b_idx, tcx); + let region_b_param = + tcx.mk_region(ty::RegionKind::ReEarlyBound(ty::EarlyBoundRegion { + def_id: region_b_param.def_id, + index: region_b_param.index, + name: region_b_param.name, + })); + // The predicate we expect to see. + let clause = ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate( + region_a_param, + region_b_param, + )); + let clause = tcx.mk_predicate(ty::Binder::dummy(clause)); + bounds.insert(clause); + } + } + } + + Some(bounds) +} + +/// Given a known `param_env` and a set of well formed types, can we prove that +/// `ty` outlives `region`. +fn ty_known_to_outlive<'tcx>( + tcx: TyCtxt<'tcx>, + id: hir::HirId, + param_env: ty::ParamEnv<'tcx>, + wf_tys: &FxHashSet>, + ty: Ty<'tcx>, + region: ty::Region<'tcx>, +) -> bool { + resolve_regions_with_wf_tys(tcx, id, param_env, &wf_tys, |infcx, region_bound_pairs| { + let origin = infer::RelateParamBound(DUMMY_SP, ty, None); + let outlives = &mut TypeOutlives::new(infcx, tcx, region_bound_pairs, None, param_env); + outlives.type_must_outlive(origin, ty, region); + }) +} + +/// Given a known `param_env` and a set of well formed types, can we prove that +/// `region_a` outlives `region_b` +fn region_known_to_outlive<'tcx>( + tcx: TyCtxt<'tcx>, + id: hir::HirId, + param_env: ty::ParamEnv<'tcx>, + wf_tys: &FxHashSet>, + region_a: ty::Region<'tcx>, + region_b: ty::Region<'tcx>, +) -> bool { + resolve_regions_with_wf_tys(tcx, id, param_env, &wf_tys, |mut infcx, _| { + use rustc_infer::infer::outlives::obligations::TypeOutlivesDelegate; + let origin = infer::RelateRegionParamBound(DUMMY_SP); + // `region_a: region_b` -> `region_b <= region_a` + infcx.push_sub_region_constraint(origin, region_b, region_a); + }) +} + +/// Given a known `param_env` and a set of well formed types, set up an +/// `InferCtxt`, call the passed function (to e.g. set up region constraints +/// to be tested), then resolve region and return errors +fn resolve_regions_with_wf_tys<'tcx>( + tcx: TyCtxt<'tcx>, + id: hir::HirId, + param_env: ty::ParamEnv<'tcx>, + wf_tys: &FxHashSet>, + add_constraints: impl for<'a> FnOnce(&'a InferCtxt<'a, 'tcx>, &'a RegionBoundPairs<'tcx>), +) -> bool { + // Unfortunately, we have to use a new `InferCtxt` each call, because + // region constraints get added and solved there and we need to test each + // call individually. + tcx.infer_ctxt().enter(|infcx| { + let mut outlives_environment = OutlivesEnvironment::new(param_env); + outlives_environment.add_implied_bounds(&infcx, wf_tys.clone(), id); + let region_bound_pairs = outlives_environment.region_bound_pairs(); + + add_constraints(&infcx, region_bound_pairs); + + let errors = infcx.resolve_regions(&outlives_environment); + + debug!(?errors, "errors"); + + // If we were able to prove that the type outlives the region without + // an error, it must be because of the implied or explicit bounds... + errors.is_empty() + }) +} + +/// TypeVisitor that looks for uses of GATs like +/// `>::GAT` and adds the arguments `P0..Pm` into +/// the two vectors, `regions` and `types` (depending on their kind). For each +/// parameter `Pi` also track the index `i`. 
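For context, here is a sketch of the kind of trait that `check_gat_where_clauses` and `gather_gat_bounds` above are analyzing (illustrative only, not part of this patch; the `LendingIterator`/`Windows` names are made up, and on a compiler contemporary with this patch the `generic_associated_types` feature gate would still be required, while current compilers accept it on stable). The `where Self: 'a` clause on the GAT is exactly the bound the missing-bounds diagnostic asks the user to write:

```rust
// Illustrative sketch only, not part of this patch.
trait LendingIterator {
    type Item<'a>
    where
        Self: 'a;

    fn next<'a>(&'a mut self) -> Option<Self::Item<'a>>;
}

struct Windows<'s> {
    data: &'s [u8],
    pos: usize,
}

impl<'s> LendingIterator for Windows<'s> {
    type Item<'a> = &'a [u8] where Self: 'a;

    fn next<'a>(&'a mut self) -> Option<Self::Item<'a>> {
        if self.pos >= self.data.len() {
            return None;
        }
        let end = (self.pos + 2).min(self.data.len());
        let window = &self.data[self.pos..end];
        self.pos += 1;
        Some(window)
    }
}

fn main() {
    let mut it = Windows { data: b"abcd", pos: 0 };
    while let Some(w) = it.next() {
        println!("{:?}", w);
    }
}
```

Without the `where Self: 'a` clause on `Item<'a>`, the signature of `next` implies `Self: 'a` anyway, which is precisely the implied bound this analysis detects and asks the author to spell out.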
+struct GATSubstCollector<'tcx> { + gat: DefId, + // Which region appears and which parameter index its substituted for + regions: FxHashSet<(ty::Region<'tcx>, usize)>, + // Which params appears and which parameter index its substituted for + types: FxHashSet<(Ty<'tcx>, usize)>, +} + +impl<'tcx> GATSubstCollector<'tcx> { + fn visit>( + gat: DefId, + t: T, + ) -> (FxHashSet<(ty::Region<'tcx>, usize)>, FxHashSet<(Ty<'tcx>, usize)>) { + let mut visitor = + GATSubstCollector { gat, regions: FxHashSet::default(), types: FxHashSet::default() }; + t.visit_with(&mut visitor); + (visitor.regions, visitor.types) + } +} + +impl<'tcx> TypeVisitor<'tcx> for GATSubstCollector<'tcx> { + type BreakTy = !; + + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { + match t.kind() { + ty::Projection(p) if p.item_def_id == self.gat => { + for (idx, subst) in p.substs.iter().enumerate() { + match subst.unpack() { + GenericArgKind::Lifetime(lt) if !lt.is_late_bound() => { + self.regions.insert((lt, idx)); + } + GenericArgKind::Type(t) => { + self.types.insert((t, idx)); + } + _ => {} + } + } + } + _ => {} + } + t.super_visit_with(self) + } +} + +fn could_be_self(trait_def_id: LocalDefId, ty: &hir::Ty<'_>) -> bool { + match ty.kind { + hir::TyKind::TraitObject([trait_ref], ..) => match trait_ref.trait_ref.path.segments { + [s] => s.res.and_then(|r| r.opt_def_id()) == Some(trait_def_id.to_def_id()), + _ => false, + }, + _ => false, + } +} + +/// Detect when an object unsafe trait is referring to itself in one of its associated items. +/// When this is done, suggest using `Self` instead. +fn check_object_unsafe_self_trait_by_name(tcx: TyCtxt<'_>, item: &hir::TraitItem<'_>) { + let (trait_name, trait_def_id) = + match tcx.hir().get_by_def_id(tcx.hir().get_parent_item(item.hir_id())) { + hir::Node::Item(item) => match item.kind { + hir::ItemKind::Trait(..) => (item.ident, item.def_id), + _ => return, + }, + _ => return, + }; + let mut trait_should_be_self = vec![]; + match &item.kind { + hir::TraitItemKind::Const(ty, _) | hir::TraitItemKind::Type(_, Some(ty)) + if could_be_self(trait_def_id, ty) => + { + trait_should_be_self.push(ty.span) + } + hir::TraitItemKind::Fn(sig, _) => { + for ty in sig.decl.inputs { + if could_be_self(trait_def_id, ty) { + trait_should_be_self.push(ty.span); + } + } + match sig.decl.output { + hir::FnRetTy::Return(ty) if could_be_self(trait_def_id, ty) => { + trait_should_be_self.push(ty.span); + } + _ => {} + } + } + _ => {} + } + if !trait_should_be_self.is_empty() { + if tcx.object_safety_violations(trait_def_id).is_empty() { + return; + } + let sugg = trait_should_be_self.iter().map(|span| (*span, "Self".to_string())).collect(); + tcx.sess + .struct_span_err( + trait_should_be_self, + "associated item referring to unboxed trait object for its own trait", + ) + .span_label(trait_name.span, "in this trait") + .multipart_suggestion( + "you might have meant to use `Self` to refer to the implementing type", + sugg, + Applicability::MachineApplicable, + ) + .emit(); + } +} + +fn check_impl_item(tcx: TyCtxt<'_>, impl_item: &hir::ImplItem<'_>) { + let def_id = impl_item.def_id; + + let (method_sig, span) = match impl_item.kind { + hir::ImplItemKind::Fn(ref sig, _) => (Some(sig), impl_item.span), + // Constrain binding and overflow error spans to `` in `type foo = `. 
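// Illustrative sketch (not upstream code; `Shape` is an invented name): the
// pattern `check_object_unsafe_self_trait_by_name` above looks for is a trait
// naming its own `dyn` object in one of its associated items, where `Self` is
// almost always what was meant.
//
// ```rust
// trait Shape {
//     fn area(&self) -> f64;
//     // Writing `fn scaled(&self, k: f64) -> Box<dyn Shape>` here is the
//     // flagged pattern; the suggested rewrite uses `Self`:
//     fn scaled(&self, k: f64) -> Box<Self>
//     where
//         Self: Sized;
// }
// ```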
+ hir::ImplItemKind::TyAlias(ty) if ty.span != DUMMY_SP => (None, ty.span), + _ => (None, impl_item.span), + }; + + check_associated_item(tcx, def_id, span, method_sig); +} + +fn check_param_wf(tcx: TyCtxt<'_>, param: &hir::GenericParam<'_>) { + match param.kind { + // We currently only check wf of const params here. + hir::GenericParamKind::Lifetime { .. } | hir::GenericParamKind::Type { .. } => (), + + // Const parameters are well formed if their type is structural match. + hir::GenericParamKind::Const { ty: hir_ty, default: _ } => { + let ty = tcx.type_of(tcx.hir().local_def_id(param.hir_id)); + + if tcx.features().adt_const_params { + if let Some(non_structural_match_ty) = + traits::search_for_adt_const_param_violation(param.span, tcx, ty) + { + // We use the same error code in both branches, because this is really the same + // issue: we just special-case the message for type parameters to make it + // clearer. + match non_structural_match_ty.kind() { + ty::Param(_) => { + // Const parameters may not have type parameters as their types, + // because we cannot be sure that the type parameter derives `PartialEq` + // and `Eq` (just implementing them is not enough for `structural_match`). + struct_span_err!( + tcx.sess, + hir_ty.span, + E0741, + "`{ty}` is not guaranteed to `#[derive(PartialEq, Eq)]`, so may not be \ + used as the type of a const parameter", + ) + .span_label( + hir_ty.span, + format!("`{ty}` may not derive both `PartialEq` and `Eq`"), + ) + .note( + "it is not currently possible to use a type parameter as the type of a \ + const parameter", + ) + .emit(); + } + ty::Float(_) => { + struct_span_err!( + tcx.sess, + hir_ty.span, + E0741, + "`{ty}` is forbidden as the type of a const generic parameter", + ) + .note("floats do not derive `Eq` or `Ord`, which are required for const parameters") + .emit(); + } + ty::FnPtr(_) => { + struct_span_err!( + tcx.sess, + hir_ty.span, + E0741, + "using function pointers as const generic parameters is forbidden", + ) + .emit(); + } + ty::RawPtr(_) => { + struct_span_err!( + tcx.sess, + hir_ty.span, + E0741, + "using raw pointers as const generic parameters is forbidden", + ) + .emit(); + } + _ => { + let mut diag = struct_span_err!( + tcx.sess, + hir_ty.span, + E0741, + "`{}` must be annotated with `#[derive(PartialEq, Eq)]` to be used as \ + the type of a const parameter", + non_structural_match_ty, + ); + + if ty == non_structural_match_ty { + diag.span_label( + hir_ty.span, + format!("`{ty}` doesn't derive both `PartialEq` and `Eq`"), + ); + } + + diag.emit(); + } + } + } + } else { + let err_ty_str; + let mut is_ptr = true; + + let err = match ty.kind() { + ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Error(_) => None, + ty::FnPtr(_) => Some("function pointers"), + ty::RawPtr(_) => Some("raw pointers"), + _ => { + is_ptr = false; + err_ty_str = format!("`{ty}`"); + Some(err_ty_str.as_str()) + } + }; + + if let Some(unsupported_type) = err { + if is_ptr { + tcx.sess.span_err( + hir_ty.span, + &format!( + "using {unsupported_type} as const generic parameters is forbidden", + ), + ); + } else { + let mut err = tcx.sess.struct_span_err( + hir_ty.span, + &format!( + "{unsupported_type} is forbidden as the type of a const generic parameter", + ), + ); + err.note("the only supported types are integers, `bool` and `char`"); + if tcx.sess.is_nightly_build() { + err.help( + "more complex types are supported with `#![feature(adt_const_params)]`", + ); + } + err.emit(); + } + } + } + } + } +} + +#[tracing::instrument(level = "debug", 
skip(tcx, span, sig_if_method))] +fn check_associated_item( + tcx: TyCtxt<'_>, + item_id: LocalDefId, + span: Span, + sig_if_method: Option<&hir::FnSig<'_>>, +) { + let loc = Some(WellFormedLoc::Ty(item_id)); + enter_wf_checking_ctxt(tcx, span, item_id, |wfcx| { + let item = tcx.associated_item(item_id); + + let (mut implied_bounds, self_ty) = match item.container { + ty::TraitContainer => (FxHashSet::default(), tcx.types.self_param), + ty::ImplContainer => { + let def_id = item.container_id(tcx); + ( + impl_implied_bounds(tcx, wfcx.param_env, def_id.expect_local(), span), + tcx.type_of(def_id), + ) + } + }; + + match item.kind { + ty::AssocKind::Const => { + let ty = tcx.type_of(item.def_id); + let ty = wfcx.normalize(span, Some(WellFormedLoc::Ty(item_id)), ty); + wfcx.register_wf_obligation(span, loc, ty.into()); + } + ty::AssocKind::Fn => { + let sig = tcx.fn_sig(item.def_id); + let hir_sig = sig_if_method.expect("bad signature for method"); + check_fn_or_method( + wfcx, + item.ident(tcx).span, + sig, + hir_sig.decl, + item.def_id.expect_local(), + &mut implied_bounds, + ); + check_method_receiver(wfcx, hir_sig, item, self_ty); + } + ty::AssocKind::Type => { + if let ty::AssocItemContainer::TraitContainer = item.container { + check_associated_type_bounds(wfcx, item, span) + } + if item.defaultness(tcx).has_value() { + let ty = tcx.type_of(item.def_id); + let ty = wfcx.normalize(span, Some(WellFormedLoc::Ty(item_id)), ty); + wfcx.register_wf_obligation(span, loc, ty.into()); + } + } + } + + implied_bounds + }) +} + +fn item_adt_kind(kind: &ItemKind<'_>) -> Option { + match kind { + ItemKind::Struct(..) => Some(AdtKind::Struct), + ItemKind::Union(..) => Some(AdtKind::Union), + ItemKind::Enum(..) => Some(AdtKind::Enum), + _ => None, + } +} + +/// In a type definition, we check that to ensure that the types of the fields are well-formed. +fn check_type_defn<'tcx, F>( + tcx: TyCtxt<'tcx>, + item: &hir::Item<'tcx>, + all_sized: bool, + mut lookup_fields: F, +) where + F: FnMut(&WfCheckingCtxt<'_, 'tcx>) -> Vec>, +{ + enter_wf_checking_ctxt(tcx, item.span, item.def_id, |wfcx| { + let variants = lookup_fields(wfcx); + let packed = tcx.adt_def(item.def_id).repr().packed(); + + for variant in &variants { + // All field types must be well-formed. + for field in &variant.fields { + wfcx.register_wf_obligation( + field.span, + Some(WellFormedLoc::Ty(field.def_id)), + field.ty.into(), + ) + } + + // For DST, or when drop needs to copy things around, all + // intermediate types must be sized. + let needs_drop_copy = || { + packed && { + let ty = variant.fields.last().unwrap().ty; + let ty = tcx.erase_regions(ty); + if ty.needs_infer() { + tcx.sess + .delay_span_bug(item.span, &format!("inference variables in {:?}", ty)); + // Just treat unresolved type expression as if it needs drop. + true + } else { + ty.needs_drop(tcx, tcx.param_env(item.def_id)) + } + } + }; + // All fields (except for possibly the last) should be sized. 
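// Illustrative sketch (not upstream code; `Packet` is an invented name): the
// sizedness obligations registered below enforce, at the user level, that only
// the last field of a type definition may be dynamically sized.
//
// ```rust
// struct Packet {
//     len: u16,
//     payload: [u8], // OK: the last field may be a DST
// }
//
// // struct Bad {
// //     payload: [u8], // error: `[u8]: Sized` is required for non-last fields
// //     len: u16,
// // }
// ```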
+ let all_sized = all_sized || variant.fields.is_empty() || needs_drop_copy(); + let unsized_len = if all_sized { 0 } else { 1 }; + for (idx, field) in + variant.fields[..variant.fields.len() - unsized_len].iter().enumerate() + { + let last = idx == variant.fields.len() - 1; + wfcx.register_bound( + traits::ObligationCause::new( + field.span, + wfcx.body_id, + traits::FieldSized { + adt_kind: match item_adt_kind(&item.kind) { + Some(i) => i, + None => bug!(), + }, + span: field.span, + last, + }, + ), + wfcx.param_env, + field.ty, + tcx.require_lang_item(LangItem::Sized, None), + ); + } + + // Explicit `enum` discriminant values must const-evaluate successfully. + if let Some(discr_def_id) = variant.explicit_discr { + let discr_substs = InternalSubsts::identity_for_item(tcx, discr_def_id.to_def_id()); + + let cause = traits::ObligationCause::new( + tcx.def_span(discr_def_id), + wfcx.body_id, + traits::MiscObligation, + ); + wfcx.register_obligation(traits::Obligation::new( + cause, + wfcx.param_env, + ty::Binder::dummy(ty::PredicateKind::ConstEvaluatable(ty::Unevaluated::new( + ty::WithOptConstParam::unknown(discr_def_id.to_def_id()), + discr_substs, + ))) + .to_predicate(tcx), + )); + } + } + + check_where_clauses(wfcx, item.span, item.def_id); + + // No implied bounds in a struct definition. + FxHashSet::default() + }); +} + +#[instrument(skip(tcx, item))] +fn check_trait(tcx: TyCtxt<'_>, item: &hir::Item<'_>) { + debug!(?item.def_id); + + let trait_def = tcx.trait_def(item.def_id); + if trait_def.is_marker + || matches!(trait_def.specialization_kind, TraitSpecializationKind::Marker) + { + for associated_def_id in &*tcx.associated_item_def_ids(item.def_id) { + struct_span_err!( + tcx.sess, + tcx.def_span(*associated_def_id), + E0714, + "marker traits cannot have associated items", + ) + .emit(); + } + } + + enter_wf_checking_ctxt(tcx, item.span, item.def_id, |wfcx| { + check_where_clauses(wfcx, item.span, item.def_id); + + FxHashSet::default() + }); + + // Only check traits, don't check trait aliases + if let hir::ItemKind::Trait(_, _, _, _, items) = item.kind { + check_gat_where_clauses(tcx, items); + } +} + +/// Checks all associated type defaults of trait `trait_def_id`. +/// +/// Assuming the defaults are used, check that all predicates (bounds on the +/// assoc type and where clauses on the trait) hold. 
+fn check_associated_type_bounds(wfcx: &WfCheckingCtxt<'_, '_>, item: &ty::AssocItem, span: Span) { + let bounds = wfcx.tcx().explicit_item_bounds(item.def_id); + + debug!("check_associated_type_bounds: bounds={:?}", bounds); + let wf_obligations = bounds.iter().flat_map(|&(bound, bound_span)| { + let normalized_bound = wfcx.normalize(span, None, bound); + traits::wf::predicate_obligations( + wfcx.infcx, + wfcx.param_env, + wfcx.body_id, + normalized_bound, + bound_span, + ) + }); + + wfcx.register_obligations(wf_obligations); +} + +fn check_item_fn( + tcx: TyCtxt<'_>, + def_id: LocalDefId, + ident: Ident, + span: Span, + decl: &hir::FnDecl<'_>, +) { + enter_wf_checking_ctxt(tcx, span, def_id, |wfcx| { + let sig = tcx.fn_sig(def_id); + let mut implied_bounds = FxHashSet::default(); + check_fn_or_method(wfcx, ident.span, sig, decl, def_id, &mut implied_bounds); + implied_bounds + }) +} + +fn check_item_type(tcx: TyCtxt<'_>, item_id: LocalDefId, ty_span: Span, allow_foreign_ty: bool) { + debug!("check_item_type: {:?}", item_id); + + enter_wf_checking_ctxt(tcx, ty_span, item_id, |wfcx| { + let ty = tcx.type_of(item_id); + let item_ty = wfcx.normalize(ty_span, Some(WellFormedLoc::Ty(item_id)), ty); + + let mut forbid_unsized = true; + if allow_foreign_ty { + let tail = tcx.struct_tail_erasing_lifetimes(item_ty, wfcx.param_env); + if let ty::Foreign(_) = tail.kind() { + forbid_unsized = false; + } + } + + wfcx.register_wf_obligation(ty_span, Some(WellFormedLoc::Ty(item_id)), item_ty.into()); + if forbid_unsized { + wfcx.register_bound( + traits::ObligationCause::new(ty_span, wfcx.body_id, traits::WellFormed(None)), + wfcx.param_env, + item_ty, + tcx.require_lang_item(LangItem::Sized, None), + ); + } + + // Ensure that the end result is `Sync` in a non-thread local `static`. + let should_check_for_sync = tcx.static_mutability(item_id.to_def_id()) + == Some(hir::Mutability::Not) + && !tcx.is_foreign_item(item_id.to_def_id()) + && !tcx.is_thread_local_static(item_id.to_def_id()); + + if should_check_for_sync { + wfcx.register_bound( + traits::ObligationCause::new(ty_span, wfcx.body_id, traits::SharedStatic), + wfcx.param_env, + item_ty, + tcx.require_lang_item(LangItem::Sync, Some(ty_span)), + ); + } + + // No implied bounds in a const, etc. + FxHashSet::default() + }); +} + +#[tracing::instrument(level = "debug", skip(tcx, ast_self_ty, ast_trait_ref))] +fn check_impl<'tcx>( + tcx: TyCtxt<'tcx>, + item: &'tcx hir::Item<'tcx>, + ast_self_ty: &hir::Ty<'_>, + ast_trait_ref: &Option>, + constness: hir::Constness, +) { + enter_wf_checking_ctxt(tcx, item.span, item.def_id, |wfcx| { + match *ast_trait_ref { + Some(ref ast_trait_ref) => { + // `#[rustc_reservation_impl]` impls are not real impls and + // therefore don't need to be WF (the trait's `Self: Trait` predicate + // won't hold). 
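// Illustrative sketch (not upstream code; `Key` and `S` are invented names):
// the well-formedness obligations registered for the trait reference below
// mean that an impl's trait arguments must satisfy the trait's own bounds.
//
// ```rust
// trait Key<T: Copy> {}
//
// struct S;
//
// impl Key<u32> for S {}
// // impl Key<String> for S {} // rejected: the trait requires `T: Copy`
// ```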
+ let trait_ref = tcx.impl_trait_ref(item.def_id).unwrap(); + let trait_ref = wfcx.normalize(ast_trait_ref.path.span, None, trait_ref); + let trait_pred = ty::TraitPredicate { + trait_ref, + constness: match constness { + hir::Constness::Const => ty::BoundConstness::ConstIfConst, + hir::Constness::NotConst => ty::BoundConstness::NotConst, + }, + polarity: ty::ImplPolarity::Positive, + }; + let obligations = traits::wf::trait_obligations( + wfcx.infcx, + wfcx.param_env, + wfcx.body_id, + &trait_pred, + ast_trait_ref.path.span, + item, + ); + debug!(?obligations); + wfcx.register_obligations(obligations); + } + None => { + let self_ty = tcx.type_of(item.def_id); + let self_ty = wfcx.normalize(item.span, None, self_ty); + wfcx.register_wf_obligation( + ast_self_ty.span, + Some(WellFormedLoc::Ty(item.hir_id().expect_owner())), + self_ty.into(), + ); + } + } + + check_where_clauses(wfcx, item.span, item.def_id); + + impl_implied_bounds(tcx, wfcx.param_env, item.def_id, item.span) + }); +} + +/// Checks where-clauses and inline bounds that are declared on `def_id`. +#[instrument(level = "debug", skip(wfcx))] +fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id: LocalDefId) { + let infcx = wfcx.infcx; + let tcx = wfcx.tcx(); + + let predicates = tcx.bound_predicates_of(def_id.to_def_id()); + let generics = tcx.generics_of(def_id); + + let is_our_default = |def: &ty::GenericParamDef| match def.kind { + GenericParamDefKind::Type { has_default, .. } + | GenericParamDefKind::Const { has_default } => { + has_default && def.index >= generics.parent_count as u32 + } + GenericParamDefKind::Lifetime => unreachable!(), + }; + + // Check that concrete defaults are well-formed. See test `type-check-defaults.rs`. + // For example, this forbids the declaration: + // + // struct Foo> { .. } + // + // Here, the default `Vec<[u32]>` is not WF because `[u32]: Sized` does not hold. + for param in &generics.params { + match param.kind { + GenericParamDefKind::Type { .. } => { + if is_our_default(param) { + let ty = tcx.type_of(param.def_id); + // Ignore dependent defaults -- that is, where the default of one type + // parameter includes another (e.g., ``). In those cases, we can't + // be sure if it will error or not as user might always specify the other. + if !ty.needs_subst() { + wfcx.register_wf_obligation(tcx.def_span(param.def_id), None, ty.into()); + } + } + } + GenericParamDefKind::Const { .. } => { + if is_our_default(param) { + // FIXME(const_generics_defaults): This + // is incorrect when dealing with unused substs, for example + // for `struct Foo` + // we should eagerly error. + let default_ct = tcx.const_param_default(param.def_id); + if !default_ct.needs_subst() { + wfcx.register_wf_obligation( + tcx.def_span(param.def_id), + None, + default_ct.into(), + ); + } + } + } + // Doesn't have defaults. + GenericParamDefKind::Lifetime => {} + } + } + + // Check that trait predicates are WF when params are substituted by their defaults. + // We don't want to overly constrain the predicates that may be written but we want to + // catch cases where a default my never be applied such as `struct Foo`. + // Therefore we check if a predicate which contains a single type param + // with a concrete default is WF with that default substituted. + // For more examples see tests `defaults-well-formedness.rs` and `type-check-defaults.rs`. + // + // First we build the defaulted substitution. 
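// Illustrative sketch (not part of the patch; the type names are invented):
// the defaulted substitution built below is what lets this check reject
// defaults that could never satisfy their own bounds or be well-formed.
//
// ```rust
// struct Checked<T: Copy = u32>(T); // OK: the default `u32` satisfies `T: Copy`
//
// // struct Broken<T: Copy = String>(T); // rejected: `String: Copy` does not hold
// // struct AlsoBad<T = Vec<[u32]>>(T);  // rejected: `Vec<[u32]>` is not well-formed
// ```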
+ let substs = InternalSubsts::for_item(tcx, def_id.to_def_id(), |param, _| { + match param.kind { + GenericParamDefKind::Lifetime => { + // All regions are identity. + tcx.mk_param_from_def(param) + } + + GenericParamDefKind::Type { .. } => { + // If the param has a default, ... + if is_our_default(param) { + let default_ty = tcx.type_of(param.def_id); + // ... and it's not a dependent default, ... + if !default_ty.needs_subst() { + // ... then substitute it with the default. + return default_ty.into(); + } + } + + tcx.mk_param_from_def(param) + } + GenericParamDefKind::Const { .. } => { + // If the param has a default, ... + if is_our_default(param) { + let default_ct = tcx.const_param_default(param.def_id); + // ... and it's not a dependent default, ... + if !default_ct.needs_subst() { + // ... then substitute it with the default. + return default_ct.into(); + } + } + + tcx.mk_param_from_def(param) + } + } + }); + + // Now we build the substituted predicates. + let default_obligations = predicates + .0 + .predicates + .iter() + .flat_map(|&(pred, sp)| { + #[derive(Default)] + struct CountParams { + params: FxHashSet, + } + impl<'tcx> ty::visit::TypeVisitor<'tcx> for CountParams { + type BreakTy = (); + + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { + if let ty::Param(param) = t.kind() { + self.params.insert(param.index); + } + t.super_visit_with(self) + } + + fn visit_region(&mut self, _: ty::Region<'tcx>) -> ControlFlow { + ControlFlow::BREAK + } + + fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow { + if let ty::ConstKind::Param(param) = c.kind() { + self.params.insert(param.index); + } + c.super_visit_with(self) + } + } + let mut param_count = CountParams::default(); + let has_region = pred.visit_with(&mut param_count).is_break(); + let substituted_pred = predicates.rebind(pred).subst(tcx, substs); + // Don't check non-defaulted params, dependent defaults (including lifetimes) + // or preds with multiple params. + if substituted_pred.has_param_types_or_consts() + || param_count.params.len() > 1 + || has_region + { + None + } else if predicates.0.predicates.iter().any(|&(p, _)| p == substituted_pred) { + // Avoid duplication of predicates that contain no parameters, for example. + None + } else { + Some((substituted_pred, sp)) + } + }) + .map(|(pred, sp)| { + // Convert each of those into an obligation. So if you have + // something like `struct Foo`, we would + // take that predicate `T: Copy`, substitute to `String: Copy` + // (actually that happens in the previous `flat_map` call), + // and then try to prove it (in this case, we'll fail). + // + // Note the subtle difference from how we handle `predicates` + // below: there, we are not trying to prove those predicates + // to be *true* but merely *well-formed*. 
+ let pred = wfcx.normalize(sp, None, pred); + let cause = traits::ObligationCause::new( + sp, + wfcx.body_id, + traits::ItemObligation(def_id.to_def_id()), + ); + traits::Obligation::new(cause, wfcx.param_env, pred) + }); + + let predicates = predicates.0.instantiate_identity(tcx); + + let predicates = wfcx.normalize(span, None, predicates); + + debug!(?predicates.predicates); + assert_eq!(predicates.predicates.len(), predicates.spans.len()); + let wf_obligations = + iter::zip(&predicates.predicates, &predicates.spans).flat_map(|(&p, &sp)| { + traits::wf::predicate_obligations(infcx, wfcx.param_env, wfcx.body_id, p, sp) + }); + + let obligations: Vec<_> = wf_obligations.chain(default_obligations).collect(); + wfcx.register_obligations(obligations); +} + +#[tracing::instrument(level = "debug", skip(wfcx, span, hir_decl))] +fn check_fn_or_method<'tcx>( + wfcx: &WfCheckingCtxt<'_, 'tcx>, + span: Span, + sig: ty::PolyFnSig<'tcx>, + hir_decl: &hir::FnDecl<'_>, + def_id: LocalDefId, + implied_bounds: &mut FxHashSet>, +) { + let tcx = wfcx.tcx(); + let sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), sig); + + // Normalize the input and output types one at a time, using a different + // `WellFormedLoc` for each. We cannot call `normalize_associated_types` + // on the entire `FnSig`, since this would use the same `WellFormedLoc` + // for each type, preventing the HIR wf check from generating + // a nice error message. + let ty::FnSig { mut inputs_and_output, c_variadic, unsafety, abi } = sig; + inputs_and_output = tcx.mk_type_list(inputs_and_output.iter().enumerate().map(|(i, ty)| { + wfcx.normalize( + span, + Some(WellFormedLoc::Param { + function: def_id, + // Note that the `param_idx` of the output type is + // one greater than the index of the last input type. + param_idx: i.try_into().unwrap(), + }), + ty, + ) + })); + // Manually call `normalize_associated_types_in` on the other types + // in `FnSig`. This ensures that if the types of these fields + // ever change to include projections, we will start normalizing + // them automatically. + let sig = ty::FnSig { + inputs_and_output, + c_variadic: wfcx.normalize(span, None, c_variadic), + unsafety: wfcx.normalize(span, None, unsafety), + abi: wfcx.normalize(span, None, abi), + }; + + for (i, (&input_ty, ty)) in iter::zip(sig.inputs(), hir_decl.inputs).enumerate() { + wfcx.register_wf_obligation( + ty.span, + Some(WellFormedLoc::Param { function: def_id, param_idx: i.try_into().unwrap() }), + input_ty.into(), + ); + } + + implied_bounds.extend(sig.inputs()); + + wfcx.register_wf_obligation(hir_decl.output.span(), None, sig.output().into()); + + // FIXME(#27579) return types should not be implied bounds + implied_bounds.insert(sig.output()); + + debug!(?implied_bounds); + + check_where_clauses(wfcx, span, def_id); +} + +const HELP_FOR_SELF_TYPE: &str = "consider changing to `self`, `&self`, `&mut self`, `self: Box`, \ + `self: Rc`, `self: Arc`, or `self: Pin

` (where P is one \ + of the previous types except `Self`)"; + +#[tracing::instrument(level = "debug", skip(wfcx))] +fn check_method_receiver<'tcx>( + wfcx: &WfCheckingCtxt<'_, 'tcx>, + fn_sig: &hir::FnSig<'_>, + method: &ty::AssocItem, + self_ty: Ty<'tcx>, +) { + let tcx = wfcx.tcx(); + + if !method.fn_has_self_parameter { + return; + } + + let span = fn_sig.decl.inputs[0].span; + + let sig = tcx.fn_sig(method.def_id); + let sig = tcx.liberate_late_bound_regions(method.def_id, sig); + let sig = wfcx.normalize(span, None, sig); + + debug!("check_method_receiver: sig={:?}", sig); + + let self_ty = wfcx.normalize(span, None, self_ty); + + let receiver_ty = sig.inputs()[0]; + let receiver_ty = wfcx.normalize(span, None, receiver_ty); + + if tcx.features().arbitrary_self_types { + if !receiver_is_valid(wfcx, span, receiver_ty, self_ty, true) { + // Report error; `arbitrary_self_types` was enabled. + e0307(tcx, span, receiver_ty); + } + } else { + if !receiver_is_valid(wfcx, span, receiver_ty, self_ty, false) { + if receiver_is_valid(wfcx, span, receiver_ty, self_ty, true) { + // Report error; would have worked with `arbitrary_self_types`. + feature_err( + &tcx.sess.parse_sess, + sym::arbitrary_self_types, + span, + &format!( + "`{receiver_ty}` cannot be used as the type of `self` without \ + the `arbitrary_self_types` feature", + ), + ) + .help(HELP_FOR_SELF_TYPE) + .emit(); + } else { + // Report error; would not have worked with `arbitrary_self_types`. + e0307(tcx, span, receiver_ty); + } + } + } +} + +fn e0307<'tcx>(tcx: TyCtxt<'tcx>, span: Span, receiver_ty: Ty<'_>) { + struct_span_err!( + tcx.sess.diagnostic(), + span, + E0307, + "invalid `self` parameter type: {receiver_ty}" + ) + .note("type of `self` must be `Self` or a type that dereferences to it") + .help(HELP_FOR_SELF_TYPE) + .emit(); +} + +/// Returns whether `receiver_ty` would be considered a valid receiver type for `self_ty`. If +/// `arbitrary_self_types` is enabled, `receiver_ty` must transitively deref to `self_ty`, possibly +/// through a `*const/mut T` raw pointer. If the feature is not enabled, the requirements are more +/// strict: `receiver_ty` must implement `Receiver` and directly implement +/// `Deref`. +/// +/// N.B., there are cases this function returns `true` but causes an error to be emitted, +/// particularly when `receiver_ty` derefs to a type that is the same as `self_ty` but has the +/// wrong lifetime. Be careful of this if you are calling this function speculatively. +fn receiver_is_valid<'tcx>( + wfcx: &WfCheckingCtxt<'_, 'tcx>, + span: Span, + receiver_ty: Ty<'tcx>, + self_ty: Ty<'tcx>, + arbitrary_self_types_enabled: bool, +) -> bool { + let infcx = wfcx.infcx; + let tcx = wfcx.tcx(); + let cause = + ObligationCause::new(span, wfcx.body_id, traits::ObligationCauseCode::MethodReceiver); + + let can_eq_self = |ty| infcx.can_eq(wfcx.param_env, self_ty, ty).is_ok(); + + // `self: Self` is always valid. + if can_eq_self(receiver_ty) { + if let Err(err) = wfcx.equate_types(&cause, wfcx.param_env, self_ty, receiver_ty) { + infcx.report_mismatched_types(&cause, self_ty, receiver_ty, err).emit(); + } + return true; + } + + let mut autoderef = + Autoderef::new(infcx, wfcx.param_env, wfcx.body_id, span, receiver_ty, span); + + // The `arbitrary_self_types` feature allows raw pointer receivers like `self: *const Self`. + if arbitrary_self_types_enabled { + autoderef = autoderef.include_raw_pointers(); + } + + // The first type is `receiver_ty`, which we know its not equal to `self_ty`; skip it. 
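// Illustrative sketch (not upstream code; `Widget` is an invented name): these
// are the receiver types the deref walk below accepts on stable; raw-pointer
// receivers additionally need `feature(arbitrary_self_types)`.
//
// ```rust
// use std::{pin::Pin, rc::Rc, sync::Arc};
//
// struct Widget;
//
// impl Widget {
//     fn by_ref(&self) {}
//     fn by_box(self: Box<Self>) {}
//     fn by_rc(self: Rc<Self>) {}
//     fn by_arc(self: Arc<Self>) {}
//     fn by_pin(self: Pin<&mut Self>) {}
//     // fn by_ptr(self: *const Self) {} // E0658 without `arbitrary_self_types`
// }
// ```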
+ autoderef.next(); + + let receiver_trait_def_id = tcx.require_lang_item(LangItem::Receiver, None); + + // Keep dereferencing `receiver_ty` until we get to `self_ty`. + loop { + if let Some((potential_self_ty, _)) = autoderef.next() { + debug!( + "receiver_is_valid: potential self type `{:?}` to match `{:?}`", + potential_self_ty, self_ty + ); + + if can_eq_self(potential_self_ty) { + wfcx.register_obligations(autoderef.into_obligations()); + + if let Err(err) = + wfcx.equate_types(&cause, wfcx.param_env, self_ty, potential_self_ty) + { + infcx.report_mismatched_types(&cause, self_ty, potential_self_ty, err).emit(); + } + + break; + } else { + // Without `feature(arbitrary_self_types)`, we require that each step in the + // deref chain implement `receiver` + if !arbitrary_self_types_enabled + && !receiver_is_implemented( + wfcx, + receiver_trait_def_id, + cause.clone(), + potential_self_ty, + ) + { + return false; + } + } + } else { + debug!("receiver_is_valid: type `{:?}` does not deref to `{:?}`", receiver_ty, self_ty); + // If the receiver already has errors reported due to it, consider it valid to avoid + // unnecessary errors (#58712). + return receiver_ty.references_error(); + } + } + + // Without `feature(arbitrary_self_types)`, we require that `receiver_ty` implements `Receiver`. + if !arbitrary_self_types_enabled + && !receiver_is_implemented(wfcx, receiver_trait_def_id, cause.clone(), receiver_ty) + { + return false; + } + + true +} + +fn receiver_is_implemented<'tcx>( + wfcx: &WfCheckingCtxt<'_, 'tcx>, + receiver_trait_def_id: DefId, + cause: ObligationCause<'tcx>, + receiver_ty: Ty<'tcx>, +) -> bool { + let tcx = wfcx.tcx(); + let trait_ref = ty::Binder::dummy(ty::TraitRef { + def_id: receiver_trait_def_id, + substs: tcx.mk_substs_trait(receiver_ty, &[]), + }); + + let obligation = + traits::Obligation::new(cause, wfcx.param_env, trait_ref.without_const().to_predicate(tcx)); + + if wfcx.infcx.predicate_must_hold_modulo_regions(&obligation) { + true + } else { + debug!( + "receiver_is_implemented: type `{:?}` does not implement `Receiver` trait", + receiver_ty + ); + false + } +} + +fn check_variances_for_type_defn<'tcx>( + tcx: TyCtxt<'tcx>, + item: &hir::Item<'tcx>, + hir_generics: &hir::Generics<'_>, +) { + let ty = tcx.type_of(item.def_id); + if tcx.has_error_field(ty) { + return; + } + + let ty_predicates = tcx.predicates_of(item.def_id); + assert_eq!(ty_predicates.parent, None); + let variances = tcx.variances_of(item.def_id); + + let mut constrained_parameters: FxHashSet<_> = variances + .iter() + .enumerate() + .filter(|&(_, &variance)| variance != ty::Bivariant) + .map(|(index, _)| Parameter(index as u32)) + .collect(); + + identify_constrained_generic_params(tcx, ty_predicates, None, &mut constrained_parameters); + + // Lazily calculated because it is only needed in case of an error. 
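// Illustrative sketch (the type names are invented): the bivariance report
// produced below is E0392, and `PhantomData` is the usual way to satisfy it
// without storing a value of the parameter's type.
//
// ```rust
// use std::marker::PhantomData;
//
// // struct Unused<T> { id: u64 } // error[E0392]: parameter `T` is never used
//
// struct Tagged<T> {
//     id: u64,
//     _marker: PhantomData<T>, // ties `T` to the type without storing one
// }
// ```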
+ let explicitly_bounded_params = LazyCell::new(|| {
+ let icx = crate::collect::ItemCtxt::new(tcx, item.def_id.to_def_id());
+ hir_generics
+ .predicates
+ .iter()
+ .filter_map(|predicate| match predicate {
+ hir::WherePredicate::BoundPredicate(predicate) => {
+ match icx.to_ty(predicate.bounded_ty).kind() {
+ ty::Param(data) => Some(Parameter(data.index)),
+ _ => None,
+ }
+ }
+ _ => None,
+ })
+ .collect::<FxHashSet<_>>()
+ });
+
+ for (index, _) in variances.iter().enumerate() {
+ let parameter = Parameter(index as u32);
+
+ if constrained_parameters.contains(&parameter) {
+ continue;
+ }
+
+ let param = &hir_generics.params[index];
+
+ match param.name {
+ hir::ParamName::Error => {}
+ _ => {
+ let has_explicit_bounds = explicitly_bounded_params.contains(&parameter);
+ report_bivariance(tcx, param, has_explicit_bounds);
+ }
+ }
+ }
+}
+
+fn report_bivariance(
+ tcx: TyCtxt<'_>,
+ param: &rustc_hir::GenericParam<'_>,
+ has_explicit_bounds: bool,
+) -> ErrorGuaranteed {
+ let span = param.span;
+ let param_name = param.name.ident().name;
+ let mut err = error_392(tcx, span, param_name);
+
+ let suggested_marker_id = tcx.lang_items().phantom_data();
+ // Help is available only in presence of lang items.
+ let msg = if let Some(def_id) = suggested_marker_id {
+ format!(
+ "consider removing `{}`, referring to it in a field, or using a marker such as `{}`",
+ param_name,
+ tcx.def_path_str(def_id),
+ )
+ } else {
+ format!("consider removing `{param_name}` or referring to it in a field")
+ };
+ err.help(&msg);
+
+ if matches!(param.kind, hir::GenericParamKind::Type { .. }) && !has_explicit_bounds {
+ err.help(&format!(
+ "if you intended `{0}` to be a const parameter, use `const {0}: usize` instead",
+ param_name
+ ));
+ }
+ err.emit()
+}
+
+impl<'tcx> WfCheckingCtxt<'_, 'tcx> {
+ /// Feature gates RFC 2056 -- trivial bounds, checking for global bounds that
+ /// aren't true.
+ fn check_false_global_bounds(&mut self) {
+ let tcx = self.ocx.infcx.tcx;
+ let mut span = self.span;
+ let empty_env = ty::ParamEnv::empty();
+
+ let def_id = tcx.hir().local_def_id(self.body_id);
+ let predicates_with_span = tcx.predicates_of(def_id).predicates.iter().copied();
+ // Check elaborated bounds.
+ let implied_obligations = traits::elaborate_predicates_with_span(tcx, predicates_with_span);
+
+ for obligation in implied_obligations {
+ // We lower empty bounds like `Vec<dyn Copy>:` as
+ // `WellFormed(Vec<dyn Copy>)`, which will later get checked by
+ // regular WF checking
+ if let ty::PredicateKind::WellFormed(..) = obligation.predicate.kind().skip_binder() {
+ continue;
+ }
+ let pred = obligation.predicate;
+ // Match the existing behavior.
+ if pred.is_global() && !pred.has_late_bound_regions() {
+ let pred = self.normalize(span, None, pred);
+ let hir_node = tcx.hir().find(self.body_id);
+
+ // only use the span of the predicate clause (#90869)
+
+ if let Some(hir::Generics { predicates, ..
}) = + hir_node.and_then(|node| node.generics()) + { + let obligation_span = obligation.cause.span(); + + span = predicates + .iter() + // There seems to be no better way to find out which predicate we are in + .find(|pred| pred.span().contains(obligation_span)) + .map(|pred| pred.span()) + .unwrap_or(obligation_span); + } + + let obligation = traits::Obligation::new( + traits::ObligationCause::new(span, self.body_id, traits::TrivialBound), + empty_env, + pred, + ); + self.ocx.register_obligation(obligation); + } + } + } +} + +fn check_mod_type_wf(tcx: TyCtxt<'_>, module: LocalDefId) { + let items = tcx.hir_module_items(module); + items.par_items(|item| tcx.ensure().check_well_formed(item.def_id)); + items.par_impl_items(|item| tcx.ensure().check_well_formed(item.def_id)); + items.par_trait_items(|item| tcx.ensure().check_well_formed(item.def_id)); + items.par_foreign_items(|item| tcx.ensure().check_well_formed(item.def_id)); +} + +/////////////////////////////////////////////////////////////////////////// +// ADT + +// FIXME(eddyb) replace this with getting fields/discriminants through `ty::AdtDef`. +struct AdtVariant<'tcx> { + /// Types of fields in the variant, that must be well-formed. + fields: Vec>, + + /// Explicit discriminant of this variant (e.g. `A = 123`), + /// that must evaluate to a constant value. + explicit_discr: Option, +} + +struct AdtField<'tcx> { + ty: Ty<'tcx>, + def_id: LocalDefId, + span: Span, +} + +impl<'a, 'tcx> WfCheckingCtxt<'a, 'tcx> { + // FIXME(eddyb) replace this with getting fields through `ty::AdtDef`. + fn non_enum_variant(&self, struct_def: &hir::VariantData<'_>) -> AdtVariant<'tcx> { + let fields = struct_def + .fields() + .iter() + .map(|field| { + let def_id = self.tcx().hir().local_def_id(field.hir_id); + let field_ty = self.tcx().type_of(def_id); + let field_ty = self.normalize(field.ty.span, None, field_ty); + debug!("non_enum_variant: type of field {:?} is {:?}", field, field_ty); + AdtField { ty: field_ty, span: field.ty.span, def_id } + }) + .collect(); + AdtVariant { fields, explicit_discr: None } + } + + fn enum_variants(&self, enum_def: &hir::EnumDef<'_>) -> Vec> { + enum_def + .variants + .iter() + .map(|variant| AdtVariant { + fields: self.non_enum_variant(&variant.data).fields, + explicit_discr: variant + .disr_expr + .map(|explicit_discr| self.tcx().hir().local_def_id(explicit_discr.hir_id)), + }) + .collect() + } +} + +pub fn impl_implied_bounds<'tcx>( + tcx: TyCtxt<'tcx>, + param_env: ty::ParamEnv<'tcx>, + impl_def_id: LocalDefId, + span: Span, +) -> FxHashSet> { + // We completely ignore any obligations caused by normalizing the types + // we assume to be well formed. Considering that the user of the implied + // bounds will also normalize them, we leave it to them to emit errors + // which should result in better causes and spans. + tcx.infer_ctxt().enter(|infcx| { + let cause = ObligationCause::misc(span, tcx.hir().local_def_id_to_hir_id(impl_def_id)); + match tcx.impl_trait_ref(impl_def_id) { + Some(trait_ref) => { + // Trait impl: take implied bounds from all types that + // appear in the trait reference. + match infcx.at(&cause, param_env).normalize(trait_ref) { + Ok(Normalized { value, obligations: _ }) => value.substs.types().collect(), + Err(NoSolution) => FxHashSet::default(), + } + } + + None => { + // Inherent impl: take implied bounds from the `self` type. 
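// Illustrative sketch (not upstream code; `Ref` is an invented name): this is
// what the implied bounds gathered here buy. Inside the impl below, `T: 'a`
// never has to be written because the well-formedness of `&'a T` already
// implies it.
//
// ```rust
// struct Ref<'a, T>(&'a T);
//
// impl<'a, T> Ref<'a, T> {
//     fn get(&self) -> &'a T {
//         self.0
//     }
// }
// ```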
+ let self_ty = tcx.type_of(impl_def_id); + match infcx.at(&cause, param_env).normalize(self_ty) { + Ok(Normalized { value, obligations: _ }) => FxHashSet::from_iter([value]), + Err(NoSolution) => FxHashSet::default(), + } + } + } + }) +} + +fn error_392( + tcx: TyCtxt<'_>, + span: Span, + param_name: Symbol, +) -> DiagnosticBuilder<'_, ErrorGuaranteed> { + let mut err = struct_span_err!(tcx.sess, span, E0392, "parameter `{param_name}` is never used"); + err.span_label(span, "unused parameter"); + err +} + +pub fn provide(providers: &mut Providers) { + *providers = Providers { check_mod_type_wf, check_well_formed, ..*providers }; +} diff --git a/compiler/rustc_typeck/src/check/writeback.rs b/compiler/rustc_typeck/src/check/writeback.rs new file mode 100644 index 000000000..f549807c3 --- /dev/null +++ b/compiler/rustc_typeck/src/check/writeback.rs @@ -0,0 +1,783 @@ +// Type resolution: the phase that finds all the types in the AST with +// unresolved type variables and replaces "ty_var" types with their +// substitutions. + +use crate::check::FnCtxt; + +use hir::def_id::LocalDefId; +use rustc_data_structures::fx::FxHashMap; +use rustc_errors::ErrorGuaranteed; +use rustc_hir as hir; +use rustc_hir::intravisit::{self, Visitor}; +use rustc_infer::infer::error_reporting::TypeAnnotationNeeded::E0282; +use rustc_infer::infer::InferCtxt; +use rustc_middle::hir::place::Place as HirPlace; +use rustc_middle::mir::FakeReadCause; +use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCast}; +use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable}; +use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitable}; +use rustc_middle::ty::{self, ClosureSizeProfileData, Ty, TyCtxt}; +use rustc_span::symbol::sym; +use rustc_span::Span; + +use std::mem; +use std::ops::ControlFlow; + +/////////////////////////////////////////////////////////////////////////// +// Entry point + +// During type inference, partially inferred types are +// represented using Type variables (ty::Infer). These don't appear in +// the final TypeckResults since all of the types should have been +// inferred once typeck is done. +// When type inference is running however, having to update the typeck +// typeck results every time a new type is inferred would be unreasonably slow, +// so instead all of the replacement happens at the end in +// resolve_type_vars_in_body, which creates a new TypeTables which +// doesn't contain any inference types. +impl<'a, 'tcx> FnCtxt<'a, 'tcx> { + pub fn resolve_type_vars_in_body( + &self, + body: &'tcx hir::Body<'tcx>, + ) -> &'tcx ty::TypeckResults<'tcx> { + let item_id = self.tcx.hir().body_owner(body.id()); + let item_def_id = self.tcx.hir().local_def_id(item_id); + + // This attribute causes us to dump some writeback information + // in the form of errors, which is used for unit tests. + let rustc_dump_user_substs = + self.tcx.has_attr(item_def_id.to_def_id(), sym::rustc_dump_user_substs); + + let mut wbcx = WritebackCx::new(self, body, rustc_dump_user_substs); + for param in body.params { + wbcx.visit_node_id(param.pat.span, param.hir_id); + } + // Type only exists for constants and statics, not functions. 
+ match self.tcx.hir().body_owner_kind(item_def_id) { + hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_) => { + wbcx.visit_node_id(body.value.span, item_id); + } + hir::BodyOwnerKind::Closure | hir::BodyOwnerKind::Fn => (), + } + wbcx.visit_body(body); + wbcx.visit_min_capture_map(); + wbcx.eval_closure_size(); + wbcx.visit_fake_reads_map(); + wbcx.visit_closures(); + wbcx.visit_liberated_fn_sigs(); + wbcx.visit_fru_field_types(); + wbcx.visit_opaque_types(); + wbcx.visit_coercion_casts(); + wbcx.visit_user_provided_tys(); + wbcx.visit_user_provided_sigs(); + wbcx.visit_generator_interior_types(); + + wbcx.typeck_results.rvalue_scopes = + mem::take(&mut self.typeck_results.borrow_mut().rvalue_scopes); + + let used_trait_imports = + mem::take(&mut self.typeck_results.borrow_mut().used_trait_imports); + debug!("used_trait_imports({:?}) = {:?}", item_def_id, used_trait_imports); + wbcx.typeck_results.used_trait_imports = used_trait_imports; + + wbcx.typeck_results.treat_byte_string_as_slice = + mem::take(&mut self.typeck_results.borrow_mut().treat_byte_string_as_slice); + + if self.is_tainted_by_errors() { + // FIXME(eddyb) keep track of `ErrorGuaranteed` from where the error was emitted. + wbcx.typeck_results.tainted_by_errors = + Some(ErrorGuaranteed::unchecked_claim_error_was_emitted()); + } + + debug!("writeback: typeck results for {:?} are {:#?}", item_def_id, wbcx.typeck_results); + + self.tcx.arena.alloc(wbcx.typeck_results) + } +} + +/////////////////////////////////////////////////////////////////////////// +// The Writeback context. This visitor walks the HIR, checking the +// fn-specific typeck results to find references to types or regions. It +// resolves those regions to remove inference variables and writes the +// final result back into the master typeck results in the tcx. Here and +// there, it applies a few ad-hoc checks that were not convenient to +// do elsewhere. + +struct WritebackCx<'cx, 'tcx> { + fcx: &'cx FnCtxt<'cx, 'tcx>, + + typeck_results: ty::TypeckResults<'tcx>, + + body: &'tcx hir::Body<'tcx>, + + rustc_dump_user_substs: bool, +} + +impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { + fn new( + fcx: &'cx FnCtxt<'cx, 'tcx>, + body: &'tcx hir::Body<'tcx>, + rustc_dump_user_substs: bool, + ) -> WritebackCx<'cx, 'tcx> { + let owner = body.id().hir_id.owner; + + WritebackCx { + fcx, + typeck_results: ty::TypeckResults::new(owner), + body, + rustc_dump_user_substs, + } + } + + fn tcx(&self) -> TyCtxt<'tcx> { + self.fcx.tcx + } + + fn write_ty_to_typeck_results(&mut self, hir_id: hir::HirId, ty: Ty<'tcx>) { + debug!("write_ty_to_typeck_results({:?}, {:?})", hir_id, ty); + assert!(!ty.needs_infer() && !ty.has_placeholders() && !ty.has_free_regions()); + self.typeck_results.node_types_mut().insert(hir_id, ty); + } + + // Hacky hack: During type-checking, we treat *all* operators + // as potentially overloaded. But then, during writeback, if + // we observe that something like `a+b` is (known to be) + // operating on scalars, we clear the overload. 
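// Illustrative sketch (not upstream code; `Meters` is an invented name): the
// distinction handled below, seen from the user side. Scalar `+` and `usize`
// indexing stay builtin, while user-defined types go through `Add::add` and
// `Index::index`.
//
// ```rust
// use std::ops::Add;
//
// #[derive(Clone, Copy, Debug)]
// struct Meters(f64);
//
// impl Add for Meters {
//     type Output = Meters;
//     fn add(self, rhs: Meters) -> Meters {
//         Meters(self.0 + rhs.0)
//     }
// }
//
// fn main() {
//     let a = 1i32 + 2;                  // builtin scalar add, no method call recorded
//     let b = Meters(1.5) + Meters(2.5); // overloaded: effectively `Add::add(..)`
//     let xs = [10, 20, 30];
//     let c = xs[1];                     // builtin indexing with a `usize`
//     println!("{a} {b:?} {c}");
// }
// ```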
+ fn fix_scalar_builtin_expr(&mut self, e: &hir::Expr<'_>) { + match e.kind { + hir::ExprKind::Unary(hir::UnOp::Neg | hir::UnOp::Not, inner) => { + let inner_ty = self.fcx.node_ty(inner.hir_id); + let inner_ty = self.fcx.resolve_vars_if_possible(inner_ty); + + if inner_ty.is_scalar() { + let mut typeck_results = self.fcx.typeck_results.borrow_mut(); + typeck_results.type_dependent_defs_mut().remove(e.hir_id); + typeck_results.node_substs_mut().remove(e.hir_id); + } + } + hir::ExprKind::Binary(ref op, lhs, rhs) | hir::ExprKind::AssignOp(ref op, lhs, rhs) => { + let lhs_ty = self.fcx.node_ty(lhs.hir_id); + let lhs_ty = self.fcx.resolve_vars_if_possible(lhs_ty); + + let rhs_ty = self.fcx.node_ty(rhs.hir_id); + let rhs_ty = self.fcx.resolve_vars_if_possible(rhs_ty); + + if lhs_ty.is_scalar() && rhs_ty.is_scalar() { + let mut typeck_results = self.fcx.typeck_results.borrow_mut(); + typeck_results.type_dependent_defs_mut().remove(e.hir_id); + typeck_results.node_substs_mut().remove(e.hir_id); + + match e.kind { + hir::ExprKind::Binary(..) => { + if !op.node.is_by_value() { + let mut adjustments = typeck_results.adjustments_mut(); + if let Some(a) = adjustments.get_mut(lhs.hir_id) { + a.pop(); + } + if let Some(a) = adjustments.get_mut(rhs.hir_id) { + a.pop(); + } + } + } + hir::ExprKind::AssignOp(..) + if let Some(a) = typeck_results.adjustments_mut().get_mut(lhs.hir_id) => + { + a.pop(); + } + _ => {} + } + } + } + _ => {} + } + } + + // Similar to operators, indexing is always assumed to be overloaded + // Here, correct cases where an indexing expression can be simplified + // to use builtin indexing because the index type is known to be + // usize-ish + fn fix_index_builtin_expr(&mut self, e: &hir::Expr<'_>) { + if let hir::ExprKind::Index(ref base, ref index) = e.kind { + let mut typeck_results = self.fcx.typeck_results.borrow_mut(); + + // All valid indexing looks like this; might encounter non-valid indexes at this point. + let base_ty = typeck_results + .expr_ty_adjusted_opt(base) + .map(|t| self.fcx.resolve_vars_if_possible(t).kind()); + if base_ty.is_none() { + // When encountering `return [0][0]` outside of a `fn` body we can encounter a base + // that isn't in the type table. We assume more relevant errors have already been + // emitted, so we delay an ICE if none have. (#64638) + self.tcx().sess.delay_span_bug(e.span, &format!("bad base: `{:?}`", base)); + } + if let Some(ty::Ref(_, base_ty, _)) = base_ty { + let index_ty = typeck_results.expr_ty_adjusted_opt(index).unwrap_or_else(|| { + // When encountering `return [0][0]` outside of a `fn` body we would attempt + // to access an nonexistent index. We assume that more relevant errors will + // already have been emitted, so we only gate on this with an ICE if no + // error has been emitted. 
(#64638) + self.fcx.tcx.ty_error_with_message( + e.span, + &format!("bad index {:?} for base: `{:?}`", index, base), + ) + }); + let index_ty = self.fcx.resolve_vars_if_possible(index_ty); + + if base_ty.builtin_index().is_some() && index_ty == self.fcx.tcx.types.usize { + // Remove the method call record + typeck_results.type_dependent_defs_mut().remove(e.hir_id); + typeck_results.node_substs_mut().remove(e.hir_id); + + if let Some(a) = typeck_results.adjustments_mut().get_mut(base.hir_id) { + // Discard the need for a mutable borrow + + // Extra adjustment made when indexing causes a drop + // of size information - we need to get rid of it + // Since this is "after" the other adjustment to be + // discarded, we do an extra `pop()` + if let Some(Adjustment { + kind: Adjust::Pointer(PointerCast::Unsize), .. + }) = a.pop() + { + // So the borrow discard actually happens here + a.pop(); + } + } + } + } + } + } +} + +/////////////////////////////////////////////////////////////////////////// +// Impl of Visitor for Resolver +// +// This is the master code which walks the AST. It delegates most of +// the heavy lifting to the generic visit and resolve functions +// below. In general, a function is made into a `visitor` if it must +// traffic in node-ids or update typeck results in the type context etc. + +impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> { + fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) { + self.fix_scalar_builtin_expr(e); + self.fix_index_builtin_expr(e); + + match e.kind { + hir::ExprKind::Closure(&hir::Closure { body, .. }) => { + let body = self.fcx.tcx.hir().body(body); + for param in body.params { + self.visit_node_id(e.span, param.hir_id); + } + + self.visit_body(body); + } + hir::ExprKind::Struct(_, fields, _) => { + for field in fields { + self.visit_field_id(field.hir_id); + } + } + hir::ExprKind::Field(..) => { + self.visit_field_id(e.hir_id); + } + hir::ExprKind::ConstBlock(anon_const) => { + self.visit_node_id(e.span, anon_const.hir_id); + + let body = self.tcx().hir().body(anon_const.body); + self.visit_body(body); + } + _ => {} + } + + self.visit_node_id(e.span, e.hir_id); + intravisit::walk_expr(self, e); + } + + fn visit_block(&mut self, b: &'tcx hir::Block<'tcx>) { + self.visit_node_id(b.span, b.hir_id); + intravisit::walk_block(self, b); + } + + fn visit_pat(&mut self, p: &'tcx hir::Pat<'tcx>) { + match p.kind { + hir::PatKind::Binding(..) 
=> { + let typeck_results = self.fcx.typeck_results.borrow(); + if let Some(bm) = + typeck_results.extract_binding_mode(self.tcx().sess, p.hir_id, p.span) + { + self.typeck_results.pat_binding_modes_mut().insert(p.hir_id, bm); + } + } + hir::PatKind::Struct(_, fields, _) => { + for field in fields { + self.visit_field_id(field.hir_id); + } + } + _ => {} + }; + + self.visit_pat_adjustments(p.span, p.hir_id); + + self.visit_node_id(p.span, p.hir_id); + intravisit::walk_pat(self, p); + } + + fn visit_local(&mut self, l: &'tcx hir::Local<'tcx>) { + intravisit::walk_local(self, l); + let var_ty = self.fcx.local_ty(l.span, l.hir_id).decl_ty; + let var_ty = self.resolve(var_ty, &l.span); + self.write_ty_to_typeck_results(l.hir_id, var_ty); + } + + fn visit_ty(&mut self, hir_ty: &'tcx hir::Ty<'tcx>) { + intravisit::walk_ty(self, hir_ty); + let ty = self.fcx.node_ty(hir_ty.hir_id); + let ty = self.resolve(ty, &hir_ty.span); + self.write_ty_to_typeck_results(hir_ty.hir_id, ty); + } + + fn visit_infer(&mut self, inf: &'tcx hir::InferArg) { + intravisit::walk_inf(self, inf); + // Ignore cases where the inference is a const. + if let Some(ty) = self.fcx.node_ty_opt(inf.hir_id) { + let ty = self.resolve(ty, &inf.span); + self.write_ty_to_typeck_results(inf.hir_id, ty); + } + } +} + +impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { + fn eval_closure_size(&mut self) { + let mut res: FxHashMap> = Default::default(); + for (&closure_def_id, data) in self.fcx.typeck_results.borrow().closure_size_eval.iter() { + let closure_hir_id = self.tcx().hir().local_def_id_to_hir_id(closure_def_id); + + let data = self.resolve(*data, &closure_hir_id); + + res.insert(closure_def_id, data); + } + + self.typeck_results.closure_size_eval = res; + } + fn visit_min_capture_map(&mut self) { + let mut min_captures_wb = ty::MinCaptureInformationMap::with_capacity_and_hasher( + self.fcx.typeck_results.borrow().closure_min_captures.len(), + Default::default(), + ); + for (&closure_def_id, root_min_captures) in + self.fcx.typeck_results.borrow().closure_min_captures.iter() + { + let mut root_var_map_wb = ty::RootVariableMinCaptureList::with_capacity_and_hasher( + root_min_captures.len(), + Default::default(), + ); + for (var_hir_id, min_list) in root_min_captures.iter() { + let min_list_wb = min_list + .iter() + .map(|captured_place| { + let locatable = captured_place.info.path_expr_id.unwrap_or_else(|| { + self.tcx().hir().local_def_id_to_hir_id(closure_def_id) + }); + + self.resolve(captured_place.clone(), &locatable) + }) + .collect(); + root_var_map_wb.insert(*var_hir_id, min_list_wb); + } + min_captures_wb.insert(closure_def_id, root_var_map_wb); + } + + self.typeck_results.closure_min_captures = min_captures_wb; + } + + fn visit_fake_reads_map(&mut self) { + let mut resolved_closure_fake_reads: FxHashMap< + LocalDefId, + Vec<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>, + > = Default::default(); + for (&closure_def_id, fake_reads) in + self.fcx.typeck_results.borrow().closure_fake_reads.iter() + { + let mut resolved_fake_reads = Vec::<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>::new(); + for (place, cause, hir_id) in fake_reads.iter() { + let locatable = self.tcx().hir().local_def_id_to_hir_id(closure_def_id); + + let resolved_fake_read = self.resolve(place.clone(), &locatable); + resolved_fake_reads.push((resolved_fake_read, *cause, *hir_id)); + } + resolved_closure_fake_reads.insert(closure_def_id, resolved_fake_reads); + } + self.typeck_results.closure_fake_reads = resolved_closure_fake_reads; + } + + fn visit_closures(&mut self) 
{ + let fcx_typeck_results = self.fcx.typeck_results.borrow(); + assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner); + let common_hir_owner = fcx_typeck_results.hir_owner; + + for (id, origin) in fcx_typeck_results.closure_kind_origins().iter() { + let hir_id = hir::HirId { owner: common_hir_owner, local_id: *id }; + let place_span = origin.0; + let place = self.resolve(origin.1.clone(), &place_span); + self.typeck_results.closure_kind_origins_mut().insert(hir_id, (place_span, place)); + } + } + + fn visit_coercion_casts(&mut self) { + let fcx_typeck_results = self.fcx.typeck_results.borrow(); + let fcx_coercion_casts = fcx_typeck_results.coercion_casts(); + assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner); + + for local_id in fcx_coercion_casts { + self.typeck_results.set_coercion_cast(*local_id); + } + } + + fn visit_user_provided_tys(&mut self) { + let fcx_typeck_results = self.fcx.typeck_results.borrow(); + assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner); + let common_hir_owner = fcx_typeck_results.hir_owner; + + let mut errors_buffer = Vec::new(); + for (&local_id, c_ty) in fcx_typeck_results.user_provided_types().iter() { + let hir_id = hir::HirId { owner: common_hir_owner, local_id }; + + if cfg!(debug_assertions) && c_ty.needs_infer() { + span_bug!( + hir_id.to_span(self.fcx.tcx), + "writeback: `{:?}` has inference variables", + c_ty + ); + }; + + self.typeck_results.user_provided_types_mut().insert(hir_id, *c_ty); + + if let ty::UserType::TypeOf(_, user_substs) = c_ty.value { + if self.rustc_dump_user_substs { + // This is a unit-testing mechanism. + let span = self.tcx().hir().span(hir_id); + // We need to buffer the errors in order to guarantee a consistent + // order when emitting them. + let err = self + .tcx() + .sess + .struct_span_err(span, &format!("user substs: {:?}", user_substs)); + err.buffer(&mut errors_buffer); + } + } + } + + if !errors_buffer.is_empty() { + errors_buffer.sort_by_key(|diag| diag.span.primary_span()); + for mut diag in errors_buffer.drain(..) 
{ + self.tcx().sess.diagnostic().emit_diagnostic(&mut diag); + } + } + } + + fn visit_user_provided_sigs(&mut self) { + let fcx_typeck_results = self.fcx.typeck_results.borrow(); + assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner); + + for (&def_id, c_sig) in fcx_typeck_results.user_provided_sigs.iter() { + if cfg!(debug_assertions) && c_sig.needs_infer() { + span_bug!( + self.fcx.tcx.hir().span_if_local(def_id).unwrap(), + "writeback: `{:?}` has inference variables", + c_sig + ); + }; + + self.typeck_results.user_provided_sigs.insert(def_id, *c_sig); + } + } + + fn visit_generator_interior_types(&mut self) { + let fcx_typeck_results = self.fcx.typeck_results.borrow(); + assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner); + self.typeck_results.generator_interior_types = + fcx_typeck_results.generator_interior_types.clone(); + } + + #[instrument(skip(self), level = "debug")] + fn visit_opaque_types(&mut self) { + let opaque_types = + self.fcx.infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types(); + for (opaque_type_key, decl) in opaque_types { + let hidden_type = match decl.origin { + hir::OpaqueTyOrigin::FnReturn(_) | hir::OpaqueTyOrigin::AsyncFn(_) => { + let ty = self.resolve(decl.hidden_type.ty, &decl.hidden_type.span); + struct RecursionChecker { + def_id: LocalDefId, + } + impl<'tcx> ty::TypeVisitor<'tcx> for RecursionChecker { + type BreakTy = (); + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { + if let ty::Opaque(def_id, _) = *t.kind() { + if def_id == self.def_id.to_def_id() { + return ControlFlow::Break(()); + } + } + t.super_visit_with(self) + } + } + if ty + .visit_with(&mut RecursionChecker { def_id: opaque_type_key.def_id }) + .is_break() + { + return; + } + Some(ty) + } + hir::OpaqueTyOrigin::TyAlias => None, + }; + self.typeck_results.concrete_opaque_types.insert(opaque_type_key.def_id, hidden_type); + } + } + + fn visit_field_id(&mut self, hir_id: hir::HirId) { + if let Some(index) = self.fcx.typeck_results.borrow_mut().field_indices_mut().remove(hir_id) + { + self.typeck_results.field_indices_mut().insert(hir_id, index); + } + } + + #[instrument(skip(self, span), level = "debug")] + fn visit_node_id(&mut self, span: Span, hir_id: hir::HirId) { + // Export associated path extensions and method resolutions. 
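// Illustrative sketch (not upstream code; `evens` is an invented name): the
// hidden type recorded by `visit_opaque_types` is the concrete type inferred
// for an `impl Trait` return from the function body.
//
// ```rust
// fn evens(limit: u32) -> impl Iterator<Item = u32> {
//     (0..limit).filter(|n| n % 2 == 0) // hidden type: `Filter<Range<u32>, {closure}>`
// }
//
// fn main() {
//     assert_eq!(evens(7).collect::<Vec<_>>(), vec![0, 2, 4, 6]);
// }
// ```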
+ if let Some(def) = + self.fcx.typeck_results.borrow_mut().type_dependent_defs_mut().remove(hir_id) + { + self.typeck_results.type_dependent_defs_mut().insert(hir_id, def); + } + + // Resolve any borrowings for the node with id `node_id` + self.visit_adjustments(span, hir_id); + + // Resolve the type of the node with id `node_id` + let n_ty = self.fcx.node_ty(hir_id); + let n_ty = self.resolve(n_ty, &span); + self.write_ty_to_typeck_results(hir_id, n_ty); + debug!(?n_ty); + + // Resolve any substitutions + if let Some(substs) = self.fcx.typeck_results.borrow().node_substs_opt(hir_id) { + let substs = self.resolve(substs, &span); + debug!("write_substs_to_tcx({:?}, {:?})", hir_id, substs); + assert!(!substs.needs_infer() && !substs.has_placeholders()); + self.typeck_results.node_substs_mut().insert(hir_id, substs); + } + } + + #[instrument(skip(self, span), level = "debug")] + fn visit_adjustments(&mut self, span: Span, hir_id: hir::HirId) { + let adjustment = self.fcx.typeck_results.borrow_mut().adjustments_mut().remove(hir_id); + match adjustment { + None => { + debug!("no adjustments for node"); + } + + Some(adjustment) => { + let resolved_adjustment = self.resolve(adjustment, &span); + debug!(?resolved_adjustment); + self.typeck_results.adjustments_mut().insert(hir_id, resolved_adjustment); + } + } + } + + #[instrument(skip(self, span), level = "debug")] + fn visit_pat_adjustments(&mut self, span: Span, hir_id: hir::HirId) { + let adjustment = self.fcx.typeck_results.borrow_mut().pat_adjustments_mut().remove(hir_id); + match adjustment { + None => { + debug!("no pat_adjustments for node"); + } + + Some(adjustment) => { + let resolved_adjustment = self.resolve(adjustment, &span); + debug!(?resolved_adjustment); + self.typeck_results.pat_adjustments_mut().insert(hir_id, resolved_adjustment); + } + } + } + + fn visit_liberated_fn_sigs(&mut self) { + let fcx_typeck_results = self.fcx.typeck_results.borrow(); + assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner); + let common_hir_owner = fcx_typeck_results.hir_owner; + + for (&local_id, &fn_sig) in fcx_typeck_results.liberated_fn_sigs().iter() { + let hir_id = hir::HirId { owner: common_hir_owner, local_id }; + let fn_sig = self.resolve(fn_sig, &hir_id); + self.typeck_results.liberated_fn_sigs_mut().insert(hir_id, fn_sig); + } + } + + fn visit_fru_field_types(&mut self) { + let fcx_typeck_results = self.fcx.typeck_results.borrow(); + assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner); + let common_hir_owner = fcx_typeck_results.hir_owner; + + for (&local_id, ftys) in fcx_typeck_results.fru_field_types().iter() { + let hir_id = hir::HirId { owner: common_hir_owner, local_id }; + let ftys = self.resolve(ftys.clone(), &hir_id); + self.typeck_results.fru_field_types_mut().insert(hir_id, ftys); + } + } + + fn resolve(&mut self, x: T, span: &dyn Locatable) -> T + where + T: TypeFoldable<'tcx>, + { + let mut resolver = Resolver::new(self.fcx, span, self.body); + let x = x.fold_with(&mut resolver); + if cfg!(debug_assertions) && x.needs_infer() { + span_bug!(span.to_span(self.fcx.tcx), "writeback: `{:?}` has inference variables", x); + } + + // We may have introduced e.g. `ty::Error`, if inference failed, make sure + // to mark the `TypeckResults` as tainted in that case, so that downstream + // users of the typeck results don't produce extra errors, or worse, ICEs. + if resolver.replaced_with_error { + // FIXME(eddyb) keep track of `ErrorGuaranteed` from where the error was emitted. 
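// Illustrative sketch: when the resolver above still finds an inference
// variable at writeback time, the user-visible outcome is E0282; an annotation
// pins the variable down.
//
// ```rust
// fn main() {
//     // let xs = Vec::new();       // error[E0282]: type annotations needed
//     let xs: Vec<u8> = Vec::new();  // OK: the element type is pinned down
//     let ys = vec![1u8, 2, 3];      // OK: inferred from the literals
//     assert_eq!(xs.len() + ys.len(), 3);
// }
// ```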
+ self.typeck_results.tainted_by_errors = + Some(ErrorGuaranteed::unchecked_claim_error_was_emitted()); + } + + x + } +} + +pub(crate) trait Locatable { + fn to_span(&self, tcx: TyCtxt<'_>) -> Span; +} + +impl Locatable for Span { + fn to_span(&self, _: TyCtxt<'_>) -> Span { + *self + } +} + +impl Locatable for hir::HirId { + fn to_span(&self, tcx: TyCtxt<'_>) -> Span { + tcx.hir().span(*self) + } +} + +/// The Resolver. This is the type folding engine that detects +/// unresolved types and so forth. +struct Resolver<'cx, 'tcx> { + tcx: TyCtxt<'tcx>, + infcx: &'cx InferCtxt<'cx, 'tcx>, + span: &'cx dyn Locatable, + body: &'tcx hir::Body<'tcx>, + + /// Set to `true` if any `Ty` or `ty::Const` had to be replaced with an `Error`. + replaced_with_error: bool, +} + +impl<'cx, 'tcx> Resolver<'cx, 'tcx> { + fn new( + fcx: &'cx FnCtxt<'cx, 'tcx>, + span: &'cx dyn Locatable, + body: &'tcx hir::Body<'tcx>, + ) -> Resolver<'cx, 'tcx> { + Resolver { tcx: fcx.tcx, infcx: fcx, span, body, replaced_with_error: false } + } + + fn report_type_error(&self, t: Ty<'tcx>) { + if !self.tcx.sess.has_errors().is_some() { + self.infcx + .emit_inference_failure_err( + Some(self.body.id()), + self.span.to_span(self.tcx), + t.into(), + E0282, + false, + ) + .emit(); + } + } + + fn report_const_error(&self, c: ty::Const<'tcx>) { + if self.tcx.sess.has_errors().is_none() { + self.infcx + .emit_inference_failure_err( + Some(self.body.id()), + self.span.to_span(self.tcx), + c.into(), + E0282, + false, + ) + .emit(); + } + } +} + +struct EraseEarlyRegions<'tcx> { + tcx: TyCtxt<'tcx>, +} + +impl<'tcx> TypeFolder<'tcx> for EraseEarlyRegions<'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'tcx> { + self.tcx + } + fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { + if ty.has_type_flags(ty::TypeFlags::HAS_FREE_REGIONS) { + ty.super_fold_with(self) + } else { + ty + } + } + fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { + if r.is_late_bound() { r } else { self.tcx.lifetimes.re_erased } + } +} + +impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> { + fn tcx<'a>(&'a self) -> TyCtxt<'tcx> { + self.tcx + } + + fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { + match self.infcx.fully_resolve(t) { + Ok(t) => { + // Do not anonymize late-bound regions + // (e.g. keep `for<'a>` named `for<'a>`). + // This allows NLL to generate error messages that + // refer to the higher-ranked lifetime names written by the user. + EraseEarlyRegions { tcx: self.tcx }.fold_ty(t) + } + Err(_) => { + debug!("Resolver::fold_ty: input type `{:?}` not fully resolvable", t); + self.report_type_error(t); + self.replaced_with_error = true; + self.tcx().ty_error() + } + } + } + + fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { + debug_assert!(!r.is_late_bound(), "Should not be resolving bound region."); + self.tcx.lifetimes.re_erased + } + + fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> { + match self.infcx.fully_resolve(ct) { + Ok(ct) => self.tcx.erase_regions(ct), + Err(_) => { + debug!("Resolver::fold_const: input const `{:?}` not fully resolvable", ct); + self.report_const_error(ct); + self.replaced_with_error = true; + self.tcx().const_error(ct.ty()) + } + } + } +} + +/////////////////////////////////////////////////////////////////////////// +// During type check, we store promises with the result of trait +// lookup rather than the actual results (because the results are not +// necessarily available immediately). These routines unwind the +// promises. 
It is expected that we will have already reported any +// errors that may be encountered, so if the promises store an error, +// a dummy result is returned. diff --git a/compiler/rustc_typeck/src/check_unused.rs b/compiler/rustc_typeck/src/check_unused.rs new file mode 100644 index 000000000..4a3cfa1ca --- /dev/null +++ b/compiler/rustc_typeck/src/check_unused.rs @@ -0,0 +1,196 @@ +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_errors::Applicability; +use rustc_hir as hir; +use rustc_hir::def::DefKind; +use rustc_hir::def_id::{DefId, LocalDefId}; +use rustc_middle::ty::TyCtxt; +use rustc_session::lint; +use rustc_span::{Span, Symbol}; + +pub fn check_crate(tcx: TyCtxt<'_>) { + let mut used_trait_imports: FxHashSet = FxHashSet::default(); + + for item_def_id in tcx.hir().body_owners() { + let imports = tcx.used_trait_imports(item_def_id); + debug!("GatherVisitor: item_def_id={:?} with imports {:#?}", item_def_id, imports); + used_trait_imports.extend(imports.iter()); + } + + for &id in tcx.maybe_unused_trait_imports(()) { + debug_assert_eq!(tcx.def_kind(id), DefKind::Use); + if tcx.visibility(id).is_public() { + continue; + } + if used_trait_imports.contains(&id) { + continue; + } + let item = tcx.hir().expect_item(id); + if item.span.is_dummy() { + continue; + } + let hir::ItemKind::Use(path, _) = item.kind else { unreachable!() }; + tcx.struct_span_lint_hir(lint::builtin::UNUSED_IMPORTS, item.hir_id(), path.span, |lint| { + let msg = if let Ok(snippet) = tcx.sess.source_map().span_to_snippet(path.span) { + format!("unused import: `{}`", snippet) + } else { + "unused import".to_owned() + }; + lint.build(&msg).emit(); + }); + } + + unused_crates_lint(tcx); +} + +fn unused_crates_lint(tcx: TyCtxt<'_>) { + let lint = lint::builtin::UNUSED_EXTERN_CRATES; + + // Collect first the crates that are completely unused. These we + // can always suggest removing (no matter which edition we are + // in). + let unused_extern_crates: FxHashMap = tcx + .maybe_unused_extern_crates(()) + .iter() + .filter(|&&(def_id, _)| { + // The `def_id` here actually was calculated during resolution (at least + // at the time of this writing) and is being shipped to us via a side + // channel of the tcx. There may have been extra expansion phases, + // however, which ended up removing the `def_id` *after* expansion. + // + // As a result we need to verify that `def_id` is indeed still valid for + // our AST and actually present in the HIR map. If it's not there then + // there's safely nothing to warn about, and otherwise we carry on with + // our execution. + // + // Note that if we carry through to the `extern_mod_stmt_cnum` query + // below it'll cause a panic because `def_id` is actually bogus at this + // point in time otherwise. + if tcx.hir().find(tcx.hir().local_def_id_to_hir_id(def_id)).is_none() { + return false; + } + true + }) + .filter(|&&(def_id, _)| { + tcx.extern_mod_stmt_cnum(def_id).map_or(true, |cnum| { + !tcx.is_compiler_builtins(cnum) + && !tcx.is_panic_runtime(cnum) + && !tcx.has_global_allocator(cnum) + && !tcx.has_panic_handler(cnum) + }) + }) + .cloned() + .collect(); + + // Collect all the extern crates (in a reliable order). 
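The `check_crate` pass above is essentially a set computation: union the `used_trait_imports` of every body owner, then lint every candidate from `maybe_unused_trait_imports` that is missing from the union (skipping public and dummy-spanned items). A toy sketch of that bookkeeping with plain hash sets, using an illustrative `ImportId` alias in place of `LocalDefId`:

```rust
use std::collections::HashSet;

// Illustrative stand-ins for import ids and per-body usage data.
type ImportId = u32;

struct Body {
    used_trait_imports: Vec<ImportId>,
}

// Returns the imports that should be linted as unused.
fn unused_trait_imports(bodies: &[Body], candidates: &[ImportId]) -> Vec<ImportId> {
    // Union of the used-import sets of all body owners (what `check_crate` builds).
    let used: HashSet<ImportId> =
        bodies.iter().flat_map(|b| b.used_trait_imports.iter().copied()).collect();

    // Everything resolve considered "maybe unused" that typeck never used.
    candidates.iter().copied().filter(|id| !used.contains(id)).collect()
}

fn main() {
    let bodies = [Body { used_trait_imports: vec![1] }, Body { used_trait_imports: vec![3] }];
    // Imports 1 and 3 are used somewhere; 2 is not and would get the UNUSED_IMPORTS lint.
    assert_eq!(unused_trait_imports(&bodies, &[1, 2, 3]), vec![2]);
}
```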
+ let mut crates_to_lint = vec![]; + + for id in tcx.hir().items() { + if matches!(tcx.def_kind(id.def_id), DefKind::ExternCrate) { + let item = tcx.hir().item(id); + if let hir::ItemKind::ExternCrate(orig_name) = item.kind { + crates_to_lint.push(ExternCrateToLint { + def_id: item.def_id.to_def_id(), + span: item.span, + orig_name, + warn_if_unused: !item.ident.as_str().starts_with('_'), + }); + } + } + } + + let extern_prelude = &tcx.resolutions(()).extern_prelude; + + for extern_crate in &crates_to_lint { + let def_id = extern_crate.def_id.expect_local(); + let item = tcx.hir().expect_item(def_id); + + // If the crate is fully unused, we suggest removing it altogether. + // We do this in any edition. + if extern_crate.warn_if_unused { + if let Some(&span) = unused_extern_crates.get(&def_id) { + let id = tcx.hir().local_def_id_to_hir_id(def_id); + tcx.struct_span_lint_hir(lint, id, span, |lint| { + // Removal suggestion span needs to include attributes (Issue #54400) + let span_with_attrs = tcx + .hir() + .attrs(id) + .iter() + .map(|attr| attr.span) + .fold(span, |acc, attr_span| acc.to(attr_span)); + + lint.build("unused extern crate") + .span_suggestion_short( + span_with_attrs, + "remove it", + "", + Applicability::MachineApplicable, + ) + .emit(); + }); + continue; + } + } + + // If we are not in Rust 2018 edition, then we don't make any further + // suggestions. + if !tcx.sess.rust_2018() { + continue; + } + + // If the extern crate isn't in the extern prelude, + // there is no way it can be written as a `use`. + let orig_name = extern_crate.orig_name.unwrap_or(item.ident.name); + if !extern_prelude.get(&orig_name).map_or(false, |from_item| !from_item) { + continue; + } + + // If the extern crate is renamed, then we cannot suggest replacing it with a use as this + // would not insert the new name into the prelude, where other imports in the crate may be + // expecting it. + if extern_crate.orig_name.is_some() { + continue; + } + + let id = tcx.hir().local_def_id_to_hir_id(def_id); + // If the extern crate has any attributes, they may have funky + // semantics we can't faithfully represent using `use` (most + // notably `#[macro_use]`). Ignore it. + if !tcx.hir().attrs(id).is_empty() { + continue; + } + tcx.struct_span_lint_hir(lint, id, extern_crate.span, |lint| { + // Otherwise, we can convert it into a `use` of some kind. + let base_replacement = match extern_crate.orig_name { + Some(orig_name) => format!("use {} as {};", orig_name, item.ident.name), + None => format!("use {};", item.ident.name), + }; + let vis = tcx.sess.source_map().span_to_snippet(item.vis_span).unwrap_or_default(); + let add_vis = |to| if vis.is_empty() { to } else { format!("{} {}", vis, to) }; + lint.build("`extern crate` is not idiomatic in the new edition") + .span_suggestion_short( + extern_crate.span, + &format!("convert it to a `{}`", add_vis("use".to_string())), + add_vis(base_replacement), + Applicability::MachineApplicable, + ) + .emit(); + }) + } +} + +struct ExternCrateToLint { + /// `DefId` of the extern crate + def_id: DefId, + + /// span from the item + span: Span, + + /// if `Some`, then this is renamed (`extern crate orig_name as + /// crate_name`), and -- perhaps surprisingly -- this stores the + /// *original* name (`item.name` will contain the new name) + orig_name: Option, + + /// if `false`, the original name started with `_`, so we shouldn't lint + /// about it going unused (but we should still emit idiom lints). 
+ warn_if_unused: bool, +} diff --git a/compiler/rustc_typeck/src/coherence/builtin.rs b/compiler/rustc_typeck/src/coherence/builtin.rs new file mode 100644 index 000000000..50946cc1d --- /dev/null +++ b/compiler/rustc_typeck/src/coherence/builtin.rs @@ -0,0 +1,603 @@ +//! Check properties that are required by built-in traits and set +//! up data structures required by type-checking/codegen. + +use crate::errors::{CopyImplOnNonAdt, CopyImplOnTypeWithDtor, DropImplOnWrongItem}; +use rustc_errors::{struct_span_err, MultiSpan}; +use rustc_hir as hir; +use rustc_hir::def_id::{DefId, LocalDefId}; +use rustc_hir::lang_items::LangItem; +use rustc_hir::ItemKind; +use rustc_infer::infer; +use rustc_infer::infer::outlives::env::OutlivesEnvironment; +use rustc_infer::infer::TyCtxtInferExt; +use rustc_middle::ty::adjustment::CoerceUnsizedInfo; +use rustc_middle::ty::{self, suggest_constraining_type_params, Ty, TyCtxt, TypeVisitable}; +use rustc_trait_selection::traits::error_reporting::InferCtxtExt; +use rustc_trait_selection::traits::misc::{can_type_implement_copy, CopyImplementationError}; +use rustc_trait_selection::traits::predicate_for_trait_def; +use rustc_trait_selection::traits::{self, ObligationCause, TraitEngine, TraitEngineExt}; +use std::collections::BTreeMap; + +pub fn check_trait(tcx: TyCtxt<'_>, trait_def_id: DefId) { + let lang_items = tcx.lang_items(); + Checker { tcx, trait_def_id } + .check(lang_items.drop_trait(), visit_implementation_of_drop) + .check(lang_items.copy_trait(), visit_implementation_of_copy) + .check(lang_items.coerce_unsized_trait(), visit_implementation_of_coerce_unsized) + .check(lang_items.dispatch_from_dyn_trait(), visit_implementation_of_dispatch_from_dyn); +} + +struct Checker<'tcx> { + tcx: TyCtxt<'tcx>, + trait_def_id: DefId, +} + +impl<'tcx> Checker<'tcx> { + fn check(&self, trait_def_id: Option, mut f: F) -> &Self + where + F: FnMut(TyCtxt<'tcx>, LocalDefId), + { + if Some(self.trait_def_id) == trait_def_id { + for &impl_def_id in self.tcx.hir().trait_impls(self.trait_def_id) { + f(self.tcx, impl_def_id); + } + } + self + } +} + +fn visit_implementation_of_drop(tcx: TyCtxt<'_>, impl_did: LocalDefId) { + // Destructors only work on nominal types. + if let ty::Adt(..) | ty::Error(_) = tcx.type_of(impl_did).kind() { + return; + } + + let sp = match tcx.hir().expect_item(impl_did).kind { + ItemKind::Impl(ref impl_) => impl_.self_ty.span, + _ => bug!("expected Drop impl item"), + }; + + tcx.sess.emit_err(DropImplOnWrongItem { span: sp }); +} + +fn visit_implementation_of_copy(tcx: TyCtxt<'_>, impl_did: LocalDefId) { + debug!("visit_implementation_of_copy: impl_did={:?}", impl_did); + + let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_did); + + let self_type = tcx.type_of(impl_did); + debug!("visit_implementation_of_copy: self_type={:?} (bound)", self_type); + + let span = tcx.hir().span(impl_hir_id); + let param_env = tcx.param_env(impl_did); + assert!(!self_type.has_escaping_bound_vars()); + + debug!("visit_implementation_of_copy: self_type={:?} (free)", self_type); + + let cause = traits::ObligationCause::misc(span, impl_hir_id); + match can_type_implement_copy(tcx, param_env, self_type, cause) { + Ok(()) => {} + Err(CopyImplementationError::InfrigingFields(fields)) => { + let item = tcx.hir().expect_item(impl_did); + let span = if let ItemKind::Impl(hir::Impl { of_trait: Some(ref tr), .. 
}) = item.kind { + tr.path.span + } else { + span + }; + + let mut err = struct_span_err!( + tcx.sess, + span, + E0204, + "the trait `Copy` may not be implemented for this type" + ); + + // We'll try to suggest constraining type parameters to fulfill the requirements of + // their `Copy` implementation. + let mut errors: BTreeMap<_, Vec<_>> = Default::default(); + let mut bounds = vec![]; + + for (field, ty) in fields { + let field_span = tcx.def_span(field.did); + let field_ty_span = match tcx.hir().get_if_local(field.did) { + Some(hir::Node::Field(field_def)) => field_def.ty.span, + _ => field_span, + }; + err.span_label(field_span, "this field does not implement `Copy`"); + // Spin up a new FulfillmentContext, so we can get the _precise_ reason + // why this field does not implement Copy. This is useful because sometimes + // it is not immediately clear why Copy is not implemented for a field, since + // all we point at is the field itself. + tcx.infer_ctxt().ignoring_regions().enter(|infcx| { + let mut fulfill_cx = >::new(tcx); + fulfill_cx.register_bound( + &infcx, + param_env, + ty, + tcx.lang_items().copy_trait().unwrap(), + traits::ObligationCause::dummy_with_span(field_ty_span), + ); + for error in fulfill_cx.select_all_or_error(&infcx) { + let error_predicate = error.obligation.predicate; + // Only note if it's not the root obligation, otherwise it's trivial and + // should be self-explanatory (i.e. a field literally doesn't implement Copy). + + // FIXME: This error could be more descriptive, especially if the error_predicate + // contains a foreign type or if it's a deeply nested type... + if error_predicate != error.root_obligation.predicate { + errors + .entry((ty.to_string(), error_predicate.to_string())) + .or_default() + .push(error.obligation.cause.span); + } + if let ty::PredicateKind::Trait(ty::TraitPredicate { + trait_ref, + polarity: ty::ImplPolarity::Positive, + .. + }) = error_predicate.kind().skip_binder() + { + let ty = trait_ref.self_ty(); + if let ty::Param(_) = ty.kind() { + bounds.push(( + format!("{ty}"), + trait_ref.print_only_trait_path().to_string(), + Some(trait_ref.def_id), + )); + } + } + } + }); + } + for ((ty, error_predicate), spans) in errors { + let span: MultiSpan = spans.into(); + err.span_note( + span, + &format!("the `Copy` impl for `{}` requires that `{}`", ty, error_predicate), + ); + } + suggest_constraining_type_params( + tcx, + tcx.hir().get_generics(impl_did).expect("impls always have generics"), + &mut err, + bounds.iter().map(|(param, constraint, def_id)| { + (param.as_str(), constraint.as_str(), *def_id) + }), + ); + err.emit(); + } + Err(CopyImplementationError::NotAnAdt) => { + let item = tcx.hir().expect_item(impl_did); + let span = + if let ItemKind::Impl(ref impl_) = item.kind { impl_.self_ty.span } else { span }; + + tcx.sess.emit_err(CopyImplOnNonAdt { span }); + } + Err(CopyImplementationError::HasDestructor) => { + tcx.sess.emit_err(CopyImplOnTypeWithDtor { span }); + } + } +} + +fn visit_implementation_of_coerce_unsized<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) { + debug!("visit_implementation_of_coerce_unsized: impl_did={:?}", impl_did); + + // Just compute this for the side-effects, in particular reporting + // errors; other parts of the code may demand it for the info of + // course. 
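`visit_implementation_of_copy` above is the machinery behind E0204: when a field is not `Copy`, it points at that field and uses a per-field fulfillment context to explain why. For reference, a minimal program that this check is expected to reject (it is intentionally not valid code):

```rust
// A field type that is not `Copy`.
#[derive(Clone)]
struct NotCopy(String);

#[derive(Clone)]
struct Foo {
    field: NotCopy,
}

// error[E0204]: the trait `Copy` may not be implemented for this type
// with the label "this field does not implement `Copy`" on `field`.
impl Copy for Foo {}

fn main() {}
```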
+ let span = tcx.def_span(impl_did); + tcx.at(span).coerce_unsized_info(impl_did); +} + +fn visit_implementation_of_dispatch_from_dyn<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) { + debug!("visit_implementation_of_dispatch_from_dyn: impl_did={:?}", impl_did); + + let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_did); + let span = tcx.hir().span(impl_hir_id); + + let dispatch_from_dyn_trait = tcx.require_lang_item(LangItem::DispatchFromDyn, Some(span)); + + let source = tcx.type_of(impl_did); + assert!(!source.has_escaping_bound_vars()); + let target = { + let trait_ref = tcx.impl_trait_ref(impl_did).unwrap(); + assert_eq!(trait_ref.def_id, dispatch_from_dyn_trait); + + trait_ref.substs.type_at(1) + }; + + debug!("visit_implementation_of_dispatch_from_dyn: {:?} -> {:?}", source, target); + + let param_env = tcx.param_env(impl_did); + + let create_err = |msg: &str| struct_span_err!(tcx.sess, span, E0378, "{}", msg); + + tcx.infer_ctxt().enter(|infcx| { + let cause = ObligationCause::misc(span, impl_hir_id); + + use rustc_type_ir::sty::TyKind::*; + match (source.kind(), target.kind()) { + (&Ref(r_a, _, mutbl_a), Ref(r_b, _, mutbl_b)) + if infcx.at(&cause, param_env).eq(r_a, *r_b).is_ok() && mutbl_a == *mutbl_b => {} + (&RawPtr(tm_a), &RawPtr(tm_b)) if tm_a.mutbl == tm_b.mutbl => (), + (&Adt(def_a, substs_a), &Adt(def_b, substs_b)) + if def_a.is_struct() && def_b.is_struct() => + { + if def_a != def_b { + let source_path = tcx.def_path_str(def_a.did()); + let target_path = tcx.def_path_str(def_b.did()); + + create_err(&format!( + "the trait `DispatchFromDyn` may only be implemented \ + for a coercion between structures with the same \ + definition; expected `{}`, found `{}`", + source_path, target_path, + )) + .emit(); + + return; + } + + if def_a.repr().c() || def_a.repr().packed() { + create_err( + "structs implementing `DispatchFromDyn` may not have \ + `#[repr(packed)]` or `#[repr(C)]`", + ) + .emit(); + } + + let fields = &def_a.non_enum_variant().fields; + + let coerced_fields = fields + .iter() + .filter(|field| { + let ty_a = field.ty(tcx, substs_a); + let ty_b = field.ty(tcx, substs_b); + + if let Ok(layout) = tcx.layout_of(param_env.and(ty_a)) { + if layout.is_zst() && layout.align.abi.bytes() == 1 { + // ignore ZST fields with alignment of 1 byte + return false; + } + } + + if let Ok(ok) = infcx.at(&cause, param_env).eq(ty_a, ty_b) { + if ok.obligations.is_empty() { + create_err( + "the trait `DispatchFromDyn` may only be implemented \ + for structs containing the field being coerced, \ + ZST fields with 1 byte alignment, and nothing else", + ) + .note(&format!( + "extra field `{}` of type `{}` is not allowed", + field.name, ty_a, + )) + .emit(); + + return false; + } + } + + return true; + }) + .collect::>(); + + if coerced_fields.is_empty() { + create_err( + "the trait `DispatchFromDyn` may only be implemented \ + for a coercion between structures with a single field \ + being coerced, none found", + ) + .emit(); + } else if coerced_fields.len() > 1 { + create_err( + "implementing the `DispatchFromDyn` trait requires multiple coercions", + ) + .note( + "the trait `DispatchFromDyn` may only be implemented \ + for a coercion between structures with a single field \ + being coerced", + ) + .note(&format!( + "currently, {} fields need coercions: {}", + coerced_fields.len(), + coerced_fields + .iter() + .map(|field| { + format!( + "`{}` (`{}` to `{}`)", + field.name, + field.ty(tcx, substs_a), + field.ty(tcx, substs_b), + ) + }) + .collect::>() + .join(", ") + )) + .emit(); + 
} else { + let mut fulfill_cx = >::new(infcx.tcx); + + for field in coerced_fields { + let predicate = predicate_for_trait_def( + tcx, + param_env, + cause.clone(), + dispatch_from_dyn_trait, + 0, + field.ty(tcx, substs_a), + &[field.ty(tcx, substs_b).into()], + ); + + fulfill_cx.register_predicate_obligation(&infcx, predicate); + } + + // Check that all transitive obligations are satisfied. + let errors = fulfill_cx.select_all_or_error(&infcx); + if !errors.is_empty() { + infcx.report_fulfillment_errors(&errors, None, false); + } + + // Finally, resolve all regions. + let outlives_env = OutlivesEnvironment::new(param_env); + infcx.check_region_obligations_and_report_errors(impl_did, &outlives_env); + } + } + _ => { + create_err( + "the trait `DispatchFromDyn` may only be implemented \ + for a coercion between structures", + ) + .emit(); + } + } + }) +} + +pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: DefId) -> CoerceUnsizedInfo { + debug!("compute_coerce_unsized_info(impl_did={:?})", impl_did); + + // this provider should only get invoked for local def-ids + let impl_did = impl_did.expect_local(); + let span = tcx.def_span(impl_did); + + let coerce_unsized_trait = tcx.require_lang_item(LangItem::CoerceUnsized, Some(span)); + + let unsize_trait = tcx.lang_items().require(LangItem::Unsize).unwrap_or_else(|err| { + tcx.sess.fatal(&format!("`CoerceUnsized` implementation {}", err)); + }); + + let source = tcx.type_of(impl_did); + let trait_ref = tcx.impl_trait_ref(impl_did).unwrap(); + assert_eq!(trait_ref.def_id, coerce_unsized_trait); + let target = trait_ref.substs.type_at(1); + debug!("visit_implementation_of_coerce_unsized: {:?} -> {:?} (bound)", source, target); + + let param_env = tcx.param_env(impl_did); + assert!(!source.has_escaping_bound_vars()); + + let err_info = CoerceUnsizedInfo { custom_kind: None }; + + debug!("visit_implementation_of_coerce_unsized: {:?} -> {:?} (free)", source, target); + + tcx.infer_ctxt().enter(|infcx| { + let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_did); + let cause = ObligationCause::misc(span, impl_hir_id); + let check_mutbl = |mt_a: ty::TypeAndMut<'tcx>, + mt_b: ty::TypeAndMut<'tcx>, + mk_ptr: &dyn Fn(Ty<'tcx>) -> Ty<'tcx>| { + if (mt_a.mutbl, mt_b.mutbl) == (hir::Mutability::Not, hir::Mutability::Mut) { + infcx + .report_mismatched_types( + &cause, + mk_ptr(mt_b.ty), + target, + ty::error::TypeError::Mutability, + ) + .emit(); + } + (mt_a.ty, mt_b.ty, unsize_trait, None) + }; + let (source, target, trait_def_id, kind) = match (source.kind(), target.kind()) { + (&ty::Ref(r_a, ty_a, mutbl_a), &ty::Ref(r_b, ty_b, mutbl_b)) => { + infcx.sub_regions(infer::RelateObjectBound(span), r_b, r_a); + let mt_a = ty::TypeAndMut { ty: ty_a, mutbl: mutbl_a }; + let mt_b = ty::TypeAndMut { ty: ty_b, mutbl: mutbl_b }; + check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ref(r_b, ty)) + } + + (&ty::Ref(_, ty_a, mutbl_a), &ty::RawPtr(mt_b)) => { + let mt_a = ty::TypeAndMut { ty: ty_a, mutbl: mutbl_a }; + check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty)) + } + + (&ty::RawPtr(mt_a), &ty::RawPtr(mt_b)) => { + check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty)) + } + + (&ty::Adt(def_a, substs_a), &ty::Adt(def_b, substs_b)) + if def_a.is_struct() && def_b.is_struct() => + { + if def_a != def_b { + let source_path = tcx.def_path_str(def_a.did()); + let target_path = tcx.def_path_str(def_b.did()); + struct_span_err!( + tcx.sess, + span, + E0377, + "the trait `CoerceUnsized` may only be implemented \ + for a coercion between structures with the same \ + 
definition; expected `{}`, found `{}`", + source_path, + target_path + ) + .emit(); + return err_info; + } + + // Here we are considering a case of converting + // `S` to S`. As an example, let's imagine a struct `Foo`, + // which acts like a pointer to `U`, but carries along some extra data of type `T`: + // + // struct Foo { + // extra: T, + // ptr: *mut U, + // } + // + // We might have an impl that allows (e.g.) `Foo` to be unsized + // to `Foo`. That impl would look like: + // + // impl, V> CoerceUnsized> for Foo {} + // + // Here `U = [i32; 3]` and `V = [i32]`. At runtime, + // when this coercion occurs, we would be changing the + // field `ptr` from a thin pointer of type `*mut [i32; + // 3]` to a fat pointer of type `*mut [i32]` (with + // extra data `3`). **The purpose of this check is to + // make sure that we know how to do this conversion.** + // + // To check if this impl is legal, we would walk down + // the fields of `Foo` and consider their types with + // both substitutes. We are looking to find that + // exactly one (non-phantom) field has changed its + // type, which we will expect to be the pointer that + // is becoming fat (we could probably generalize this + // to multiple thin pointers of the same type becoming + // fat, but we don't). In this case: + // + // - `extra` has type `T` before and type `T` after + // - `ptr` has type `*mut U` before and type `*mut V` after + // + // Since just one field changed, we would then check + // that `*mut U: CoerceUnsized<*mut V>` is implemented + // (in other words, that we know how to do this + // conversion). This will work out because `U: + // Unsize`, and we have a builtin rule that `*mut + // U` can be coerced to `*mut V` if `U: Unsize`. + let fields = &def_a.non_enum_variant().fields; + let diff_fields = fields + .iter() + .enumerate() + .filter_map(|(i, f)| { + let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b)); + + if tcx.type_of(f.did).is_phantom_data() { + // Ignore PhantomData fields + return None; + } + + // Ignore fields that aren't changed; it may + // be that we could get away with subtyping or + // something more accepting, but we use + // equality because we want to be able to + // perform this check without computing + // variance where possible. (This is because + // we may have to evaluate constraint + // expressions in the course of execution.) + // See e.g., #41936. + if let Ok(ok) = infcx.at(&cause, param_env).eq(a, b) { + if ok.obligations.is_empty() { + return None; + } + } + + // Collect up all fields that were significantly changed + // i.e., those that contain T in coerce_unsized T -> U + Some((i, a, b)) + }) + .collect::>(); + + if diff_fields.is_empty() { + struct_span_err!( + tcx.sess, + span, + E0374, + "the trait `CoerceUnsized` may only be implemented \ + for a coercion between structures with one field \ + being coerced, none found" + ) + .emit(); + return err_info; + } else if diff_fields.len() > 1 { + let item = tcx.hir().expect_item(impl_did); + let span = if let ItemKind::Impl(hir::Impl { of_trait: Some(ref t), .. 
}) = + item.kind + { + t.path.span + } else { + tcx.def_span(impl_did) + }; + + struct_span_err!( + tcx.sess, + span, + E0375, + "implementing the trait \ + `CoerceUnsized` requires multiple \ + coercions" + ) + .note( + "`CoerceUnsized` may only be implemented for \ + a coercion between structures with one field being coerced", + ) + .note(&format!( + "currently, {} fields need coercions: {}", + diff_fields.len(), + diff_fields + .iter() + .map(|&(i, a, b)| { + format!("`{}` (`{}` to `{}`)", fields[i].name, a, b) + }) + .collect::>() + .join(", ") + )) + .span_label(span, "requires multiple coercions") + .emit(); + return err_info; + } + + let (i, a, b) = diff_fields[0]; + let kind = ty::adjustment::CustomCoerceUnsized::Struct(i); + (a, b, coerce_unsized_trait, Some(kind)) + } + + _ => { + struct_span_err!( + tcx.sess, + span, + E0376, + "the trait `CoerceUnsized` may only be implemented \ + for a coercion between structures" + ) + .emit(); + return err_info; + } + }; + + let mut fulfill_cx = >::new(infcx.tcx); + + // Register an obligation for `A: Trait`. + let cause = traits::ObligationCause::misc(span, impl_hir_id); + let predicate = predicate_for_trait_def( + tcx, + param_env, + cause, + trait_def_id, + 0, + source, + &[target.into()], + ); + fulfill_cx.register_predicate_obligation(&infcx, predicate); + + // Check that all transitive obligations are satisfied. + let errors = fulfill_cx.select_all_or_error(&infcx); + if !errors.is_empty() { + infcx.report_fulfillment_errors(&errors, None, false); + } + + // Finally, resolve all regions. + let outlives_env = OutlivesEnvironment::new(param_env); + infcx.check_region_obligations_and_report_errors(impl_did, &outlives_env); + + CoerceUnsizedInfo { custom_kind: kind } + }) +} diff --git a/compiler/rustc_typeck/src/coherence/inherent_impls.rs b/compiler/rustc_typeck/src/coherence/inherent_impls.rs new file mode 100644 index 000000000..52aad636f --- /dev/null +++ b/compiler/rustc_typeck/src/coherence/inherent_impls.rs @@ -0,0 +1,249 @@ +//! The code in this module gathers up all of the inherent impls in +//! the current crate and organizes them in a map. It winds up +//! touching the whole crate and thus must be recomputed completely +//! for any change, but it is very cheap to compute. In practice, most +//! code in the compiler never *directly* requests this map. Instead, +//! it requests the inherent impls specific to some type (via +//! `tcx.inherent_impls(def_id)`). That value, however, +//! is computed by selecting an idea from this table. + +use rustc_errors::struct_span_err; +use rustc_hir as hir; +use rustc_hir::def::DefKind; +use rustc_hir::def_id::{CrateNum, DefId, LocalDefId}; +use rustc_middle::ty::fast_reject::{simplify_type, SimplifiedType, TreatParams}; +use rustc_middle::ty::{self, CrateInherentImpls, Ty, TyCtxt}; +use rustc_span::symbol::sym; +use rustc_span::Span; + +/// On-demand query: yields a map containing all types mapped to their inherent impls. 
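Stepping back from the query plumbing for a moment: the `CoerceUnsized` check above (the long `Foo<T, U>` walkthrough) accepts an impl only when exactly one non-`PhantomData` field changes type between the two substitutions and that field itself coerces. On a nightly compiler, with the unstable `coerce_unsized` and `unsize` features, the accepted shape looks roughly like this (the struct and field names are illustrative):

```rust
#![feature(coerce_unsized, unsize)]

use std::marker::Unsize;
use std::ops::CoerceUnsized;

// A smart-pointer-like struct: `extra` keeps its type, while `ptr` is the one
// field that goes from thin (`*mut [i32; 3]`) to fat (`*mut [i32]`).
struct Foo<T, U: ?Sized> {
    extra: T,
    ptr: *mut U,
}

// Exactly one field changes type (`*mut U` to `*mut V`), and `*mut U` coerces to
// `*mut V` because `U: Unsize<V>`, so the check in `coerce_unsized_info` accepts it.
impl<T, U: ?Sized + Unsize<V>, V: ?Sized> CoerceUnsized<Foo<T, V>> for Foo<T, U> {}

fn main() {
    let mut data = [1, 2, 3];
    let thin: Foo<u8, [i32; 3]> = Foo { extra: 0, ptr: &mut data };
    // The unsizing coercion validated by the code above: `[i32; 3]` unsizes to `[i32]`.
    let _fat: Foo<u8, [i32]> = thin;
}
```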
+pub fn crate_inherent_impls(tcx: TyCtxt<'_>, (): ()) -> CrateInherentImpls { + let mut collect = InherentCollect { tcx, impls_map: Default::default() }; + for id in tcx.hir().items() { + collect.check_item(id); + } + collect.impls_map +} + +pub fn crate_incoherent_impls(tcx: TyCtxt<'_>, (_, simp): (CrateNum, SimplifiedType)) -> &[DefId] { + let crate_map = tcx.crate_inherent_impls(()); + tcx.arena.alloc_from_iter( + crate_map.incoherent_impls.get(&simp).unwrap_or(&Vec::new()).iter().map(|d| d.to_def_id()), + ) +} + +/// On-demand query: yields a vector of the inherent impls for a specific type. +pub fn inherent_impls(tcx: TyCtxt<'_>, ty_def_id: DefId) -> &[DefId] { + let ty_def_id = ty_def_id.expect_local(); + + let crate_map = tcx.crate_inherent_impls(()); + match crate_map.inherent_impls.get(&ty_def_id) { + Some(v) => &v[..], + None => &[], + } +} + +struct InherentCollect<'tcx> { + tcx: TyCtxt<'tcx>, + impls_map: CrateInherentImpls, +} + +const INTO_CORE: &str = "consider moving this inherent impl into `core` if possible"; +const INTO_DEFINING_CRATE: &str = + "consider moving this inherent impl into the crate defining the type if possible"; +const ADD_ATTR_TO_TY: &str = "alternatively add `#[rustc_has_incoherent_inherent_impls]` to the type \ + and `#[rustc_allow_incoherent_impl]` to the relevant impl items"; +const ADD_ATTR: &str = + "alternatively add `#[rustc_allow_incoherent_impl]` to the relevant impl items"; + +impl<'tcx> InherentCollect<'tcx> { + fn check_def_id(&mut self, item: &hir::Item<'_>, self_ty: Ty<'tcx>, def_id: DefId) { + let impl_def_id = item.def_id; + if let Some(def_id) = def_id.as_local() { + // Add the implementation to the mapping from implementation to base + // type def ID, if there is a base type for this implementation and + // the implementation does not have any associated traits. + let vec = self.impls_map.inherent_impls.entry(def_id).or_default(); + vec.push(impl_def_id.to_def_id()); + return; + } + + if self.tcx.features().rustc_attrs { + let hir::ItemKind::Impl(&hir::Impl { items, .. 
}) = item.kind else { + bug!("expected `impl` item: {:?}", item); + }; + + if !self.tcx.has_attr(def_id, sym::rustc_has_incoherent_inherent_impls) { + struct_span_err!( + self.tcx.sess, + item.span, + E0390, + "cannot define inherent `impl` for a type outside of the crate where the type is defined", + ) + .help(INTO_DEFINING_CRATE) + .span_help(item.span, ADD_ATTR_TO_TY) + .emit(); + return; + } + + for impl_item in items { + if !self + .tcx + .has_attr(impl_item.id.def_id.to_def_id(), sym::rustc_allow_incoherent_impl) + { + struct_span_err!( + self.tcx.sess, + item.span, + E0390, + "cannot define inherent `impl` for a type outside of the crate where the type is defined", + ) + .help(INTO_DEFINING_CRATE) + .span_help(impl_item.span, ADD_ATTR) + .emit(); + return; + } + } + + if let Some(simp) = simplify_type(self.tcx, self_ty, TreatParams::AsInfer) { + self.impls_map.incoherent_impls.entry(simp).or_default().push(impl_def_id); + } else { + bug!("unexpected self type: {:?}", self_ty); + } + } else { + struct_span_err!( + self.tcx.sess, + item.span, + E0116, + "cannot define inherent `impl` for a type outside of the crate \ + where the type is defined" + ) + .span_label(item.span, "impl for type defined outside of crate.") + .note("define and implement a trait or new type instead") + .emit(); + } + } + + fn check_primitive_impl( + &mut self, + impl_def_id: LocalDefId, + ty: Ty<'tcx>, + items: &[hir::ImplItemRef], + span: Span, + ) { + if !self.tcx.hir().rustc_coherence_is_core() { + if self.tcx.features().rustc_attrs { + for item in items { + if !self + .tcx + .has_attr(item.id.def_id.to_def_id(), sym::rustc_allow_incoherent_impl) + { + struct_span_err!( + self.tcx.sess, + span, + E0390, + "cannot define inherent `impl` for primitive types outside of `core`", + ) + .help(INTO_CORE) + .span_help(item.span, ADD_ATTR) + .emit(); + return; + } + } + } else { + let mut err = struct_span_err!( + self.tcx.sess, + span, + E0390, + "cannot define inherent `impl` for primitive types", + ); + err.help("consider using an extension trait instead"); + if let ty::Ref(_, subty, _) = ty.kind() { + err.note(&format!( + "you could also try moving the reference to \ + uses of `{}` (such as `self`) within the implementation", + subty + )); + } + err.emit(); + return; + } + } + + if let Some(simp) = simplify_type(self.tcx, ty, TreatParams::AsInfer) { + self.impls_map.incoherent_impls.entry(simp).or_default().push(impl_def_id); + } else { + bug!("unexpected primitive type: {:?}", ty); + } + } + + fn check_item(&mut self, id: hir::ItemId) { + if !matches!(self.tcx.def_kind(id.def_id), DefKind::Impl) { + return; + } + + let item = self.tcx.hir().item(id); + let hir::ItemKind::Impl(hir::Impl { of_trait: None, self_ty: ty, ref items, .. }) = item.kind else { + return; + }; + + let self_ty = self.tcx.type_of(item.def_id); + match *self_ty.kind() { + ty::Adt(def, _) => { + self.check_def_id(item, self_ty, def.did()); + } + ty::Foreign(did) => { + self.check_def_id(item, self_ty, did); + } + ty::Dynamic(data, ..) if data.principal_def_id().is_some() => { + self.check_def_id(item, self_ty, data.principal_def_id().unwrap()); + } + ty::Dynamic(..) => { + struct_span_err!( + self.tcx.sess, + ty.span, + E0785, + "cannot define inherent `impl` for a dyn auto trait" + ) + .span_label(ty.span, "impl requires at least one non-auto trait") + .note("define and implement a new trait or type instead") + .emit(); + } + ty::Bool + | ty::Char + | ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Str + | ty::Array(..) 
+ | ty::Slice(_) + | ty::RawPtr(_) + | ty::Ref(..) + | ty::Never + | ty::FnPtr(_) + | ty::Tuple(..) => self.check_primitive_impl(item.def_id, self_ty, items, ty.span), + ty::Projection(..) | ty::Opaque(..) | ty::Param(_) => { + let mut err = struct_span_err!( + self.tcx.sess, + ty.span, + E0118, + "no nominal type found for inherent implementation" + ); + + err.span_label(ty.span, "impl requires a nominal type") + .note("either implement a trait on it or create a newtype to wrap it instead"); + + err.emit(); + } + ty::FnDef(..) + | ty::Closure(..) + | ty::Generator(..) + | ty::GeneratorWitness(..) + | ty::Bound(..) + | ty::Placeholder(_) + | ty::Infer(_) => { + bug!("unexpected impl self type of impl: {:?} {:?}", item.def_id, self_ty); + } + ty::Error(_) => {} + } + } +} diff --git a/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs b/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs new file mode 100644 index 000000000..03e076bf5 --- /dev/null +++ b/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs @@ -0,0 +1,307 @@ +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_errors::struct_span_err; +use rustc_hir as hir; +use rustc_hir::def::DefKind; +use rustc_hir::def_id::DefId; +use rustc_index::vec::IndexVec; +use rustc_middle::traits::specialization_graph::OverlapMode; +use rustc_middle::ty::{self, TyCtxt}; +use rustc_span::Symbol; +use rustc_trait_selection::traits::{self, SkipLeakCheck}; +use smallvec::SmallVec; +use std::collections::hash_map::Entry; + +pub fn crate_inherent_impls_overlap_check(tcx: TyCtxt<'_>, (): ()) { + let mut inherent_overlap_checker = InherentOverlapChecker { tcx }; + for id in tcx.hir().items() { + inherent_overlap_checker.check_item(id); + } +} + +struct InherentOverlapChecker<'tcx> { + tcx: TyCtxt<'tcx>, +} + +impl<'tcx> InherentOverlapChecker<'tcx> { + /// Checks whether any associated items in impls 1 and 2 share the same identifier and + /// namespace. + fn impls_have_common_items( + &self, + impl_items1: &ty::AssocItems<'_>, + impl_items2: &ty::AssocItems<'_>, + ) -> bool { + let mut impl_items1 = &impl_items1; + let mut impl_items2 = &impl_items2; + + // Performance optimization: iterate over the smaller list + if impl_items1.len() > impl_items2.len() { + std::mem::swap(&mut impl_items1, &mut impl_items2); + } + + for item1 in impl_items1.in_definition_order() { + let collision = impl_items2 + .filter_by_name_unhygienic(item1.name) + .any(|item2| self.compare_hygienically(item1, item2)); + + if collision { + return true; + } + } + + false + } + + fn compare_hygienically(&self, item1: &ty::AssocItem, item2: &ty::AssocItem) -> bool { + // Symbols and namespace match, compare hygienically. 
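The overlap checker whose helpers continue below exists to catch one concrete situation: two inherent impl blocks for the same type defining an item with the same name in the same namespace. A minimal program it is expected to reject with E0592:

```rust
struct S;

impl S {
    fn get(&self) -> u32 { 0 }
}

// error[E0592]: duplicate definitions with name `get`
impl S {
    fn get(&self) -> u32 { 1 }
}

fn main() {}
```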
+ item1.kind.namespace() == item2.kind.namespace() + && item1.ident(self.tcx).normalize_to_macros_2_0() + == item2.ident(self.tcx).normalize_to_macros_2_0() + } + + fn check_for_common_items_in_impls( + &self, + impl1: DefId, + impl2: DefId, + overlap: traits::OverlapResult<'_>, + ) { + let impl_items1 = self.tcx.associated_items(impl1); + let impl_items2 = self.tcx.associated_items(impl2); + + for item1 in impl_items1.in_definition_order() { + let collision = impl_items2 + .filter_by_name_unhygienic(item1.name) + .find(|item2| self.compare_hygienically(item1, item2)); + + if let Some(item2) = collision { + let name = item1.ident(self.tcx).normalize_to_macros_2_0(); + let mut err = struct_span_err!( + self.tcx.sess, + self.tcx.def_span(item1.def_id), + E0592, + "duplicate definitions with name `{}`", + name + ); + err.span_label( + self.tcx.def_span(item1.def_id), + format!("duplicate definitions for `{}`", name), + ); + err.span_label( + self.tcx.def_span(item2.def_id), + format!("other definition for `{}`", name), + ); + + for cause in &overlap.intercrate_ambiguity_causes { + cause.add_intercrate_ambiguity_hint(&mut err); + } + + if overlap.involves_placeholder { + traits::add_placeholder_note(&mut err); + } + + err.emit(); + } + } + } + + fn check_for_overlapping_inherent_impls( + &self, + overlap_mode: OverlapMode, + impl1_def_id: DefId, + impl2_def_id: DefId, + ) { + traits::overlapping_impls( + self.tcx, + impl1_def_id, + impl2_def_id, + // We go ahead and just skip the leak check for + // inherent impls without warning. + SkipLeakCheck::Yes, + overlap_mode, + |overlap| { + self.check_for_common_items_in_impls(impl1_def_id, impl2_def_id, overlap); + false + }, + || true, + ); + } + + fn check_item(&mut self, id: hir::ItemId) { + let def_kind = self.tcx.def_kind(id.def_id); + if !matches!(def_kind, DefKind::Enum | DefKind::Struct | DefKind::Trait | DefKind::Union) { + return; + } + + let impls = self.tcx.inherent_impls(id.def_id); + + // If there is only one inherent impl block, + // there is nothing to overlap check it with + if impls.len() <= 1 { + return; + } + + let overlap_mode = OverlapMode::get(self.tcx, id.def_id.to_def_id()); + + let impls_items = impls + .iter() + .map(|impl_def_id| (impl_def_id, self.tcx.associated_items(*impl_def_id))) + .collect::>(); + + // Perform a O(n^2) algorithm for small n, + // otherwise switch to an allocating algorithm with + // faster asymptotic runtime. + const ALLOCATING_ALGO_THRESHOLD: usize = 500; + if impls.len() < ALLOCATING_ALGO_THRESHOLD { + for (i, &(&impl1_def_id, impl_items1)) in impls_items.iter().enumerate() { + for &(&impl2_def_id, impl_items2) in &impls_items[(i + 1)..] { + if self.impls_have_common_items(impl_items1, impl_items2) { + self.check_for_overlapping_inherent_impls( + overlap_mode, + impl1_def_id, + impl2_def_id, + ); + } + } + } + } else { + // Build a set of connected regions of impl blocks. + // Two impl blocks are regarded as connected if they share + // an item with the same unhygienic identifier. + // After we have assembled the connected regions, + // run the O(n^2) algorithm on each connected region. + // This is advantageous to running the algorithm over the + // entire graph when there are many connected regions. + + rustc_index::newtype_index! { + pub struct RegionId { + ENCODABLE = custom + } + } + struct ConnectedRegion { + idents: SmallVec<[Symbol; 8]>, + impl_blocks: FxHashSet, + } + let mut connected_regions: IndexVec = Default::default(); + // Reverse map from the Symbol to the connected region id. 
+ let mut connected_region_ids = FxHashMap::default(); + + for (i, &(&_impl_def_id, impl_items)) in impls_items.iter().enumerate() { + if impl_items.len() == 0 { + continue; + } + // First obtain a list of existing connected region ids + let mut idents_to_add = SmallVec::<[Symbol; 8]>::new(); + let mut ids = impl_items + .in_definition_order() + .filter_map(|item| { + let entry = connected_region_ids.entry(item.name); + if let Entry::Occupied(e) = &entry { + Some(*e.get()) + } else { + idents_to_add.push(item.name); + None + } + }) + .collect::>(); + // Sort the id list so that the algorithm is deterministic + ids.sort_unstable(); + ids.dedup(); + let ids = ids; + match &ids[..] { + // Create a new connected region + [] => { + let id_to_set = connected_regions.next_index(); + // Update the connected region ids + for ident in &idents_to_add { + connected_region_ids.insert(*ident, id_to_set); + } + connected_regions.insert( + id_to_set, + ConnectedRegion { + idents: idents_to_add, + impl_blocks: std::iter::once(i).collect(), + }, + ); + } + // Take the only id inside the list + &[id_to_set] => { + let region = connected_regions[id_to_set].as_mut().unwrap(); + region.impl_blocks.insert(i); + region.idents.extend_from_slice(&idents_to_add); + // Update the connected region ids + for ident in &idents_to_add { + connected_region_ids.insert(*ident, id_to_set); + } + } + // We have multiple connected regions to merge. + // In the worst case this might add impl blocks + // one by one and can thus be O(n^2) in the size + // of the resulting final connected region, but + // this is no issue as the final step to check + // for overlaps runs in O(n^2) as well. + &[id_to_set, ..] => { + let mut region = connected_regions.remove(id_to_set).unwrap(); + region.impl_blocks.insert(i); + region.idents.extend_from_slice(&idents_to_add); + // Update the connected region ids + for ident in &idents_to_add { + connected_region_ids.insert(*ident, id_to_set); + } + + // Remove other regions from ids. + for &id in ids.iter() { + if id == id_to_set { + continue; + } + let r = connected_regions.remove(id).unwrap(); + for ident in r.idents.iter() { + connected_region_ids.insert(*ident, id_to_set); + } + region.idents.extend_from_slice(&r.idents); + region.impl_blocks.extend(r.impl_blocks); + } + + connected_regions.insert(id_to_set, region); + } + } + } + + debug!( + "churning through {} components (sum={}, avg={}, var={}, max={})", + connected_regions.len(), + impls.len(), + impls.len() / connected_regions.len(), + { + let avg = impls.len() / connected_regions.len(); + let s = connected_regions + .iter() + .flatten() + .map(|r| r.impl_blocks.len() as isize - avg as isize) + .map(|v| v.abs() as usize) + .sum::(); + s / connected_regions.len() + }, + connected_regions.iter().flatten().map(|r| r.impl_blocks.len()).max().unwrap() + ); + // List of connected regions is built. Now, run the overlap check + // for each pair of impl blocks in the same connected region. 
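The merging logic above builds the connected regions incrementally: each impl block joins whatever regions already claim one of its item names, and any regions this links together are merged into one. A standalone sketch of that grouping over plain strings, using simple vectors and indices instead of the `IndexVec`/`SmallVec` machinery:

```rust
use std::collections::HashMap;

// Group impl blocks (given as their lists of item names) into connected regions:
// two blocks end up in the same region if a chain of shared item names links them.
fn connected_regions(blocks: &[Vec<&str>]) -> Vec<Vec<usize>> {
    // region id -> impl block indices; `None` marks a region merged into another one.
    let mut regions: Vec<Option<Vec<usize>>> = Vec::new();
    // item name -> region id that currently claims it.
    let mut region_of_ident: HashMap<&str, usize> = HashMap::new();

    for (block_idx, idents) in blocks.iter().enumerate() {
        // Existing regions this block touches through its item names.
        let mut ids: Vec<usize> =
            idents.iter().filter_map(|ident| region_of_ident.get(ident).copied()).collect();
        ids.sort_unstable();
        ids.dedup();

        // Pick (or create) the region this block joins, then merge the others into it.
        let target = match ids.first().copied() {
            Some(id) => id,
            None => {
                regions.push(Some(Vec::new()));
                regions.len() - 1
            }
        };
        for &other in ids.iter().skip(1) {
            let merged = regions[other].take().expect("region already merged");
            regions[target].as_mut().unwrap().extend(merged);
            // Re-point every identifier of the merged region at the surviving one.
            for id in region_of_ident.values_mut() {
                if *id == other {
                    *id = target;
                }
            }
        }
        regions[target].as_mut().unwrap().push(block_idx);
        for &ident in idents {
            region_of_ident.insert(ident, target);
        }
    }

    regions.into_iter().flatten().collect()
}

fn main() {
    // Blocks 0 and 2 share `len`, so they form one region; block 1 is on its own.
    let blocks = vec![vec!["len", "get"], vec!["push"], vec!["len"]];
    let regions = connected_regions(&blocks);
    assert_eq!(regions.len(), 2);
    assert!(regions.contains(&vec![0, 2]));
}
```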
+ for region in connected_regions.into_iter().flatten() { + let mut impl_blocks = + region.impl_blocks.into_iter().collect::>(); + impl_blocks.sort_unstable(); + for (i, &impl1_items_idx) in impl_blocks.iter().enumerate() { + let &(&impl1_def_id, impl_items1) = &impls_items[impl1_items_idx]; + for &impl2_items_idx in impl_blocks[(i + 1)..].iter() { + let &(&impl2_def_id, impl_items2) = &impls_items[impl2_items_idx]; + if self.impls_have_common_items(impl_items1, impl_items2) { + self.check_for_overlapping_inherent_impls( + overlap_mode, + impl1_def_id, + impl2_def_id, + ); + } + } + } + } + } + } +} diff --git a/compiler/rustc_typeck/src/coherence/mod.rs b/compiler/rustc_typeck/src/coherence/mod.rs new file mode 100644 index 000000000..ae9ebe590 --- /dev/null +++ b/compiler/rustc_typeck/src/coherence/mod.rs @@ -0,0 +1,237 @@ +// Coherence phase +// +// The job of the coherence phase of typechecking is to ensure that +// each trait has at most one implementation for each type. This is +// done by the orphan and overlap modules. Then we build up various +// mappings. That mapping code resides here. + +use rustc_errors::struct_span_err; +use rustc_hir::def_id::{DefId, LocalDefId}; +use rustc_middle::ty::query::Providers; +use rustc_middle::ty::{self, TyCtxt, TypeVisitable}; +use rustc_trait_selection::traits; + +mod builtin; +mod inherent_impls; +mod inherent_impls_overlap; +mod orphan; +mod unsafety; + +fn check_impl(tcx: TyCtxt<'_>, impl_def_id: LocalDefId, trait_ref: ty::TraitRef<'_>) { + debug!( + "(checking implementation) adding impl for trait '{:?}', item '{}'", + trait_ref, + tcx.def_path_str(impl_def_id.to_def_id()) + ); + + // Skip impls where one of the self type is an error type. + // This occurs with e.g., resolve failures (#30589). + if trait_ref.references_error() { + return; + } + + enforce_trait_manually_implementable(tcx, impl_def_id, trait_ref.def_id); + enforce_empty_impls_for_marker_traits(tcx, impl_def_id, trait_ref.def_id); +} + +fn enforce_trait_manually_implementable( + tcx: TyCtxt<'_>, + impl_def_id: LocalDefId, + trait_def_id: DefId, +) { + let did = Some(trait_def_id); + let li = tcx.lang_items(); + let impl_header_span = tcx.def_span(impl_def_id); + + // Disallow *all* explicit impls of `Pointee`, `DiscriminantKind`, `Sized` and `Unsize` for now. 
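`enforce_trait_manually_implementable`, which continues below, is where explicit impls of compiler-implemented traits such as `Sized` are refused. A minimal program expected to be rejected with E0322:

```rust
struct S;

// error[E0322]: explicit impls for the `Sized` trait are not permitted
impl Sized for S {}

fn main() {}
```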
+ if did == li.pointee_trait() { + struct_span_err!( + tcx.sess, + impl_header_span, + E0322, + "explicit impls for the `Pointee` trait are not permitted" + ) + .span_label(impl_header_span, "impl of `Pointee` not allowed") + .emit(); + return; + } + + if did == li.discriminant_kind_trait() { + struct_span_err!( + tcx.sess, + impl_header_span, + E0322, + "explicit impls for the `DiscriminantKind` trait are not permitted" + ) + .span_label(impl_header_span, "impl of `DiscriminantKind` not allowed") + .emit(); + return; + } + + if did == li.sized_trait() { + struct_span_err!( + tcx.sess, + impl_header_span, + E0322, + "explicit impls for the `Sized` trait are not permitted" + ) + .span_label(impl_header_span, "impl of `Sized` not allowed") + .emit(); + return; + } + + if did == li.unsize_trait() { + struct_span_err!( + tcx.sess, + impl_header_span, + E0328, + "explicit impls for the `Unsize` trait are not permitted" + ) + .span_label(impl_header_span, "impl of `Unsize` not allowed") + .emit(); + return; + } + + if tcx.features().unboxed_closures { + // the feature gate allows all Fn traits + return; + } + + if let ty::trait_def::TraitSpecializationKind::AlwaysApplicable = + tcx.trait_def(trait_def_id).specialization_kind + { + if !tcx.features().specialization && !tcx.features().min_specialization { + tcx.sess + .struct_span_err( + impl_header_span, + "implementing `rustc_specialization_trait` traits is unstable", + ) + .help("add `#![feature(min_specialization)]` to the crate attributes to enable") + .emit(); + return; + } + } +} + +/// We allow impls of marker traits to overlap, so they can't override impls +/// as that could make it ambiguous which associated item to use. +fn enforce_empty_impls_for_marker_traits( + tcx: TyCtxt<'_>, + impl_def_id: LocalDefId, + trait_def_id: DefId, +) { + if !tcx.trait_def(trait_def_id).is_marker { + return; + } + + if tcx.associated_item_def_ids(trait_def_id).is_empty() { + return; + } + + struct_span_err!( + tcx.sess, + tcx.def_span(impl_def_id), + E0715, + "impls for marker traits cannot contain items" + ) + .emit(); +} + +pub fn provide(providers: &mut Providers) { + use self::builtin::coerce_unsized_info; + use self::inherent_impls::{crate_incoherent_impls, crate_inherent_impls, inherent_impls}; + use self::inherent_impls_overlap::crate_inherent_impls_overlap_check; + use self::orphan::orphan_check_impl; + + *providers = Providers { + coherent_trait, + crate_inherent_impls, + crate_incoherent_impls, + inherent_impls, + crate_inherent_impls_overlap_check, + coerce_unsized_info, + orphan_check_impl, + ..*providers + }; +} + +fn coherent_trait(tcx: TyCtxt<'_>, def_id: DefId) { + // Trigger building the specialization graph for the trait. This will detect and report any + // overlap errors. + tcx.ensure().specialization_graph_of(def_id); + + let impls = tcx.hir().trait_impls(def_id); + for &impl_def_id in impls { + let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap(); + + check_impl(tcx, impl_def_id, trait_ref); + check_object_overlap(tcx, impl_def_id, trait_ref); + + tcx.sess.time("unsafety_checking", || unsafety::check_item(tcx, impl_def_id)); + tcx.sess.time("orphan_checking", || tcx.ensure().orphan_check_impl(impl_def_id)); + } + + builtin::check_trait(tcx, def_id); +} + +/// Checks whether an impl overlaps with the automatic `impl Trait for dyn Trait`. 
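`check_object_overlap`, whose body follows, guards against impls that collide with the automatic `impl Trait for dyn Trait`: if `Sub: Super`, then `dyn Sub` already implements `Super`, so a hand-written impl of `Super` for `dyn Sub` overlaps with the built-in one. A minimal program expected to be rejected with E0371 (the trait names are illustrative):

```rust
trait Super {}
trait Sub: Super {}

// `dyn Sub` already implements `Super` automatically, because `Super` is a
// supertrait of `Sub`, so this explicit impl overlaps with the built-in one.
// error[E0371]: the object type `dyn Sub` automatically implements the trait `Super`
impl Super for dyn Sub {}

fn main() {}
```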
+fn check_object_overlap<'tcx>( + tcx: TyCtxt<'tcx>, + impl_def_id: LocalDefId, + trait_ref: ty::TraitRef<'tcx>, +) { + let trait_def_id = trait_ref.def_id; + + if trait_ref.references_error() { + debug!("coherence: skipping impl {:?} with error {:?}", impl_def_id, trait_ref); + return; + } + + // check for overlap with the automatic `impl Trait for dyn Trait` + if let ty::Dynamic(data, ..) = trait_ref.self_ty().kind() { + // This is something like impl Trait1 for Trait2. Illegal + // if Trait1 is a supertrait of Trait2 or Trait2 is not object safe. + + let component_def_ids = data.iter().flat_map(|predicate| { + match predicate.skip_binder() { + ty::ExistentialPredicate::Trait(tr) => Some(tr.def_id), + ty::ExistentialPredicate::AutoTrait(def_id) => Some(def_id), + // An associated type projection necessarily comes with + // an additional `Trait` requirement. + ty::ExistentialPredicate::Projection(..) => None, + } + }); + + for component_def_id in component_def_ids { + if !tcx.is_object_safe(component_def_id) { + // Without the 'object_safe_for_dispatch' feature this is an error + // which will be reported by wfcheck. Ignore it here. + // This is tested by `coherence-impl-trait-for-trait-object-safe.rs`. + // With the feature enabled, the trait is not implemented automatically, + // so this is valid. + } else { + let mut supertrait_def_ids = traits::supertrait_def_ids(tcx, component_def_id); + if supertrait_def_ids.any(|d| d == trait_def_id) { + let span = tcx.def_span(impl_def_id); + struct_span_err!( + tcx.sess, + span, + E0371, + "the object type `{}` automatically implements the trait `{}`", + trait_ref.self_ty(), + tcx.def_path_str(trait_def_id) + ) + .span_label( + span, + format!( + "`{}` automatically implements trait `{}`", + trait_ref.self_ty(), + tcx.def_path_str(trait_def_id) + ), + ) + .emit(); + } + } + } + } +} diff --git a/compiler/rustc_typeck/src/coherence/orphan.rs b/compiler/rustc_typeck/src/coherence/orphan.rs new file mode 100644 index 000000000..1608550aa --- /dev/null +++ b/compiler/rustc_typeck/src/coherence/orphan.rs @@ -0,0 +1,507 @@ +//! Orphan checker: every impl either implements a trait defined in this +//! crate or pertains to a type defined in this crate. 
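As the module header says, every impl must be tied to the local crate through either the trait or the self type. The canonical violation, where both the trait and the self type are foreign, is expected to be rejected with E0117; the newtype wrapper shown after it is the usual fix and is what the diagnostic's note suggests:

```rust
use std::fmt;

// Both `Display` and `Vec<u8>` live in other crates, so nothing ties this impl
// to the local crate and the orphan check is expected to reject it.
// error[E0117]: only traits defined in the current crate can be implemented for
//               types defined outside of the crate
impl fmt::Display for Vec<u8> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{} bytes", self.len())
    }
}

// The usual fix, matching the "define and implement a trait or new type instead"
// note: wrap the foreign type in a local newtype and implement `Display` for that.
struct Bytes(Vec<u8>);

impl fmt::Display for Bytes {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{} bytes", self.0.len())
    }
}

fn main() {
    println!("{}", Bytes(vec![1, 2, 3]));
}
```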
+ +use rustc_data_structures::fx::FxHashSet; +use rustc_errors::struct_span_err; +use rustc_errors::{Diagnostic, ErrorGuaranteed}; +use rustc_hir as hir; +use rustc_infer::infer::TyCtxtInferExt; +use rustc_middle::ty::subst::GenericArgKind; +use rustc_middle::ty::subst::InternalSubsts; +use rustc_middle::ty::util::IgnoreRegions; +use rustc_middle::ty::{ + self, ImplPolarity, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor, +}; +use rustc_session::lint; +use rustc_span::def_id::{DefId, LocalDefId}; +use rustc_span::Span; +use rustc_trait_selection::traits; +use std::ops::ControlFlow; + +#[instrument(skip(tcx), level = "debug")] +pub(crate) fn orphan_check_impl( + tcx: TyCtxt<'_>, + impl_def_id: LocalDefId, +) -> Result<(), ErrorGuaranteed> { + let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap(); + if let Some(err) = trait_ref.error_reported() { + return Err(err); + } + + let ret = do_orphan_check_impl(tcx, trait_ref, impl_def_id); + if tcx.trait_is_auto(trait_ref.def_id) { + lint_auto_trait_impl(tcx, trait_ref, impl_def_id); + } + + ret +} + +fn do_orphan_check_impl<'tcx>( + tcx: TyCtxt<'tcx>, + trait_ref: ty::TraitRef<'tcx>, + def_id: LocalDefId, +) -> Result<(), ErrorGuaranteed> { + let trait_def_id = trait_ref.def_id; + + let item = tcx.hir().item(hir::ItemId { def_id }); + let hir::ItemKind::Impl(ref impl_) = item.kind else { + bug!("{:?} is not an impl: {:?}", def_id, item); + }; + let sp = tcx.def_span(def_id); + let tr = impl_.of_trait.as_ref().unwrap(); + + // Ensure no opaque types are present in this impl header. See issues #76202 and #86411 for examples, + // and #84660 where it would otherwise allow unsoundness. + if trait_ref.has_opaque_types() { + trace!("{:#?}", item); + // First we find the opaque type in question. + for ty in trait_ref.substs { + for ty in ty.walk() { + let ty::subst::GenericArgKind::Type(ty) = ty.unpack() else { continue }; + let ty::Opaque(def_id, _) = *ty.kind() else { continue }; + trace!(?def_id); + + // Then we search for mentions of the opaque type's type alias in the HIR + struct SpanFinder<'tcx> { + sp: Span, + def_id: DefId, + tcx: TyCtxt<'tcx>, + } + impl<'v, 'tcx> hir::intravisit::Visitor<'v> for SpanFinder<'tcx> { + #[instrument(level = "trace", skip(self, _id))] + fn visit_path(&mut self, path: &'v hir::Path<'v>, _id: hir::HirId) { + // You can't mention an opaque type directly, so we look for type aliases + if let hir::def::Res::Def(hir::def::DefKind::TyAlias, def_id) = path.res { + // And check if that type alias's type contains the opaque type we're looking for + for arg in self.tcx.type_of(def_id).walk() { + if let GenericArgKind::Type(ty) = arg.unpack() { + if let ty::Opaque(def_id, _) = *ty.kind() { + if def_id == self.def_id { + // Finally we update the span to the mention of the type alias + self.sp = path.span; + return; + } + } + } + } + } + hir::intravisit::walk_path(self, path) + } + } + + let mut visitor = SpanFinder { sp, def_id, tcx }; + hir::intravisit::walk_item(&mut visitor, item); + let reported = tcx + .sess + .struct_span_err(visitor.sp, "cannot implement trait on type alias impl trait") + .span_note(tcx.def_span(def_id), "type alias impl trait defined here") + .emit(); + return Err(reported); + } + } + span_bug!(sp, "opaque type not found, but `has_opaque_types` is set") + } + + match traits::orphan_check(tcx, item.def_id.to_def_id()) { + Ok(()) => {} + Err(err) => emit_orphan_check_error( + tcx, + sp, + item.span, + tr.path.span, + trait_ref.self_ty(), + impl_.self_ty.span, + &impl_.generics, + err, + )?, 
+ } + + // In addition to the above rules, we restrict impls of auto traits + // so that they can only be implemented on nominal types, such as structs, + // enums or foreign types. To see why this restriction exists, consider the + // following example (#22978). Imagine that crate A defines an auto trait + // `Foo` and a fn that operates on pairs of types: + // + // ``` + // // Crate A + // auto trait Foo { } + // fn two_foos(..) { + // one_foo::<(A,B)>(..) + // } + // fn one_foo(..) { .. } + // ``` + // + // This type-checks fine; in particular the fn + // `two_foos` is able to conclude that `(A,B):Foo` + // because `A:Foo` and `B:Foo`. + // + // Now imagine that crate B comes along and does the following: + // + // ``` + // struct A { } + // struct B { } + // impl Foo for A { } + // impl Foo for B { } + // impl !Send for (A, B) { } + // ``` + // + // This final impl is legal according to the orphan + // rules, but it invalidates the reasoning from + // `two_foos` above. + debug!( + "trait_ref={:?} trait_def_id={:?} trait_is_auto={}", + trait_ref, + trait_def_id, + tcx.trait_is_auto(trait_def_id) + ); + + if tcx.trait_is_auto(trait_def_id) && !trait_def_id.is_local() { + let self_ty = trait_ref.self_ty(); + let opt_self_def_id = match *self_ty.kind() { + ty::Adt(self_def, _) => Some(self_def.did()), + ty::Foreign(did) => Some(did), + _ => None, + }; + + let msg = match opt_self_def_id { + // We only want to permit nominal types, but not *all* nominal types. + // They must be local to the current crate, so that people + // can't do `unsafe impl Send for Rc` or + // `impl !Send for Box`. + Some(self_def_id) => { + if self_def_id.is_local() { + None + } else { + Some(( + format!( + "cross-crate traits with a default impl, like `{}`, \ + can only be implemented for a struct/enum type \ + defined in the current crate", + tcx.def_path_str(trait_def_id) + ), + "can't implement cross-crate trait for type in another crate", + )) + } + } + _ => Some(( + format!( + "cross-crate traits with a default impl, like `{}`, can \ + only be implemented for a struct/enum type, not `{}`", + tcx.def_path_str(trait_def_id), + self_ty + ), + "can't implement cross-crate trait with a default impl for \ + non-struct/enum type", + )), + }; + + if let Some((msg, label)) = msg { + let reported = + struct_span_err!(tcx.sess, sp, E0321, "{}", msg).span_label(sp, label).emit(); + return Err(reported); + } + } + + Ok(()) +} + +fn emit_orphan_check_error<'tcx>( + tcx: TyCtxt<'tcx>, + sp: Span, + full_impl_span: Span, + trait_span: Span, + self_ty: Ty<'tcx>, + self_ty_span: Span, + generics: &hir::Generics<'tcx>, + err: traits::OrphanCheckErr<'tcx>, +) -> Result { + Err(match err { + traits::OrphanCheckErr::NonLocalInputType(tys) => { + let msg = match self_ty.kind() { + ty::Adt(..) => "can be implemented for types defined outside of the crate", + _ if self_ty.is_primitive() => "can be implemented for primitive types", + _ => "can be implemented for arbitrary types", + }; + let mut err = struct_span_err!( + tcx.sess, + sp, + E0117, + "only traits defined in the current crate {msg}" + ); + err.span_label(sp, "impl doesn't use only types from inside the current crate"); + for (ty, is_target_ty) in &tys { + let mut ty = *ty; + tcx.infer_ctxt().enter(|infcx| { + // Remove the lifetimes unnecessary for this error. + ty = infcx.freshen(ty); + }); + ty = match ty.kind() { + // Remove the type arguments from the output, as they are not relevant. + // You can think of this as the reverse of `resolve_vars_if_possible`. 
+ // That way if we had `Vec`, we will properly attribute the + // problem to `Vec` and avoid confusing the user if they were to see + // `MyType` in the error. + ty::Adt(def, _) => tcx.mk_adt(*def, ty::List::empty()), + _ => ty, + }; + let this = "this".to_string(); + let (ty, postfix) = match &ty.kind() { + ty::Slice(_) => (this, " because slices are always foreign"), + ty::Array(..) => (this, " because arrays are always foreign"), + ty::Tuple(..) => (this, " because tuples are always foreign"), + ty::RawPtr(ptr_ty) => { + emit_newtype_suggestion_for_raw_ptr( + full_impl_span, + self_ty, + self_ty_span, + ptr_ty, + &mut err, + ); + + (format!("`{}`", ty), " because raw pointers are always foreign") + } + _ => (format!("`{}`", ty), ""), + }; + + let msg = format!("{} is not defined in the current crate{}", ty, postfix); + if *is_target_ty { + // Point at `D` in `impl for C in D` + err.span_label(self_ty_span, &msg); + } else { + // Point at `C` in `impl for C in D` + err.span_label(trait_span, &msg); + } + } + err.note("define and implement a trait or new type instead"); + err.emit() + } + traits::OrphanCheckErr::UncoveredTy(param_ty, local_type) => { + let mut sp = sp; + for param in generics.params { + if param.name.ident().to_string() == param_ty.to_string() { + sp = param.span; + } + } + + match local_type { + Some(local_type) => struct_span_err!( + tcx.sess, + sp, + E0210, + "type parameter `{}` must be covered by another type \ + when it appears before the first local type (`{}`)", + param_ty, + local_type + ) + .span_label( + sp, + format!( + "type parameter `{}` must be covered by another type \ + when it appears before the first local type (`{}`)", + param_ty, local_type + ), + ) + .note( + "implementing a foreign trait is only possible if at \ + least one of the types for which it is implemented is local, \ + and no uncovered type parameters appear before that first \ + local type", + ) + .note( + "in this case, 'before' refers to the following order: \ + `impl<..> ForeignTrait for T0`, \ + where `T0` is the first and `Tn` is the last", + ) + .emit(), + None => struct_span_err!( + tcx.sess, + sp, + E0210, + "type parameter `{}` must be used as the type parameter for some \ + local type (e.g., `MyStruct<{}>`)", + param_ty, + param_ty + ) + .span_label( + sp, + format!( + "type parameter `{}` must be used as the type parameter for some \ + local type", + param_ty, + ), + ) + .note( + "implementing a foreign trait is only possible if at \ + least one of the types for which it is implemented is local", + ) + .note( + "only traits defined in the current crate can be \ + implemented for a type parameter", + ) + .emit(), + } + } + }) +} + +fn emit_newtype_suggestion_for_raw_ptr( + full_impl_span: Span, + self_ty: Ty<'_>, + self_ty_span: Span, + ptr_ty: &ty::TypeAndMut<'_>, + diag: &mut Diagnostic, +) { + if !self_ty.needs_subst() { + let mut_key = if ptr_ty.mutbl == rustc_middle::mir::Mutability::Mut { "mut " } else { "" }; + let msg_sugg = "consider introducing a new wrapper type".to_owned(); + let sugg = vec![ + ( + full_impl_span.shrink_to_lo(), + format!("struct WrapperType(*{}{});\n\n", mut_key, ptr_ty.ty), + ), + (self_ty_span, "WrapperType".to_owned()), + ]; + diag.multipart_suggestion(msg_sugg, sugg, rustc_errors::Applicability::MaybeIncorrect); + } +} + +/// Lint impls of auto traits if they are likely to have +/// unsound or surprising effects on auto impls. 
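+// A sketch of the kind of impl this lint is aimed at (the type `MyBox`
+// is hypothetical): the impl below only covers some instantiations of
+// its self type, so auto-impl reasoning elsewhere can be surprising.
+//
+// ```
+// struct MyBox<T>(*const T);
+// unsafe impl<T> Send for MyBox<*const T> {}
+// ```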
+fn lint_auto_trait_impl<'tcx>( + tcx: TyCtxt<'tcx>, + trait_ref: ty::TraitRef<'tcx>, + impl_def_id: LocalDefId, +) { + if tcx.impl_polarity(impl_def_id) != ImplPolarity::Positive { + return; + } + + assert_eq!(trait_ref.substs.len(), 1); + let self_ty = trait_ref.self_ty(); + let (self_type_did, substs) = match self_ty.kind() { + ty::Adt(def, substs) => (def.did(), substs), + _ => { + // FIXME: should also lint for stuff like `&i32` but + // considering that auto traits are unstable, that + // isn't too important for now as this only affects + // crates using `nightly`, and std. + return; + } + }; + + // Impls which completely cover a given root type are fine as they + // disable auto impls entirely. So only lint if the substs + // are not a permutation of the identity substs. + let Err(arg) = tcx.uses_unique_generic_params(substs, IgnoreRegions::Yes) else { + // ok + return; + }; + + // Ideally: + // + // - compute the requirements for the auto impl candidate + // - check whether these are implied by the non covering impls + // - if not, emit the lint + // + // What we do here is a bit simpler: + // + // - badly check if an auto impl candidate definitely does not apply + // for the given simplified type + // - if so, do not lint + if fast_reject_auto_impl(tcx, trait_ref.def_id, self_ty) { + // ok + return; + } + + tcx.struct_span_lint_hir( + lint::builtin::SUSPICIOUS_AUTO_TRAIT_IMPLS, + tcx.hir().local_def_id_to_hir_id(impl_def_id), + tcx.def_span(impl_def_id), + |err| { + let item_span = tcx.def_span(self_type_did); + let self_descr = tcx.def_kind(self_type_did).descr(self_type_did); + let mut err = err.build(&format!( + "cross-crate traits with a default impl, like `{}`, \ + should not be specialized", + tcx.def_path_str(trait_ref.def_id), + )); + match arg { + ty::util::NotUniqueParam::DuplicateParam(arg) => { + err.note(&format!("`{}` is mentioned multiple times", arg)); + } + ty::util::NotUniqueParam::NotParam(arg) => { + err.note(&format!("`{}` is not a generic parameter", arg)); + } + } + err.span_note( + item_span, + &format!( + "try using the same sequence of generic parameters as the {} definition", + self_descr, + ), + ); + err.emit(); + }, + ); +} + +fn fast_reject_auto_impl<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, self_ty: Ty<'tcx>) -> bool { + struct DisableAutoTraitVisitor<'tcx> { + tcx: TyCtxt<'tcx>, + trait_def_id: DefId, + self_ty_root: Ty<'tcx>, + seen: FxHashSet, + } + + impl<'tcx> TypeVisitor<'tcx> for DisableAutoTraitVisitor<'tcx> { + type BreakTy = (); + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { + let tcx = self.tcx; + if t != self.self_ty_root { + for impl_def_id in tcx.non_blanket_impls_for_ty(self.trait_def_id, t) { + match tcx.impl_polarity(impl_def_id) { + ImplPolarity::Negative => return ControlFlow::BREAK, + ImplPolarity::Reservation => {} + // FIXME(@lcnr): That's probably not good enough, idk + // + // We might just want to take the rustdoc code and somehow avoid + // explicit impls for `Self`. + ImplPolarity::Positive => return ControlFlow::CONTINUE, + } + } + } + + match t.kind() { + ty::Adt(def, substs) if def.is_phantom_data() => substs.visit_with(self), + ty::Adt(def, substs) => { + // @lcnr: This is the only place where cycles can happen. We avoid this + // by only visiting each `DefId` once. 
+ // + // This will be is incorrect in subtle cases, but I don't care :) + if self.seen.insert(def.did()) { + for ty in def.all_fields().map(|field| field.ty(tcx, substs)) { + ty.visit_with(self)?; + } + } + + ControlFlow::CONTINUE + } + _ => t.super_visit_with(self), + } + } + } + + let self_ty_root = match self_ty.kind() { + ty::Adt(def, _) => tcx.mk_adt(*def, InternalSubsts::identity_for_item(tcx, def.did())), + _ => unimplemented!("unexpected self ty {:?}", self_ty), + }; + + self_ty_root + .visit_with(&mut DisableAutoTraitVisitor { + tcx, + self_ty_root, + trait_def_id, + seen: FxHashSet::default(), + }) + .is_break() +} diff --git a/compiler/rustc_typeck/src/coherence/unsafety.rs b/compiler/rustc_typeck/src/coherence/unsafety.rs new file mode 100644 index 000000000..e45fb5fe4 --- /dev/null +++ b/compiler/rustc_typeck/src/coherence/unsafety.rs @@ -0,0 +1,66 @@ +//! Unsafety checker: every impl either implements a trait defined in this +//! crate or pertains to a type defined in this crate. + +use rustc_errors::struct_span_err; +use rustc_hir as hir; +use rustc_hir::def::DefKind; +use rustc_hir::Unsafety; +use rustc_middle::ty::TyCtxt; +use rustc_span::def_id::LocalDefId; + +pub(super) fn check_item(tcx: TyCtxt<'_>, def_id: LocalDefId) { + debug_assert!(matches!(tcx.def_kind(def_id), DefKind::Impl)); + let item = tcx.hir().expect_item(def_id); + let hir::ItemKind::Impl(ref impl_) = item.kind else { bug!() }; + + if let Some(trait_ref) = tcx.impl_trait_ref(item.def_id) { + let trait_def = tcx.trait_def(trait_ref.def_id); + let unsafe_attr = + impl_.generics.params.iter().find(|p| p.pure_wrt_drop).map(|_| "may_dangle"); + match (trait_def.unsafety, unsafe_attr, impl_.unsafety, impl_.polarity) { + (Unsafety::Normal, None, Unsafety::Unsafe, hir::ImplPolarity::Positive) => { + struct_span_err!( + tcx.sess, + item.span, + E0199, + "implementing the trait `{}` is not unsafe", + trait_ref.print_only_trait_path() + ) + .emit(); + } + + (Unsafety::Unsafe, _, Unsafety::Normal, hir::ImplPolarity::Positive) => { + struct_span_err!( + tcx.sess, + item.span, + E0200, + "the trait `{}` requires an `unsafe impl` declaration", + trait_ref.print_only_trait_path() + ) + .emit(); + } + + (Unsafety::Normal, Some(attr_name), Unsafety::Normal, hir::ImplPolarity::Positive) => { + struct_span_err!( + tcx.sess, + item.span, + E0569, + "requires an `unsafe impl` declaration due to `#[{}]` attribute", + attr_name + ) + .emit(); + } + + (_, _, Unsafety::Unsafe, hir::ImplPolarity::Negative(_)) => { + // Reported in AST validation + tcx.sess.delay_span_bug(item.span, "unsafe negative impl"); + } + (_, _, Unsafety::Normal, hir::ImplPolarity::Negative(_)) + | (Unsafety::Unsafe, _, Unsafety::Unsafe, hir::ImplPolarity::Positive) + | (Unsafety::Normal, Some(_), Unsafety::Unsafe, hir::ImplPolarity::Positive) + | (Unsafety::Normal, None, Unsafety::Normal, _) => { + // OK + } + } + } +} diff --git a/compiler/rustc_typeck/src/collect.rs b/compiler/rustc_typeck/src/collect.rs new file mode 100644 index 000000000..99996e80c --- /dev/null +++ b/compiler/rustc_typeck/src/collect.rs @@ -0,0 +1,3361 @@ +//! "Collection" is the process of determining the type and other external +//! details of each item in Rust. Collection is specifically concerned +//! with *inter-procedural* things -- for example, for a function +//! definition, collection will figure out the type and signature of the +//! function, but it will not visit the *body* of the function in any way, +//! 
nor examine type annotations on local variables (that's the job of +//! type *checking*). +//! +//! Collecting is ultimately defined by a bundle of queries that +//! inquire after various facts about the items in the crate (e.g., +//! `type_of`, `generics_of`, `predicates_of`, etc). See the `provide` function +//! for the full set. +//! +//! At present, however, we do run collection across all items in the +//! crate as a kind of pass. This should eventually be factored away. + +use crate::astconv::AstConv; +use crate::bounds::Bounds; +use crate::check::intrinsic::intrinsic_operation_unsafety; +use crate::constrained_generic_params as cgp; +use crate::errors; +use crate::middle::resolve_lifetime as rl; +use rustc_ast as ast; +use rustc_ast::{MetaItemKind, NestedMetaItem}; +use rustc_attr::{list_contains_name, InlineAttr, InstructionSetAttr, OptimizeAttr}; +use rustc_data_structures::captures::Captures; +use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet}; +use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed}; +use rustc_hir as hir; +use rustc_hir::def::{CtorKind, DefKind}; +use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID, LOCAL_CRATE}; +use rustc_hir::intravisit::{self, Visitor}; +use rustc_hir::weak_lang_items; +use rustc_hir::{GenericParamKind, HirId, Node}; +use rustc_middle::hir::nested_filter; +use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; +use rustc_middle::mir::mono::Linkage; +use rustc_middle::ty::query::Providers; +use rustc_middle::ty::subst::InternalSubsts; +use rustc_middle::ty::util::Discr; +use rustc_middle::ty::util::IntTypeExt; +use rustc_middle::ty::{self, AdtKind, Const, DefIdTree, IsSuggestable, Ty, TyCtxt}; +use rustc_middle::ty::{ReprOptions, ToPredicate}; +use rustc_session::lint; +use rustc_session::parse::feature_err; +use rustc_span::symbol::{kw, sym, Ident, Symbol}; +use rustc_span::{Span, DUMMY_SP}; +use rustc_target::spec::{abi, SanitizerSet}; +use rustc_trait_selection::traits::error_reporting::suggestions::NextTypeParamName; +use std::iter; + +mod item_bounds; +mod type_of; + +#[derive(Debug)] +struct OnlySelfBounds(bool); + +/////////////////////////////////////////////////////////////////////////// +// Main entry point + +fn collect_mod_item_types(tcx: TyCtxt<'_>, module_def_id: LocalDefId) { + tcx.hir().visit_item_likes_in_module(module_def_id, &mut CollectItemTypesVisitor { tcx }); +} + +pub fn provide(providers: &mut Providers) { + *providers = Providers { + opt_const_param_of: type_of::opt_const_param_of, + type_of: type_of::type_of, + item_bounds: item_bounds::item_bounds, + explicit_item_bounds: item_bounds::explicit_item_bounds, + generics_of, + predicates_of, + predicates_defined_on, + explicit_predicates_of, + super_predicates_of, + super_predicates_that_define_assoc_type, + trait_explicit_predicates_and_bounds, + type_param_predicates, + trait_def, + adt_def, + fn_sig, + impl_trait_ref, + impl_polarity, + is_foreign_item, + generator_kind, + codegen_fn_attrs, + asm_target_features, + collect_mod_item_types, + should_inherit_track_caller, + ..*providers + }; +} + +/////////////////////////////////////////////////////////////////////////// + +/// Context specific to some particular item. This is what implements +/// `AstConv`. It has information about the predicates that are defined +/// on the trait. Unfortunately, this predicate information is +/// available in various different forms at various points in the +/// process. 
So we can't just store a pointer to e.g., the AST or the +/// parsed ty form, we have to be more flexible. To this end, the +/// `ItemCtxt` is parameterized by a `DefId` that it uses to satisfy +/// `get_type_parameter_bounds` requests, drawing the information from +/// the AST (`hir::Generics`), recursively. +pub struct ItemCtxt<'tcx> { + tcx: TyCtxt<'tcx>, + item_def_id: DefId, +} + +/////////////////////////////////////////////////////////////////////////// + +#[derive(Default)] +pub(crate) struct HirPlaceholderCollector(pub(crate) Vec); + +impl<'v> Visitor<'v> for HirPlaceholderCollector { + fn visit_ty(&mut self, t: &'v hir::Ty<'v>) { + if let hir::TyKind::Infer = t.kind { + self.0.push(t.span); + } + intravisit::walk_ty(self, t) + } + fn visit_generic_arg(&mut self, generic_arg: &'v hir::GenericArg<'v>) { + match generic_arg { + hir::GenericArg::Infer(inf) => { + self.0.push(inf.span); + intravisit::walk_inf(self, inf); + } + hir::GenericArg::Type(t) => self.visit_ty(t), + _ => {} + } + } + fn visit_array_length(&mut self, length: &'v hir::ArrayLen) { + if let &hir::ArrayLen::Infer(_, span) = length { + self.0.push(span); + } + intravisit::walk_array_len(self, length) + } +} + +struct CollectItemTypesVisitor<'tcx> { + tcx: TyCtxt<'tcx>, +} + +/// If there are any placeholder types (`_`), emit an error explaining that this is not allowed +/// and suggest adding type parameters in the appropriate place, taking into consideration any and +/// all already existing generic type parameters to avoid suggesting a name that is already in use. +pub(crate) fn placeholder_type_error<'tcx>( + tcx: TyCtxt<'tcx>, + generics: Option<&hir::Generics<'_>>, + placeholder_types: Vec, + suggest: bool, + hir_ty: Option<&hir::Ty<'_>>, + kind: &'static str, +) { + if placeholder_types.is_empty() { + return; + } + + placeholder_type_error_diag(tcx, generics, placeholder_types, vec![], suggest, hir_ty, kind) + .emit(); +} + +pub(crate) fn placeholder_type_error_diag<'tcx>( + tcx: TyCtxt<'tcx>, + generics: Option<&hir::Generics<'_>>, + placeholder_types: Vec, + additional_spans: Vec, + suggest: bool, + hir_ty: Option<&hir::Ty<'_>>, + kind: &'static str, +) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + if placeholder_types.is_empty() { + return bad_placeholder(tcx, additional_spans, kind); + } + + let params = generics.map(|g| g.params).unwrap_or_default(); + let type_name = params.next_type_param_name(None); + let mut sugg: Vec<_> = + placeholder_types.iter().map(|sp| (*sp, (*type_name).to_string())).collect(); + + if let Some(generics) = generics { + if let Some(arg) = params.iter().find(|arg| { + matches!(arg.name, hir::ParamName::Plain(Ident { name: kw::Underscore, .. })) + }) { + // Account for `_` already present in cases like `struct S<_>(_);` and suggest + // `struct S(T);` instead of `struct S<_, T>(T);`. + sugg.push((arg.span, (*type_name).to_string())); + } else if let Some(span) = generics.span_for_param_suggestion() { + // Account for bounds, we want `fn foo(_: K)` not `fn foo(_: K)`. 
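+ // A sketch of the overall suggestion assembled here (names are
+ // illustrative): the new parameter is appended inside the existing
+ // angle brackets and each `_` is replaced with it.
+ //
+ // ```
+ // fn foo<K: Clone>(x: _) {}       // as written
+ // fn foo<K: Clone, T>(x: T) {}    // as suggested
+ // ```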
+ sugg.push((span, format!(", {}", type_name))); + } else { + sugg.push((generics.span, format!("<{}>", type_name))); + } + } + + let mut err = + bad_placeholder(tcx, placeholder_types.into_iter().chain(additional_spans).collect(), kind); + + // Suggest, but only if it is not a function in const or static + if suggest { + let mut is_fn = false; + let mut is_const_or_static = false; + + if let Some(hir_ty) = hir_ty && let hir::TyKind::BareFn(_) = hir_ty.kind { + is_fn = true; + + // Check if parent is const or static + let parent_id = tcx.hir().get_parent_node(hir_ty.hir_id); + let parent_node = tcx.hir().get(parent_id); + + is_const_or_static = matches!( + parent_node, + Node::Item(&hir::Item { + kind: hir::ItemKind::Const(..) | hir::ItemKind::Static(..), + .. + }) | Node::TraitItem(&hir::TraitItem { + kind: hir::TraitItemKind::Const(..), + .. + }) | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. }) + ); + } + + // if function is wrapped around a const or static, + // then don't show the suggestion + if !(is_fn && is_const_or_static) { + err.multipart_suggestion( + "use type parameters instead", + sugg, + Applicability::HasPlaceholders, + ); + } + } + + err +} + +fn reject_placeholder_type_signatures_in_item<'tcx>( + tcx: TyCtxt<'tcx>, + item: &'tcx hir::Item<'tcx>, +) { + let (generics, suggest) = match &item.kind { + hir::ItemKind::Union(_, generics) + | hir::ItemKind::Enum(_, generics) + | hir::ItemKind::TraitAlias(generics, _) + | hir::ItemKind::Trait(_, _, generics, ..) + | hir::ItemKind::Impl(hir::Impl { generics, .. }) + | hir::ItemKind::Struct(_, generics) => (generics, true), + hir::ItemKind::OpaqueTy(hir::OpaqueTy { generics, .. }) + | hir::ItemKind::TyAlias(_, generics) => (generics, false), + // `static`, `fn` and `const` are handled elsewhere to suggest appropriate type. + _ => return, + }; + + let mut visitor = HirPlaceholderCollector::default(); + visitor.visit_item(item); + + placeholder_type_error(tcx, Some(generics), visitor.0, suggest, None, item.kind.descr()); +} + +impl<'tcx> Visitor<'tcx> for CollectItemTypesVisitor<'tcx> { + type NestedFilter = nested_filter::OnlyBodies; + + fn nested_visit_map(&mut self) -> Self::Map { + self.tcx.hir() + } + + fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) { + convert_item(self.tcx, item.item_id()); + reject_placeholder_type_signatures_in_item(self.tcx, item); + intravisit::walk_item(self, item); + } + + fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) { + for param in generics.params { + match param.kind { + hir::GenericParamKind::Lifetime { .. } => {} + hir::GenericParamKind::Type { default: Some(_), .. } => { + let def_id = self.tcx.hir().local_def_id(param.hir_id); + self.tcx.ensure().type_of(def_id); + } + hir::GenericParamKind::Type { .. } => {} + hir::GenericParamKind::Const { default, .. } => { + let def_id = self.tcx.hir().local_def_id(param.hir_id); + self.tcx.ensure().type_of(def_id); + if let Some(default) = default { + let default_def_id = self.tcx.hir().local_def_id(default.hir_id); + // need to store default and type of default + self.tcx.ensure().type_of(default_def_id); + self.tcx.ensure().const_param_default(def_id); + } + } + } + } + intravisit::walk_generics(self, generics); + } + + fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) { + if let hir::ExprKind::Closure { .. 
} = expr.kind { + let def_id = self.tcx.hir().local_def_id(expr.hir_id); + self.tcx.ensure().generics_of(def_id); + // We do not call `type_of` for closures here as that + // depends on typecheck and would therefore hide + // any further errors in case one typeck fails. + } + intravisit::walk_expr(self, expr); + } + + fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) { + convert_trait_item(self.tcx, trait_item.trait_item_id()); + intravisit::walk_trait_item(self, trait_item); + } + + fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) { + convert_impl_item(self.tcx, impl_item.impl_item_id()); + intravisit::walk_impl_item(self, impl_item); + } +} + +/////////////////////////////////////////////////////////////////////////// +// Utility types and common code for the above passes. + +fn bad_placeholder<'tcx>( + tcx: TyCtxt<'tcx>, + mut spans: Vec, + kind: &'static str, +) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + let kind = if kind.ends_with('s') { format!("{}es", kind) } else { format!("{}s", kind) }; + + spans.sort(); + let mut err = struct_span_err!( + tcx.sess, + spans.clone(), + E0121, + "the placeholder `_` is not allowed within types on item signatures for {}", + kind + ); + for span in spans { + err.span_label(span, "not allowed in type signatures"); + } + err +} + +impl<'tcx> ItemCtxt<'tcx> { + pub fn new(tcx: TyCtxt<'tcx>, item_def_id: DefId) -> ItemCtxt<'tcx> { + ItemCtxt { tcx, item_def_id } + } + + pub fn to_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> { + >::ast_ty_to_ty(self, ast_ty) + } + + pub fn hir_id(&self) -> hir::HirId { + self.tcx.hir().local_def_id_to_hir_id(self.item_def_id.expect_local()) + } + + pub fn node(&self) -> hir::Node<'tcx> { + self.tcx.hir().get(self.hir_id()) + } +} + +impl<'tcx> AstConv<'tcx> for ItemCtxt<'tcx> { + fn tcx(&self) -> TyCtxt<'tcx> { + self.tcx + } + + fn item_def_id(&self) -> Option { + Some(self.item_def_id) + } + + fn get_type_parameter_bounds( + &self, + span: Span, + def_id: DefId, + assoc_name: Ident, + ) -> ty::GenericPredicates<'tcx> { + self.tcx.at(span).type_param_predicates(( + self.item_def_id, + def_id.expect_local(), + assoc_name, + )) + } + + fn re_infer(&self, _: Option<&ty::GenericParamDef>, _: Span) -> Option> { + None + } + + fn allow_ty_infer(&self) -> bool { + false + } + + fn ty_infer(&self, _: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx> { + self.tcx().ty_error_with_message(span, "bad placeholder type") + } + + fn ct_infer(&self, ty: Ty<'tcx>, _: Option<&ty::GenericParamDef>, span: Span) -> Const<'tcx> { + let ty = self.tcx.fold_regions(ty, |r, _| match *r { + ty::ReErased => self.tcx.lifetimes.re_static, + _ => r, + }); + self.tcx().const_error_with_message(ty, span, "bad placeholder constant") + } + + fn projected_ty_from_poly_trait_ref( + &self, + span: Span, + item_def_id: DefId, + item_segment: &hir::PathSegment<'_>, + poly_trait_ref: ty::PolyTraitRef<'tcx>, + ) -> Ty<'tcx> { + if let Some(trait_ref) = poly_trait_ref.no_bound_vars() { + let item_substs = >::create_substs_for_associated_item( + self, + self.tcx, + span, + item_def_id, + item_segment, + trait_ref.substs, + ); + self.tcx().mk_projection(item_def_id, item_substs) + } else { + // There are no late-bound regions; we can just ignore the binder. 
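+ // A sketch of code that can land in this error branch (trait and
+ // names are hypothetical): the bound below has a late-bound lifetime,
+ // so the projection `I::A` cannot pick a concrete region.
+ //
+ // ```
+ // trait Foo<T> { type A; }
+ // fn f<I: for<'x> Foo<&'x isize>>(_: I::A) {}
+ // ```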
+ let mut err = struct_span_err!( + self.tcx().sess, + span, + E0212, + "cannot use the associated type of a trait \ + with uninferred generic parameters" + ); + + match self.node() { + hir::Node::Field(_) | hir::Node::Ctor(_) | hir::Node::Variant(_) => { + let item = + self.tcx.hir().expect_item(self.tcx.hir().get_parent_item(self.hir_id())); + match &item.kind { + hir::ItemKind::Enum(_, generics) + | hir::ItemKind::Struct(_, generics) + | hir::ItemKind::Union(_, generics) => { + let lt_name = get_new_lifetime_name(self.tcx, poly_trait_ref, generics); + let (lt_sp, sugg) = match generics.params { + [] => (generics.span, format!("<{}>", lt_name)), + [bound, ..] => { + (bound.span.shrink_to_lo(), format!("{}, ", lt_name)) + } + }; + let suggestions = vec![ + (lt_sp, sugg), + ( + span.with_hi(item_segment.ident.span.lo()), + format!( + "{}::", + // Replace the existing lifetimes with a new named lifetime. + self.tcx.replace_late_bound_regions_uncached( + poly_trait_ref, + |_| { + self.tcx.mk_region(ty::ReEarlyBound( + ty::EarlyBoundRegion { + def_id: item_def_id, + index: 0, + name: Symbol::intern(<_name), + }, + )) + } + ), + ), + ), + ]; + err.multipart_suggestion( + "use a fully qualified path with explicit lifetimes", + suggestions, + Applicability::MaybeIncorrect, + ); + } + _ => {} + } + } + hir::Node::Item(hir::Item { + kind: + hir::ItemKind::Struct(..) | hir::ItemKind::Enum(..) | hir::ItemKind::Union(..), + .. + }) => {} + hir::Node::Item(_) + | hir::Node::ForeignItem(_) + | hir::Node::TraitItem(_) + | hir::Node::ImplItem(_) => { + err.span_suggestion_verbose( + span.with_hi(item_segment.ident.span.lo()), + "use a fully qualified path with inferred lifetimes", + format!( + "{}::", + // Erase named lt, we want `::C`, not `::C`. + self.tcx.anonymize_late_bound_regions(poly_trait_ref).skip_binder(), + ), + Applicability::MaybeIncorrect, + ); + } + _ => {} + } + err.emit(); + self.tcx().ty_error() + } + } + + fn normalize_ty(&self, _span: Span, ty: Ty<'tcx>) -> Ty<'tcx> { + // Types in item signatures are not normalized to avoid undue dependencies. + ty + } + + fn set_tainted_by_errors(&self) { + // There's no obvious place to track this, so just let it go. + } + + fn record_ty(&self, _hir_id: hir::HirId, _ty: Ty<'tcx>, _span: Span) { + // There's no place to record types from signatures? + } +} + +/// Synthesize a new lifetime name that doesn't clash with any of the lifetimes already present. +fn get_new_lifetime_name<'tcx>( + tcx: TyCtxt<'tcx>, + poly_trait_ref: ty::PolyTraitRef<'tcx>, + generics: &hir::Generics<'tcx>, +) -> String { + let existing_lifetimes = tcx + .collect_referenced_late_bound_regions(&poly_trait_ref) + .into_iter() + .filter_map(|lt| { + if let ty::BoundRegionKind::BrNamed(_, name) = lt { + Some(name.as_str().to_string()) + } else { + None + } + }) + .chain(generics.params.iter().filter_map(|param| { + if let hir::GenericParamKind::Lifetime { .. } = ¶m.kind { + Some(param.name.ident().as_str().to_string()) + } else { + None + } + })) + .collect::>(); + + let a_to_z_repeat_n = |n| { + (b'a'..=b'z').map(move |c| { + let mut s = '\''.to_string(); + s.extend(std::iter::repeat(char::from(c)).take(n)); + s + }) + }; + + // If all single char lifetime names are present, we wrap around and double the chars. + (1..).flat_map(a_to_z_repeat_n).find(|lt| !existing_lifetimes.contains(lt.as_str())).unwrap() +} + +/// Returns the predicates defined on `item_def_id` of the form +/// `X: Foo` where `X` is the type parameter `def_id`. 
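+// A small illustration of what this draws from (hypothetical function):
+//
+// ```
+// fn f<T: Clone>() where T: Default {}
+// ```
+//
+// the bounds considered for `T` here are `T: Clone` and `T: Default`,
+// further filtered by the associated-item name passed in.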
+fn type_param_predicates( + tcx: TyCtxt<'_>, + (item_def_id, def_id, assoc_name): (DefId, LocalDefId, Ident), +) -> ty::GenericPredicates<'_> { + use rustc_hir::*; + + // In the AST, bounds can derive from two places. Either + // written inline like `` or in a where-clause like + // `where T: Foo`. + + let param_id = tcx.hir().local_def_id_to_hir_id(def_id); + let param_owner = tcx.hir().ty_param_owner(def_id); + let generics = tcx.generics_of(param_owner); + let index = generics.param_def_id_to_index[&def_id.to_def_id()]; + let ty = tcx.mk_ty_param(index, tcx.hir().ty_param_name(def_id)); + + // Don't look for bounds where the type parameter isn't in scope. + let parent = if item_def_id == param_owner.to_def_id() { + None + } else { + tcx.generics_of(item_def_id).parent + }; + + let mut result = parent + .map(|parent| { + let icx = ItemCtxt::new(tcx, parent); + icx.get_type_parameter_bounds(DUMMY_SP, def_id.to_def_id(), assoc_name) + }) + .unwrap_or_default(); + let mut extend = None; + + let item_hir_id = tcx.hir().local_def_id_to_hir_id(item_def_id.expect_local()); + let ast_generics = match tcx.hir().get(item_hir_id) { + Node::TraitItem(item) => &item.generics, + + Node::ImplItem(item) => &item.generics, + + Node::Item(item) => { + match item.kind { + ItemKind::Fn(.., ref generics, _) + | ItemKind::Impl(hir::Impl { ref generics, .. }) + | ItemKind::TyAlias(_, ref generics) + | ItemKind::OpaqueTy(OpaqueTy { + ref generics, + origin: hir::OpaqueTyOrigin::TyAlias, + .. + }) + | ItemKind::Enum(_, ref generics) + | ItemKind::Struct(_, ref generics) + | ItemKind::Union(_, ref generics) => generics, + ItemKind::Trait(_, _, ref generics, ..) => { + // Implied `Self: Trait` and supertrait bounds. + if param_id == item_hir_id { + let identity_trait_ref = ty::TraitRef::identity(tcx, item_def_id); + extend = + Some((identity_trait_ref.without_const().to_predicate(tcx), item.span)); + } + generics + } + _ => return result, + } + } + + Node::ForeignItem(item) => match item.kind { + ForeignItemKind::Fn(_, _, ref generics) => generics, + _ => return result, + }, + + _ => return result, + }; + + let icx = ItemCtxt::new(tcx, item_def_id); + let extra_predicates = extend.into_iter().chain( + icx.type_parameter_bounds_in_generics( + ast_generics, + param_id, + ty, + OnlySelfBounds(true), + Some(assoc_name), + ) + .into_iter() + .filter(|(predicate, _)| match predicate.kind().skip_binder() { + ty::PredicateKind::Trait(data) => data.self_ty().is_param(index), + _ => false, + }), + ); + result.predicates = + tcx.arena.alloc_from_iter(result.predicates.iter().copied().chain(extra_predicates)); + result +} + +impl<'tcx> ItemCtxt<'tcx> { + /// Finds bounds from `hir::Generics`. This requires scanning through the + /// AST. We do this to avoid having to convert *all* the bounds, which + /// would create artificial cycles. Instead, we can only convert the + /// bounds for a type parameter `X` if `X::Foo` is used. 
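+ // A sketch of the kind of cycle this laziness avoids (hypothetical
+ // signature):
+ //
+ // ```
+ // fn f<T: Iterator, U: From<T::Item>>() {}
+ // ```
+ //
+ // resolving `T::Item` inside `U`'s bound needs `T`'s bounds; if that
+ // in turn required converting *all* of `f`'s bounds (including the
+ // one being resolved), we would go in circles.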
+ #[instrument(level = "trace", skip(self, ast_generics))] + fn type_parameter_bounds_in_generics( + &self, + ast_generics: &'tcx hir::Generics<'tcx>, + param_id: hir::HirId, + ty: Ty<'tcx>, + only_self_bounds: OnlySelfBounds, + assoc_name: Option, + ) -> Vec<(ty::Predicate<'tcx>, Span)> { + let param_def_id = self.tcx.hir().local_def_id(param_id).to_def_id(); + debug!(?param_def_id); + ast_generics + .predicates + .iter() + .filter_map(|wp| match *wp { + hir::WherePredicate::BoundPredicate(ref bp) => Some(bp), + _ => None, + }) + .flat_map(|bp| { + let bt = if bp.is_param_bound(param_def_id) { + Some(ty) + } else if !only_self_bounds.0 { + Some(self.to_ty(bp.bounded_ty)) + } else { + None + }; + let bvars = self.tcx.late_bound_vars(bp.bounded_ty.hir_id); + + bp.bounds.iter().filter_map(move |b| bt.map(|bt| (bt, b, bvars))).filter( + |(_, b, _)| match assoc_name { + Some(assoc_name) => self.bound_defines_assoc_item(b, assoc_name), + None => true, + }, + ) + }) + .flat_map(|(bt, b, bvars)| predicates_from_bound(self, bt, b, bvars)) + .collect() + } + + fn bound_defines_assoc_item(&self, b: &hir::GenericBound<'_>, assoc_name: Ident) -> bool { + debug!("bound_defines_assoc_item(b={:?}, assoc_name={:?})", b, assoc_name); + + match b { + hir::GenericBound::Trait(poly_trait_ref, _) => { + let trait_ref = &poly_trait_ref.trait_ref; + if let Some(trait_did) = trait_ref.trait_def_id() { + self.tcx.trait_may_define_assoc_type(trait_did, assoc_name) + } else { + false + } + } + _ => false, + } + } +} + +fn convert_item(tcx: TyCtxt<'_>, item_id: hir::ItemId) { + let it = tcx.hir().item(item_id); + debug!("convert: item {} with id {}", it.ident, it.hir_id()); + let def_id = item_id.def_id; + + match it.kind { + // These don't define types. + hir::ItemKind::ExternCrate(_) + | hir::ItemKind::Use(..) + | hir::ItemKind::Macro(..) + | hir::ItemKind::Mod(_) + | hir::ItemKind::GlobalAsm(_) => {} + hir::ItemKind::ForeignMod { items, .. } => { + for item in items { + let item = tcx.hir().foreign_item(item.id); + tcx.ensure().generics_of(item.def_id); + tcx.ensure().type_of(item.def_id); + tcx.ensure().predicates_of(item.def_id); + match item.kind { + hir::ForeignItemKind::Fn(..) => tcx.ensure().fn_sig(item.def_id), + hir::ForeignItemKind::Static(..) => { + let mut visitor = HirPlaceholderCollector::default(); + visitor.visit_foreign_item(item); + placeholder_type_error( + tcx, + None, + visitor.0, + false, + None, + "static variable", + ); + } + _ => (), + } + } + } + hir::ItemKind::Enum(ref enum_definition, _) => { + tcx.ensure().generics_of(def_id); + tcx.ensure().type_of(def_id); + tcx.ensure().predicates_of(def_id); + convert_enum_variant_types(tcx, def_id.to_def_id(), enum_definition.variants); + } + hir::ItemKind::Impl { .. } => { + tcx.ensure().generics_of(def_id); + tcx.ensure().type_of(def_id); + tcx.ensure().impl_trait_ref(def_id); + tcx.ensure().predicates_of(def_id); + } + hir::ItemKind::Trait(..) => { + tcx.ensure().generics_of(def_id); + tcx.ensure().trait_def(def_id); + tcx.at(it.span).super_predicates_of(def_id); + tcx.ensure().predicates_of(def_id); + } + hir::ItemKind::TraitAlias(..) 
=> { + tcx.ensure().generics_of(def_id); + tcx.at(it.span).super_predicates_of(def_id); + tcx.ensure().predicates_of(def_id); + } + hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => { + tcx.ensure().generics_of(def_id); + tcx.ensure().type_of(def_id); + tcx.ensure().predicates_of(def_id); + + for f in struct_def.fields() { + let def_id = tcx.hir().local_def_id(f.hir_id); + tcx.ensure().generics_of(def_id); + tcx.ensure().type_of(def_id); + tcx.ensure().predicates_of(def_id); + } + + if let Some(ctor_hir_id) = struct_def.ctor_hir_id() { + convert_variant_ctor(tcx, ctor_hir_id); + } + } + + // Desugared from `impl Trait`, so visited by the function's return type. + hir::ItemKind::OpaqueTy(hir::OpaqueTy { + origin: hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..), + .. + }) => {} + + // Don't call `type_of` on opaque types, since that depends on type + // checking function bodies. `check_item_type` ensures that it's called + // instead. + hir::ItemKind::OpaqueTy(..) => { + tcx.ensure().generics_of(def_id); + tcx.ensure().predicates_of(def_id); + tcx.ensure().explicit_item_bounds(def_id); + } + hir::ItemKind::TyAlias(..) + | hir::ItemKind::Static(..) + | hir::ItemKind::Const(..) + | hir::ItemKind::Fn(..) => { + tcx.ensure().generics_of(def_id); + tcx.ensure().type_of(def_id); + tcx.ensure().predicates_of(def_id); + match it.kind { + hir::ItemKind::Fn(..) => tcx.ensure().fn_sig(def_id), + hir::ItemKind::OpaqueTy(..) => tcx.ensure().item_bounds(def_id), + hir::ItemKind::Const(ty, ..) | hir::ItemKind::Static(ty, ..) => { + if !is_suggestable_infer_ty(ty) { + let mut visitor = HirPlaceholderCollector::default(); + visitor.visit_item(it); + placeholder_type_error(tcx, None, visitor.0, false, None, it.kind.descr()); + } + } + _ => (), + } + } + } +} + +fn convert_trait_item(tcx: TyCtxt<'_>, trait_item_id: hir::TraitItemId) { + let trait_item = tcx.hir().trait_item(trait_item_id); + tcx.ensure().generics_of(trait_item_id.def_id); + + match trait_item.kind { + hir::TraitItemKind::Fn(..) => { + tcx.ensure().type_of(trait_item_id.def_id); + tcx.ensure().fn_sig(trait_item_id.def_id); + } + + hir::TraitItemKind::Const(.., Some(_)) => { + tcx.ensure().type_of(trait_item_id.def_id); + } + + hir::TraitItemKind::Const(..) => { + tcx.ensure().type_of(trait_item_id.def_id); + // Account for `const C: _;`. + let mut visitor = HirPlaceholderCollector::default(); + visitor.visit_trait_item(trait_item); + placeholder_type_error(tcx, None, visitor.0, false, None, "constant"); + } + + hir::TraitItemKind::Type(_, Some(_)) => { + tcx.ensure().item_bounds(trait_item_id.def_id); + tcx.ensure().type_of(trait_item_id.def_id); + // Account for `type T = _;`. + let mut visitor = HirPlaceholderCollector::default(); + visitor.visit_trait_item(trait_item); + placeholder_type_error(tcx, None, visitor.0, false, None, "associated type"); + } + + hir::TraitItemKind::Type(_, None) => { + tcx.ensure().item_bounds(trait_item_id.def_id); + // #74612: Visit and try to find bad placeholders + // even if there is no concrete type. 
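+ // For instance (a sketch), an associated type declared with only
+ // bounds can still contain a placeholder:
+ //
+ // ```
+ // type Assoc: Iterator<Item = _>;
+ // ```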
+ let mut visitor = HirPlaceholderCollector::default(); + visitor.visit_trait_item(trait_item); + + placeholder_type_error(tcx, None, visitor.0, false, None, "associated type"); + } + }; + + tcx.ensure().predicates_of(trait_item_id.def_id); +} + +fn convert_impl_item(tcx: TyCtxt<'_>, impl_item_id: hir::ImplItemId) { + let def_id = impl_item_id.def_id; + tcx.ensure().generics_of(def_id); + tcx.ensure().type_of(def_id); + tcx.ensure().predicates_of(def_id); + let impl_item = tcx.hir().impl_item(impl_item_id); + match impl_item.kind { + hir::ImplItemKind::Fn(..) => { + tcx.ensure().fn_sig(def_id); + } + hir::ImplItemKind::TyAlias(_) => { + // Account for `type T = _;` + let mut visitor = HirPlaceholderCollector::default(); + visitor.visit_impl_item(impl_item); + + placeholder_type_error(tcx, None, visitor.0, false, None, "associated type"); + } + hir::ImplItemKind::Const(..) => {} + } +} + +fn convert_variant_ctor(tcx: TyCtxt<'_>, ctor_id: hir::HirId) { + let def_id = tcx.hir().local_def_id(ctor_id); + tcx.ensure().generics_of(def_id); + tcx.ensure().type_of(def_id); + tcx.ensure().predicates_of(def_id); +} + +fn convert_enum_variant_types(tcx: TyCtxt<'_>, def_id: DefId, variants: &[hir::Variant<'_>]) { + let def = tcx.adt_def(def_id); + let repr_type = def.repr().discr_type(); + let initial = repr_type.initial_discriminant(tcx); + let mut prev_discr = None::>; + + // fill the discriminant values and field types + for variant in variants { + let wrapped_discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx)); + prev_discr = Some( + if let Some(ref e) = variant.disr_expr { + let expr_did = tcx.hir().local_def_id(e.hir_id); + def.eval_explicit_discr(tcx, expr_did.to_def_id()) + } else if let Some(discr) = repr_type.disr_incr(tcx, prev_discr) { + Some(discr) + } else { + struct_span_err!(tcx.sess, variant.span, E0370, "enum discriminant overflowed") + .span_label( + variant.span, + format!("overflowed on value after {}", prev_discr.unwrap()), + ) + .note(&format!( + "explicitly set `{} = {}` if that is desired outcome", + variant.ident, wrapped_discr + )) + .emit(); + None + } + .unwrap_or(wrapped_discr), + ); + + for f in variant.data.fields() { + let def_id = tcx.hir().local_def_id(f.hir_id); + tcx.ensure().generics_of(def_id); + tcx.ensure().type_of(def_id); + tcx.ensure().predicates_of(def_id); + } + + // Convert the ctor, if any. This also registers the variant as + // an item. 
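+ // For a hypothetical `enum E { A, B(u8), C { x: u8 } }`, the unit
+ // variant `A` and the tuple variant `B` have constructor ids here,
+ // while the brace variant `C` does not.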
+ if let Some(ctor_hir_id) = variant.data.ctor_hir_id() { + convert_variant_ctor(tcx, ctor_hir_id); + } + } +} + +fn convert_variant( + tcx: TyCtxt<'_>, + variant_did: Option, + ctor_did: Option, + ident: Ident, + discr: ty::VariantDiscr, + def: &hir::VariantData<'_>, + adt_kind: ty::AdtKind, + parent_did: LocalDefId, +) -> ty::VariantDef { + let mut seen_fields: FxHashMap = Default::default(); + let fields = def + .fields() + .iter() + .map(|f| { + let fid = tcx.hir().local_def_id(f.hir_id); + let dup_span = seen_fields.get(&f.ident.normalize_to_macros_2_0()).cloned(); + if let Some(prev_span) = dup_span { + tcx.sess.emit_err(errors::FieldAlreadyDeclared { + field_name: f.ident, + span: f.span, + prev_span, + }); + } else { + seen_fields.insert(f.ident.normalize_to_macros_2_0(), f.span); + } + + ty::FieldDef { did: fid.to_def_id(), name: f.ident.name, vis: tcx.visibility(fid) } + }) + .collect(); + let recovered = match def { + hir::VariantData::Struct(_, r) => *r, + _ => false, + }; + ty::VariantDef::new( + ident.name, + variant_did.map(LocalDefId::to_def_id), + ctor_did.map(LocalDefId::to_def_id), + discr, + fields, + CtorKind::from_hir(def), + adt_kind, + parent_did.to_def_id(), + recovered, + adt_kind == AdtKind::Struct && tcx.has_attr(parent_did.to_def_id(), sym::non_exhaustive) + || variant_did.map_or(false, |variant_did| { + tcx.has_attr(variant_did.to_def_id(), sym::non_exhaustive) + }), + ) +} + +fn adt_def<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::AdtDef<'tcx> { + use rustc_hir::*; + + let def_id = def_id.expect_local(); + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id); + let Node::Item(item) = tcx.hir().get(hir_id) else { + bug!(); + }; + + let repr = ReprOptions::new(tcx, def_id.to_def_id()); + let (kind, variants) = match item.kind { + ItemKind::Enum(ref def, _) => { + let mut distance_from_explicit = 0; + let variants = def + .variants + .iter() + .map(|v| { + let variant_did = Some(tcx.hir().local_def_id(v.id)); + let ctor_did = + v.data.ctor_hir_id().map(|hir_id| tcx.hir().local_def_id(hir_id)); + + let discr = if let Some(ref e) = v.disr_expr { + distance_from_explicit = 0; + ty::VariantDiscr::Explicit(tcx.hir().local_def_id(e.hir_id).to_def_id()) + } else { + ty::VariantDiscr::Relative(distance_from_explicit) + }; + distance_from_explicit += 1; + + convert_variant( + tcx, + variant_did, + ctor_did, + v.ident, + discr, + &v.data, + AdtKind::Enum, + def_id, + ) + }) + .collect(); + + (AdtKind::Enum, variants) + } + ItemKind::Struct(ref def, _) => { + let variant_did = None::; + let ctor_did = def.ctor_hir_id().map(|hir_id| tcx.hir().local_def_id(hir_id)); + + let variants = std::iter::once(convert_variant( + tcx, + variant_did, + ctor_did, + item.ident, + ty::VariantDiscr::Relative(0), + def, + AdtKind::Struct, + def_id, + )) + .collect(); + + (AdtKind::Struct, variants) + } + ItemKind::Union(ref def, _) => { + let variant_did = None; + let ctor_did = def.ctor_hir_id().map(|hir_id| tcx.hir().local_def_id(hir_id)); + + let variants = std::iter::once(convert_variant( + tcx, + variant_did, + ctor_did, + item.ident, + ty::VariantDiscr::Relative(0), + def, + AdtKind::Union, + def_id, + )) + .collect(); + + (AdtKind::Union, variants) + } + _ => bug!(), + }; + tcx.alloc_adt_def(def_id.to_def_id(), kind, variants, repr) +} + +/// Ensures that the super-predicates of the trait with a `DefId` +/// of `trait_def_id` are converted and stored. This also ensures that +/// the transitive super-predicates are converted. 
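+// A small example of what gets recorded (hypothetical traits):
+//
+// ```
+// trait Grandparent {}
+// trait Parent: Grandparent {}
+// trait Child: Parent {}
+// ```
+//
+// the super-predicates of `Child` are roughly `Self: Parent`, and
+// requiring them here in turn forces `Parent`'s own super-predicate
+// `Self: Grandparent` to be converted as well.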
+fn super_predicates_of(tcx: TyCtxt<'_>, trait_def_id: DefId) -> ty::GenericPredicates<'_> { + debug!("super_predicates(trait_def_id={:?})", trait_def_id); + tcx.super_predicates_that_define_assoc_type((trait_def_id, None)) +} + +/// Ensures that the super-predicates of the trait with a `DefId` +/// of `trait_def_id` are converted and stored. This also ensures that +/// the transitive super-predicates are converted. +fn super_predicates_that_define_assoc_type( + tcx: TyCtxt<'_>, + (trait_def_id, assoc_name): (DefId, Option), +) -> ty::GenericPredicates<'_> { + debug!( + "super_predicates_that_define_assoc_type(trait_def_id={:?}, assoc_name={:?})", + trait_def_id, assoc_name + ); + if trait_def_id.is_local() { + debug!("super_predicates_that_define_assoc_type: local trait_def_id={:?}", trait_def_id); + let trait_hir_id = tcx.hir().local_def_id_to_hir_id(trait_def_id.expect_local()); + + let Node::Item(item) = tcx.hir().get(trait_hir_id) else { + bug!("trait_node_id {} is not an item", trait_hir_id); + }; + + let (generics, bounds) = match item.kind { + hir::ItemKind::Trait(.., ref generics, ref supertraits, _) => (generics, supertraits), + hir::ItemKind::TraitAlias(ref generics, ref supertraits) => (generics, supertraits), + _ => span_bug!(item.span, "super_predicates invoked on non-trait"), + }; + + let icx = ItemCtxt::new(tcx, trait_def_id); + + // Convert the bounds that follow the colon, e.g., `Bar + Zed` in `trait Foo: Bar + Zed`. + let self_param_ty = tcx.types.self_param; + let superbounds1 = if let Some(assoc_name) = assoc_name { + >::compute_bounds_that_match_assoc_type( + &icx, + self_param_ty, + bounds, + assoc_name, + ) + } else { + >::compute_bounds(&icx, self_param_ty, bounds) + }; + + let superbounds1 = superbounds1.predicates(tcx, self_param_ty); + + // Convert any explicit superbounds in the where-clause, + // e.g., `trait Foo where Self: Bar`. + // In the case of trait aliases, however, we include all bounds in the where-clause, + // so e.g., `trait Foo = where u32: PartialEq` would include `u32: PartialEq` + // as one of its "superpredicates". + let is_trait_alias = tcx.is_trait_alias(trait_def_id); + let superbounds2 = icx.type_parameter_bounds_in_generics( + generics, + item.hir_id(), + self_param_ty, + OnlySelfBounds(!is_trait_alias), + assoc_name, + ); + + // Combine the two lists to form the complete set of superbounds: + let superbounds = &*tcx.arena.alloc_from_iter(superbounds1.into_iter().chain(superbounds2)); + debug!(?superbounds); + + // Now require that immediate supertraits are converted, + // which will, in turn, reach indirect supertraits. + if assoc_name.is_none() { + // Now require that immediate supertraits are converted, + // which will, in turn, reach indirect supertraits. + for &(pred, span) in superbounds { + debug!("superbound: {:?}", pred); + if let ty::PredicateKind::Trait(bound) = pred.kind().skip_binder() { + tcx.at(span).super_predicates_of(bound.def_id()); + } + } + } + + ty::GenericPredicates { parent: None, predicates: superbounds } + } else { + // if `assoc_name` is None, then the query should've been redirected to an + // external provider + assert!(assoc_name.is_some()); + tcx.super_predicates_of(trait_def_id) + } +} + +fn trait_def(tcx: TyCtxt<'_>, def_id: DefId) -> ty::TraitDef { + let item = tcx.hir().expect_item(def_id.expect_local()); + + let (is_auto, unsafety, items) = match item.kind { + hir::ItemKind::Trait(is_auto, unsafety, .., items) => { + (is_auto == hir::IsAuto::Yes, unsafety, items) + } + hir::ItemKind::TraitAlias(..) 
=> (false, hir::Unsafety::Normal, &[][..]), + _ => span_bug!(item.span, "trait_def_of_item invoked on non-trait"), + }; + + let paren_sugar = tcx.has_attr(def_id, sym::rustc_paren_sugar); + if paren_sugar && !tcx.features().unboxed_closures { + tcx.sess + .struct_span_err( + item.span, + "the `#[rustc_paren_sugar]` attribute is a temporary means of controlling \ + which traits can use parenthetical notation", + ) + .help("add `#![feature(unboxed_closures)]` to the crate attributes to use it") + .emit(); + } + + let is_marker = tcx.has_attr(def_id, sym::marker); + let skip_array_during_method_dispatch = + tcx.has_attr(def_id, sym::rustc_skip_array_during_method_dispatch); + let spec_kind = if tcx.has_attr(def_id, sym::rustc_unsafe_specialization_marker) { + ty::trait_def::TraitSpecializationKind::Marker + } else if tcx.has_attr(def_id, sym::rustc_specialization_trait) { + ty::trait_def::TraitSpecializationKind::AlwaysApplicable + } else { + ty::trait_def::TraitSpecializationKind::None + }; + let must_implement_one_of = tcx + .get_attr(def_id, sym::rustc_must_implement_one_of) + // Check that there are at least 2 arguments of `#[rustc_must_implement_one_of]` + // and that they are all identifiers + .and_then(|attr| match attr.meta_item_list() { + Some(items) if items.len() < 2 => { + tcx.sess + .struct_span_err( + attr.span, + "the `#[rustc_must_implement_one_of]` attribute must be \ + used with at least 2 args", + ) + .emit(); + + None + } + Some(items) => items + .into_iter() + .map(|item| item.ident().ok_or(item.span())) + .collect::, _>>() + .map_err(|span| { + tcx.sess + .struct_span_err(span, "must be a name of an associated function") + .emit(); + }) + .ok() + .zip(Some(attr.span)), + // Error is reported by `rustc_attr!` + None => None, + }) + // Check that all arguments of `#[rustc_must_implement_one_of]` reference + // functions in the trait with default implementations + .and_then(|(list, attr_span)| { + let errors = list.iter().filter_map(|ident| { + let item = items.iter().find(|item| item.ident == *ident); + + match item { + Some(item) if matches!(item.kind, hir::AssocItemKind::Fn { .. 
}) => { + if !tcx.impl_defaultness(item.id.def_id).has_value() { + tcx.sess + .struct_span_err( + item.span, + "This function doesn't have a default implementation", + ) + .span_note(attr_span, "required by this annotation") + .emit(); + + return Some(()); + } + + return None; + } + Some(item) => { + tcx.sess + .struct_span_err(item.span, "Not a function") + .span_note(attr_span, "required by this annotation") + .note( + "All `#[rustc_must_implement_one_of]` arguments \ + must be associated function names", + ) + .emit(); + } + None => { + tcx.sess + .struct_span_err(ident.span, "Function not found in this trait") + .emit(); + } + } + + Some(()) + }); + + (errors.count() == 0).then_some(list) + }) + // Check for duplicates + .and_then(|list| { + let mut set: FxHashMap = FxHashMap::default(); + let mut no_dups = true; + + for ident in &*list { + if let Some(dup) = set.insert(ident.name, ident.span) { + tcx.sess + .struct_span_err(vec![dup, ident.span], "Functions names are duplicated") + .note( + "All `#[rustc_must_implement_one_of]` arguments \ + must be unique", + ) + .emit(); + + no_dups = false; + } + } + + no_dups.then_some(list) + }); + + ty::TraitDef::new( + def_id, + unsafety, + paren_sugar, + is_auto, + is_marker, + skip_array_during_method_dispatch, + spec_kind, + must_implement_one_of, + ) +} + +fn has_late_bound_regions<'tcx>(tcx: TyCtxt<'tcx>, node: Node<'tcx>) -> Option { + struct LateBoundRegionsDetector<'tcx> { + tcx: TyCtxt<'tcx>, + outer_index: ty::DebruijnIndex, + has_late_bound_regions: Option, + } + + impl<'tcx> Visitor<'tcx> for LateBoundRegionsDetector<'tcx> { + fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) { + if self.has_late_bound_regions.is_some() { + return; + } + match ty.kind { + hir::TyKind::BareFn(..) => { + self.outer_index.shift_in(1); + intravisit::walk_ty(self, ty); + self.outer_index.shift_out(1); + } + _ => intravisit::walk_ty(self, ty), + } + } + + fn visit_poly_trait_ref( + &mut self, + tr: &'tcx hir::PolyTraitRef<'tcx>, + m: hir::TraitBoundModifier, + ) { + if self.has_late_bound_regions.is_some() { + return; + } + self.outer_index.shift_in(1); + intravisit::walk_poly_trait_ref(self, tr, m); + self.outer_index.shift_out(1); + } + + fn visit_lifetime(&mut self, lt: &'tcx hir::Lifetime) { + if self.has_late_bound_regions.is_some() { + return; + } + + match self.tcx.named_region(lt.hir_id) { + Some(rl::Region::Static | rl::Region::EarlyBound(..)) => {} + Some(rl::Region::LateBound(debruijn, _, _)) if debruijn < self.outer_index => {} + Some(rl::Region::LateBound(..) | rl::Region::Free(..)) | None => { + self.has_late_bound_regions = Some(lt.span); + } + } + } + } + + fn has_late_bound_regions<'tcx>( + tcx: TyCtxt<'tcx>, + generics: &'tcx hir::Generics<'tcx>, + decl: &'tcx hir::FnDecl<'tcx>, + ) -> Option { + let mut visitor = LateBoundRegionsDetector { + tcx, + outer_index: ty::INNERMOST, + has_late_bound_regions: None, + }; + for param in generics.params { + if let GenericParamKind::Lifetime { .. 
} = param.kind { + if tcx.is_late_bound(param.hir_id) { + return Some(param.span); + } + } + } + visitor.visit_fn_decl(decl); + visitor.has_late_bound_regions + } + + match node { + Node::TraitItem(item) => match item.kind { + hir::TraitItemKind::Fn(ref sig, _) => { + has_late_bound_regions(tcx, &item.generics, sig.decl) + } + _ => None, + }, + Node::ImplItem(item) => match item.kind { + hir::ImplItemKind::Fn(ref sig, _) => { + has_late_bound_regions(tcx, &item.generics, sig.decl) + } + _ => None, + }, + Node::ForeignItem(item) => match item.kind { + hir::ForeignItemKind::Fn(fn_decl, _, ref generics) => { + has_late_bound_regions(tcx, generics, fn_decl) + } + _ => None, + }, + Node::Item(item) => match item.kind { + hir::ItemKind::Fn(ref sig, .., ref generics, _) => { + has_late_bound_regions(tcx, generics, sig.decl) + } + _ => None, + }, + _ => None, + } +} + +struct AnonConstInParamTyDetector { + in_param_ty: bool, + found_anon_const_in_param_ty: bool, + ct: HirId, +} + +impl<'v> Visitor<'v> for AnonConstInParamTyDetector { + fn visit_generic_param(&mut self, p: &'v hir::GenericParam<'v>) { + if let GenericParamKind::Const { ty, default: _ } = p.kind { + let prev = self.in_param_ty; + self.in_param_ty = true; + self.visit_ty(ty); + self.in_param_ty = prev; + } + } + + fn visit_anon_const(&mut self, c: &'v hir::AnonConst) { + if self.in_param_ty && self.ct == c.hir_id { + self.found_anon_const_in_param_ty = true; + } else { + intravisit::walk_anon_const(self, c) + } + } +} + +fn generics_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::Generics { + use rustc_hir::*; + + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); + + let node = tcx.hir().get(hir_id); + let parent_def_id = match node { + Node::ImplItem(_) + | Node::TraitItem(_) + | Node::Variant(_) + | Node::Ctor(..) + | Node::Field(_) => { + let parent_id = tcx.hir().get_parent_item(hir_id); + Some(parent_id.to_def_id()) + } + // FIXME(#43408) always enable this once `lazy_normalization` is + // stable enough and does not need a feature gate anymore. + Node::AnonConst(_) => { + let parent_def_id = tcx.hir().get_parent_item(hir_id); + + let mut in_param_ty = false; + for (_parent, node) in tcx.hir().parent_iter(hir_id) { + if let Some(generics) = node.generics() { + let mut visitor = AnonConstInParamTyDetector { + in_param_ty: false, + found_anon_const_in_param_ty: false, + ct: hir_id, + }; + + visitor.visit_generics(generics); + in_param_ty = visitor.found_anon_const_in_param_ty; + break; + } + } + + if in_param_ty { + // We do not allow generic parameters in anon consts if we are inside + // of a const parameter type, e.g. `struct Foo` is not allowed. + None + } else if tcx.lazy_normalization() { + if let Some(param_id) = tcx.hir().opt_const_param_default_param_hir_id(hir_id) { + // If the def_id we are calling generics_of on is an anon ct default i.e: + // + // struct Foo; + // ^^^ ^ ^^^^^^ def id of this anon const + // ^ ^ param_id + // ^ parent_def_id + // + // then we only want to return generics for params to the left of `N`. If we don't do that we + // end up with that const looking like: `ty::ConstKind::Unevaluated(def_id, substs: [N#0])`. + // + // This causes ICEs (#86580) when building the substs for Foo in `fn foo() -> Foo { .. }` as + // we substitute the defaults with the partially built substs when we build the substs. Subst'ing + // the `N#0` on the unevaluated const indexes into the empty substs we're in the process of building. 
+ // + // We fix this by having this function return the parent's generics ourselves and truncating the + // generics to only include non-forward declared params (with the exception of the `Self` ty) + // + // For the above code example that means we want `substs: []` + // For the following struct def we want `substs: [N#0]` when generics_of is called on + // the def id of the `{ N + 1 }` anon const + // struct Foo; + // + // This has some implications for how we get the predicates available to the anon const + // see `explicit_predicates_of` for more information on this + let generics = tcx.generics_of(parent_def_id.to_def_id()); + let param_def = tcx.hir().local_def_id(param_id).to_def_id(); + let param_def_idx = generics.param_def_id_to_index[¶m_def]; + // In the above example this would be .params[..N#0] + let params = generics.params[..param_def_idx as usize].to_owned(); + let param_def_id_to_index = + params.iter().map(|param| (param.def_id, param.index)).collect(); + + return ty::Generics { + // we set the parent of these generics to be our parent's parent so that we + // dont end up with substs: [N, M, N] for the const default on a struct like this: + // struct Foo; + parent: generics.parent, + parent_count: generics.parent_count, + params, + param_def_id_to_index, + has_self: generics.has_self, + has_late_bound_regions: generics.has_late_bound_regions, + }; + } + + // HACK(eddyb) this provides the correct generics when + // `feature(generic_const_expressions)` is enabled, so that const expressions + // used with const generics, e.g. `Foo<{N+1}>`, can work at all. + // + // Note that we do not supply the parent generics when using + // `min_const_generics`. + Some(parent_def_id.to_def_id()) + } else { + let parent_node = tcx.hir().get(tcx.hir().get_parent_node(hir_id)); + match parent_node { + // HACK(eddyb) this provides the correct generics for repeat + // expressions' count (i.e. `N` in `[x; N]`), and explicit + // `enum` discriminants (i.e. `D` in `enum Foo { Bar = D }`), + // as they shouldn't be able to cause query cycle errors. + Node::Expr(&Expr { kind: ExprKind::Repeat(_, ref constant), .. }) + if constant.hir_id() == hir_id => + { + Some(parent_def_id.to_def_id()) + } + Node::Variant(Variant { disr_expr: Some(ref constant), .. }) + if constant.hir_id == hir_id => + { + Some(parent_def_id.to_def_id()) + } + Node::Expr(&Expr { kind: ExprKind::ConstBlock(_), .. }) => { + Some(tcx.typeck_root_def_id(def_id)) + } + // Exclude `GlobalAsm` here which cannot have generics. + Node::Expr(&Expr { kind: ExprKind::InlineAsm(asm), .. }) + if asm.operands.iter().any(|(op, _op_sp)| match op { + hir::InlineAsmOperand::Const { anon_const } + | hir::InlineAsmOperand::SymFn { anon_const } => { + anon_const.hir_id == hir_id + } + _ => false, + }) => + { + Some(parent_def_id.to_def_id()) + } + _ => None, + } + } + } + Node::Expr(&hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => { + Some(tcx.typeck_root_def_id(def_id)) + } + Node::Item(item) => match item.kind { + ItemKind::OpaqueTy(hir::OpaqueTy { + origin: + hir::OpaqueTyOrigin::FnReturn(fn_def_id) | hir::OpaqueTyOrigin::AsyncFn(fn_def_id), + .. + }) => Some(fn_def_id.to_def_id()), + ItemKind::OpaqueTy(hir::OpaqueTy { origin: hir::OpaqueTyOrigin::TyAlias, .. }) => { + let parent_id = tcx.hir().get_parent_item(hir_id); + assert_ne!(parent_id, CRATE_DEF_ID); + debug!("generics_of: parent of opaque ty {:?} is {:?}", def_id, parent_id); + // Opaque types are always nested within another item, and + // inherit the generics of the item. 
+ Some(parent_id.to_def_id()) + } + _ => None, + }, + _ => None, + }; + + let no_generics = hir::Generics::empty(); + let ast_generics = node.generics().unwrap_or(&no_generics); + let (opt_self, allow_defaults) = match node { + Node::Item(item) => { + match item.kind { + ItemKind::Trait(..) | ItemKind::TraitAlias(..) => { + // Add in the self type parameter. + // + // Something of a hack: use the node id for the trait, also as + // the node id for the Self type parameter. + let opt_self = Some(ty::GenericParamDef { + index: 0, + name: kw::SelfUpper, + def_id, + pure_wrt_drop: false, + kind: ty::GenericParamDefKind::Type { + has_default: false, + object_lifetime_default: rl::Set1::Empty, + synthetic: false, + }, + }); + + (opt_self, true) + } + ItemKind::TyAlias(..) + | ItemKind::Enum(..) + | ItemKind::Struct(..) + | ItemKind::OpaqueTy(..) + | ItemKind::Union(..) => (None, true), + _ => (None, false), + } + } + _ => (None, false), + }; + + let has_self = opt_self.is_some(); + let mut parent_has_self = false; + let mut own_start = has_self as u32; + let parent_count = parent_def_id.map_or(0, |def_id| { + let generics = tcx.generics_of(def_id); + assert!(!has_self); + parent_has_self = generics.has_self; + own_start = generics.count() as u32; + generics.parent_count + generics.params.len() + }); + + let mut params: Vec<_> = Vec::with_capacity(ast_generics.params.len() + has_self as usize); + + if let Some(opt_self) = opt_self { + params.push(opt_self); + } + + let early_lifetimes = early_bound_lifetimes_from_generics(tcx, ast_generics); + params.extend(early_lifetimes.enumerate().map(|(i, param)| ty::GenericParamDef { + name: param.name.ident().name, + index: own_start + i as u32, + def_id: tcx.hir().local_def_id(param.hir_id).to_def_id(), + pure_wrt_drop: param.pure_wrt_drop, + kind: ty::GenericParamDefKind::Lifetime, + })); + + let object_lifetime_defaults = tcx.object_lifetime_defaults(hir_id.owner); + + // Now create the real type and const parameters. + let type_start = own_start - has_self as u32 + params.len() as u32; + let mut i = 0; + + params.extend(ast_generics.params.iter().filter_map(|param| match param.kind { + GenericParamKind::Lifetime { .. } => None, + GenericParamKind::Type { ref default, synthetic, .. } => { + if !allow_defaults && default.is_some() { + if !tcx.features().default_type_parameter_fallback { + tcx.struct_span_lint_hir( + lint::builtin::INVALID_TYPE_PARAM_DEFAULT, + param.hir_id, + param.span, + |lint| { + lint.build( + "defaults for type parameters are only allowed in \ + `struct`, `enum`, `type`, or `trait` definitions", + ) + .emit(); + }, + ); + } + } + + let kind = ty::GenericParamDefKind::Type { + has_default: default.is_some(), + object_lifetime_default: object_lifetime_defaults + .as_ref() + .map_or(rl::Set1::Empty, |o| o[i]), + synthetic, + }; + + let param_def = ty::GenericParamDef { + index: type_start + i as u32, + name: param.name.ident().name, + def_id: tcx.hir().local_def_id(param.hir_id).to_def_id(), + pure_wrt_drop: param.pure_wrt_drop, + kind, + }; + i += 1; + Some(param_def) + } + GenericParamKind::Const { default, .. 
} => { + if !allow_defaults && default.is_some() { + tcx.sess.span_err( + param.span, + "defaults for const parameters are only allowed in \ + `struct`, `enum`, `type`, or `trait` definitions", + ); + } + + let param_def = ty::GenericParamDef { + index: type_start + i as u32, + name: param.name.ident().name, + def_id: tcx.hir().local_def_id(param.hir_id).to_def_id(), + pure_wrt_drop: param.pure_wrt_drop, + kind: ty::GenericParamDefKind::Const { has_default: default.is_some() }, + }; + i += 1; + Some(param_def) + } + })); + + // provide junk type parameter defs - the only place that + // cares about anything but the length is instantiation, + // and we don't do that for closures. + if let Node::Expr(&hir::Expr { + kind: hir::ExprKind::Closure(hir::Closure { movability: gen, .. }), + .. + }) = node + { + let dummy_args = if gen.is_some() { + &["", "", "", "", ""][..] + } else { + &["", "", ""][..] + }; + + params.extend(dummy_args.iter().enumerate().map(|(i, &arg)| ty::GenericParamDef { + index: type_start + i as u32, + name: Symbol::intern(arg), + def_id, + pure_wrt_drop: false, + kind: ty::GenericParamDefKind::Type { + has_default: false, + object_lifetime_default: rl::Set1::Empty, + synthetic: false, + }, + })); + } + + // provide junk type parameter defs for const blocks. + if let Node::AnonConst(_) = node { + let parent_node = tcx.hir().get(tcx.hir().get_parent_node(hir_id)); + if let Node::Expr(&Expr { kind: ExprKind::ConstBlock(_), .. }) = parent_node { + params.push(ty::GenericParamDef { + index: type_start, + name: Symbol::intern(""), + def_id, + pure_wrt_drop: false, + kind: ty::GenericParamDefKind::Type { + has_default: false, + object_lifetime_default: rl::Set1::Empty, + synthetic: false, + }, + }); + } + } + + let param_def_id_to_index = params.iter().map(|param| (param.def_id, param.index)).collect(); + + ty::Generics { + parent: parent_def_id, + parent_count, + params, + param_def_id_to_index, + has_self: has_self || parent_has_self, + has_late_bound_regions: has_late_bound_regions(tcx, node), + } +} + +fn are_suggestable_generic_args(generic_args: &[hir::GenericArg<'_>]) -> bool { + generic_args.iter().any(|arg| match arg { + hir::GenericArg::Type(ty) => is_suggestable_infer_ty(ty), + hir::GenericArg::Infer(_) => true, + _ => false, + }) +} + +/// Whether `ty` is a type with `_` placeholders that can be inferred. Used in diagnostics only to +/// use inference to provide suggestions for the appropriate type if possible. +fn is_suggestable_infer_ty(ty: &hir::Ty<'_>) -> bool { + debug!(?ty); + use hir::TyKind::*; + match &ty.kind { + Infer => true, + Slice(ty) => is_suggestable_infer_ty(ty), + Array(ty, length) => { + is_suggestable_infer_ty(ty) || matches!(length, hir::ArrayLen::Infer(_, _)) + } + Tup(tys) => tys.iter().any(is_suggestable_infer_ty), + Ptr(mut_ty) | Rptr(_, mut_ty) => is_suggestable_infer_ty(mut_ty.ty), + OpaqueDef(_, generic_args) => are_suggestable_generic_args(generic_args), + Path(hir::QPath::TypeRelative(ty, segment)) => { + is_suggestable_infer_ty(ty) || are_suggestable_generic_args(segment.args().args) + } + Path(hir::QPath::Resolved(ty_opt, hir::Path { segments, .. 
})) => { + ty_opt.map_or(false, is_suggestable_infer_ty) + || segments.iter().any(|segment| are_suggestable_generic_args(segment.args().args)) + } + _ => false, + } +} + +pub fn get_infer_ret_ty<'hir>(output: &'hir hir::FnRetTy<'hir>) -> Option<&'hir hir::Ty<'hir>> { + if let hir::FnRetTy::Return(ty) = output { + if is_suggestable_infer_ty(ty) { + return Some(&*ty); + } + } + None +} + +fn fn_sig(tcx: TyCtxt<'_>, def_id: DefId) -> ty::PolyFnSig<'_> { + use rustc_hir::Node::*; + use rustc_hir::*; + + let def_id = def_id.expect_local(); + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id); + + let icx = ItemCtxt::new(tcx, def_id.to_def_id()); + + match tcx.hir().get(hir_id) { + TraitItem(hir::TraitItem { + kind: TraitItemKind::Fn(sig, TraitFn::Provided(_)), + generics, + .. + }) + | Item(hir::Item { kind: ItemKind::Fn(sig, generics, _), .. }) => { + infer_return_ty_for_fn_sig(tcx, sig, generics, def_id, &icx) + } + + ImplItem(hir::ImplItem { kind: ImplItemKind::Fn(sig, _), generics, .. }) => { + // Do not try to inference the return type for a impl method coming from a trait + if let Item(hir::Item { kind: ItemKind::Impl(i), .. }) = + tcx.hir().get(tcx.hir().get_parent_node(hir_id)) + && i.of_trait.is_some() + { + >::ty_of_fn( + &icx, + hir_id, + sig.header.unsafety, + sig.header.abi, + sig.decl, + Some(generics), + None, + ) + } else { + infer_return_ty_for_fn_sig(tcx, sig, generics, def_id, &icx) + } + } + + TraitItem(hir::TraitItem { + kind: TraitItemKind::Fn(FnSig { header, decl, span: _ }, _), + generics, + .. + }) => >::ty_of_fn( + &icx, + hir_id, + header.unsafety, + header.abi, + decl, + Some(generics), + None, + ), + + ForeignItem(&hir::ForeignItem { kind: ForeignItemKind::Fn(fn_decl, _, _), .. }) => { + let abi = tcx.hir().get_foreign_abi(hir_id); + compute_sig_of_foreign_fn_decl(tcx, def_id.to_def_id(), fn_decl, abi) + } + + Ctor(data) | Variant(hir::Variant { data, .. }) if data.ctor_hir_id().is_some() => { + let ty = tcx.type_of(tcx.hir().get_parent_item(hir_id)); + let inputs = + data.fields().iter().map(|f| tcx.type_of(tcx.hir().local_def_id(f.hir_id))); + ty::Binder::dummy(tcx.mk_fn_sig( + inputs, + ty, + false, + hir::Unsafety::Normal, + abi::Abi::Rust, + )) + } + + Expr(&hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => { + // Closure signatures are not like other function + // signatures and cannot be accessed through `fn_sig`. For + // example, a closure signature excludes the `self` + // argument. In any case they are embedded within the + // closure type as part of the `ClosureSubsts`. + // + // To get the signature of a closure, you should use the + // `sig` method on the `ClosureSubsts`: + // + // substs.as_closure().sig(def_id, tcx) + bug!( + "to get the signature of a closure, use `substs.as_closure().sig()` not `fn_sig()`", + ); + } + + x => { + bug!("unexpected sort of node in fn_sig(): {:?}", x); + } + } +} + +fn infer_return_ty_for_fn_sig<'tcx>( + tcx: TyCtxt<'tcx>, + sig: &hir::FnSig<'_>, + generics: &hir::Generics<'_>, + def_id: LocalDefId, + icx: &ItemCtxt<'tcx>, +) -> ty::PolyFnSig<'tcx> { + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id); + + match get_infer_ret_ty(&sig.decl.output) { + Some(ty) => { + let fn_sig = tcx.typeck(def_id).liberated_fn_sigs()[hir_id]; + // Typeck doesn't expect erased regions to be returned from `type_of`. 
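A small reproduction of the case this branch diagnoses: a signature whose return type is written as the placeholder `_`. The snippet is intentionally rejected; the suggestion text is sketched from the code above rather than quoted.

    // Rejected with E0121 (`_` is not allowed in item signatures). The
    // machinery above recovers the real signature from typeck and, when the
    // type is suggestable, offers it as a machine-applicable fix — here
    // roughly "replace with the correct return type: `u32`".
    fn add(a: u32, b: u32) -> _ {
        a + b
    }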
+ let fn_sig = tcx.fold_regions(fn_sig, |r, _| match *r { + ty::ReErased => tcx.lifetimes.re_static, + _ => r, + }); + let fn_sig = ty::Binder::dummy(fn_sig); + + let mut visitor = HirPlaceholderCollector::default(); + visitor.visit_ty(ty); + let mut diag = bad_placeholder(tcx, visitor.0, "return type"); + let ret_ty = fn_sig.skip_binder().output(); + if ret_ty.is_suggestable(tcx, false) { + diag.span_suggestion( + ty.span, + "replace with the correct return type", + ret_ty, + Applicability::MachineApplicable, + ); + } else if matches!(ret_ty.kind(), ty::FnDef(..)) { + let fn_sig = ret_ty.fn_sig(tcx); + if fn_sig + .skip_binder() + .inputs_and_output + .iter() + .all(|t| t.is_suggestable(tcx, false)) + { + diag.span_suggestion( + ty.span, + "replace with the correct return type", + fn_sig, + Applicability::MachineApplicable, + ); + } + } else if ret_ty.is_closure() { + // We're dealing with a closure, so we should suggest using `impl Fn` or trait bounds + // to prevent the user from getting a papercut while trying to use the unique closure + // syntax (e.g. `[closure@src/lib.rs:2:5: 2:9]`). + diag.help("consider using an `Fn`, `FnMut`, or `FnOnce` trait bound"); + diag.note("for more information on `Fn` traits and closure types, see https://doc.rust-lang.org/book/ch13-01-closures.html"); + } + diag.emit(); + + fn_sig + } + None => >::ty_of_fn( + icx, + hir_id, + sig.header.unsafety, + sig.header.abi, + sig.decl, + Some(generics), + None, + ), + } +} + +fn impl_trait_ref(tcx: TyCtxt<'_>, def_id: DefId) -> Option> { + let icx = ItemCtxt::new(tcx, def_id); + match tcx.hir().expect_item(def_id.expect_local()).kind { + hir::ItemKind::Impl(ref impl_) => impl_.of_trait.as_ref().map(|ast_trait_ref| { + let selfty = tcx.type_of(def_id); + >::instantiate_mono_trait_ref(&icx, ast_trait_ref, selfty) + }), + _ => bug!(), + } +} + +fn impl_polarity(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ImplPolarity { + let is_rustc_reservation = tcx.has_attr(def_id, sym::rustc_reservation_impl); + let item = tcx.hir().expect_item(def_id.expect_local()); + match &item.kind { + hir::ItemKind::Impl(hir::Impl { + polarity: hir::ImplPolarity::Negative(span), + of_trait, + .. + }) => { + if is_rustc_reservation { + let span = span.to(of_trait.as_ref().map_or(*span, |t| t.path.span)); + tcx.sess.span_err(span, "reservation impls can't be negative"); + } + ty::ImplPolarity::Negative + } + hir::ItemKind::Impl(hir::Impl { + polarity: hir::ImplPolarity::Positive, + of_trait: None, + .. + }) => { + if is_rustc_reservation { + tcx.sess.span_err(item.span, "reservation impls can't be inherent"); + } + ty::ImplPolarity::Positive + } + hir::ItemKind::Impl(hir::Impl { + polarity: hir::ImplPolarity::Positive, + of_trait: Some(_), + .. + }) => { + if is_rustc_reservation { + ty::ImplPolarity::Reservation + } else { + ty::ImplPolarity::Positive + } + } + item => bug!("impl_polarity: {:?} not an impl", item), + } +} + +/// Returns the early-bound lifetimes declared in this generics +/// listing. For anything other than fns/methods, this is just all +/// the lifetimes that are declared. For fns or methods, we have to +/// screen out those that do not appear in any where-clauses etc using +/// `resolve_lifetime::early_bound_lifetimes`. +fn early_bound_lifetimes_from_generics<'a, 'tcx: 'a>( + tcx: TyCtxt<'tcx>, + generics: &'a hir::Generics<'a>, +) -> impl Iterator> + Captures<'tcx> { + generics.params.iter().filter(move |param| match param.kind { + GenericParamKind::Lifetime { .. 
} => !tcx.is_late_bound(param.hir_id), + _ => false, + }) +} + +/// Returns a list of type predicates for the definition with ID `def_id`, including inferred +/// lifetime constraints. This includes all predicates returned by `explicit_predicates_of`, plus +/// inferred constraints concerning which regions outlive other regions. +fn predicates_defined_on(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> { + debug!("predicates_defined_on({:?})", def_id); + let mut result = tcx.explicit_predicates_of(def_id); + debug!("predicates_defined_on: explicit_predicates_of({:?}) = {:?}", def_id, result,); + let inferred_outlives = tcx.inferred_outlives_of(def_id); + if !inferred_outlives.is_empty() { + debug!( + "predicates_defined_on: inferred_outlives_of({:?}) = {:?}", + def_id, inferred_outlives, + ); + if result.predicates.is_empty() { + result.predicates = inferred_outlives; + } else { + result.predicates = tcx + .arena + .alloc_from_iter(result.predicates.iter().chain(inferred_outlives).copied()); + } + } + + debug!("predicates_defined_on({:?}) = {:?}", def_id, result); + result +} + +/// Returns a list of all type predicates (explicit and implicit) for the definition with +/// ID `def_id`. This includes all predicates returned by `predicates_defined_on`, plus +/// `Self: Trait` predicates for traits. +fn predicates_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> { + let mut result = tcx.predicates_defined_on(def_id); + + if tcx.is_trait(def_id) { + // For traits, add `Self: Trait` predicate. This is + // not part of the predicates that a user writes, but it + // is something that one must prove in order to invoke a + // method or project an associated type. + // + // In the chalk setup, this predicate is not part of the + // "predicates" for a trait item. But it is useful in + // rustc because if you directly (e.g.) invoke a trait + // method like `Trait::method(...)`, you must naturally + // prove that the trait applies to the types that were + // used, and adding the predicate into this list ensures + // that this is done. + // + // We use a DUMMY_SP here as a way to signal trait bounds that come + // from the trait itself that *shouldn't* be shown as the source of + // an obligation and instead be skipped. Otherwise we'd use + // `tcx.def_span(def_id);` + + let constness = if tcx.has_attr(def_id, sym::const_trait) { + ty::BoundConstness::ConstIfConst + } else { + ty::BoundConstness::NotConst + }; + + let span = rustc_span::DUMMY_SP; + result.predicates = + tcx.arena.alloc_from_iter(result.predicates.iter().copied().chain(std::iter::once(( + ty::TraitRef::identity(tcx, def_id).with_constness(constness).to_predicate(tcx), + span, + )))); + } + debug!("predicates_of(def_id={:?}) = {:?}", def_id, result); + result +} + +/// Returns a list of user-specified type predicates for the definition with ID `def_id`. +/// N.B., this does not include any implied/inferred constraints. +fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> { + use rustc_hir::*; + + debug!("explicit_predicates_of(def_id={:?})", def_id); + + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); + let node = tcx.hir().get(hir_id); + + let mut is_trait = None; + let mut is_default_impl_trait = None; + + let icx = ItemCtxt::new(tcx, def_id); + + const NO_GENERICS: &hir::Generics<'_> = hir::Generics::empty(); + + // We use an `IndexSet` to preserves order of insertion. 
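A brief illustration of why the implicit `Self: Trait` predicate added in `predicates_of` above matters; the trait and function names here are made up for the sketch.

    trait Speak {
        fn speak(&self) -> String;
    }

    impl Speak for String {
        fn speak(&self) -> String {
            self.clone()
        }
    }

    // Calling through the trait path obliges us to prove `T: Speak`; on the
    // trait's side that obligation is matched by the implicit `Self: Speak`
    // predicate appended above, even though no user ever wrote it.
    fn call<T: Speak>(t: &T) -> String {
        Speak::speak(t)
    }

    fn main() {
        assert_eq!(call(&String::from("hi")), "hi");
    }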
+ // Preserving the order of insertion is important here so as not to break UI tests. + let mut predicates: FxIndexSet<(ty::Predicate<'_>, Span)> = FxIndexSet::default(); + + let ast_generics = match node { + Node::TraitItem(item) => item.generics, + + Node::ImplItem(item) => item.generics, + + Node::Item(item) => { + match item.kind { + ItemKind::Impl(ref impl_) => { + if impl_.defaultness.is_default() { + is_default_impl_trait = tcx.impl_trait_ref(def_id).map(ty::Binder::dummy); + } + &impl_.generics + } + ItemKind::Fn(.., ref generics, _) + | ItemKind::TyAlias(_, ref generics) + | ItemKind::Enum(_, ref generics) + | ItemKind::Struct(_, ref generics) + | ItemKind::Union(_, ref generics) => *generics, + + ItemKind::Trait(_, _, ref generics, ..) => { + is_trait = Some(ty::TraitRef::identity(tcx, def_id)); + *generics + } + ItemKind::TraitAlias(ref generics, _) => { + is_trait = Some(ty::TraitRef::identity(tcx, def_id)); + *generics + } + ItemKind::OpaqueTy(OpaqueTy { + origin: hir::OpaqueTyOrigin::AsyncFn(..) | hir::OpaqueTyOrigin::FnReturn(..), + .. + }) => { + // return-position impl trait + // + // We don't inherit predicates from the parent here: + // If we have, say `fn f<'a, T: 'a>() -> impl Sized {}` + // then the return type is `f::<'static, T>::{{opaque}}`. + // + // If we inherited the predicates of `f` then we would + // require that `T: 'static` to show that the return + // type is well-formed. + // + // The only way to have something with this opaque type + // is from the return type of the containing function, + // which will ensure that the function's predicates + // hold. + return ty::GenericPredicates { parent: None, predicates: &[] }; + } + ItemKind::OpaqueTy(OpaqueTy { + ref generics, + origin: hir::OpaqueTyOrigin::TyAlias, + .. + }) => { + // type-alias impl trait + generics + } + + _ => NO_GENERICS, + } + } + + Node::ForeignItem(item) => match item.kind { + ForeignItemKind::Static(..) => NO_GENERICS, + ForeignItemKind::Fn(_, _, ref generics) => *generics, + ForeignItemKind::Type => NO_GENERICS, + }, + + _ => NO_GENERICS, + }; + + let generics = tcx.generics_of(def_id); + let parent_count = generics.parent_count as u32; + let has_own_self = generics.has_self && parent_count == 0; + + // Below we'll consider the bounds on the type parameters (including `Self`) + // and the explicit where-clauses, but to get the full set of predicates + // on a trait we need to add in the supertrait bounds and bounds found on + // associated types. + if let Some(_trait_ref) = is_trait { + predicates.extend(tcx.super_predicates_of(def_id).predicates.iter().cloned()); + } + + // In default impls, we can assume that the self type implements + // the trait. So in: + // + // default impl Foo for Bar { .. } + // + // we add a default where clause `Foo: Bar`. We do a similar thing for traits + // (see below). Recall that a default impl is not itself an impl, but rather a + // set of defaults that can be incorporated into another impl. + if let Some(trait_ref) = is_default_impl_trait { + predicates.insert((trait_ref.without_const().to_predicate(tcx), tcx.def_span(def_id))); + } + + // Collect the region predicates that were declared inline as + // well. In the case of parameters declared on a fn or method, we + // have to be careful to only iterate over early-bound regions. 
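As a reminder of the distinction the comment above relies on: a lifetime that is mentioned in a bound or where-clause is early-bound and counted among the item's generics, while one that only appears in the signature types stays late-bound. A stand-alone sketch:

    // `'early` is constrained by a where-clause, so it is early-bound and
    // contributes to the index bookkeeping below; `'late` only appears in an
    // argument type, so it remains late-bound and is skipped.
    fn pick<'early, 'late, T>(a: &'early T, _b: &'late T) -> &'early T
    where
        T: 'early,
    {
        a
    }

    fn main() {
        let (x, y) = (1u8, 2u8);
        assert_eq!(*pick(&x, &y), 1);
    }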
+ let mut index = parent_count + + has_own_self as u32 + + early_bound_lifetimes_from_generics(tcx, ast_generics).count() as u32; + + // Collect the predicates that were written inline by the user on each + // type parameter (e.g., ``). + for param in ast_generics.params { + match param.kind { + // We already dealt with early bound lifetimes above. + GenericParamKind::Lifetime { .. } => (), + GenericParamKind::Type { .. } => { + let name = param.name.ident().name; + let param_ty = ty::ParamTy::new(index, name).to_ty(tcx); + index += 1; + + let mut bounds = Bounds::default(); + // Params are implicitly sized unless a `?Sized` bound is found + >::add_implicitly_sized( + &icx, + &mut bounds, + &[], + Some((param.hir_id, ast_generics.predicates)), + param.span, + ); + predicates.extend(bounds.predicates(tcx, param_ty)); + } + GenericParamKind::Const { .. } => { + // Bounds on const parameters are currently not possible. + index += 1; + } + } + } + + // Add in the bounds that appear in the where-clause. + for predicate in ast_generics.predicates { + match predicate { + hir::WherePredicate::BoundPredicate(bound_pred) => { + let ty = icx.to_ty(bound_pred.bounded_ty); + let bound_vars = icx.tcx.late_bound_vars(bound_pred.bounded_ty.hir_id); + + // Keep the type around in a dummy predicate, in case of no bounds. + // That way, `where Ty:` is not a complete noop (see #53696) and `Ty` + // is still checked for WF. + if bound_pred.bounds.is_empty() { + if let ty::Param(_) = ty.kind() { + // This is a `where T:`, which can be in the HIR from the + // transformation that moves `?Sized` to `T`'s declaration. + // We can skip the predicate because type parameters are + // trivially WF, but also we *should*, to avoid exposing + // users who never wrote `where Type:,` themselves, to + // compiler/tooling bugs from not handling WF predicates. + } else { + let span = bound_pred.bounded_ty.span; + let predicate = ty::Binder::bind_with_vars( + ty::PredicateKind::WellFormed(ty.into()), + bound_vars, + ); + predicates.insert((predicate.to_predicate(tcx), span)); + } + } + + let mut bounds = Bounds::default(); + >::add_bounds( + &icx, + ty, + bound_pred.bounds.iter(), + &mut bounds, + bound_vars, + ); + predicates.extend(bounds.predicates(tcx, ty)); + } + + hir::WherePredicate::RegionPredicate(region_pred) => { + let r1 = >::ast_region_to_region(&icx, ®ion_pred.lifetime, None); + predicates.extend(region_pred.bounds.iter().map(|bound| { + let (r2, span) = match bound { + hir::GenericBound::Outlives(lt) => { + (>::ast_region_to_region(&icx, lt, None), lt.span) + } + _ => bug!(), + }; + let pred = ty::Binder::dummy(ty::PredicateKind::RegionOutlives( + ty::OutlivesPredicate(r1, r2), + )) + .to_predicate(icx.tcx); + + (pred, span) + })) + } + + hir::WherePredicate::EqPredicate(..) => { + // FIXME(#20041) + } + } + } + + if tcx.features().generic_const_exprs { + predicates.extend(const_evaluatable_predicates_of(tcx, def_id.expect_local())); + } + + let mut predicates: Vec<_> = predicates.into_iter().collect(); + + // Subtle: before we store the predicates into the tcx, we + // sort them so that predicates like `T: Foo` come + // before uses of `U`. This avoids false ambiguity errors + // in trait checking. See `setup_constraining_predicates` + // for details. + if let Node::Item(&Item { kind: ItemKind::Impl { .. }, .. 
}) = node { + let self_ty = tcx.type_of(def_id); + let trait_ref = tcx.impl_trait_ref(def_id); + cgp::setup_constraining_predicates( + tcx, + &mut predicates, + trait_ref, + &mut cgp::parameters_for_impl(self_ty, trait_ref), + ); + } + + let result = ty::GenericPredicates { + parent: generics.parent, + predicates: tcx.arena.alloc_from_iter(predicates), + }; + debug!("explicit_predicates_of(def_id={:?}) = {:?}", def_id, result); + result +} + +fn const_evaluatable_predicates_of<'tcx>( + tcx: TyCtxt<'tcx>, + def_id: LocalDefId, +) -> FxIndexSet<(ty::Predicate<'tcx>, Span)> { + struct ConstCollector<'tcx> { + tcx: TyCtxt<'tcx>, + preds: FxIndexSet<(ty::Predicate<'tcx>, Span)>, + } + + impl<'tcx> intravisit::Visitor<'tcx> for ConstCollector<'tcx> { + fn visit_anon_const(&mut self, c: &'tcx hir::AnonConst) { + let def_id = self.tcx.hir().local_def_id(c.hir_id); + let ct = ty::Const::from_anon_const(self.tcx, def_id); + if let ty::ConstKind::Unevaluated(uv) = ct.kind() { + assert_eq!(uv.promoted, None); + let span = self.tcx.hir().span(c.hir_id); + self.preds.insert(( + ty::Binder::dummy(ty::PredicateKind::ConstEvaluatable(uv.shrink())) + .to_predicate(self.tcx), + span, + )); + } + } + + fn visit_const_param_default(&mut self, _param: HirId, _ct: &'tcx hir::AnonConst) { + // Do not look into const param defaults, + // these get checked when they are actually instantiated. + // + // We do not want the following to error: + // + // struct Foo; + // struct Bar(Foo); + } + } + + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id); + let node = tcx.hir().get(hir_id); + + let mut collector = ConstCollector { tcx, preds: FxIndexSet::default() }; + if let hir::Node::Item(item) = node && let hir::ItemKind::Impl(ref impl_) = item.kind { + if let Some(of_trait) = &impl_.of_trait { + debug!("const_evaluatable_predicates_of({:?}): visit impl trait_ref", def_id); + collector.visit_trait_ref(of_trait); + } + + debug!("const_evaluatable_predicates_of({:?}): visit_self_ty", def_id); + collector.visit_ty(impl_.self_ty); + } + + if let Some(generics) = node.generics() { + debug!("const_evaluatable_predicates_of({:?}): visit_generics", def_id); + collector.visit_generics(generics); + } + + if let Some(fn_sig) = tcx.hir().fn_sig_by_hir_id(hir_id) { + debug!("const_evaluatable_predicates_of({:?}): visit_fn_decl", def_id); + collector.visit_fn_decl(fn_sig.decl); + } + debug!("const_evaluatable_predicates_of({:?}) = {:?}", def_id, collector.preds); + + collector.preds +} + +fn trait_explicit_predicates_and_bounds( + tcx: TyCtxt<'_>, + def_id: LocalDefId, +) -> ty::GenericPredicates<'_> { + assert_eq!(tcx.def_kind(def_id), DefKind::Trait); + gather_explicit_predicates_of(tcx, def_id.to_def_id()) +} + +fn explicit_predicates_of<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::GenericPredicates<'tcx> { + let def_kind = tcx.def_kind(def_id); + if let DefKind::Trait = def_kind { + // Remove bounds on associated types from the predicates, they will be + // returned by `explicit_item_bounds`. + let predicates_and_bounds = tcx.trait_explicit_predicates_and_bounds(def_id.expect_local()); + let trait_identity_substs = InternalSubsts::identity_for_item(tcx, def_id); + + let is_assoc_item_ty = |ty: Ty<'tcx>| { + // For a predicate from a where clause to become a bound on an + // associated type: + // * It must use the identity substs of the item. + // * Since any generic parameters on the item are not in scope, + // this means that the item is not a GAT, and its identity + // substs are the same as the trait's. 
+ // * It must be an associated type for this trait (*not* a + // supertrait). + if let ty::Projection(projection) = ty.kind() { + projection.substs == trait_identity_substs + && tcx.associated_item(projection.item_def_id).container_id(tcx) == def_id + } else { + false + } + }; + + let predicates: Vec<_> = predicates_and_bounds + .predicates + .iter() + .copied() + .filter(|(pred, _)| match pred.kind().skip_binder() { + ty::PredicateKind::Trait(tr) => !is_assoc_item_ty(tr.self_ty()), + ty::PredicateKind::Projection(proj) => { + !is_assoc_item_ty(proj.projection_ty.self_ty()) + } + ty::PredicateKind::TypeOutlives(outlives) => !is_assoc_item_ty(outlives.0), + _ => true, + }) + .collect(); + if predicates.len() == predicates_and_bounds.predicates.len() { + predicates_and_bounds + } else { + ty::GenericPredicates { + parent: predicates_and_bounds.parent, + predicates: tcx.arena.alloc_slice(&predicates), + } + } + } else { + if matches!(def_kind, DefKind::AnonConst) && tcx.lazy_normalization() { + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); + if tcx.hir().opt_const_param_default_param_hir_id(hir_id).is_some() { + // In `generics_of` we set the generics' parent to be our parent's parent which means that + // we lose out on the predicates of our actual parent if we dont return those predicates here. + // (See comment in `generics_of` for more information on why the parent shenanigans is necessary) + // + // struct Foo::ASSOC }>(T) where T: Trait; + // ^^^ ^^^^^^^^^^^^^^^^^^^^^^^ the def id we are calling + // ^^^ explicit_predicates_of on + // parent item we dont have set as the + // parent of generics returned by `generics_of` + // + // In the above code we want the anon const to have predicates in its param env for `T: Trait` + let item_def_id = tcx.hir().get_parent_item(hir_id); + // In the above code example we would be calling `explicit_predicates_of(Foo)` here + return tcx.explicit_predicates_of(item_def_id); + } + } + gather_explicit_predicates_of(tcx, def_id) + } +} + +/// Converts a specific `GenericBound` from the AST into a set of +/// predicates that apply to the self type. A vector is returned +/// because this can be anywhere from zero predicates (`T: ?Sized` adds no +/// predicates) to one (`T: Foo`) to many (`T: Bar` adds `T: Bar` +/// and `::X == i32`). +fn predicates_from_bound<'tcx>( + astconv: &dyn AstConv<'tcx>, + param_ty: Ty<'tcx>, + bound: &'tcx hir::GenericBound<'tcx>, + bound_vars: &'tcx ty::List, +) -> Vec<(ty::Predicate<'tcx>, Span)> { + let mut bounds = Bounds::default(); + astconv.add_bounds(param_ty, [bound].into_iter(), &mut bounds, bound_vars); + bounds.predicates(astconv.tcx(), param_ty).collect() +} + +fn compute_sig_of_foreign_fn_decl<'tcx>( + tcx: TyCtxt<'tcx>, + def_id: DefId, + decl: &'tcx hir::FnDecl<'tcx>, + abi: abi::Abi, +) -> ty::PolyFnSig<'tcx> { + let unsafety = if abi == abi::Abi::RustIntrinsic { + intrinsic_operation_unsafety(tcx.item_name(def_id)) + } else { + hir::Unsafety::Unsafe + }; + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); + let fty = >::ty_of_fn( + &ItemCtxt::new(tcx, def_id), + hir_id, + unsafety, + abi, + decl, + None, + None, + ); + + // Feature gate SIMD types in FFI, since I am not sure that the + // ABIs are handled at all correctly. 
-huonw + if abi != abi::Abi::RustIntrinsic + && abi != abi::Abi::PlatformIntrinsic + && !tcx.features().simd_ffi + { + let check = |ast_ty: &hir::Ty<'_>, ty: Ty<'_>| { + if ty.is_simd() { + let snip = tcx + .sess + .source_map() + .span_to_snippet(ast_ty.span) + .map_or_else(|_| String::new(), |s| format!(" `{}`", s)); + tcx.sess + .struct_span_err( + ast_ty.span, + &format!( + "use of SIMD type{} in FFI is highly experimental and \ + may result in invalid code", + snip + ), + ) + .help("add `#![feature(simd_ffi)]` to the crate attributes to enable") + .emit(); + } + }; + for (input, ty) in iter::zip(decl.inputs, fty.inputs().skip_binder()) { + check(input, *ty) + } + if let hir::FnRetTy::Return(ref ty) = decl.output { + check(ty, fty.output().skip_binder()) + } + } + + fty +} + +fn is_foreign_item(tcx: TyCtxt<'_>, def_id: DefId) -> bool { + match tcx.hir().get_if_local(def_id) { + Some(Node::ForeignItem(..)) => true, + Some(_) => false, + _ => bug!("is_foreign_item applied to non-local def-id {:?}", def_id), + } +} + +fn generator_kind(tcx: TyCtxt<'_>, def_id: DefId) -> Option { + match tcx.hir().get_if_local(def_id) { + Some(Node::Expr(&rustc_hir::Expr { + kind: rustc_hir::ExprKind::Closure(&rustc_hir::Closure { body, .. }), + .. + })) => tcx.hir().body(body).generator_kind(), + Some(_) => None, + _ => bug!("generator_kind applied to non-local def-id {:?}", def_id), + } +} + +fn from_target_feature( + tcx: TyCtxt<'_>, + attr: &ast::Attribute, + supported_target_features: &FxHashMap>, + target_features: &mut Vec, +) { + let Some(list) = attr.meta_item_list() else { return }; + let bad_item = |span| { + let msg = "malformed `target_feature` attribute input"; + let code = "enable = \"..\""; + tcx.sess + .struct_span_err(span, msg) + .span_suggestion(span, "must be of the form", code, Applicability::HasPlaceholders) + .emit(); + }; + let rust_features = tcx.features(); + for item in list { + // Only `enable = ...` is accepted in the meta-item list. + if !item.has_name(sym::enable) { + bad_item(item.span()); + continue; + } + + // Must be of the form `enable = "..."` (a string). + let Some(value) = item.value_str() else { + bad_item(item.span()); + continue; + }; + + // We allow comma separation to enable multiple features. + target_features.extend(value.as_str().split(',').filter_map(|feature| { + let Some(feature_gate) = supported_target_features.get(feature) else { + let msg = + format!("the feature named `{}` is not valid for this target", feature); + let mut err = tcx.sess.struct_span_err(item.span(), &msg); + err.span_label( + item.span(), + format!("`{}` is not valid for this target", feature), + ); + if let Some(stripped) = feature.strip_prefix('+') { + let valid = supported_target_features.contains_key(stripped); + if valid { + err.help("consider removing the leading `+` in the feature name"); + } + } + err.emit(); + return None; + }; + + // Only allow features whose feature gates have been enabled. 
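The only attribute shape accepted here is `enable = "..."`, and the string may name several features separated by commas, which is what the `split(',')` above consumes. A hedged sketch (x86_64 assumed; `avx2` and `fma` are stable feature names there):

    // Two features enabled through one attribute; on most targets (wasm is
    // handled specially further below) the function must be `unsafe`.
    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "avx2,fma")]
    unsafe fn sum_fast(xs: &[f32]) -> f32 {
        xs.iter().sum()
    }

    fn main() {
        // Calling `sum_fast` is `unsafe` and should be guarded by a runtime
        // CPU check (e.g. `is_x86_feature_detected!`); omitted in this sketch.
    }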
+ let allowed = match feature_gate.as_ref().copied() { + Some(sym::arm_target_feature) => rust_features.arm_target_feature, + Some(sym::hexagon_target_feature) => rust_features.hexagon_target_feature, + Some(sym::powerpc_target_feature) => rust_features.powerpc_target_feature, + Some(sym::mips_target_feature) => rust_features.mips_target_feature, + Some(sym::riscv_target_feature) => rust_features.riscv_target_feature, + Some(sym::avx512_target_feature) => rust_features.avx512_target_feature, + Some(sym::sse4a_target_feature) => rust_features.sse4a_target_feature, + Some(sym::tbm_target_feature) => rust_features.tbm_target_feature, + Some(sym::wasm_target_feature) => rust_features.wasm_target_feature, + Some(sym::cmpxchg16b_target_feature) => rust_features.cmpxchg16b_target_feature, + Some(sym::movbe_target_feature) => rust_features.movbe_target_feature, + Some(sym::rtm_target_feature) => rust_features.rtm_target_feature, + Some(sym::f16c_target_feature) => rust_features.f16c_target_feature, + Some(sym::ermsb_target_feature) => rust_features.ermsb_target_feature, + Some(sym::bpf_target_feature) => rust_features.bpf_target_feature, + Some(sym::aarch64_ver_target_feature) => rust_features.aarch64_ver_target_feature, + Some(name) => bug!("unknown target feature gate {}", name), + None => true, + }; + if !allowed { + feature_err( + &tcx.sess.parse_sess, + feature_gate.unwrap(), + item.span(), + &format!("the target feature `{}` is currently unstable", feature), + ) + .emit(); + } + Some(Symbol::intern(feature)) + })); + } +} + +fn linkage_by_name(tcx: TyCtxt<'_>, def_id: LocalDefId, name: &str) -> Linkage { + use rustc_middle::mir::mono::Linkage::*; + + // Use the names from src/llvm/docs/LangRef.rst here. Most types are only + // applicable to variable declarations and may not really make sense for + // Rust code in the first place but allow them anyway and trust that the + // user knows what they're doing. Who knows, unanticipated use cases may pop + // up in the future. + // + // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported + // and don't have to be, LLVM treats them as no-ops. + match name { + "appending" => Appending, + "available_externally" => AvailableExternally, + "common" => Common, + "extern_weak" => ExternalWeak, + "external" => External, + "internal" => Internal, + "linkonce" => LinkOnceAny, + "linkonce_odr" => LinkOnceODR, + "private" => Private, + "weak" => WeakAny, + "weak_odr" => WeakODR, + _ => tcx.sess.span_fatal(tcx.def_span(def_id), "invalid linkage specified"), + } +} + +fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: DefId) -> CodegenFnAttrs { + if cfg!(debug_assertions) { + let def_kind = tcx.def_kind(did); + assert!( + def_kind.has_codegen_attrs(), + "unexpected `def_kind` in `codegen_fn_attrs`: {def_kind:?}", + ); + } + + let did = did.expect_local(); + let attrs = tcx.hir().attrs(tcx.hir().local_def_id_to_hir_id(did)); + let mut codegen_fn_attrs = CodegenFnAttrs::new(); + if tcx.should_inherit_track_caller(did) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER; + } + + // The panic_no_unwind function called by TerminatorKind::Abort will never + // unwind. If the panic handler that it invokes unwind then it will simply + // call the panic handler again. 
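The attribute loop that follows collects per-function codegen flags. For orientation, two stable attributes it recognizes, shown on ordinary user code (a sketch, not exhaustive):

    // `#[cold]` marks the function as unlikely to be called and `#[no_mangle]`
    // keeps its symbol name as written; both end up as `CodegenFnAttrFlags`.
    #[cold]
    #[no_mangle]
    pub extern "C" fn report_fatal(code: i32) -> ! {
        eprintln!("fatal error: {code}");
        std::process::abort()
    }

    fn main() {
        // Normally never reached; present only so the example is a complete binary.
        if std::env::args().len() > 100 {
            report_fatal(1);
        }
    }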
+ if Some(did.to_def_id()) == tcx.lang_items().panic_no_unwind() { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND; + } + + let supported_target_features = tcx.supported_target_features(LOCAL_CRATE); + + let mut inline_span = None; + let mut link_ordinal_span = None; + let mut no_sanitize_span = None; + for attr in attrs.iter() { + if attr.has_name(sym::cold) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::COLD; + } else if attr.has_name(sym::rustc_allocator) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR; + } else if attr.has_name(sym::ffi_returns_twice) { + if tcx.is_foreign_item(did) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_RETURNS_TWICE; + } else { + // `#[ffi_returns_twice]` is only allowed `extern fn`s. + struct_span_err!( + tcx.sess, + attr.span, + E0724, + "`#[ffi_returns_twice]` may only be used on foreign functions" + ) + .emit(); + } + } else if attr.has_name(sym::ffi_pure) { + if tcx.is_foreign_item(did) { + if attrs.iter().any(|a| a.has_name(sym::ffi_const)) { + // `#[ffi_const]` functions cannot be `#[ffi_pure]` + struct_span_err!( + tcx.sess, + attr.span, + E0757, + "`#[ffi_const]` function cannot be `#[ffi_pure]`" + ) + .emit(); + } else { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE; + } + } else { + // `#[ffi_pure]` is only allowed on foreign functions + struct_span_err!( + tcx.sess, + attr.span, + E0755, + "`#[ffi_pure]` may only be used on foreign functions" + ) + .emit(); + } + } else if attr.has_name(sym::ffi_const) { + if tcx.is_foreign_item(did) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST; + } else { + // `#[ffi_const]` is only allowed on foreign functions + struct_span_err!( + tcx.sess, + attr.span, + E0756, + "`#[ffi_const]` may only be used on foreign functions" + ) + .emit(); + } + } else if attr.has_name(sym::rustc_allocator_nounwind) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND; + } else if attr.has_name(sym::rustc_reallocator) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::REALLOCATOR; + } else if attr.has_name(sym::rustc_deallocator) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::DEALLOCATOR; + } else if attr.has_name(sym::rustc_allocator_zeroed) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR_ZEROED; + } else if attr.has_name(sym::naked) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::NAKED; + } else if attr.has_name(sym::no_mangle) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE; + } else if attr.has_name(sym::no_coverage) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE; + } else if attr.has_name(sym::rustc_std_internal_symbol) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL; + } else if attr.has_name(sym::used) { + let inner = attr.meta_item_list(); + match inner.as_deref() { + Some([item]) if item.has_name(sym::linker) => { + if !tcx.features().used_with_arg { + feature_err( + &tcx.sess.parse_sess, + sym::used_with_arg, + attr.span, + "`#[used(linker)]` is currently unstable", + ) + .emit(); + } + codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_LINKER; + } + Some([item]) if item.has_name(sym::compiler) => { + if !tcx.features().used_with_arg { + feature_err( + &tcx.sess.parse_sess, + sym::used_with_arg, + attr.span, + "`#[used(compiler)]` is currently unstable", + ) + .emit(); + } + codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED; + } + Some(_) => { + tcx.sess + .struct_span_err( + attr.span, + "expected `used`, `used(compiler)` or `used(linker)`", + ) + .emit(); + } + None => { + // 
Unfortunately, unconditionally using `llvm.used` causes + // issues in handling `.init_array` with the gold linker, + // but using `llvm.compiler.used` caused a nontrival amount + // of unintentional ecosystem breakage -- particularly on + // Mach-O targets. + // + // As a result, we emit `llvm.compiler.used` only on ELF + // targets. This is somewhat ad-hoc, but actually follows + // our pre-LLVM 13 behavior (prior to the ecosystem + // breakage), and seems to match `clang`'s behavior as well + // (both before and after LLVM 13), possibly because they + // have similar compatibility concerns to us. See + // https://github.com/rust-lang/rust/issues/47384#issuecomment-1019080146 + // and following comments for some discussion of this, as + // well as the comments in `rustc_codegen_llvm` where these + // flags are handled. + // + // Anyway, to be clear: this is still up in the air + // somewhat, and is subject to change in the future (which + // is a good thing, because this would ideally be a bit + // more firmed up). + let is_like_elf = !(tcx.sess.target.is_like_osx + || tcx.sess.target.is_like_windows + || tcx.sess.target.is_like_wasm); + codegen_fn_attrs.flags |= if is_like_elf { + CodegenFnAttrFlags::USED + } else { + CodegenFnAttrFlags::USED_LINKER + }; + } + } + } else if attr.has_name(sym::cmse_nonsecure_entry) { + if !matches!(tcx.fn_sig(did).abi(), abi::Abi::C { .. }) { + struct_span_err!( + tcx.sess, + attr.span, + E0776, + "`#[cmse_nonsecure_entry]` requires C ABI" + ) + .emit(); + } + if !tcx.sess.target.llvm_target.contains("thumbv8m") { + struct_span_err!(tcx.sess, attr.span, E0775, "`#[cmse_nonsecure_entry]` is only valid for targets with the TrustZone-M extension") + .emit(); + } + codegen_fn_attrs.flags |= CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY; + } else if attr.has_name(sym::thread_local) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL; + } else if attr.has_name(sym::track_caller) { + if !tcx.is_closure(did.to_def_id()) && tcx.fn_sig(did).abi() != abi::Abi::Rust { + struct_span_err!(tcx.sess, attr.span, E0737, "`#[track_caller]` requires Rust ABI") + .emit(); + } + if tcx.is_closure(did.to_def_id()) && !tcx.features().closure_track_caller { + feature_err( + &tcx.sess.parse_sess, + sym::closure_track_caller, + attr.span, + "`#[track_caller]` on closures is currently unstable", + ) + .emit(); + } + codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER; + } else if attr.has_name(sym::export_name) { + if let Some(s) = attr.value_str() { + if s.as_str().contains('\0') { + // `#[export_name = ...]` will be converted to a null-terminated string, + // so it may not contain any null characters. + struct_span_err!( + tcx.sess, + attr.span, + E0648, + "`export_name` may not contain null characters" + ) + .emit(); + } + codegen_fn_attrs.export_name = Some(s); + } + } else if attr.has_name(sym::target_feature) { + if !tcx.is_closure(did.to_def_id()) + && tcx.fn_sig(did).unsafety() == hir::Unsafety::Normal + { + if tcx.sess.target.is_like_wasm || tcx.sess.opts.actually_rustdoc { + // The `#[target_feature]` attribute is allowed on + // WebAssembly targets on all functions, including safe + // ones. Other targets require that `#[target_feature]` is + // only applied to unsafe functions (pending the + // `target_feature_11` feature) because on most targets + // execution of instructions that are not supported is + // considered undefined behavior. 
For WebAssembly which is a + // 100% safe target at execution time it's not possible to + // execute undefined instructions, and even if a future + // feature was added in some form for this it would be a + // deterministic trap. There is no undefined behavior when + // executing WebAssembly so `#[target_feature]` is allowed + // on safe functions (but again, only for WebAssembly) + // + // Note that this is also allowed if `actually_rustdoc` so + // if a target is documenting some wasm-specific code then + // it's not spuriously denied. + } else if !tcx.features().target_feature_11 { + let mut err = feature_err( + &tcx.sess.parse_sess, + sym::target_feature_11, + attr.span, + "`#[target_feature(..)]` can only be applied to `unsafe` functions", + ); + err.span_label(tcx.def_span(did), "not an `unsafe` function"); + err.emit(); + } else { + check_target_feature_trait_unsafe(tcx, did, attr.span); + } + } + from_target_feature( + tcx, + attr, + supported_target_features, + &mut codegen_fn_attrs.target_features, + ); + } else if attr.has_name(sym::linkage) { + if let Some(val) = attr.value_str() { + codegen_fn_attrs.linkage = Some(linkage_by_name(tcx, did, val.as_str())); + } + } else if attr.has_name(sym::link_section) { + if let Some(val) = attr.value_str() { + if val.as_str().bytes().any(|b| b == 0) { + let msg = format!( + "illegal null byte in link_section \ + value: `{}`", + &val + ); + tcx.sess.span_err(attr.span, &msg); + } else { + codegen_fn_attrs.link_section = Some(val); + } + } + } else if attr.has_name(sym::link_name) { + codegen_fn_attrs.link_name = attr.value_str(); + } else if attr.has_name(sym::link_ordinal) { + link_ordinal_span = Some(attr.span); + if let ordinal @ Some(_) = check_link_ordinal(tcx, attr) { + codegen_fn_attrs.link_ordinal = ordinal; + } + } else if attr.has_name(sym::no_sanitize) { + no_sanitize_span = Some(attr.span); + if let Some(list) = attr.meta_item_list() { + for item in list.iter() { + if item.has_name(sym::address) { + codegen_fn_attrs.no_sanitize |= SanitizerSet::ADDRESS; + } else if item.has_name(sym::cfi) { + codegen_fn_attrs.no_sanitize |= SanitizerSet::CFI; + } else if item.has_name(sym::memory) { + codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMORY; + } else if item.has_name(sym::memtag) { + codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMTAG; + } else if item.has_name(sym::shadow_call_stack) { + codegen_fn_attrs.no_sanitize |= SanitizerSet::SHADOWCALLSTACK; + } else if item.has_name(sym::thread) { + codegen_fn_attrs.no_sanitize |= SanitizerSet::THREAD; + } else if item.has_name(sym::hwaddress) { + codegen_fn_attrs.no_sanitize |= SanitizerSet::HWADDRESS; + } else { + tcx.sess + .struct_span_err(item.span(), "invalid argument for `no_sanitize`") + .note("expected one of: `address`, `cfi`, `hwaddress`, `memory`, `memtag`, `shadow-call-stack`, or `thread`") + .emit(); + } + } + } + } else if attr.has_name(sym::instruction_set) { + codegen_fn_attrs.instruction_set = match attr.meta_kind() { + Some(MetaItemKind::List(ref items)) => match items.as_slice() { + [NestedMetaItem::MetaItem(set)] => { + let segments = + set.path.segments.iter().map(|x| x.ident.name).collect::>(); + match segments.as_slice() { + [sym::arm, sym::a32] | [sym::arm, sym::t32] => { + if !tcx.sess.target.has_thumb_interworking { + struct_span_err!( + tcx.sess.diagnostic(), + attr.span, + E0779, + "target does not support `#[instruction_set]`" + ) + .emit(); + None + } else if segments[1] == sym::a32 { + Some(InstructionSetAttr::ArmA32) + } else if segments[1] == sym::t32 { + 
Some(InstructionSetAttr::ArmT32) + } else { + unreachable!() + } + } + _ => { + struct_span_err!( + tcx.sess.diagnostic(), + attr.span, + E0779, + "invalid instruction set specified", + ) + .emit(); + None + } + } + } + [] => { + struct_span_err!( + tcx.sess.diagnostic(), + attr.span, + E0778, + "`#[instruction_set]` requires an argument" + ) + .emit(); + None + } + _ => { + struct_span_err!( + tcx.sess.diagnostic(), + attr.span, + E0779, + "cannot specify more than one instruction set" + ) + .emit(); + None + } + }, + _ => { + struct_span_err!( + tcx.sess.diagnostic(), + attr.span, + E0778, + "must specify an instruction set" + ) + .emit(); + None + } + }; + } else if attr.has_name(sym::repr) { + codegen_fn_attrs.alignment = match attr.meta_item_list() { + Some(items) => match items.as_slice() { + [item] => match item.name_value_literal() { + Some((sym::align, literal)) => { + let alignment = rustc_attr::parse_alignment(&literal.kind); + + match alignment { + Ok(align) => Some(align), + Err(msg) => { + struct_span_err!( + tcx.sess.diagnostic(), + attr.span, + E0589, + "invalid `repr(align)` attribute: {}", + msg + ) + .emit(); + + None + } + } + } + _ => None, + }, + [] => None, + _ => None, + }, + None => None, + }; + } + } + + codegen_fn_attrs.inline = attrs.iter().fold(InlineAttr::None, |ia, attr| { + if !attr.has_name(sym::inline) { + return ia; + } + match attr.meta_kind() { + Some(MetaItemKind::Word) => InlineAttr::Hint, + Some(MetaItemKind::List(ref items)) => { + inline_span = Some(attr.span); + if items.len() != 1 { + struct_span_err!( + tcx.sess.diagnostic(), + attr.span, + E0534, + "expected one argument" + ) + .emit(); + InlineAttr::None + } else if list_contains_name(&items, sym::always) { + InlineAttr::Always + } else if list_contains_name(&items, sym::never) { + InlineAttr::Never + } else { + struct_span_err!( + tcx.sess.diagnostic(), + items[0].span(), + E0535, + "invalid argument" + ) + .emit(); + + InlineAttr::None + } + } + Some(MetaItemKind::NameValue(_)) => ia, + None => ia, + } + }); + + codegen_fn_attrs.optimize = attrs.iter().fold(OptimizeAttr::None, |ia, attr| { + if !attr.has_name(sym::optimize) { + return ia; + } + let err = |sp, s| struct_span_err!(tcx.sess.diagnostic(), sp, E0722, "{}", s).emit(); + match attr.meta_kind() { + Some(MetaItemKind::Word) => { + err(attr.span, "expected one argument"); + ia + } + Some(MetaItemKind::List(ref items)) => { + inline_span = Some(attr.span); + if items.len() != 1 { + err(attr.span, "expected one argument"); + OptimizeAttr::None + } else if list_contains_name(&items, sym::size) { + OptimizeAttr::Size + } else if list_contains_name(&items, sym::speed) { + OptimizeAttr::Speed + } else { + err(items[0].span(), "invalid argument"); + OptimizeAttr::None + } + } + Some(MetaItemKind::NameValue(_)) => ia, + None => ia, + } + }); + + // #73631: closures inherit `#[target_feature]` annotations + if tcx.features().target_feature_11 && tcx.is_closure(did.to_def_id()) { + let owner_id = tcx.parent(did.to_def_id()); + if tcx.def_kind(owner_id).has_codegen_attrs() { + codegen_fn_attrs + .target_features + .extend(tcx.codegen_fn_attrs(owner_id).target_features.iter().copied()); + } + } + + // If a function uses #[target_feature] it can't be inlined into general + // purpose functions as they wouldn't have the right target features + // enabled. For that reason we also forbid #[inline(always)] as it can't be + // respected. 
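For reference, the attribute shapes the `inline` fold above accepts, plus a note on the combination the check just below rejects (sketch):

    #[inline] // bare word: a hint (`InlineAttr::Hint`)
    fn hint_me() {}

    #[inline(always)] // exactly one argument: `always` or `never`
    fn always_me(x: u64) -> u64 {
        x.wrapping_mul(3)
    }

    #[inline(never)]
    fn never_me() {}

    // Not shown compiling: adding `#[target_feature(enable = "...")]` to
    // `always_me` would be rejected by the error emitted right below, since
    // an always-inlined body could leak into callers built without that
    // target feature.

    fn main() {
        hint_me();
        never_me();
        assert_eq!(always_me(2), 6);
    }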
+ if !codegen_fn_attrs.target_features.is_empty() { + if codegen_fn_attrs.inline == InlineAttr::Always { + if let Some(span) = inline_span { + tcx.sess.span_err( + span, + "cannot use `#[inline(always)]` with \ + `#[target_feature]`", + ); + } + } + } + + if !codegen_fn_attrs.no_sanitize.is_empty() { + if codegen_fn_attrs.inline == InlineAttr::Always { + if let (Some(no_sanitize_span), Some(inline_span)) = (no_sanitize_span, inline_span) { + let hir_id = tcx.hir().local_def_id_to_hir_id(did); + tcx.struct_span_lint_hir( + lint::builtin::INLINE_NO_SANITIZE, + hir_id, + no_sanitize_span, + |lint| { + lint.build("`no_sanitize` will have no effect after inlining") + .span_note(inline_span, "inlining requested here") + .emit(); + }, + ) + } + } + } + + // Weak lang items have the same semantics as "std internal" symbols in the + // sense that they're preserved through all our LTO passes and only + // strippable by the linker. + // + // Additionally weak lang items have predetermined symbol names. + if tcx.is_weak_lang_item(did.to_def_id()) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL; + } + if let Some(name) = weak_lang_items::link_name(attrs) { + codegen_fn_attrs.export_name = Some(name); + codegen_fn_attrs.link_name = Some(name); + } + check_link_name_xor_ordinal(tcx, &codegen_fn_attrs, link_ordinal_span); + + // Internal symbols to the standard library all have no_mangle semantics in + // that they have defined symbol names present in the function name. This + // also applies to weak symbols where they all have known symbol names. + if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE; + } + + // Any linkage to LLVM intrinsics for now forcibly marks them all as never + // unwinds since LLVM sometimes can't handle codegen which `invoke`s + // intrinsic functions. + if let Some(name) = &codegen_fn_attrs.link_name { + if name.as_str().starts_with("llvm.") { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND; + } + } + + codegen_fn_attrs +} + +/// Computes the set of target features used in a function for the purposes of +/// inline assembly. +fn asm_target_features<'tcx>(tcx: TyCtxt<'tcx>, did: DefId) -> &'tcx FxHashSet { + let mut target_features = tcx.sess.unstable_target_features.clone(); + if tcx.def_kind(did).has_codegen_attrs() { + let attrs = tcx.codegen_fn_attrs(did); + target_features.extend(&attrs.target_features); + match attrs.instruction_set { + None => {} + Some(InstructionSetAttr::ArmA32) => { + target_features.remove(&sym::thumb_mode); + } + Some(InstructionSetAttr::ArmT32) => { + target_features.insert(sym::thumb_mode); + } + } + } + + tcx.arena.alloc(target_features) +} + +/// Checks if the provided DefId is a method in a trait impl for a trait which has track_caller +/// applied to the method prototype. 
+fn should_inherit_track_caller(tcx: TyCtxt<'_>, def_id: DefId) -> bool { + if let Some(impl_item) = tcx.opt_associated_item(def_id) + && let ty::AssocItemContainer::ImplContainer = impl_item.container + && let Some(trait_item) = impl_item.trait_item_def_id + { + return tcx + .codegen_fn_attrs(trait_item) + .flags + .intersects(CodegenFnAttrFlags::TRACK_CALLER); + } + + false +} + +fn check_link_ordinal(tcx: TyCtxt<'_>, attr: &ast::Attribute) -> Option { + use rustc_ast::{Lit, LitIntType, LitKind}; + let meta_item_list = attr.meta_item_list(); + let meta_item_list: Option<&[ast::NestedMetaItem]> = meta_item_list.as_ref().map(Vec::as_ref); + let sole_meta_list = match meta_item_list { + Some([item]) => item.literal(), + Some(_) => { + tcx.sess + .struct_span_err(attr.span, "incorrect number of arguments to `#[link_ordinal]`") + .note("the attribute requires exactly one argument") + .emit(); + return None; + } + _ => None, + }; + if let Some(Lit { kind: LitKind::Int(ordinal, LitIntType::Unsuffixed), .. }) = sole_meta_list { + // According to the table at https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#import-header, + // the ordinal must fit into 16 bits. Similarly, the Ordinal field in COFFShortExport (defined + // in llvm/include/llvm/Object/COFFImportFile.h), which we use to communicate import information + // to LLVM for `#[link(kind = "raw-dylib"_])`, is also defined to be uint16_t. + // + // FIXME: should we allow an ordinal of 0? The MSVC toolchain has inconsistent support for this: + // both LINK.EXE and LIB.EXE signal errors and abort when given a .DEF file that specifies + // a zero ordinal. However, llvm-dlltool is perfectly happy to generate an import library + // for such a .DEF file, and MSVC's LINK.EXE is also perfectly happy to consume an import + // library produced by LLVM with an ordinal of 0, and it generates an .EXE. (I don't know yet + // if the resulting EXE runs, as I haven't yet built the necessary DLL -- see earlier comment + // about LINK.EXE failing.) + if *ordinal <= u16::MAX as u128 { + Some(*ordinal as u16) + } else { + let msg = format!("ordinal value in `link_ordinal` is too large: `{}`", &ordinal); + tcx.sess + .struct_span_err(attr.span, &msg) + .note("the value may not exceed `u16::MAX`") + .emit(); + None + } + } else { + tcx.sess + .struct_span_err(attr.span, "illegal ordinal format in `link_ordinal`") + .note("an unsuffixed integer value, e.g., `1`, is expected") + .emit(); + None + } +} + +fn check_link_name_xor_ordinal( + tcx: TyCtxt<'_>, + codegen_fn_attrs: &CodegenFnAttrs, + inline_span: Option, +) { + if codegen_fn_attrs.link_name.is_none() || codegen_fn_attrs.link_ordinal.is_none() { + return; + } + let msg = "cannot use `#[link_name]` with `#[link_ordinal]`"; + if let Some(span) = inline_span { + tcx.sess.span_err(span, msg); + } else { + tcx.sess.err(msg); + } +} + +/// Checks the function annotated with `#[target_feature]` is not a safe +/// trait method implementation, reporting an error if it is. +fn check_target_feature_trait_unsafe(tcx: TyCtxt<'_>, id: LocalDefId, attr_span: Span) { + let hir_id = tcx.hir().local_def_id_to_hir_id(id); + let node = tcx.hir().get(hir_id); + if let Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Fn(..), .. }) = node { + let parent_id = tcx.hir().get_parent_item(hir_id); + let parent_item = tcx.hir().expect_item(parent_id); + if let hir::ItemKind::Impl(hir::Impl { of_trait: Some(_), .. 
}) = parent_item.kind { + tcx.sess + .struct_span_err( + attr_span, + "`#[target_feature(..)]` cannot be applied to safe trait method", + ) + .span_label(attr_span, "cannot be applied to safe trait method") + .span_label(tcx.def_span(id), "not an `unsafe` function") + .emit(); + } + } +} diff --git a/compiler/rustc_typeck/src/collect/item_bounds.rs b/compiler/rustc_typeck/src/collect/item_bounds.rs new file mode 100644 index 000000000..0d2b75d33 --- /dev/null +++ b/compiler/rustc_typeck/src/collect/item_bounds.rs @@ -0,0 +1,102 @@ +use super::ItemCtxt; +use crate::astconv::AstConv; +use rustc_hir as hir; +use rustc_infer::traits::util; +use rustc_middle::ty::subst::InternalSubsts; +use rustc_middle::ty::{self, DefIdTree, TyCtxt}; +use rustc_span::def_id::DefId; +use rustc_span::Span; + +/// For associated types we include both bounds written on the type +/// (`type X: Trait`) and predicates from the trait: `where Self::X: Trait`. +/// +/// Note that this filtering is done with the items identity substs to +/// simplify checking that these bounds are met in impls. This means that +/// a bound such as `for<'b> >::U: Clone` can't be used, as in +/// `hr-associated-type-bound-1.rs`. +fn associated_type_bounds<'tcx>( + tcx: TyCtxt<'tcx>, + assoc_item_def_id: DefId, + ast_bounds: &'tcx [hir::GenericBound<'tcx>], + span: Span, +) -> &'tcx [(ty::Predicate<'tcx>, Span)] { + let item_ty = tcx.mk_projection( + assoc_item_def_id, + InternalSubsts::identity_for_item(tcx, assoc_item_def_id), + ); + + let icx = ItemCtxt::new(tcx, assoc_item_def_id); + let mut bounds = >::compute_bounds(&icx, item_ty, ast_bounds); + // Associated types are implicitly sized unless a `?Sized` bound is found + >::add_implicitly_sized(&icx, &mut bounds, ast_bounds, None, span); + + let trait_def_id = tcx.parent(assoc_item_def_id); + let trait_predicates = tcx.trait_explicit_predicates_and_bounds(trait_def_id.expect_local()); + + let bounds_from_parent = trait_predicates.predicates.iter().copied().filter(|(pred, _)| { + match pred.kind().skip_binder() { + ty::PredicateKind::Trait(tr) => tr.self_ty() == item_ty, + ty::PredicateKind::Projection(proj) => proj.projection_ty.self_ty() == item_ty, + ty::PredicateKind::TypeOutlives(outlives) => outlives.0 == item_ty, + _ => false, + } + }); + + let all_bounds = tcx + .arena + .alloc_from_iter(bounds.predicates(tcx, item_ty).into_iter().chain(bounds_from_parent)); + debug!("associated_type_bounds({}) = {:?}", tcx.def_path_str(assoc_item_def_id), all_bounds); + all_bounds +} + +/// Opaque types don't inherit bounds from their parent: for return position +/// impl trait it isn't possible to write a suitable predicate on the +/// containing function and for type-alias impl trait we don't have a backwards +/// compatibility issue. 
+fn opaque_type_bounds<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    opaque_def_id: DefId,
+    ast_bounds: &'tcx [hir::GenericBound<'tcx>],
+    span: Span,
+) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
+    ty::print::with_no_queries!({
+        let item_ty =
+            tcx.mk_opaque(opaque_def_id, InternalSubsts::identity_for_item(tcx, opaque_def_id));
+
+        let icx = ItemCtxt::new(tcx, opaque_def_id);
+        let mut bounds = <dyn AstConv<'_>>::compute_bounds(&icx, item_ty, ast_bounds);
+        // Opaque types are implicitly sized unless a `?Sized` bound is found
+        <dyn AstConv<'_>>::add_implicitly_sized(&icx, &mut bounds, ast_bounds, None, span);
+        tcx.arena.alloc_from_iter(bounds.predicates(tcx, item_ty))
+    })
+}
+
+pub(super) fn explicit_item_bounds(
+    tcx: TyCtxt<'_>,
+    def_id: DefId,
+) -> &'_ [(ty::Predicate<'_>, Span)] {
+    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+    match tcx.hir().get(hir_id) {
+        hir::Node::TraitItem(hir::TraitItem {
+            kind: hir::TraitItemKind::Type(bounds, _),
+            span,
+            ..
+        }) => associated_type_bounds(tcx, def_id, bounds, *span),
+        hir::Node::Item(hir::Item {
+            kind: hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds, .. }),
+            span,
+            ..
+        }) => opaque_type_bounds(tcx, def_id, bounds, *span),
+        _ => bug!("item_bounds called on {:?}", def_id),
+    }
+}
+
+pub(super) fn item_bounds(tcx: TyCtxt<'_>, def_id: DefId) -> &'_ ty::List<ty::Predicate<'_>> {
+    tcx.mk_predicates(
+        util::elaborate_predicates(
+            tcx,
+            tcx.explicit_item_bounds(def_id).iter().map(|&(bound, _span)| bound),
+        )
+        .map(|obligation| obligation.predicate),
+    )
+}
diff --git a/compiler/rustc_typeck/src/collect/type_of.rs b/compiler/rustc_typeck/src/collect/type_of.rs
new file mode 100644
index 000000000..534ddfa95
--- /dev/null
+++ b/compiler/rustc_typeck/src/collect/type_of.rs
@@ -0,0 +1,877 @@
+use rustc_errors::{Applicability, StashKey};
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::{HirId, Node};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::util::IntTypeExt;
+use rustc_middle::ty::{self, DefIdTree, Ty, TyCtxt, TypeFolder, TypeSuperFoldable, TypeVisitable};
+use rustc_span::symbol::Ident;
+use rustc_span::{Span, DUMMY_SP};
+
+use super::ItemCtxt;
+use super::{bad_placeholder, is_suggestable_infer_ty};
+use crate::errors::UnconstrainedOpaqueType;
+
+/// Computes the relevant generic parameter for a potential generic const argument.
+///
+/// This should be called using the query `tcx.opt_const_param_of`.
+#[instrument(level = "debug", skip(tcx))]
+pub(super) fn opt_const_param_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<DefId> {
+    use hir::*;
+    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+
+    match tcx.hir().get(hir_id) {
+        Node::AnonConst(_) => (),
+        _ => return None,
+    };
+
+    let parent_node_id = tcx.hir().get_parent_node(hir_id);
+    let parent_node = tcx.hir().get(parent_node_id);
+
+    let (generics, arg_idx) = match parent_node {
+        // This match arm is for when the def_id appears in a GAT whose
+        // path can't be resolved without typechecking e.g.
+        //
+        // trait Foo {
+        //     type Assoc<const N: usize>;
+        //     fn foo() -> Self::Assoc<3>;
+        // }
+        //
+        // In the above code we would call this query with the def_id of 3 and
+        // the parent_node we match on would be the hir node for Self::Assoc<3>
+        //
+        // `Self::Assoc<3>` can't be resolved without typechecking here as we
+        // didn't write <Self as Foo>::Assoc<3>. If we did then another match
+        // arm would handle this.
+        //
+        // I believe this match arm is only needed for GATs but I am not 100% sure - BoxyUwU
+        Node::Ty(hir_ty @ Ty { kind: TyKind::Path(QPath::TypeRelative(_, segment)), .. }) => {
+            // Find the Item containing the associated type so we can create an ItemCtxt.
+            // Using the ItemCtxt convert the HIR for the unresolved assoc type into a
+            // ty which is a fully resolved projection.
+            // For the code example above, this would mean converting Self::Assoc<3>
+            // into a ty::Projection(<Self as Foo>::Assoc<3>)
+            let item_hir_id = tcx
+                .hir()
+                .parent_iter(hir_id)
+                .filter(|(_, node)| matches!(node, Node::Item(_)))
+                .map(|(id, _)| id)
+                .next()
+                .unwrap();
+            let item_did = tcx.hir().local_def_id(item_hir_id).to_def_id();
+            let item_ctxt = &ItemCtxt::new(tcx, item_did) as &dyn crate::astconv::AstConv<'_>;
+            let ty = item_ctxt.ast_ty_to_ty(hir_ty);
+
+            // Iterate through the generics of the projection to find the one that corresponds to
+            // the def_id that this query was called with. We filter to only const args here as a
+            // precaution for if it's ever allowed to elide lifetimes in GATs. It currently isn't
+            // but it can't hurt to be safe ^^
+            if let ty::Projection(projection) = ty.kind() {
+                let generics = tcx.generics_of(projection.item_def_id);
+
+                let arg_index = segment
+                    .args
+                    .and_then(|args| {
+                        args.args
+                            .iter()
+                            .filter(|arg| arg.is_ty_or_const())
+                            .position(|arg| arg.id() == hir_id)
+                    })
+                    .unwrap_or_else(|| {
+                        bug!("no arg matching AnonConst in segment");
+                    });
+
+                (generics, arg_index)
+            } else {
+                // I don't think it's possible to reach this but I'm not 100% sure - BoxyUwU
+                tcx.sess.delay_span_bug(
+                    tcx.def_span(def_id),
+                    "unexpected non-GAT usage of an anon const",
+                );
+                return None;
+            }
+        }
+        Node::Expr(&Expr {
+            kind:
+                ExprKind::MethodCall(segment, ..) | ExprKind::Path(QPath::TypeRelative(_, segment)),
+            ..
+        }) => {
+            let body_owner = tcx.hir().enclosing_body_owner(hir_id);
+            let tables = tcx.typeck(body_owner);
+            // This may fail in case the method/path does not actually exist.
+            // As there is no relevant param for `def_id`, we simply return
+            // `None` here.
+            let type_dependent_def = tables.type_dependent_def_id(parent_node_id)?;
+            let idx = segment
+                .args
+                .and_then(|args| {
+                    args.args
+                        .iter()
+                        .filter(|arg| arg.is_ty_or_const())
+                        .position(|arg| arg.id() == hir_id)
+                })
+                .unwrap_or_else(|| {
+                    bug!("no arg matching AnonConst in segment");
+                });
+
+            (tcx.generics_of(type_dependent_def), idx)
+        }
+
+        Node::Ty(&Ty { kind: TyKind::Path(_), .. })
+        | Node::Expr(&Expr { kind: ExprKind::Path(_) | ExprKind::Struct(..), .. })
+        | Node::TraitRef(..)
+        | Node::Pat(_) => {
+            let path = match parent_node {
+                Node::Ty(&Ty { kind: TyKind::Path(QPath::Resolved(_, path)), .. })
+                | Node::TraitRef(&TraitRef { path, .. }) => &*path,
+                Node::Expr(&Expr {
+                    kind:
+                        ExprKind::Path(QPath::Resolved(_, path))
+                        | ExprKind::Struct(&QPath::Resolved(_, path), ..),
+                    ..
+ }) => { + let body_owner = tcx.hir().enclosing_body_owner(hir_id); + let _tables = tcx.typeck(body_owner); + &*path + } + Node::Pat(pat) => { + if let Some(path) = get_path_containing_arg_in_pat(pat, hir_id) { + path + } else { + tcx.sess.delay_span_bug( + tcx.def_span(def_id), + &format!("unable to find const parent for {} in pat {:?}", hir_id, pat), + ); + return None; + } + } + _ => { + tcx.sess.delay_span_bug( + tcx.def_span(def_id), + &format!("unexpected const parent path {:?}", parent_node), + ); + return None; + } + }; + + // We've encountered an `AnonConst` in some path, so we need to + // figure out which generic parameter it corresponds to and return + // the relevant type. + let Some((arg_index, segment)) = path.segments.iter().find_map(|seg| { + let args = seg.args?; + args.args + .iter() + .filter(|arg| arg.is_ty_or_const()) + .position(|arg| arg.id() == hir_id) + .map(|index| (index, seg)).or_else(|| args.bindings + .iter() + .filter_map(TypeBinding::opt_const) + .position(|ct| ct.hir_id == hir_id) + .map(|idx| (idx, seg))) + }) else { + tcx.sess.delay_span_bug( + tcx.def_span(def_id), + "no arg matching AnonConst in path", + ); + return None; + }; + + // Try to use the segment resolution if it is valid, otherwise we + // default to the path resolution. + let res = segment.res.filter(|&r| r != Res::Err).unwrap_or(path.res); + let generics = match tcx.res_generics_def_id(res) { + Some(def_id) => tcx.generics_of(def_id), + None => { + tcx.sess.delay_span_bug( + tcx.def_span(def_id), + &format!("unexpected anon const res {:?} in path: {:?}", res, path), + ); + return None; + } + }; + + (generics, arg_index) + } + _ => return None, + }; + + debug!(?parent_node); + debug!(?generics, ?arg_idx); + generics + .params + .iter() + .filter(|param| param.kind.is_ty_or_const()) + .nth(match generics.has_self && generics.parent.is_none() { + true => arg_idx + 1, + false => arg_idx, + }) + .and_then(|param| match param.kind { + ty::GenericParamDefKind::Const { .. } => { + debug!(?param); + Some(param.def_id) + } + _ => None, + }) +} + +fn get_path_containing_arg_in_pat<'hir>( + pat: &'hir hir::Pat<'hir>, + arg_id: HirId, +) -> Option<&'hir hir::Path<'hir>> { + use hir::*; + + let is_arg_in_path = |p: &hir::Path<'_>| { + p.segments + .iter() + .filter_map(|seg| seg.args) + .flat_map(|args| args.args) + .any(|arg| arg.id() == arg_id) + }; + let mut arg_path = None; + pat.walk(|pat| match pat.kind { + PatKind::Struct(QPath::Resolved(_, path), _, _) + | PatKind::TupleStruct(QPath::Resolved(_, path), _, _) + | PatKind::Path(QPath::Resolved(_, path)) + if is_arg_in_path(path) => + { + arg_path = Some(path); + false + } + _ => true, + }); + arg_path +} + +pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: DefId) -> Ty<'_> { + let def_id = def_id.expect_local(); + use rustc_hir::*; + + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id); + + let icx = ItemCtxt::new(tcx, def_id.to_def_id()); + + match tcx.hir().get(hir_id) { + Node::TraitItem(item) => match item.kind { + TraitItemKind::Fn(..) 
=> { + let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id()); + tcx.mk_fn_def(def_id.to_def_id(), substs) + } + TraitItemKind::Const(ty, body_id) => body_id + .and_then(|body_id| { + if is_suggestable_infer_ty(ty) { + Some(infer_placeholder_type( + tcx, def_id, body_id, ty.span, item.ident, "constant", + )) + } else { + None + } + }) + .unwrap_or_else(|| icx.to_ty(ty)), + TraitItemKind::Type(_, Some(ty)) => icx.to_ty(ty), + TraitItemKind::Type(_, None) => { + span_bug!(item.span, "associated type missing default"); + } + }, + + Node::ImplItem(item) => match item.kind { + ImplItemKind::Fn(..) => { + let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id()); + tcx.mk_fn_def(def_id.to_def_id(), substs) + } + ImplItemKind::Const(ty, body_id) => { + if is_suggestable_infer_ty(ty) { + infer_placeholder_type(tcx, def_id, body_id, ty.span, item.ident, "constant") + } else { + icx.to_ty(ty) + } + } + ImplItemKind::TyAlias(ty) => { + if tcx.impl_trait_ref(tcx.hir().get_parent_item(hir_id)).is_none() { + check_feature_inherent_assoc_ty(tcx, item.span); + } + + icx.to_ty(ty) + } + }, + + Node::Item(item) => { + match item.kind { + ItemKind::Static(ty, .., body_id) => { + if is_suggestable_infer_ty(ty) { + infer_placeholder_type( + tcx, + def_id, + body_id, + ty.span, + item.ident, + "static variable", + ) + } else { + icx.to_ty(ty) + } + } + ItemKind::Const(ty, body_id) => { + if is_suggestable_infer_ty(ty) { + infer_placeholder_type( + tcx, def_id, body_id, ty.span, item.ident, "constant", + ) + } else { + icx.to_ty(ty) + } + } + ItemKind::TyAlias(self_ty, _) => icx.to_ty(self_ty), + ItemKind::Impl(hir::Impl { self_ty, .. }) => icx.to_ty(*self_ty), + ItemKind::Fn(..) => { + let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id()); + tcx.mk_fn_def(def_id.to_def_id(), substs) + } + ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) => { + let def = tcx.adt_def(def_id); + let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id()); + tcx.mk_adt(def, substs) + } + ItemKind::OpaqueTy(OpaqueTy { origin: hir::OpaqueTyOrigin::TyAlias, .. }) => { + find_opaque_ty_constraints_for_tait(tcx, def_id) + } + // Opaque types desugared from `impl Trait`. + ItemKind::OpaqueTy(OpaqueTy { origin: hir::OpaqueTyOrigin::FnReturn(owner) | hir::OpaqueTyOrigin::AsyncFn(owner), .. }) => { + find_opaque_ty_constraints_for_rpit(tcx, def_id, owner) + } + ItemKind::Trait(..) + | ItemKind::TraitAlias(..) + | ItemKind::Macro(..) + | ItemKind::Mod(..) + | ItemKind::ForeignMod { .. } + | ItemKind::GlobalAsm(..) + | ItemKind::ExternCrate(..) + | ItemKind::Use(..) => { + span_bug!( + item.span, + "compute_type_of_item: unexpected item type: {:?}", + item.kind + ); + } + } + } + + Node::ForeignItem(foreign_item) => match foreign_item.kind { + ForeignItemKind::Fn(..) => { + let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id()); + tcx.mk_fn_def(def_id.to_def_id(), substs) + } + ForeignItemKind::Static(t, _) => icx.to_ty(t), + ForeignItemKind::Type => tcx.mk_foreign(def_id.to_def_id()), + }, + + Node::Ctor(&ref def) | Node::Variant(Variant { data: ref def, .. }) => match *def { + VariantData::Unit(..) | VariantData::Struct(..) => { + tcx.type_of(tcx.hir().get_parent_item(hir_id)) + } + VariantData::Tuple(..) => { + let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id()); + tcx.mk_fn_def(def_id.to_def_id(), substs) + } + }, + + Node::Field(field) => icx.to_ty(field.ty), + + Node::Expr(&Expr { kind: ExprKind::Closure{..}, .. 
}) => tcx.typeck(def_id).node_type(hir_id), + + Node::AnonConst(_) if let Some(param) = tcx.opt_const_param_of(def_id) => { + // We defer to `type_of` of the corresponding parameter + // for generic arguments. + tcx.type_of(param) + } + + Node::AnonConst(_) => { + let parent_node = tcx.hir().get(tcx.hir().get_parent_node(hir_id)); + match parent_node { + Node::Ty(&Ty { kind: TyKind::Array(_, ref constant), .. }) + | Node::Expr(&Expr { kind: ExprKind::Repeat(_, ref constant), .. }) + if constant.hir_id() == hir_id => + { + tcx.types.usize + } + Node::Ty(&Ty { kind: TyKind::Typeof(ref e), .. }) if e.hir_id == hir_id => { + tcx.typeck(def_id).node_type(e.hir_id) + } + + Node::Expr(&Expr { kind: ExprKind::ConstBlock(ref anon_const), .. }) + if anon_const.hir_id == hir_id => + { + let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id()); + substs.as_inline_const().ty() + } + + Node::Expr(&Expr { kind: ExprKind::InlineAsm(asm), .. }) + | Node::Item(&Item { kind: ItemKind::GlobalAsm(asm), .. }) + if asm.operands.iter().any(|(op, _op_sp)| match op { + hir::InlineAsmOperand::Const { anon_const } + | hir::InlineAsmOperand::SymFn { anon_const } => anon_const.hir_id == hir_id, + _ => false, + }) => + { + tcx.typeck(def_id).node_type(hir_id) + } + + Node::Variant(Variant { disr_expr: Some(ref e), .. }) if e.hir_id == hir_id => tcx + .adt_def(tcx.hir().get_parent_item(hir_id)) + .repr() + .discr_type() + .to_ty(tcx), + + Node::TypeBinding(binding @ &TypeBinding { hir_id: binding_id, .. }) + if let Node::TraitRef(trait_ref) = tcx.hir().get( + tcx.hir().get_parent_node(binding_id) + ) => + { + let Some(trait_def_id) = trait_ref.trait_def_id() else { + return tcx.ty_error_with_message(DUMMY_SP, "Could not find trait"); + }; + let assoc_items = tcx.associated_items(trait_def_id); + let assoc_item = assoc_items.find_by_name_and_kind( + tcx, binding.ident, ty::AssocKind::Const, def_id.to_def_id(), + ); + if let Some(assoc_item) = assoc_item { + tcx.type_of(assoc_item.def_id) + } else { + // FIXME(associated_const_equality): add a useful error message here. + tcx.ty_error_with_message( + DUMMY_SP, + "Could not find associated const on trait", + ) + } + } + + Node::GenericParam(&GenericParam { + hir_id: param_hir_id, + kind: GenericParamKind::Const { default: Some(ct), .. }, + .. + }) if ct.hir_id == hir_id => tcx.type_of(tcx.hir().local_def_id(param_hir_id)), + + x => + tcx.ty_error_with_message( + DUMMY_SP, + &format!("unexpected const parent in type_of(): {x:?}"), + ), + } + } + + Node::GenericParam(param) => match ¶m.kind { + GenericParamKind::Type { default: Some(ty), .. } + | GenericParamKind::Const { ty, .. } => icx.to_ty(ty), + x => bug!("unexpected non-type Node::GenericParam: {:?}", x), + }, + + x => { + bug!("unexpected sort of node in type_of(): {:?}", x); + } + } +} + +#[instrument(skip(tcx), level = "debug")] +/// Checks "defining uses" of opaque `impl Trait` types to ensure that they meet the restrictions +/// laid for "higher-order pattern unification". +/// This ensures that inference is tractable. +/// In particular, definitions of opaque types can only use other generics as arguments, +/// and they cannot repeat an argument. Example: +/// +/// ```ignore (illustrative) +/// type Foo = impl Bar; +/// +/// // Okay -- `Foo` is applied to two distinct, generic types. +/// fn a() -> Foo { .. } +/// +/// // Not okay -- `Foo` is applied to `T` twice. +/// fn b() -> Foo { .. } +/// +/// // Not okay -- `Foo` is applied to a non-generic type. +/// fn b() -> Foo { .. 
} +/// ``` +/// +fn find_opaque_ty_constraints_for_tait(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Ty<'_> { + use rustc_hir::{Expr, ImplItem, Item, TraitItem}; + + struct ConstraintLocator<'tcx> { + tcx: TyCtxt<'tcx>, + + /// def_id of the opaque type whose defining uses are being checked + def_id: LocalDefId, + + /// as we walk the defining uses, we are checking that all of them + /// define the same hidden type. This variable is set to `Some` + /// with the first type that we find, and then later types are + /// checked against it (we also carry the span of that first + /// type). + found: Option>, + } + + impl ConstraintLocator<'_> { + #[instrument(skip(self), level = "debug")] + fn check(&mut self, item_def_id: LocalDefId) { + // Don't try to check items that cannot possibly constrain the type. + if !self.tcx.has_typeck_results(item_def_id) { + debug!("no constraint: no typeck results"); + return; + } + // Calling `mir_borrowck` can lead to cycle errors through + // const-checking, avoid calling it if we don't have to. + // ```rust + // type Foo = impl Fn() -> usize; // when computing type for this + // const fn bar() -> Foo { + // || 0usize + // } + // const BAZR: Foo = bar(); // we would mir-borrowck this, causing cycles + // // because we again need to reveal `Foo` so we can check whether the + // // constant does not contain interior mutability. + // ``` + let tables = self.tcx.typeck(item_def_id); + if let Some(_) = tables.tainted_by_errors { + self.found = Some(ty::OpaqueHiddenType { span: DUMMY_SP, ty: self.tcx.ty_error() }); + return; + } + if !tables.concrete_opaque_types.contains_key(&self.def_id) { + debug!("no constraints in typeck results"); + return; + } + // Use borrowck to get the type with unerased regions. + let concrete_opaque_types = &self.tcx.mir_borrowck(item_def_id).concrete_opaque_types; + debug!(?concrete_opaque_types); + if let Some(&concrete_type) = concrete_opaque_types.get(&self.def_id) { + debug!(?concrete_type, "found constraint"); + if let Some(prev) = self.found { + if concrete_type.ty != prev.ty && !(concrete_type, prev).references_error() { + prev.report_mismatch(&concrete_type, self.tcx); + } + } else { + self.found = Some(concrete_type); + } + } + } + } + + impl<'tcx> intravisit::Visitor<'tcx> for ConstraintLocator<'tcx> { + type NestedFilter = nested_filter::All; + + fn nested_visit_map(&mut self) -> Self::Map { + self.tcx.hir() + } + fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) { + if let hir::ExprKind::Closure { .. } = ex.kind { + let def_id = self.tcx.hir().local_def_id(ex.hir_id); + self.check(def_id); + } + intravisit::walk_expr(self, ex); + } + fn visit_item(&mut self, it: &'tcx Item<'tcx>) { + trace!(?it.def_id); + // The opaque type itself or its children are not within its reveal scope. + if it.def_id != self.def_id { + self.check(it.def_id); + intravisit::walk_item(self, it); + } + } + fn visit_impl_item(&mut self, it: &'tcx ImplItem<'tcx>) { + trace!(?it.def_id); + // The opaque type itself or its children are not within its reveal scope. 
+ if it.def_id != self.def_id { + self.check(it.def_id); + intravisit::walk_impl_item(self, it); + } + } + fn visit_trait_item(&mut self, it: &'tcx TraitItem<'tcx>) { + trace!(?it.def_id); + self.check(it.def_id); + intravisit::walk_trait_item(self, it); + } + } + + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id); + let scope = tcx.hir().get_defining_scope(hir_id); + let mut locator = ConstraintLocator { def_id: def_id, tcx, found: None }; + + debug!(?scope); + + if scope == hir::CRATE_HIR_ID { + tcx.hir().walk_toplevel_module(&mut locator); + } else { + trace!("scope={:#?}", tcx.hir().get(scope)); + match tcx.hir().get(scope) { + // We explicitly call `visit_*` methods, instead of using `intravisit::walk_*` methods + // This allows our visitor to process the defining item itself, causing + // it to pick up any 'sibling' defining uses. + // + // For example, this code: + // ``` + // fn foo() { + // type Blah = impl Debug; + // let my_closure = || -> Blah { true }; + // } + // ``` + // + // requires us to explicitly process `foo()` in order + // to notice the defining usage of `Blah`. + Node::Item(it) => locator.visit_item(it), + Node::ImplItem(it) => locator.visit_impl_item(it), + Node::TraitItem(it) => locator.visit_trait_item(it), + other => bug!("{:?} is not a valid scope for an opaque type item", other), + } + } + + match locator.found { + Some(hidden) => hidden.ty, + None => { + tcx.sess.emit_err(UnconstrainedOpaqueType { + span: tcx.def_span(def_id), + name: tcx.item_name(tcx.local_parent(def_id).to_def_id()), + }); + tcx.ty_error() + } + } +} + +fn find_opaque_ty_constraints_for_rpit( + tcx: TyCtxt<'_>, + def_id: LocalDefId, + owner_def_id: LocalDefId, +) -> Ty<'_> { + use rustc_hir::{Expr, ImplItem, Item, TraitItem}; + + struct ConstraintChecker<'tcx> { + tcx: TyCtxt<'tcx>, + + /// def_id of the opaque type whose defining uses are being checked + def_id: LocalDefId, + + found: ty::OpaqueHiddenType<'tcx>, + } + + impl ConstraintChecker<'_> { + #[instrument(skip(self), level = "debug")] + fn check(&self, def_id: LocalDefId) { + // Use borrowck to get the type with unerased regions. + let concrete_opaque_types = &self.tcx.mir_borrowck(def_id).concrete_opaque_types; + debug!(?concrete_opaque_types); + for &(def_id, concrete_type) in concrete_opaque_types { + if def_id != self.def_id { + // Ignore constraints for other opaque types. + continue; + } + + debug!(?concrete_type, "found constraint"); + + if concrete_type.ty != self.found.ty + && !(concrete_type, self.found).references_error() + { + self.found.report_mismatch(&concrete_type, self.tcx); + } + } + } + } + + impl<'tcx> intravisit::Visitor<'tcx> for ConstraintChecker<'tcx> { + type NestedFilter = nested_filter::OnlyBodies; + + fn nested_visit_map(&mut self) -> Self::Map { + self.tcx.hir() + } + fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) { + if let hir::ExprKind::Closure { .. } = ex.kind { + let def_id = self.tcx.hir().local_def_id(ex.hir_id); + self.check(def_id); + } + intravisit::walk_expr(self, ex); + } + fn visit_item(&mut self, it: &'tcx Item<'tcx>) { + trace!(?it.def_id); + // The opaque type itself or its children are not within its reveal scope. + if it.def_id != self.def_id { + self.check(it.def_id); + intravisit::walk_item(self, it); + } + } + fn visit_impl_item(&mut self, it: &'tcx ImplItem<'tcx>) { + trace!(?it.def_id); + // The opaque type itself or its children are not within its reveal scope. 
+ if it.def_id != self.def_id { + self.check(it.def_id); + intravisit::walk_impl_item(self, it); + } + } + fn visit_trait_item(&mut self, it: &'tcx TraitItem<'tcx>) { + trace!(?it.def_id); + self.check(it.def_id); + intravisit::walk_trait_item(self, it); + } + } + + let concrete = tcx.mir_borrowck(owner_def_id).concrete_opaque_types.get(&def_id).copied(); + + if let Some(concrete) = concrete { + let scope = tcx.hir().local_def_id_to_hir_id(owner_def_id); + debug!(?scope); + let mut locator = ConstraintChecker { def_id: def_id, tcx, found: concrete }; + + match tcx.hir().get(scope) { + Node::Item(it) => intravisit::walk_item(&mut locator, it), + Node::ImplItem(it) => intravisit::walk_impl_item(&mut locator, it), + Node::TraitItem(it) => intravisit::walk_trait_item(&mut locator, it), + other => bug!("{:?} is not a valid scope for an opaque type item", other), + } + } + + concrete.map(|concrete| concrete.ty).unwrap_or_else(|| { + let table = tcx.typeck(owner_def_id); + if let Some(_) = table.tainted_by_errors { + // Some error in the + // owner fn prevented us from populating + // the `concrete_opaque_types` table. + tcx.ty_error() + } else { + table + .concrete_opaque_types + .get(&def_id) + .copied() + .unwrap_or_else(|| { + // We failed to resolve the opaque type or it + // resolves to itself. We interpret this as the + // no values of the hidden type ever being constructed, + // so we can just make the hidden type be `!`. + // For backwards compatibility reasons, we fall back to + // `()` until we the diverging default is changed. + Some(tcx.mk_diverging_default()) + }) + .expect("RPIT always have a hidden type from typeck") + } + }) +} + +fn infer_placeholder_type<'a>( + tcx: TyCtxt<'a>, + def_id: LocalDefId, + body_id: hir::BodyId, + span: Span, + item_ident: Ident, + kind: &'static str, +) -> Ty<'a> { + // Attempts to make the type nameable by turning FnDefs into FnPtrs. + struct MakeNameable<'tcx> { + success: bool, + tcx: TyCtxt<'tcx>, + } + + impl<'tcx> MakeNameable<'tcx> { + fn new(tcx: TyCtxt<'tcx>) -> Self { + MakeNameable { success: true, tcx } + } + } + + impl<'tcx> TypeFolder<'tcx> for MakeNameable<'tcx> { + fn tcx(&self) -> TyCtxt<'tcx> { + self.tcx + } + + fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { + if !self.success { + return ty; + } + + match ty.kind() { + ty::FnDef(def_id, _) => self.tcx.mk_fn_ptr(self.tcx.fn_sig(*def_id)), + // FIXME: non-capturing closures should also suggest a function pointer + ty::Closure(..) | ty::Generator(..) => { + self.success = false; + ty + } + _ => ty.super_fold_with(self), + } + } + } + + let ty = tcx.diagnostic_only_typeck(def_id).node_type(body_id.hir_id); + + // If this came from a free `const` or `static mut?` item, + // then the user may have written e.g. `const A = 42;`. + // In this case, the parser has stashed a diagnostic for + // us to improve in typeck so we do that now. + match tcx.sess.diagnostic().steal_diagnostic(span, StashKey::ItemNoType) { + Some(mut err) => { + if !ty.references_error() { + // The parser provided a sub-optimal `HasPlaceholders` suggestion for the type. + // We are typeck and have the real type, so remove that and suggest the actual type. + // FIXME(eddyb) this looks like it should be functionality on `Diagnostic`. + if let Ok(suggestions) = &mut err.suggestions { + suggestions.clear(); + } + + // Suggesting unnameable types won't help. 
+ let mut mk_nameable = MakeNameable::new(tcx); + let ty = mk_nameable.fold_ty(ty); + let sugg_ty = if mk_nameable.success { Some(ty) } else { None }; + if let Some(sugg_ty) = sugg_ty { + err.span_suggestion( + span, + &format!("provide a type for the {item}", item = kind), + format!("{}: {}", item_ident, sugg_ty), + Applicability::MachineApplicable, + ); + } else { + err.span_note( + tcx.hir().body(body_id).value.span, + &format!("however, the inferred type `{}` cannot be named", ty), + ); + } + } + + err.emit(); + } + None => { + let mut diag = bad_placeholder(tcx, vec![span], kind); + + if !ty.references_error() { + let mut mk_nameable = MakeNameable::new(tcx); + let ty = mk_nameable.fold_ty(ty); + let sugg_ty = if mk_nameable.success { Some(ty) } else { None }; + if let Some(sugg_ty) = sugg_ty { + diag.span_suggestion( + span, + "replace with the correct type", + sugg_ty, + Applicability::MaybeIncorrect, + ); + } else { + diag.span_note( + tcx.hir().body(body_id).value.span, + &format!("however, the inferred type `{}` cannot be named", ty), + ); + } + } + + diag.emit(); + } + } + + // Typeck doesn't expect erased regions to be returned from `type_of`. + tcx.fold_regions(ty, |r, _| match *r { + ty::ReErased => tcx.lifetimes.re_static, + _ => r, + }) +} + +fn check_feature_inherent_assoc_ty(tcx: TyCtxt<'_>, span: Span) { + if !tcx.features().inherent_associated_types { + use rustc_session::parse::feature_err; + use rustc_span::symbol::sym; + feature_err( + &tcx.sess.parse_sess, + sym::inherent_associated_types, + span, + "inherent associated types are unstable", + ) + .emit(); + } +} diff --git a/compiler/rustc_typeck/src/constrained_generic_params.rs b/compiler/rustc_typeck/src/constrained_generic_params.rs new file mode 100644 index 000000000..8428e4664 --- /dev/null +++ b/compiler/rustc_typeck/src/constrained_generic_params.rs @@ -0,0 +1,221 @@ +use rustc_data_structures::fx::FxHashSet; +use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor}; +use rustc_middle::ty::{self, Ty, TyCtxt}; +use rustc_span::source_map::Span; +use std::ops::ControlFlow; + +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub struct Parameter(pub u32); + +impl From for Parameter { + fn from(param: ty::ParamTy) -> Self { + Parameter(param.index) + } +} + +impl From for Parameter { + fn from(param: ty::EarlyBoundRegion) -> Self { + Parameter(param.index) + } +} + +impl From for Parameter { + fn from(param: ty::ParamConst) -> Self { + Parameter(param.index) + } +} + +/// Returns the set of parameters constrained by the impl header. +pub fn parameters_for_impl<'tcx>( + impl_self_ty: Ty<'tcx>, + impl_trait_ref: Option>, +) -> FxHashSet { + let vec = match impl_trait_ref { + Some(tr) => parameters_for(&tr, false), + None => parameters_for(&impl_self_ty, false), + }; + vec.into_iter().collect() +} + +/// If `include_nonconstraining` is false, returns the list of parameters that are +/// constrained by `t` - i.e., the value of each parameter in the list is +/// uniquely determined by `t` (see RFC 447). If it is true, return the list +/// of parameters whose values are needed in order to constrain `ty` - these +/// differ, with the latter being a superset, in the presence of projections. 
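+///
+/// As a rough illustration (hypothetical types, not taken from this crate): for
+/// `t = <Vec<T> as IntoIterator>::Item`, the projection is treated as opaque, so the
+/// non-constraining call reports no parameters, while the constraining-inputs call
+/// reports `T`, since `T`'s value is needed before the projection can be evaluated.
+/// ```ignore (illustrative)
+/// // t = <Vec<T> as IntoIterator>::Item
+/// // parameters_for(&t, false) == []
+/// // parameters_for(&t, true)  == [T]
+/// ```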
+pub fn parameters_for<'tcx>(
+    t: &impl TypeVisitable<'tcx>,
+    include_nonconstraining: bool,
+) -> Vec<Parameter> {
+    let mut collector = ParameterCollector { parameters: vec![], include_nonconstraining };
+    t.visit_with(&mut collector);
+    collector.parameters
+}
+
+struct ParameterCollector {
+    parameters: Vec<Parameter>,
+    include_nonconstraining: bool,
+}
+
+impl<'tcx> TypeVisitor<'tcx> for ParameterCollector {
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+        match *t.kind() {
+            ty::Projection(..) if !self.include_nonconstraining => {
+                // projections are not injective
+                return ControlFlow::CONTINUE;
+            }
+            ty::Param(data) => {
+                self.parameters.push(Parameter::from(data));
+            }
+            _ => {}
+        }
+
+        t.super_visit_with(self)
+    }
+
+    fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+        if let ty::ReEarlyBound(data) = *r {
+            self.parameters.push(Parameter::from(data));
+        }
+        ControlFlow::CONTINUE
+    }
+
+    fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+        match c.kind() {
+            ty::ConstKind::Unevaluated(..) if !self.include_nonconstraining => {
+                // Constant expressions are not injective
+                return c.ty().visit_with(self);
+            }
+            ty::ConstKind::Param(data) => {
+                self.parameters.push(Parameter::from(data));
+            }
+            _ => {}
+        }
+
+        c.super_visit_with(self)
+    }
+}
+
+pub fn identify_constrained_generic_params<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    predicates: ty::GenericPredicates<'tcx>,
+    impl_trait_ref: Option<ty::TraitRef<'tcx>>,
+    input_parameters: &mut FxHashSet<Parameter>,
+) {
+    let mut predicates = predicates.predicates.to_vec();
+    setup_constraining_predicates(tcx, &mut predicates, impl_trait_ref, input_parameters);
+}
+
+/// Order the predicates in `predicates` such that each parameter is
+/// constrained before it is used, if that is possible, and add the
+/// parameters so constrained to `input_parameters`. For example,
+/// imagine the following impl:
+/// ```ignore (illustrative)
+/// impl<T: Debug, U: Iterator<Item = T>> Trait<T> for U
+/// ```
+/// The impl's predicates are collected from left to right. Ignoring
+/// the implicit `Sized` bounds, these are
+/// * T: Debug
+/// * U: Iterator
+/// * <U as Iterator>::Item = T -- a desugared ProjectionPredicate
+///
+/// When we, for example, try to go over the trait-reference
+/// `IntoIter<T> as Trait<T>`, we substitute the impl parameters with fresh
+/// variables and match them with the impl trait-ref, so we know that
+/// `$U = IntoIter<T>`.
+///
+/// However, in order to process the `$T: Debug` predicate, we must first
+/// know the value of `$T` - which is only given by processing the
+/// projection. As we occasionally want to process predicates in a single
+/// pass, we want the projection to come first. In fact, as projections
+/// can (acyclically) depend on one another - see RFC447 for details - we
+/// need to topologically sort them.
+///
+/// We *do* have to be somewhat careful when projection targets contain
+/// projections themselves, for example in
+/// impl<S, U, V, W> Trait for U where
+/// /* 0 */ S: Iterator<Item = U>,
+/// /* - */ U: Iterator,
+/// /* 1 */ <U as Iterator>::Item: ToOwned<Owned = (W, <V as Iterator>::Item)>
+/// /* 2 */ W: Iterator<Item = V>
+/// /* 3 */ V: Debug
+/// we have to evaluate the projections in the order I wrote them:
+/// `V: Debug` requires `V` to be evaluated. The only projection that
+/// *determines* `V` is 2 (1 contains it, but *does not determine it*,
+/// as it is only contained within a projection), but that requires `W`
+/// which is determined by 1, which requires `U`, that is determined
+/// by 0. I should probably pick a less tangled example, but I can't
+/// think of any.
+pub fn setup_constraining_predicates<'tcx>( + tcx: TyCtxt<'tcx>, + predicates: &mut [(ty::Predicate<'tcx>, Span)], + impl_trait_ref: Option>, + input_parameters: &mut FxHashSet, +) { + // The canonical way of doing the needed topological sort + // would be a DFS, but getting the graph and its ownership + // right is annoying, so I am using an in-place fixed-point iteration, + // which is `O(nt)` where `t` is the depth of type-parameter constraints, + // remembering that `t` should be less than 7 in practice. + // + // Basically, I iterate over all projections and swap every + // "ready" projection to the start of the list, such that + // all of the projections before `i` are topologically sorted + // and constrain all the parameters in `input_parameters`. + // + // In the example, `input_parameters` starts by containing `U` - which + // is constrained by the trait-ref - and so on the first pass we + // observe that `::Item = T` is a "ready" projection that + // constrains `T` and swap it to front. As it is the sole projection, + // no more swaps can take place afterwards, with the result being + // * ::Item = T + // * T: Debug + // * U: Iterator + debug!( + "setup_constraining_predicates: predicates={:?} \ + impl_trait_ref={:?} input_parameters={:?}", + predicates, impl_trait_ref, input_parameters + ); + let mut i = 0; + let mut changed = true; + while changed { + changed = false; + + for j in i..predicates.len() { + // Note that we don't have to care about binders here, + // as the impl trait ref never contains any late-bound regions. + if let ty::PredicateKind::Projection(projection) = predicates[j].0.kind().skip_binder() + { + // Special case: watch out for some kind of sneaky attempt + // to project out an associated type defined by this very + // trait. + let unbound_trait_ref = projection.projection_ty.trait_ref(tcx); + if Some(unbound_trait_ref) == impl_trait_ref { + continue; + } + + // A projection depends on its input types and determines its output + // type. For example, if we have + // `<::Baz as Iterator>::Output = ::Output` + // Then the projection only applies if `T` is known, but it still + // does not determine `U`. + let inputs = parameters_for(&projection.projection_ty, true); + let relies_only_on_inputs = inputs.iter().all(|p| input_parameters.contains(p)); + if !relies_only_on_inputs { + continue; + } + input_parameters.extend(parameters_for(&projection.term, false)); + } else { + continue; + } + // fancy control flow to bypass borrow checker + predicates.swap(i, j); + i += 1; + changed = true; + } + debug!( + "setup_constraining_predicates: predicates={:?} \ + i={} impl_trait_ref={:?} input_parameters={:?}", + predicates, i, impl_trait_ref, input_parameters + ); + } +} diff --git a/compiler/rustc_typeck/src/errors.rs b/compiler/rustc_typeck/src/errors.rs new file mode 100644 index 000000000..0438ac02e --- /dev/null +++ b/compiler/rustc_typeck/src/errors.rs @@ -0,0 +1,326 @@ +//! Errors emitted by typeck. 
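+//!
+//! Each `#[derive(SessionDiagnostic)]` struct in this module describes one diagnostic:
+//! its fields carry the spans and values that are interpolated into the corresponding
+//! Fluent message, and the error is reported by constructing the struct and handing it
+//! to `Session::emit_err`. A rough usage sketch (illustrative, using a type defined below):
+//! ```ignore (illustrative)
+//! tcx.sess.emit_err(UnconstrainedOpaqueType { span, name });
+//! ```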
+use rustc_errors::{error_code, Applicability, DiagnosticBuilder, ErrorGuaranteed}; +use rustc_macros::{SessionDiagnostic, SessionSubdiagnostic}; +use rustc_middle::ty::Ty; +use rustc_session::{parse::ParseSess, SessionDiagnostic}; +use rustc_span::{symbol::Ident, Span, Symbol}; + +#[derive(SessionDiagnostic)] +#[error(typeck::field_multiply_specified_in_initializer, code = "E0062")] +pub struct FieldMultiplySpecifiedInInitializer { + #[primary_span] + #[label] + pub span: Span, + #[label(typeck::previous_use_label)] + pub prev_span: Span, + pub ident: Ident, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::unrecognized_atomic_operation, code = "E0092")] +pub struct UnrecognizedAtomicOperation<'a> { + #[primary_span] + #[label] + pub span: Span, + pub op: &'a str, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::wrong_number_of_generic_arguments_to_intrinsic, code = "E0094")] +pub struct WrongNumberOfGenericArgumentsToIntrinsic<'a> { + #[primary_span] + #[label] + pub span: Span, + pub found: usize, + pub expected: usize, + pub descr: &'a str, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::unrecognized_intrinsic_function, code = "E0093")] +pub struct UnrecognizedIntrinsicFunction { + #[primary_span] + #[label] + pub span: Span, + pub name: Symbol, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::lifetimes_or_bounds_mismatch_on_trait, code = "E0195")] +pub struct LifetimesOrBoundsMismatchOnTrait { + #[primary_span] + #[label] + pub span: Span, + #[label(typeck::generics_label)] + pub generics_span: Option, + pub item_kind: &'static str, + pub ident: Ident, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::drop_impl_on_wrong_item, code = "E0120")] +pub struct DropImplOnWrongItem { + #[primary_span] + #[label] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::field_already_declared, code = "E0124")] +pub struct FieldAlreadyDeclared { + pub field_name: Ident, + #[primary_span] + #[label] + pub span: Span, + #[label(typeck::previous_decl_label)] + pub prev_span: Span, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::copy_impl_on_type_with_dtor, code = "E0184")] +pub struct CopyImplOnTypeWithDtor { + #[primary_span] + #[label] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::multiple_relaxed_default_bounds, code = "E0203")] +pub struct MultipleRelaxedDefaultBounds { + #[primary_span] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::copy_impl_on_non_adt, code = "E0206")] +pub struct CopyImplOnNonAdt { + #[primary_span] + #[label] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::trait_object_declared_with_no_traits, code = "E0224")] +pub struct TraitObjectDeclaredWithNoTraits { + #[primary_span] + pub span: Span, + #[label(typeck::alias_span)] + pub trait_alias_span: Option, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::ambiguous_lifetime_bound, code = "E0227")] +pub struct AmbiguousLifetimeBound { + #[primary_span] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::assoc_type_binding_not_allowed, code = "E0229")] +pub struct AssocTypeBindingNotAllowed { + #[primary_span] + #[label] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::functional_record_update_on_non_struct, code = "E0436")] +pub struct FunctionalRecordUpdateOnNonStruct { + #[primary_span] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::typeof_reserved_keyword_used, code = "E0516")] +pub struct TypeofReservedKeywordUsed<'tcx> { + pub ty: Ty<'tcx>, + #[primary_span] + 
#[label] + pub span: Span, + #[suggestion_verbose(code = "{ty}")] + pub opt_sugg: Option<(Span, Applicability)>, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::return_stmt_outside_of_fn_body, code = "E0572")] +pub struct ReturnStmtOutsideOfFnBody { + #[primary_span] + pub span: Span, + #[label(typeck::encl_body_label)] + pub encl_body_span: Option, + #[label(typeck::encl_fn_label)] + pub encl_fn_span: Option, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::yield_expr_outside_of_generator, code = "E0627")] +pub struct YieldExprOutsideOfGenerator { + #[primary_span] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::struct_expr_non_exhaustive, code = "E0639")] +pub struct StructExprNonExhaustive { + #[primary_span] + pub span: Span, + pub what: &'static str, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::method_call_on_unknown_type, code = "E0699")] +pub struct MethodCallOnUnknownType { + #[primary_span] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::value_of_associated_struct_already_specified, code = "E0719")] +pub struct ValueOfAssociatedStructAlreadySpecified { + #[primary_span] + #[label] + pub span: Span, + #[label(typeck::previous_bound_label)] + pub prev_span: Span, + pub item_name: Ident, + pub def_path: String, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::address_of_temporary_taken, code = "E0745")] +pub struct AddressOfTemporaryTaken { + #[primary_span] + #[label] + pub span: Span, +} + +#[derive(SessionSubdiagnostic)] +pub enum AddReturnTypeSuggestion<'tcx> { + #[suggestion( + typeck::add_return_type_add, + code = "-> {found} ", + applicability = "machine-applicable" + )] + Add { + #[primary_span] + span: Span, + found: Ty<'tcx>, + }, + #[suggestion( + typeck::add_return_type_missing_here, + code = "-> _ ", + applicability = "has-placeholders" + )] + MissingHere { + #[primary_span] + span: Span, + }, +} + +#[derive(SessionSubdiagnostic)] +pub enum ExpectedReturnTypeLabel<'tcx> { + #[label(typeck::expected_default_return_type)] + Unit { + #[primary_span] + span: Span, + }, + #[label(typeck::expected_return_type)] + Other { + #[primary_span] + span: Span, + expected: Ty<'tcx>, + }, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::unconstrained_opaque_type)] +#[note] +pub struct UnconstrainedOpaqueType { + #[primary_span] + pub span: Span, + pub name: Symbol, +} + +pub struct MissingTypeParams { + pub span: Span, + pub def_span: Span, + pub missing_type_params: Vec, + pub empty_generic_args: bool, +} + +// Manual implementation of `SessionDiagnostic` to be able to call `span_to_snippet`. +impl<'a> SessionDiagnostic<'a> for MissingTypeParams { + fn into_diagnostic(self, sess: &'a ParseSess) -> DiagnosticBuilder<'a, ErrorGuaranteed> { + let mut err = sess.span_diagnostic.struct_span_err_with_code( + self.span, + rustc_errors::fluent::typeck::missing_type_params, + error_code!(E0393), + ); + err.set_arg("parameterCount", self.missing_type_params.len()); + err.set_arg( + "parameters", + self.missing_type_params + .iter() + .map(|n| format!("`{}`", n)) + .collect::>() + .join(", "), + ); + + err.span_label(self.def_span, rustc_errors::fluent::typeck::label); + + let mut suggested = false; + if let (Ok(snippet), true) = ( + sess.source_map().span_to_snippet(self.span), + // Don't suggest setting the type params if there are some already: the order is + // tricky to get right and the user will already know what the syntax is. 
+ self.empty_generic_args, + ) { + if snippet.ends_with('>') { + // The user wrote `Trait<'a, T>` or similar. To provide an accurate suggestion + // we would have to preserve the right order. For now, as clearly the user is + // aware of the syntax, we do nothing. + } else { + // The user wrote `Iterator`, so we don't have a type we can suggest, but at + // least we can clue them to the correct syntax `Iterator`. + err.span_suggestion( + self.span, + rustc_errors::fluent::typeck::suggestion, + format!( + "{}<{}>", + snippet, + self.missing_type_params + .iter() + .map(|n| n.to_string()) + .collect::>() + .join(", ") + ), + Applicability::HasPlaceholders, + ); + suggested = true; + } + } + if !suggested { + err.span_label(self.span, rustc_errors::fluent::typeck::no_suggestion_label); + } + + err.note(rustc_errors::fluent::typeck::note); + err + } +} + +#[derive(SessionDiagnostic)] +#[error(typeck::manual_implementation, code = "E0183")] +#[help] +pub struct ManualImplementation { + #[primary_span] + #[label] + pub span: Span, + pub trait_name: String, +} + +#[derive(SessionDiagnostic)] +#[error(typeck::substs_on_overridden_impl)] +pub struct SubstsOnOverriddenImpl { + #[primary_span] + pub span: Span, +} diff --git a/compiler/rustc_typeck/src/expr_use_visitor.rs b/compiler/rustc_typeck/src/expr_use_visitor.rs new file mode 100644 index 000000000..74a5b6e42 --- /dev/null +++ b/compiler/rustc_typeck/src/expr_use_visitor.rs @@ -0,0 +1,914 @@ +//! A different sort of visitor for walking fn bodies. Unlike the +//! normal visitor, which just walks the entire body in one shot, the +//! `ExprUseVisitor` determines how expressions are being used. + +use std::slice::from_ref; + +use hir::def::DefKind; +use hir::Expr; +// Export these here so that Clippy can use them. +pub use rustc_middle::hir::place::{Place, PlaceBase, PlaceWithHirId, Projection}; + +use rustc_data_structures::fx::FxIndexMap; +use rustc_hir as hir; +use rustc_hir::def::Res; +use rustc_hir::def_id::LocalDefId; +use rustc_hir::PatKind; +use rustc_index::vec::Idx; +use rustc_infer::infer::InferCtxt; +use rustc_middle::hir::place::ProjectionKind; +use rustc_middle::mir::FakeReadCause; +use rustc_middle::ty::{self, adjustment, AdtKind, Ty, TyCtxt}; +use rustc_target::abi::VariantIdx; +use ty::BorrowKind::ImmBorrow; + +use crate::mem_categorization as mc; + +/// This trait defines the callbacks you can expect to receive when +/// employing the ExprUseVisitor. +pub trait Delegate<'tcx> { + /// The value found at `place` is moved, depending + /// on `mode`. Where `diag_expr_id` is the id used for diagnostics for `place`. + /// + /// Use of a `Copy` type in a ByValue context is considered a use + /// by `ImmBorrow` and `borrow` is called instead. This is because + /// a shared borrow is the "minimum access" that would be needed + /// to perform a copy. + /// + /// + /// The parameter `diag_expr_id` indicates the HIR id that ought to be used for + /// diagnostics. Around pattern matching such as `let pat = expr`, the diagnostic + /// id will be the id of the expression `expr` but the place itself will have + /// the id of the binding in the pattern `pat`. + fn consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId); + + /// The value found at `place` is being borrowed with kind `bk`. + /// `diag_expr_id` is the id used for diagnostics (see `consume` for more details). 
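+    ///
+    /// As a rough illustration (hypothetical client code, not taken from this crate),
+    /// walking the following body reports two borrows of the place for `point.x`:
+    /// ```ignore (illustrative)
+    /// let r = &point.x;     // borrow(place(point.x), ImmBorrow)
+    /// let m = &mut point.x; // borrow(place(point.x), MutBorrow)
+    /// ```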
+ fn borrow( + &mut self, + place_with_id: &PlaceWithHirId<'tcx>, + diag_expr_id: hir::HirId, + bk: ty::BorrowKind, + ); + + /// The value found at `place` is being copied. + /// `diag_expr_id` is the id used for diagnostics (see `consume` for more details). + fn copy(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) { + // In most cases, copying data from `x` is equivalent to doing `*&x`, so by default + // we treat a copy of `x` as a borrow of `x`. + self.borrow(place_with_id, diag_expr_id, ty::BorrowKind::ImmBorrow) + } + + /// The path at `assignee_place` is being assigned to. + /// `diag_expr_id` is the id used for diagnostics (see `consume` for more details). + fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId); + + /// The path at `binding_place` is a binding that is being initialized. + /// + /// This covers cases such as `let x = 42;` + fn bind(&mut self, binding_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) { + // Bindings can normally be treated as a regular assignment, so by default we + // forward this to the mutate callback. + self.mutate(binding_place, diag_expr_id) + } + + /// The `place` should be a fake read because of specified `cause`. + fn fake_read( + &mut self, + place_with_id: &PlaceWithHirId<'tcx>, + cause: FakeReadCause, + diag_expr_id: hir::HirId, + ); +} + +#[derive(Copy, Clone, PartialEq, Debug)] +enum ConsumeMode { + /// reference to x where x has a type that copies + Copy, + /// reference to x where x has a type that moves + Move, +} + +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum MutateMode { + Init, + /// Example: `x = y` + JustWrite, + /// Example: `x += y` + WriteAndRead, +} + +/// The ExprUseVisitor type +/// +/// This is the code that actually walks the tree. +pub struct ExprUseVisitor<'a, 'tcx> { + mc: mc::MemCategorizationContext<'a, 'tcx>, + body_owner: LocalDefId, + delegate: &'a mut dyn Delegate<'tcx>, +} + +/// If the MC results in an error, it's because the type check +/// failed (or will fail, when the error is uncovered and reported +/// during writeback). In this case, we just ignore this part of the +/// code. +/// +/// Note that this macro appears similar to try!(), but, unlike try!(), +/// it does not propagate the error. +macro_rules! return_if_err { + ($inp: expr) => { + match $inp { + Ok(v) => v, + Err(()) => { + debug!("mc reported err"); + return; + } + } + }; +} + +impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> { + /// Creates the ExprUseVisitor, configuring it with the various options provided: + /// + /// - `delegate` -- who receives the callbacks + /// - `param_env` --- parameter environment for trait lookups (esp. 
pertaining to `Copy`) + /// - `typeck_results` --- typeck results for the code being analyzed + pub fn new( + delegate: &'a mut (dyn Delegate<'tcx> + 'a), + infcx: &'a InferCtxt<'a, 'tcx>, + body_owner: LocalDefId, + param_env: ty::ParamEnv<'tcx>, + typeck_results: &'a ty::TypeckResults<'tcx>, + ) -> Self { + ExprUseVisitor { + mc: mc::MemCategorizationContext::new(infcx, param_env, body_owner, typeck_results), + body_owner, + delegate, + } + } + + #[instrument(skip(self), level = "debug")] + pub fn consume_body(&mut self, body: &hir::Body<'_>) { + for param in body.params { + let param_ty = return_if_err!(self.mc.pat_ty_adjusted(param.pat)); + debug!("consume_body: param_ty = {:?}", param_ty); + + let param_place = self.mc.cat_rvalue(param.hir_id, param.pat.span, param_ty); + + self.walk_irrefutable_pat(¶m_place, param.pat); + } + + self.consume_expr(&body.value); + } + + fn tcx(&self) -> TyCtxt<'tcx> { + self.mc.tcx() + } + + fn delegate_consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) { + delegate_consume(&self.mc, self.delegate, place_with_id, diag_expr_id) + } + + fn consume_exprs(&mut self, exprs: &[hir::Expr<'_>]) { + for expr in exprs { + self.consume_expr(expr); + } + } + + pub fn consume_expr(&mut self, expr: &hir::Expr<'_>) { + debug!("consume_expr(expr={:?})", expr); + + let place_with_id = return_if_err!(self.mc.cat_expr(expr)); + self.delegate_consume(&place_with_id, place_with_id.hir_id); + self.walk_expr(expr); + } + + fn mutate_expr(&mut self, expr: &hir::Expr<'_>) { + let place_with_id = return_if_err!(self.mc.cat_expr(expr)); + self.delegate.mutate(&place_with_id, place_with_id.hir_id); + self.walk_expr(expr); + } + + fn borrow_expr(&mut self, expr: &hir::Expr<'_>, bk: ty::BorrowKind) { + debug!("borrow_expr(expr={:?}, bk={:?})", expr, bk); + + let place_with_id = return_if_err!(self.mc.cat_expr(expr)); + self.delegate.borrow(&place_with_id, place_with_id.hir_id, bk); + + self.walk_expr(expr) + } + + fn select_from_expr(&mut self, expr: &hir::Expr<'_>) { + self.walk_expr(expr) + } + + pub fn walk_expr(&mut self, expr: &hir::Expr<'_>) { + debug!("walk_expr(expr={:?})", expr); + + self.walk_adjustment(expr); + + match expr.kind { + hir::ExprKind::Path(_) => {} + + hir::ExprKind::Type(subexpr, _) => self.walk_expr(subexpr), + + hir::ExprKind::Unary(hir::UnOp::Deref, base) => { + // *base + self.select_from_expr(base); + } + + hir::ExprKind::Field(base, _) => { + // base.f + self.select_from_expr(base); + } + + hir::ExprKind::Index(lhs, rhs) => { + // lhs[rhs] + self.select_from_expr(lhs); + self.consume_expr(rhs); + } + + hir::ExprKind::Call(callee, args) => { + // callee(args) + self.consume_expr(callee); + self.consume_exprs(args); + } + + hir::ExprKind::MethodCall(.., args, _) => { + // callee.m(args) + self.consume_exprs(args); + } + + hir::ExprKind::Struct(_, fields, ref opt_with) => { + self.walk_struct_expr(fields, opt_with); + } + + hir::ExprKind::Tup(exprs) => { + self.consume_exprs(exprs); + } + + hir::ExprKind::If(ref cond_expr, ref then_expr, ref opt_else_expr) => { + self.consume_expr(cond_expr); + self.consume_expr(then_expr); + if let Some(ref else_expr) = *opt_else_expr { + self.consume_expr(else_expr); + } + } + + hir::ExprKind::Let(hir::Let { pat, init, .. 
}) => { + self.walk_local(init, pat, None, |t| t.borrow_expr(init, ty::ImmBorrow)) + } + + hir::ExprKind::Match(ref discr, arms, _) => { + let discr_place = return_if_err!(self.mc.cat_expr(discr)); + self.maybe_read_scrutinee( + discr, + discr_place.clone(), + arms.iter().map(|arm| arm.pat), + ); + + // treatment of the discriminant is handled while walking the arms. + for arm in arms { + self.walk_arm(&discr_place, arm); + } + } + + hir::ExprKind::Array(exprs) => { + self.consume_exprs(exprs); + } + + hir::ExprKind::AddrOf(_, m, ref base) => { + // &base + // make sure that the thing we are pointing out stays valid + // for the lifetime `scope_r` of the resulting ptr: + let bk = ty::BorrowKind::from_mutbl(m); + self.borrow_expr(base, bk); + } + + hir::ExprKind::InlineAsm(asm) => { + for (op, _op_sp) in asm.operands { + match op { + hir::InlineAsmOperand::In { expr, .. } => self.consume_expr(expr), + hir::InlineAsmOperand::Out { expr: Some(expr), .. } + | hir::InlineAsmOperand::InOut { expr, .. } => { + self.mutate_expr(expr); + } + hir::InlineAsmOperand::SplitInOut { in_expr, out_expr, .. } => { + self.consume_expr(in_expr); + if let Some(out_expr) = out_expr { + self.mutate_expr(out_expr); + } + } + hir::InlineAsmOperand::Out { expr: None, .. } + | hir::InlineAsmOperand::Const { .. } + | hir::InlineAsmOperand::SymFn { .. } + | hir::InlineAsmOperand::SymStatic { .. } => {} + } + } + } + + hir::ExprKind::Continue(..) + | hir::ExprKind::Lit(..) + | hir::ExprKind::ConstBlock(..) + | hir::ExprKind::Err => {} + + hir::ExprKind::Loop(blk, ..) => { + self.walk_block(blk); + } + + hir::ExprKind::Unary(_, lhs) => { + self.consume_expr(lhs); + } + + hir::ExprKind::Binary(_, lhs, rhs) => { + self.consume_expr(lhs); + self.consume_expr(rhs); + } + + hir::ExprKind::Block(blk, _) => { + self.walk_block(blk); + } + + hir::ExprKind::Break(_, ref opt_expr) | hir::ExprKind::Ret(ref opt_expr) => { + if let Some(expr) = *opt_expr { + self.consume_expr(expr); + } + } + + hir::ExprKind::Assign(lhs, rhs, _) => { + self.mutate_expr(lhs); + self.consume_expr(rhs); + } + + hir::ExprKind::Cast(base, _) => { + self.consume_expr(base); + } + + hir::ExprKind::DropTemps(expr) => { + self.consume_expr(expr); + } + + hir::ExprKind::AssignOp(_, lhs, rhs) => { + if self.mc.typeck_results.is_method_call(expr) { + self.consume_expr(lhs); + } else { + self.mutate_expr(lhs); + } + self.consume_expr(rhs); + } + + hir::ExprKind::Repeat(base, _) => { + self.consume_expr(base); + } + + hir::ExprKind::Closure { .. } => { + self.walk_captures(expr); + } + + hir::ExprKind::Box(ref base) => { + self.consume_expr(base); + } + + hir::ExprKind::Yield(value, _) => { + self.consume_expr(value); + } + } + } + + fn walk_stmt(&mut self, stmt: &hir::Stmt<'_>) { + match stmt.kind { + hir::StmtKind::Local(hir::Local { pat, init: Some(expr), els, .. }) => { + self.walk_local(expr, pat, *els, |_| {}) + } + + hir::StmtKind::Local(_) => {} + + hir::StmtKind::Item(_) => { + // We don't visit nested items in this visitor, + // only the fn body we were given. + } + + hir::StmtKind::Expr(ref expr) | hir::StmtKind::Semi(ref expr) => { + self.consume_expr(expr); + } + } + } + + fn maybe_read_scrutinee<'t>( + &mut self, + discr: &Expr<'_>, + discr_place: PlaceWithHirId<'tcx>, + pats: impl Iterator>, + ) { + // Matching should not always be considered a use of the place, hence + // discr does not necessarily need to be borrowed. + // We only want to borrow discr if the pattern contain something other + // than wildcards. 
+ let ExprUseVisitor { ref mc, body_owner: _, delegate: _ } = *self; + let mut needs_to_be_read = false; + for pat in pats { + return_if_err!(mc.cat_pattern(discr_place.clone(), pat, |place, pat| { + match &pat.kind { + PatKind::Binding(.., opt_sub_pat) => { + // If the opt_sub_pat is None, than the binding does not count as + // a wildcard for the purpose of borrowing discr. + if opt_sub_pat.is_none() { + needs_to_be_read = true; + } + } + PatKind::Path(qpath) => { + // A `Path` pattern is just a name like `Foo`. This is either a + // named constant or else it refers to an ADT variant + + let res = self.mc.typeck_results.qpath_res(qpath, pat.hir_id); + match res { + Res::Def(DefKind::Const, _) | Res::Def(DefKind::AssocConst, _) => { + // Named constants have to be equated with the value + // being matched, so that's a read of the value being matched. + // + // FIXME: We don't actually reads for ZSTs. + needs_to_be_read = true; + } + _ => { + // Otherwise, this is a struct/enum variant, and so it's + // only a read if we need to read the discriminant. + needs_to_be_read |= is_multivariant_adt(place.place.ty()); + } + } + } + PatKind::TupleStruct(..) | PatKind::Struct(..) | PatKind::Tuple(..) => { + // For `Foo(..)`, `Foo { ... }` and `(...)` patterns, check if we are matching + // against a multivariant enum or struct. In that case, we have to read + // the discriminant. Otherwise this kind of pattern doesn't actually + // read anything (we'll get invoked for the `...`, which may indeed + // perform some reads). + + let place_ty = place.place.ty(); + needs_to_be_read |= is_multivariant_adt(place_ty); + } + PatKind::Lit(_) | PatKind::Range(..) => { + // If the PatKind is a Lit or a Range then we want + // to borrow discr. + needs_to_be_read = true; + } + PatKind::Or(_) + | PatKind::Box(_) + | PatKind::Slice(..) + | PatKind::Ref(..) + | PatKind::Wild => { + // If the PatKind is Or, Box, Slice or Ref, the decision is made later + // as these patterns contains subpatterns + // If the PatKind is Wild, the decision is made based on the other patterns being + // examined + } + } + })); + } + + if needs_to_be_read { + self.borrow_expr(discr, ty::ImmBorrow); + } else { + let closure_def_id = match discr_place.place.base { + PlaceBase::Upvar(upvar_id) => Some(upvar_id.closure_expr_id), + _ => None, + }; + + self.delegate.fake_read( + &discr_place, + FakeReadCause::ForMatchedPlace(closure_def_id), + discr_place.hir_id, + ); + + // We always want to walk the discriminant. We want to make sure, for instance, + // that the discriminant has been initialized. + self.walk_expr(discr); + } + } + + fn walk_local( + &mut self, + expr: &hir::Expr<'_>, + pat: &hir::Pat<'_>, + els: Option<&hir::Block<'_>>, + mut f: F, + ) where + F: FnMut(&mut Self), + { + self.walk_expr(expr); + let expr_place = return_if_err!(self.mc.cat_expr(expr)); + f(self); + if let Some(els) = els { + // borrowing because we need to test the descriminant + self.maybe_read_scrutinee(expr, expr_place.clone(), from_ref(pat).iter()); + self.walk_block(els) + } + self.walk_irrefutable_pat(&expr_place, &pat); + } + + /// Indicates that the value of `blk` will be consumed, meaning either copied or moved + /// depending on its type. 
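+    ///
+    /// As a rough illustration (hypothetical client code, not taken from this crate),
+    /// the tail expression of the block below is reported as a consume (a move, since
+    /// `String` is not `Copy`); a `Copy` tail value would be reported via `copy` instead:
+    /// ```ignore (illustrative)
+    /// let s = { String::from("hi") }; // tail expression: consume (move) of the String
+    /// ```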
+ fn walk_block(&mut self, blk: &hir::Block<'_>) { + debug!("walk_block(blk.hir_id={})", blk.hir_id); + + for stmt in blk.stmts { + self.walk_stmt(stmt); + } + + if let Some(ref tail_expr) = blk.expr { + self.consume_expr(tail_expr); + } + } + + fn walk_struct_expr<'hir>( + &mut self, + fields: &[hir::ExprField<'_>], + opt_with: &Option<&'hir hir::Expr<'_>>, + ) { + // Consume the expressions supplying values for each field. + for field in fields { + self.consume_expr(field.expr); + } + + let with_expr = match *opt_with { + Some(w) => &*w, + None => { + return; + } + }; + + let with_place = return_if_err!(self.mc.cat_expr(with_expr)); + + // Select just those fields of the `with` + // expression that will actually be used + match with_place.place.ty().kind() { + ty::Adt(adt, substs) if adt.is_struct() => { + // Consume those fields of the with expression that are needed. + for (f_index, with_field) in adt.non_enum_variant().fields.iter().enumerate() { + let is_mentioned = fields.iter().any(|f| { + self.tcx().field_index(f.hir_id, self.mc.typeck_results) == f_index + }); + if !is_mentioned { + let field_place = self.mc.cat_projection( + &*with_expr, + with_place.clone(), + with_field.ty(self.tcx(), substs), + ProjectionKind::Field(f_index as u32, VariantIdx::new(0)), + ); + self.delegate_consume(&field_place, field_place.hir_id); + } + } + } + _ => { + // the base expression should always evaluate to a + // struct; however, when EUV is run during typeck, it + // may not. This will generate an error earlier in typeck, + // so we can just ignore it. + if !self.tcx().sess.has_errors().is_some() { + span_bug!(with_expr.span, "with expression doesn't evaluate to a struct"); + } + } + } + + // walk the with expression so that complex expressions + // are properly handled. + self.walk_expr(with_expr); + } + + /// Invoke the appropriate delegate calls for anything that gets + /// consumed or borrowed as part of the automatic adjustment + /// process. + fn walk_adjustment(&mut self, expr: &hir::Expr<'_>) { + let adjustments = self.mc.typeck_results.expr_adjustments(expr); + let mut place_with_id = return_if_err!(self.mc.cat_expr_unadjusted(expr)); + for adjustment in adjustments { + debug!("walk_adjustment expr={:?} adj={:?}", expr, adjustment); + match adjustment.kind { + adjustment::Adjust::NeverToAny | adjustment::Adjust::Pointer(_) => { + // Creating a closure/fn-pointer or unsizing consumes + // the input and stores it into the resulting rvalue. + self.delegate_consume(&place_with_id, place_with_id.hir_id); + } + + adjustment::Adjust::Deref(None) => {} + + // Autoderefs for overloaded Deref calls in fact reference + // their receiver. That is, if we have `(*x)` where `x` + // is of type `Rc`, then this in fact is equivalent to + // `x.deref()`. Since `deref()` is declared with `&self`, + // this is an autoref of `x`. + adjustment::Adjust::Deref(Some(ref deref)) => { + let bk = ty::BorrowKind::from_mutbl(deref.mutbl); + self.delegate.borrow(&place_with_id, place_with_id.hir_id, bk); + } + + adjustment::Adjust::Borrow(ref autoref) => { + self.walk_autoref(expr, &place_with_id, autoref); + } + } + place_with_id = + return_if_err!(self.mc.cat_expr_adjusted(expr, place_with_id, adjustment)); + } + } + + /// Walks the autoref `autoref` applied to the autoderef'd + /// `expr`. `base_place` is the mem-categorized form of `expr` + /// after all relevant autoderefs have occurred. 
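// A rough illustration (not from the original comments) of the adjustment
// sequence walked above, for a receiver behind a `Deref` pointer:
//
// ```rust,ignore (illustrative sketch)
// let x = std::rc::Rc::new(String::from("hi"));
// let n = x.capacity();
// ```
//
// Here `x` is adjusted with `Adjust::Deref(Some(..))` for the overloaded
// `<Rc<String> as Deref>::deref(&x)` call -- an implicit `ImmBorrow` of `x` --
// followed by an `Adjust::Borrow` autoref of the resulting `String` for the
// `&self` receiver, which is what `walk_autoref` records below.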
+ fn walk_autoref( + &mut self, + expr: &hir::Expr<'_>, + base_place: &PlaceWithHirId<'tcx>, + autoref: &adjustment::AutoBorrow<'tcx>, + ) { + debug!( + "walk_autoref(expr.hir_id={} base_place={:?} autoref={:?})", + expr.hir_id, base_place, autoref + ); + + match *autoref { + adjustment::AutoBorrow::Ref(_, m) => { + self.delegate.borrow( + base_place, + base_place.hir_id, + ty::BorrowKind::from_mutbl(m.into()), + ); + } + + adjustment::AutoBorrow::RawPtr(m) => { + debug!("walk_autoref: expr.hir_id={} base_place={:?}", expr.hir_id, base_place); + + self.delegate.borrow(base_place, base_place.hir_id, ty::BorrowKind::from_mutbl(m)); + } + } + } + + fn walk_arm(&mut self, discr_place: &PlaceWithHirId<'tcx>, arm: &hir::Arm<'_>) { + let closure_def_id = match discr_place.place.base { + PlaceBase::Upvar(upvar_id) => Some(upvar_id.closure_expr_id), + _ => None, + }; + + self.delegate.fake_read( + discr_place, + FakeReadCause::ForMatchedPlace(closure_def_id), + discr_place.hir_id, + ); + self.walk_pat(discr_place, arm.pat, arm.guard.is_some()); + + if let Some(hir::Guard::If(e)) = arm.guard { + self.consume_expr(e) + } else if let Some(hir::Guard::IfLet(ref l)) = arm.guard { + self.consume_expr(l.init) + } + + self.consume_expr(arm.body); + } + + /// Walks a pat that occurs in isolation (i.e., top-level of fn argument or + /// let binding, and *not* a match arm or nested pat.) + fn walk_irrefutable_pat(&mut self, discr_place: &PlaceWithHirId<'tcx>, pat: &hir::Pat<'_>) { + let closure_def_id = match discr_place.place.base { + PlaceBase::Upvar(upvar_id) => Some(upvar_id.closure_expr_id), + _ => None, + }; + + self.delegate.fake_read( + discr_place, + FakeReadCause::ForLet(closure_def_id), + discr_place.hir_id, + ); + self.walk_pat(discr_place, pat, false); + } + + /// The core driver for walking a pattern + fn walk_pat( + &mut self, + discr_place: &PlaceWithHirId<'tcx>, + pat: &hir::Pat<'_>, + has_guard: bool, + ) { + debug!("walk_pat(discr_place={:?}, pat={:?}, has_guard={:?})", discr_place, pat, has_guard); + + let tcx = self.tcx(); + let ExprUseVisitor { ref mc, body_owner: _, ref mut delegate } = *self; + return_if_err!(mc.cat_pattern(discr_place.clone(), pat, |place, pat| { + if let PatKind::Binding(_, canonical_id, ..) = pat.kind { + debug!("walk_pat: binding place={:?} pat={:?}", place, pat); + if let Some(bm) = + mc.typeck_results.extract_binding_mode(tcx.sess, pat.hir_id, pat.span) + { + debug!("walk_pat: pat.hir_id={:?} bm={:?}", pat.hir_id, bm); + + // pat_ty: the type of the binding being produced. + let pat_ty = return_if_err!(mc.node_ty(pat.hir_id)); + debug!("walk_pat: pat_ty={:?}", pat_ty); + + let def = Res::Local(canonical_id); + if let Ok(ref binding_place) = mc.cat_res(pat.hir_id, pat.span, pat_ty, def) { + delegate.bind(binding_place, binding_place.hir_id); + } + + // Subtle: MIR desugaring introduces immutable borrows for each pattern + // binding when lowering pattern guards to ensure that the guard does not + // modify the scrutinee. + if has_guard { + delegate.borrow(place, discr_place.hir_id, ImmBorrow); + } + + // It is also a borrow or copy/move of the value being matched. + // In a cases of pattern like `let pat = upvar`, don't use the span + // of the pattern, as this just looks confusing, instead use the span + // of the discriminant. + match bm { + ty::BindByReference(m) => { + let bk = ty::BorrowKind::from_mutbl(m); + delegate.borrow(place, discr_place.hir_id, bk); + } + ty::BindByValue(..) 
=> { + debug!("walk_pat binding consuming pat"); + delegate_consume(mc, *delegate, place, discr_place.hir_id); + } + } + } + } + })); + } + + /// Handle the case where the current body contains a closure. + /// + /// When the current body being handled is a closure, then we must make sure that + /// - The parent closure only captures Places from the nested closure that are not local to it. + /// + /// In the following example the closures `c` only captures `p.x` even though `incr` + /// is a capture of the nested closure + /// + /// ``` + /// struct P { x: i32 } + /// let mut p = P { x: 4 }; + /// let c = || { + /// let incr = 10; + /// let nested = || p.x += incr; + /// }; + /// ``` + /// + /// - When reporting the Place back to the Delegate, ensure that the UpvarId uses the enclosing + /// closure as the DefId. + fn walk_captures(&mut self, closure_expr: &hir::Expr<'_>) { + fn upvar_is_local_variable<'tcx>( + upvars: Option<&'tcx FxIndexMap>, + upvar_id: hir::HirId, + body_owner_is_closure: bool, + ) -> bool { + upvars.map(|upvars| !upvars.contains_key(&upvar_id)).unwrap_or(body_owner_is_closure) + } + + debug!("walk_captures({:?})", closure_expr); + + let tcx = self.tcx(); + let closure_def_id = tcx.hir().local_def_id(closure_expr.hir_id); + let upvars = tcx.upvars_mentioned(self.body_owner); + + // For purposes of this function, generator and closures are equivalent. + let body_owner_is_closure = + matches!(tcx.hir().body_owner_kind(self.body_owner), hir::BodyOwnerKind::Closure,); + + // If we have a nested closure, we want to include the fake reads present in the nested closure. + if let Some(fake_reads) = self.mc.typeck_results.closure_fake_reads.get(&closure_def_id) { + for (fake_read, cause, hir_id) in fake_reads.iter() { + match fake_read.base { + PlaceBase::Upvar(upvar_id) => { + if upvar_is_local_variable( + upvars, + upvar_id.var_path.hir_id, + body_owner_is_closure, + ) { + // The nested closure might be fake reading the current (enclosing) closure's local variables. + // The only places we want to fake read before creating the parent closure are the ones that + // are not local to it/ defined by it. + // + // ```rust,ignore(cannot-test-this-because-pseudo-code) + // let v1 = (0, 1); + // let c = || { // fake reads: v1 + // let v2 = (0, 1); + // let e = || { // fake reads: v1, v2 + // let (_, t1) = v1; + // let (_, t2) = v2; + // } + // } + // ``` + // This check is performed when visiting the body of the outermost closure (`c`) and ensures + // that we don't add a fake read of v2 in c. + continue; + } + } + _ => { + bug!( + "Do not know how to get HirId out of Rvalue and StaticItem {:?}", + fake_read.base + ); + } + }; + self.delegate.fake_read( + &PlaceWithHirId { place: fake_read.clone(), hir_id: *hir_id }, + *cause, + *hir_id, + ); + } + } + + if let Some(min_captures) = self.mc.typeck_results.closure_min_captures.get(&closure_def_id) + { + for (var_hir_id, min_list) in min_captures.iter() { + if upvars.map_or(body_owner_is_closure, |upvars| !upvars.contains_key(var_hir_id)) { + // The nested closure might be capturing the current (enclosing) closure's local variables. + // We check if the root variable is ever mentioned within the enclosing closure, if not + // then for the current body (if it's a closure) these aren't captures, we will ignore them. 
+ continue; + } + for captured_place in min_list { + let place = &captured_place.place; + let capture_info = captured_place.info; + + let place_base = if body_owner_is_closure { + // Mark the place to be captured by the enclosing closure + PlaceBase::Upvar(ty::UpvarId::new(*var_hir_id, self.body_owner)) + } else { + // If the body owner isn't a closure then the variable must + // be a local variable + PlaceBase::Local(*var_hir_id) + }; + let place_with_id = PlaceWithHirId::new( + capture_info.path_expr_id.unwrap_or( + capture_info.capture_kind_expr_id.unwrap_or(closure_expr.hir_id), + ), + place.base_ty, + place_base, + place.projections.clone(), + ); + + match capture_info.capture_kind { + ty::UpvarCapture::ByValue => { + self.delegate_consume(&place_with_id, place_with_id.hir_id); + } + ty::UpvarCapture::ByRef(upvar_borrow) => { + self.delegate.borrow( + &place_with_id, + place_with_id.hir_id, + upvar_borrow, + ); + } + } + } + } + } + } +} + +fn copy_or_move<'a, 'tcx>( + mc: &mc::MemCategorizationContext<'a, 'tcx>, + place_with_id: &PlaceWithHirId<'tcx>, +) -> ConsumeMode { + if !mc.type_is_copy_modulo_regions( + place_with_id.place.ty(), + mc.tcx().hir().span(place_with_id.hir_id), + ) { + ConsumeMode::Move + } else { + ConsumeMode::Copy + } +} + +// - If a place is used in a `ByValue` context then move it if it's not a `Copy` type. +// - If the place that is a `Copy` type consider it an `ImmBorrow`. +fn delegate_consume<'a, 'tcx>( + mc: &mc::MemCategorizationContext<'a, 'tcx>, + delegate: &mut (dyn Delegate<'tcx> + 'a), + place_with_id: &PlaceWithHirId<'tcx>, + diag_expr_id: hir::HirId, +) { + debug!("delegate_consume(place_with_id={:?})", place_with_id); + + let mode = copy_or_move(mc, place_with_id); + + match mode { + ConsumeMode::Move => delegate.consume(place_with_id, diag_expr_id), + ConsumeMode::Copy => delegate.copy(place_with_id, diag_expr_id), + } +} + +fn is_multivariant_adt(ty: Ty<'_>) -> bool { + if let ty::Adt(def, _) = ty.kind() { + // Note that if a non-exhaustive SingleVariant is defined in another crate, we need + // to assume that more cases will be added to the variant in the future. This mean + // that we should handle non-exhaustive SingleVariant the same way we would handle + // a MultiVariant. + // If the variant is not local it must be defined in another crate. 
+        let is_non_exhaustive = match def.adt_kind() {
+            AdtKind::Struct | AdtKind::Union => {
+                def.non_enum_variant().is_field_list_non_exhaustive()
+            }
+            AdtKind::Enum => def.is_variant_list_non_exhaustive(),
+        };
+        def.variants().len() > 1 || (!def.did().is_local() && is_non_exhaustive)
+    } else {
+        false
+    }
+}
diff --git a/compiler/rustc_typeck/src/hir_wf_check.rs b/compiler/rustc_typeck/src/hir_wf_check.rs
new file mode 100644
index 000000000..55c7a15f9
--- /dev/null
+++ b/compiler/rustc_typeck/src/hir_wf_check.rs
@@ -0,0 +1,188 @@
+use crate::collect::ItemCtxt;
+use rustc_hir as hir;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{ForeignItem, ForeignItemKind, HirId};
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::TraitEngine;
+use rustc_infer::traits::{ObligationCause, WellFormedLoc};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Region, ToPredicate, TyCtxt, TypeFoldable, TypeFolder};
+use rustc_trait_selection::traits::{self, TraitEngineExt};
+
+pub fn provide(providers: &mut Providers) {
+    *providers = Providers { diagnostic_hir_wf_check, ..*providers };
+}
+
+// Ideally, this would be in `rustc_trait_selection`, but we
+// need access to `ItemCtxt`
+fn diagnostic_hir_wf_check<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    (predicate, loc): (ty::Predicate<'tcx>, WellFormedLoc),
+) -> Option<ObligationCause<'tcx>> {
+    let hir = tcx.hir();
+
+    let def_id = match loc {
+        WellFormedLoc::Ty(def_id) => def_id,
+        WellFormedLoc::Param { function, param_idx: _ } => function,
+    };
+    let hir_id = hir.local_def_id_to_hir_id(def_id);
+
+    // HIR wfcheck should only ever happen as part of improving an existing error
+    tcx.sess
+        .delay_span_bug(tcx.def_span(def_id), "Performed HIR wfcheck without an existing error!");
+
+    let icx = ItemCtxt::new(tcx, def_id.to_def_id());
+
+    // To perform HIR-based WF checking, we iterate over all HIR types
+    // that occur 'inside' the item we're checking. For example,
+    // given the type `Option<MyStruct<u8>>`, we will check
+    // `Option<MyStruct<u8>>`, `MyStruct<u8>`, and `u8`.
+    // For each type, we perform a well-formed check, and see if we get
+    // an error that matches our expected predicate. We save
+    // the `ObligationCause` corresponding to the *innermost* type,
+    // which is the most specific type that we can point to.
+    // In general, the different components of an `hir::Ty` may have
+    // completely different spans due to macro invocations. Pointing
+    // to the most accurate part of the type can be the difference
+    // between a useless span (e.g. the macro invocation site)
+    // and a useful span (e.g. a user-provided type passed into the macro).
+    //
+    // This approach is quite inefficient - we redo a lot of work done
+    // by the normal WF checker. However, this code is run at most once
+    // per reported error - it will have no impact when compilation succeeds,
+    // and should only have an impact if a very large number of errors is
+    // displayed to the user.
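// As a rough illustration (not from the upstream sources; the names are
// hypothetical): given a bound-carrying wrapper such as
//
// ```rust,ignore (illustrative sketch; expected to be rejected)
// struct NeedsCopy<T: Copy>(T);
//
// struct Holder {
//     field: Option<NeedsCopy<Vec<u8>>>,
// }
// ```
//
// the well-formedness error is really about `Vec<u8>: Copy` failing, and
// walking the HIR type lets the diagnostic point at the innermost
// `NeedsCopy<Vec<u8>>` the user wrote rather than at the whole field type.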
+    struct HirWfCheck<'tcx> {
+        tcx: TyCtxt<'tcx>,
+        predicate: ty::Predicate<'tcx>,
+        cause: Option<ObligationCause<'tcx>>,
+        cause_depth: usize,
+        icx: ItemCtxt<'tcx>,
+        hir_id: HirId,
+        param_env: ty::ParamEnv<'tcx>,
+        depth: usize,
+    }
+
+    impl<'tcx> Visitor<'tcx> for HirWfCheck<'tcx> {
+        fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
+            self.tcx.infer_ctxt().enter(|infcx| {
+                let mut fulfill = <dyn TraitEngine<'tcx>>::new(self.tcx);
+                let tcx_ty =
+                    self.icx.to_ty(ty).fold_with(&mut EraseAllBoundRegions { tcx: self.tcx });
+                let cause = traits::ObligationCause::new(
+                    ty.span,
+                    self.hir_id,
+                    traits::ObligationCauseCode::WellFormed(None),
+                );
+                fulfill.register_predicate_obligation(
+                    &infcx,
+                    traits::Obligation::new(
+                        cause,
+                        self.param_env,
+                        ty::Binder::dummy(ty::PredicateKind::WellFormed(tcx_ty.into()))
+                            .to_predicate(self.tcx),
+                    ),
+                );
+
+                let errors = fulfill.select_all_or_error(&infcx);
+                if !errors.is_empty() {
+                    debug!("Wf-check got errors for {:?}: {:?}", ty, errors);
+                    for error in errors {
+                        if error.obligation.predicate == self.predicate {
+                            // Save the cause from the greatest depth - this corresponds
+                            // to picking more-specific types (e.g. `MyStruct<u8>`)
+                            // over less-specific types (e.g. `Option<MyStruct<u8>>`)
+                            if self.depth >= self.cause_depth {
+                                self.cause = Some(error.obligation.cause);
+                                self.cause_depth = self.depth
+                            }
+                        }
+                    }
+                }
+            });
+            self.depth += 1;
+            intravisit::walk_ty(self, ty);
+            self.depth -= 1;
+        }
+    }
+
+    let mut visitor = HirWfCheck {
+        tcx,
+        predicate,
+        cause: None,
+        cause_depth: 0,
+        icx,
+        hir_id,
+        param_env: tcx.param_env(def_id.to_def_id()),
+        depth: 0,
+    };
+
+    // Get the starting `hir::Ty` using our `WellFormedLoc`.
+    // We will walk 'into' this type to try to find
+    // a more precise span for our predicate.
+    let ty = match loc {
+        WellFormedLoc::Ty(_) => match hir.get(hir_id) {
+            hir::Node::ImplItem(item) => match item.kind {
+                hir::ImplItemKind::TyAlias(ty) => Some(ty),
+                hir::ImplItemKind::Const(ty, _) => Some(ty),
+                ref item => bug!("Unexpected ImplItem {:?}", item),
+            },
+            hir::Node::TraitItem(item) => match item.kind {
+                hir::TraitItemKind::Type(_, ty) => ty,
+                hir::TraitItemKind::Const(ty, _) => Some(ty),
+                ref item => bug!("Unexpected TraitItem {:?}", item),
+            },
+            hir::Node::Item(item) => match item.kind {
+                hir::ItemKind::Static(ty, _, _) | hir::ItemKind::Const(ty, _) => Some(ty),
+                hir::ItemKind::Impl(ref impl_) => {
+                    assert!(impl_.of_trait.is_none(), "Unexpected trait impl: {:?}", impl_);
+                    Some(impl_.self_ty)
+                }
+                ref item => bug!("Unexpected item {:?}", item),
+            },
+            hir::Node::Field(field) => Some(field.ty),
+            hir::Node::ForeignItem(ForeignItem {
+                kind: ForeignItemKind::Static(ty, _), ..
+            }) => Some(*ty),
+            ref node => bug!("Unexpected node {:?}", node),
+        },
+        WellFormedLoc::Param { function: _, param_idx } => {
+            let fn_decl = hir.fn_decl_by_hir_id(hir_id).unwrap();
+            // Get return type
+            if param_idx as usize == fn_decl.inputs.len() {
+                match fn_decl.output {
+                    hir::FnRetTy::Return(ty) => Some(ty),
+                    // The unit type `()` is always well-formed
+                    hir::FnRetTy::DefaultReturn(_span) => None,
+                }
+            } else {
+                Some(&fn_decl.inputs[param_idx as usize])
+            }
+        }
+    };
+    if let Some(ty) = ty {
+        visitor.visit_ty(ty);
+    }
+    visitor.cause
+}
+
+struct EraseAllBoundRegions<'tcx> {
+    tcx: TyCtxt<'tcx>,
+}
+
+// Higher ranked regions are complicated.
+// To make matters worse, the HIR WF check can instantiate them
+// outside of a `Binder`, due to the way we (ab)use
+// `ItemCtxt::to_ty`. To make things simpler, we just erase all
+// of them, regardless of depth.
+// At worst, this will give us an inaccurate span for an error message,
+// but cannot lead to unsoundness (we call `delay_span_bug` at the start
+// of `diagnostic_hir_wf_check`).
+impl<'tcx> TypeFolder<'tcx> for EraseAllBoundRegions<'tcx> {
+    fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+    fn fold_region(&mut self, r: Region<'tcx>) -> Region<'tcx> {
+        if r.is_late_bound() { self.tcx.lifetimes.re_erased } else { r }
+    }
+}
diff --git a/compiler/rustc_typeck/src/impl_wf_check.rs b/compiler/rustc_typeck/src/impl_wf_check.rs
new file mode 100644
index 000000000..9fee1eaae
--- /dev/null
+++ b/compiler/rustc_typeck/src/impl_wf_check.rs
@@ -0,0 +1,228 @@
+//! This pass enforces various "well-formedness constraints" on impls.
+//! Logically, it is part of wfcheck -- but we do it early so that we
+//! can stop compilation afterwards, since part of the trait matching
+//! infrastructure gets very grumpy if these conditions don't hold. In
+//! particular, if there are type parameters that are not part of the
+//! impl, then coherence will report strange inference ambiguity
+//! errors; if impls have duplicate items, we get misleading
+//! specialization errors. These things can (and probably should) be
+//! fixed, but for the moment it's easier to do these checks early.
+
+use crate::constrained_generic_params as cgp;
+use min_specialization::check_min_specialization;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::struct_span_err;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
+use rustc_span::{Span, Symbol};
+
+use std::collections::hash_map::Entry::{Occupied, Vacant};
+
+mod min_specialization;
+
+/// Checks that all the type/lifetime parameters on an impl also
+/// appear in the trait ref or self type (or are constrained by a
+/// where-clause). These rules are needed to ensure that, given a
+/// trait ref like `<T as Trait<U>>`, we can derive the values of all
+/// parameters on the impl (which is needed to make specialization
+/// possible).
+///
+/// However, in the case of lifetimes, we only enforce these rules if
+/// the lifetime parameter is used in an associated type. This is a
+/// concession to backwards compatibility; see comment at the end of
+/// the fn for details.
+///
+/// Example:
+///
+/// ```rust,ignore (pseudo-Rust)
+/// impl<T> Trait<Foo> for Bar { ... }
+/// //      ^ T does not appear in `Foo` or `Bar`, error!
+///
+/// impl<T> Trait<Foo<T>> for Bar { ... }
+/// //      ^ T appears in `Foo<T>`, ok.
+///
+/// impl<T> Trait<Foo> for Bar where Bar: Iterator<Item = T> { ... }
+/// //      ^ T is bound to `<Bar as Iterator>::Item`, ok.
+/// +/// impl<'a> Trait for Bar { } +/// // ^ 'a is unused, but for back-compat we allow it +/// +/// impl<'a> Trait for Bar { type X = &'a i32; } +/// // ^ 'a is unused and appears in assoc type, error +/// ``` +fn check_mod_impl_wf(tcx: TyCtxt<'_>, module_def_id: LocalDefId) { + let min_specialization = tcx.features().min_specialization; + let module = tcx.hir_module_items(module_def_id); + for id in module.items() { + if matches!(tcx.def_kind(id.def_id), DefKind::Impl) { + enforce_impl_params_are_constrained(tcx, id.def_id); + enforce_impl_items_are_distinct(tcx, id.def_id); + if min_specialization { + check_min_specialization(tcx, id.def_id); + } + } + } +} + +pub fn provide(providers: &mut Providers) { + *providers = Providers { check_mod_impl_wf, ..*providers }; +} + +fn enforce_impl_params_are_constrained(tcx: TyCtxt<'_>, impl_def_id: LocalDefId) { + // Every lifetime used in an associated type must be constrained. + let impl_self_ty = tcx.type_of(impl_def_id); + if impl_self_ty.references_error() { + // Don't complain about unconstrained type params when self ty isn't known due to errors. + // (#36836) + tcx.sess.delay_span_bug( + tcx.def_span(impl_def_id), + &format!( + "potentially unconstrained type parameters weren't evaluated: {:?}", + impl_self_ty, + ), + ); + return; + } + let impl_generics = tcx.generics_of(impl_def_id); + let impl_predicates = tcx.predicates_of(impl_def_id); + let impl_trait_ref = tcx.impl_trait_ref(impl_def_id); + + let mut input_parameters = cgp::parameters_for_impl(impl_self_ty, impl_trait_ref); + cgp::identify_constrained_generic_params( + tcx, + impl_predicates, + impl_trait_ref, + &mut input_parameters, + ); + + // Disallow unconstrained lifetimes, but only if they appear in assoc types. + let lifetimes_in_associated_types: FxHashSet<_> = tcx + .associated_item_def_ids(impl_def_id) + .iter() + .flat_map(|def_id| { + let item = tcx.associated_item(def_id); + match item.kind { + ty::AssocKind::Type => { + if item.defaultness(tcx).has_value() { + cgp::parameters_for(&tcx.type_of(def_id), true) + } else { + Vec::new() + } + } + ty::AssocKind::Fn | ty::AssocKind::Const => Vec::new(), + } + }) + .collect(); + + for param in &impl_generics.params { + match param.kind { + // Disallow ANY unconstrained type parameters. + ty::GenericParamDefKind::Type { .. } => { + let param_ty = ty::ParamTy::for_def(param); + if !input_parameters.contains(&cgp::Parameter::from(param_ty)) { + report_unused_parameter(tcx, tcx.def_span(param.def_id), "type", param_ty.name); + } + } + ty::GenericParamDefKind::Lifetime => { + let param_lt = cgp::Parameter::from(param.to_early_bound_region_data()); + if lifetimes_in_associated_types.contains(¶m_lt) && // (*) + !input_parameters.contains(¶m_lt) + { + report_unused_parameter( + tcx, + tcx.def_span(param.def_id), + "lifetime", + param.name, + ); + } + } + ty::GenericParamDefKind::Const { .. } => { + let param_ct = ty::ParamConst::for_def(param); + if !input_parameters.contains(&cgp::Parameter::from(param_ct)) { + report_unused_parameter( + tcx, + tcx.def_span(param.def_id), + "const", + param_ct.name, + ); + } + } + } + } + + // (*) This is a horrible concession to reality. I think it'd be + // better to just ban unconstrained lifetimes outright, but in + // practice people do non-hygienic macros like: + // + // ``` + // macro_rules! __impl_slice_eq1 { + // ($Lhs: ty, $Rhs: ty, $Bound: ident) => { + // impl<'a, 'b, A: $Bound, B> PartialEq<$Rhs> for $Lhs where A: PartialEq { + // .... 
+ // } + // } + // } + // ``` + // + // In a concession to backwards compatibility, we continue to + // permit those, so long as the lifetimes aren't used in + // associated types. I believe this is sound, because lifetimes + // used elsewhere are not projected back out. +} + +fn report_unused_parameter(tcx: TyCtxt<'_>, span: Span, kind: &str, name: Symbol) { + let mut err = struct_span_err!( + tcx.sess, + span, + E0207, + "the {} parameter `{}` is not constrained by the \ + impl trait, self type, or predicates", + kind, + name + ); + err.span_label(span, format!("unconstrained {} parameter", kind)); + if kind == "const" { + err.note( + "expressions using a const parameter must map each value to a distinct output value", + ); + err.note( + "proving the result of expressions other than the parameter are unique is not supported", + ); + } + err.emit(); +} + +/// Enforce that we do not have two items in an impl with the same name. +fn enforce_impl_items_are_distinct(tcx: TyCtxt<'_>, impl_def_id: LocalDefId) { + let mut seen_type_items = FxHashMap::default(); + let mut seen_value_items = FxHashMap::default(); + for &impl_item_ref in tcx.associated_item_def_ids(impl_def_id) { + let impl_item = tcx.associated_item(impl_item_ref); + let seen_items = match impl_item.kind { + ty::AssocKind::Type => &mut seen_type_items, + _ => &mut seen_value_items, + }; + let span = tcx.def_span(impl_item_ref); + let ident = impl_item.ident(tcx); + match seen_items.entry(ident.normalize_to_macros_2_0()) { + Occupied(entry) => { + let mut err = struct_span_err!( + tcx.sess, + span, + E0201, + "duplicate definitions with name `{}`:", + ident + ); + err.span_label(*entry.get(), format!("previous definition of `{}` here", ident)); + err.span_label(span, "duplicate definition"); + err.emit(); + } + Vacant(entry) => { + entry.insert(span); + } + } + } +} diff --git a/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs b/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs new file mode 100644 index 000000000..74abb71a1 --- /dev/null +++ b/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs @@ -0,0 +1,439 @@ +//! # Minimal Specialization +//! +//! This module contains the checks for sound specialization used when the +//! `min_specialization` feature is enabled. This requires that the impl is +//! *always applicable*. +//! +//! If `impl1` specializes `impl2` then `impl1` is always applicable if we know +//! that all the bounds of `impl2` are satisfied, and all of the bounds of +//! `impl1` are satisfied for some choice of lifetimes then we know that +//! `impl1` applies for any choice of lifetimes. +//! +//! ## Basic approach +//! +//! To enforce this requirement on specializations we take the following +//! approach: +//! +//! 1. Match up the substs for `impl2` so that the implemented trait and +//! self-type match those for `impl1`. +//! 2. Check for any direct use of `'static` in the substs of `impl2`. +//! 3. Check that all of the generic parameters of `impl1` occur at most once +//! in the *unconstrained* substs for `impl2`. A parameter is constrained if +//! its value is completely determined by an associated type projection +//! predicate. +//! 4. Check that all predicates on `impl1` either exist on `impl2` (after +//! matching substs), or are well-formed predicates for the trait's type +//! arguments. +//! +//! ## Example +//! +//! Suppose we have the following always applicable impl: +//! +//! ```ignore (illustrative) +//! 
impl<T> SpecExtend<T> for std::vec::IntoIter<T> { /* specialized impl */ }
+//! impl<T, I: Iterator<Item = T>> SpecExtend<T> for I { /* default impl */ }
+//! ```
+//!
+//! We get that the substs for `impl2` are `[T, std::vec::IntoIter<T>]`. `T` is
+//! constrained to be `<I as Iterator>::Item`, so we check only
+//! `std::vec::IntoIter<T>` for repeated parameters, which it doesn't have. The
+//! predicates of `impl1` are only `T: Sized`, which is also a predicate of
+//! `impl2`. So this specialization is sound.
+//!
+//! ## Extensions
+//!
+//! Unfortunately not all specializations in the standard library are allowed
+//! by this. So there are two extensions to these rules that allow specializing
+//! on some traits: that is, using them as bounds on the specializing impl,
+//! even when they don't occur in the base impl.
+//!
+//! ### rustc_specialization_trait
+//!
+//! If a trait is always applicable, then it's sound to specialize on it. We
+//! check that the trait is always applicable in the same way as for impls,
+//! except that step 4 is now "all predicates on `impl1` are always
+//! applicable". We require that `specialization` or `min_specialization` is
+//! enabled to implement these traits.
+//!
+//! ### rustc_unsafe_specialization_marker
+//!
+//! There are also some specializations on traits with no methods, including
+//! the stable `FusedIterator` trait. We allow marking marker traits with an
+//! unstable attribute that means we ignore them in point 3 of the checks
+//! above. This is unsound, in the sense that the specialized impl may be used
+//! when it doesn't apply, but we allow it in the short term since it can't
+//! cause use after frees with purely safe code in the same way as specializing
+//! on traits with methods can.
+
+use crate::check::regionck::OutlivesEnvironmentExt;
+use crate::check::wfcheck::impl_implied_bounds;
+use crate::constrained_generic_params as cgp;
+use crate::errors::SubstsOnOverriddenImpl;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_infer::infer::outlives::env::OutlivesEnvironment;
+use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
+use rustc_infer::traits::specialization_graph::Node;
+use rustc_middle::ty::subst::{GenericArg, InternalSubsts, SubstsRef};
+use rustc_middle::ty::trait_def::TraitSpecializationKind;
+use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
+use rustc_span::Span;
+use rustc_trait_selection::traits::{self, translate_substs, wf};
+
+pub(super) fn check_min_specialization(tcx: TyCtxt<'_>, impl_def_id: LocalDefId) {
+    if let Some(node) = parent_specialization_node(tcx, impl_def_id) {
+        tcx.infer_ctxt().enter(|infcx| {
+            check_always_applicable(&infcx, impl_def_id, node);
+        });
+    }
+}
+
+fn parent_specialization_node(tcx: TyCtxt<'_>, impl1_def_id: LocalDefId) -> Option<Node> {
+    let trait_ref = tcx.impl_trait_ref(impl1_def_id)?;
+    let trait_def = tcx.trait_def(trait_ref.def_id);
+
+    let impl2_node = trait_def.ancestors(tcx, impl1_def_id.to_def_id()).ok()?.nth(1)?;
+
+    let always_applicable_trait =
+        matches!(trait_def.specialization_kind, TraitSpecializationKind::AlwaysApplicable);
+    if impl2_node.is_from_trait() && !always_applicable_trait {
+        // Implementing a normal trait isn't a specialization.
+        return None;
+    }
+    Some(impl2_node)
+}
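// A rough illustration (not from the upstream file) of an impl pair that the
// checks below accept, and one bound that they reject, under the unstable
// `min_specialization` feature:
//
// ```rust,ignore (illustrative sketch; needs #![feature(min_specialization)])
// trait Describe {
//     fn describe(&self) -> &'static str;
// }
//
// // Base impl: applies to every `T`.
// impl<T> Describe for T {
//     default fn describe(&self) -> &'static str { "something" }
// }
//
// // Accepted: `u8` fixes all parameters, introduces no `'static` lifetime
// // and no extra trait bounds, so the impl is always applicable.
// impl Describe for u8 {
//     fn describe(&self) -> &'static str { "a byte" }
// }
//
// // Rejected: `T: Clone` is neither global, nor a predicate of the base
// // impl, nor a well-formedness requirement of the trait's arguments, so it
// // fails the checks below ("cannot specialize on trait ...").
// // impl<T: Clone> Describe for T {
// //     fn describe(&self) -> &'static str { "something cloneable" }
// // }
// ```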
+
+/// Check that `impl1` is a sound specialization
+fn check_always_applicable(infcx: &InferCtxt<'_, '_>, impl1_def_id: LocalDefId, impl2_node: Node) {
+    if let Some((impl1_substs, impl2_substs)) = get_impl_substs(infcx, impl1_def_id, impl2_node) {
+        let impl2_def_id = impl2_node.def_id();
+        debug!(
+            "check_always_applicable(\nimpl1_def_id={:?},\nimpl2_def_id={:?},\nimpl2_substs={:?}\n)",
+            impl1_def_id, impl2_def_id, impl2_substs
+        );
+
+        let tcx = infcx.tcx;
+
+        let parent_substs = if impl2_node.is_from_trait() {
+            impl2_substs.to_vec()
+        } else {
+            unconstrained_parent_impl_substs(tcx, impl2_def_id, impl2_substs)
+        };
+
+        let span = tcx.def_span(impl1_def_id);
+        check_static_lifetimes(tcx, &parent_substs, span);
+        check_duplicate_params(tcx, impl1_substs, &parent_substs, span);
+        check_predicates(infcx, impl1_def_id, impl1_substs, impl2_node, impl2_substs, span);
+    }
+}
+
+/// Given a specializing impl `impl1`, and the base impl `impl2`, returns two
+/// substitutions `(S1, S2)` that equate their trait references. The returned
+/// types are expressed in terms of the generics of `impl1`.
+///
+/// Example
+///
+/// impl<A, B> Foo<A> for B { /* impl2 */ }
+/// impl<C> Foo<Vec<C>> for C { /* impl1 */ }
+///
+/// Would return `S1 = [C]` and `S2 = [Vec<C>, C]`.
+fn get_impl_substs<'tcx>(
+    infcx: &InferCtxt<'_, 'tcx>,
+    impl1_def_id: LocalDefId,
+    impl2_node: Node,
+) -> Option<(SubstsRef<'tcx>, SubstsRef<'tcx>)> {
+    let tcx = infcx.tcx;
+    let param_env = tcx.param_env(impl1_def_id);
+
+    let impl1_substs = InternalSubsts::identity_for_item(tcx, impl1_def_id.to_def_id());
+    let impl2_substs =
+        translate_substs(infcx, param_env, impl1_def_id.to_def_id(), impl1_substs, impl2_node);
+
+    let mut outlives_env = OutlivesEnvironment::new(param_env);
+    let implied_bounds =
+        impl_implied_bounds(infcx.tcx, param_env, impl1_def_id, tcx.def_span(impl1_def_id));
+    outlives_env.add_implied_bounds(
+        infcx,
+        implied_bounds,
+        tcx.hir().local_def_id_to_hir_id(impl1_def_id),
+    );
+    infcx.check_region_obligations_and_report_errors(impl1_def_id, &outlives_env);
+    let Ok(impl2_substs) = infcx.fully_resolve(impl2_substs) else {
+        let span = tcx.def_span(impl1_def_id);
+        tcx.sess.emit_err(SubstsOnOverriddenImpl { span });
+        return None;
+    };
+    Some((impl1_substs, impl2_substs))
+}
+
+/// Returns a list of all of the unconstrained substs of the given impl.
+///
+/// For example given the impl:
+///
+/// impl<'a, T, I> ... where &'a I: IntoIterator<Item = &'a T>
+///
+/// This would return the substs corresponding to `['a, I]`, because knowing
+/// `'a` and `I` determines the value of `T`.
+fn unconstrained_parent_impl_substs<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    impl_def_id: DefId,
+    impl_substs: SubstsRef<'tcx>,
+) -> Vec<GenericArg<'tcx>> {
+    let impl_generic_predicates = tcx.predicates_of(impl_def_id);
+    let mut unconstrained_parameters = FxHashSet::default();
+    let mut constrained_params = FxHashSet::default();
+    let impl_trait_ref = tcx.impl_trait_ref(impl_def_id);
+
+    // Unfortunately the functions in `constrained_generic_parameters` don't do
+    // what we want here. We want only a list of constrained parameters while
+    // the functions in `cgp` add the constrained parameters to a list of
+    // unconstrained parameters.
+    for (predicate, _) in impl_generic_predicates.predicates.iter() {
+        if let ty::PredicateKind::Projection(proj) = predicate.kind().skip_binder() {
+            let projection_ty = proj.projection_ty;
+            let projected_ty = proj.term;
+
+            let unbound_trait_ref = projection_ty.trait_ref(tcx);
+            if Some(unbound_trait_ref) == impl_trait_ref {
+                continue;
+            }
+
+            unconstrained_parameters.extend(cgp::parameters_for(&projection_ty, true));
+
+            for param in cgp::parameters_for(&projected_ty, false) {
+                if !unconstrained_parameters.contains(&param) {
+                    constrained_params.insert(param.0);
+                }
+            }
+
+            unconstrained_parameters.extend(cgp::parameters_for(&projected_ty, true));
+        }
+    }
+
+    impl_substs
+        .iter()
+        .enumerate()
+        .filter(|&(idx, _)| !constrained_params.contains(&(idx as u32)))
+        .map(|(_, arg)| arg)
+        .collect()
+}
+
+/// Check that parameters of the derived impl don't occur more than once in the
+/// equated substs of the base impl.
+///
+/// For example forbid the following:
+///
+/// impl<A> Tr for A { }
+/// impl<B> Tr for (B, B) { }
+///
+/// Note that we only consider the unconstrained parameters of the base impl:
+///
+/// impl<S, I: IntoIterator<Item = S>> Tr<S> for I { }
+/// impl<T> Tr<T> for Vec<T> { }
+///
+/// The substs for the parent impl here are `[T, Vec<T>]`, which repeats `T`,
+/// but `S` is constrained in the parent impl, so `parent_substs` is only
+/// `[Vec<T>]`. This means we allow this impl.
+fn check_duplicate_params<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    impl1_substs: SubstsRef<'tcx>,
+    parent_substs: &Vec<GenericArg<'tcx>>,
+    span: Span,
+) {
+    let mut base_params = cgp::parameters_for(parent_substs, true);
+    base_params.sort_by_key(|param| param.0);
+    if let (_, [duplicate, ..]) = base_params.partition_dedup() {
+        let param = impl1_substs[duplicate.0 as usize];
+        tcx.sess
+            .struct_span_err(span, &format!("specializing impl repeats parameter `{}`", param))
+            .emit();
+    }
+}
+
+/// Check that `'static` lifetimes are not introduced by the specializing impl.
+///
+/// For example forbid the following:
+///
+/// impl<A> Tr for A { }
+/// impl Tr for &'static i32 { }
+fn check_static_lifetimes<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    parent_substs: &Vec<GenericArg<'tcx>>,
+    span: Span,
+) {
+    if tcx.any_free_region_meets(parent_substs, |r| r.is_static()) {
+        tcx.sess.struct_span_err(span, "cannot specialize on `'static` lifetime").emit();
+    }
+}
+
+/// Check whether predicates on the specializing impl (`impl1`) are allowed.
+///
+/// Each predicate `P` must be:
+///
+/// * global (not reference any parameters)
+/// * `T: Tr` predicate where `Tr` is an always-applicable trait
+/// * on the base impl `impl2`
+///     * Currently this check is done using syntactic equality, which is
+///       conservative but generally sufficient.
+/// * a well-formed predicate of a type argument of the trait being implemented,
+///   including the `Self`-type.
+fn check_predicates<'tcx>(
+    infcx: &InferCtxt<'_, 'tcx>,
+    impl1_def_id: LocalDefId,
+    impl1_substs: SubstsRef<'tcx>,
+    impl2_node: Node,
+    impl2_substs: SubstsRef<'tcx>,
+    span: Span,
+) {
+    let tcx = infcx.tcx;
+    let instantiated = tcx.predicates_of(impl1_def_id).instantiate(tcx, impl1_substs);
+    let impl1_predicates: Vec<_> = traits::elaborate_predicates_with_span(
+        tcx,
+        std::iter::zip(
+            instantiated.predicates,
+            // Don't drop predicates (unsound!)
because `spans` is too short + instantiated.spans.into_iter().chain(std::iter::repeat(span)), + ), + ) + .map(|obligation| (obligation.predicate, obligation.cause.span)) + .collect(); + + let mut impl2_predicates = if impl2_node.is_from_trait() { + // Always applicable traits have to be always applicable without any + // assumptions. + Vec::new() + } else { + traits::elaborate_predicates( + tcx, + tcx.predicates_of(impl2_node.def_id()) + .instantiate(tcx, impl2_substs) + .predicates + .into_iter(), + ) + .map(|obligation| obligation.predicate) + .collect() + }; + debug!( + "check_always_applicable(\nimpl1_predicates={:?},\nimpl2_predicates={:?}\n)", + impl1_predicates, impl2_predicates, + ); + + // Since impls of always applicable traits don't get to assume anything, we + // can also assume their supertraits apply. + // + // For example, we allow: + // + // #[rustc_specialization_trait] + // trait AlwaysApplicable: Debug { } + // + // impl Tr for T { } + // impl Tr for T { } + // + // Specializing on `AlwaysApplicable` allows also specializing on `Debug` + // which is sound because we forbid impls like the following + // + // impl AlwaysApplicable for D { } + let always_applicable_traits = impl1_predicates.iter().copied().filter(|&(predicate, _)| { + matches!( + trait_predicate_kind(tcx, predicate), + Some(TraitSpecializationKind::AlwaysApplicable) + ) + }); + + // Include the well-formed predicates of the type parameters of the impl. + for arg in tcx.impl_trait_ref(impl1_def_id).unwrap().substs { + if let Some(obligations) = wf::obligations( + infcx, + tcx.param_env(impl1_def_id), + tcx.hir().local_def_id_to_hir_id(impl1_def_id), + 0, + arg, + span, + ) { + impl2_predicates.extend( + traits::elaborate_obligations(tcx, obligations) + .map(|obligation| obligation.predicate), + ) + } + } + impl2_predicates.extend( + traits::elaborate_predicates_with_span(tcx, always_applicable_traits) + .map(|obligation| obligation.predicate), + ); + + for (predicate, span) in impl1_predicates { + if !impl2_predicates.contains(&predicate) { + check_specialization_on(tcx, predicate, span) + } + } +} + +fn check_specialization_on<'tcx>(tcx: TyCtxt<'tcx>, predicate: ty::Predicate<'tcx>, span: Span) { + debug!("can_specialize_on(predicate = {:?})", predicate); + match predicate.kind().skip_binder() { + // Global predicates are either always true or always false, so we + // are fine to specialize on. + _ if predicate.is_global() => (), + // We allow specializing on explicitly marked traits with no associated + // items. 
+ ty::PredicateKind::Trait(ty::TraitPredicate { + trait_ref, + constness: ty::BoundConstness::NotConst, + polarity: _, + }) => { + if !matches!( + trait_predicate_kind(tcx, predicate), + Some(TraitSpecializationKind::Marker) + ) { + tcx.sess + .struct_span_err( + span, + &format!( + "cannot specialize on trait `{}`", + tcx.def_path_str(trait_ref.def_id), + ), + ) + .emit(); + } + } + ty::PredicateKind::Projection(ty::ProjectionPredicate { projection_ty, term }) => { + tcx.sess + .struct_span_err( + span, + &format!("cannot specialize on associated type `{projection_ty} == {term}`",), + ) + .emit(); + } + _ => { + tcx.sess + .struct_span_err(span, &format!("cannot specialize on predicate `{}`", predicate)) + .emit(); + } + } +} + +fn trait_predicate_kind<'tcx>( + tcx: TyCtxt<'tcx>, + predicate: ty::Predicate<'tcx>, +) -> Option { + match predicate.kind().skip_binder() { + ty::PredicateKind::Trait(ty::TraitPredicate { + trait_ref, + constness: ty::BoundConstness::NotConst, + polarity: _, + }) => Some(tcx.trait_def(trait_ref.def_id).specialization_kind), + ty::PredicateKind::Trait(_) + | ty::PredicateKind::RegionOutlives(_) + | ty::PredicateKind::TypeOutlives(_) + | ty::PredicateKind::Projection(_) + | ty::PredicateKind::WellFormed(_) + | ty::PredicateKind::Subtype(_) + | ty::PredicateKind::Coerce(_) + | ty::PredicateKind::ObjectSafe(_) + | ty::PredicateKind::ClosureKind(..) + | ty::PredicateKind::ConstEvaluatable(..) + | ty::PredicateKind::ConstEquate(..) + | ty::PredicateKind::TypeWellFormedFromEnv(..) => None, + } +} diff --git a/compiler/rustc_typeck/src/lib.rs b/compiler/rustc_typeck/src/lib.rs new file mode 100644 index 000000000..f98ae46c5 --- /dev/null +++ b/compiler/rustc_typeck/src/lib.rs @@ -0,0 +1,579 @@ +/*! + +# typeck + +The type checker is responsible for: + +1. Determining the type of each expression. +2. Resolving methods and traits. +3. Guaranteeing that most type rules are met. ("Most?", you say, "why most?" + Well, dear reader, read on.) + +The main entry point is [`check_crate()`]. Type checking operates in +several major phases: + +1. The collect phase first passes over all items and determines their + type, without examining their "innards". + +2. Variance inference then runs to compute the variance of each parameter. + +3. Coherence checks for overlapping or orphaned impls. + +4. Finally, the check phase then checks function bodies and so forth. + Within the check phase, we check each function body one at a time + (bodies of function expressions are checked as part of the + containing function). Inference is used to supply types wherever + they are unknown. The actual checking of a function itself has + several phases (check, regionck, writeback), as discussed in the + documentation for the [`check`] module. + +The type checker is defined into various submodules which are documented +independently: + +- astconv: converts the AST representation of types + into the `ty` representation. + +- collect: computes the types of each top-level item and enters them into + the `tcx.types` table for later use. + +- coherence: enforces coherence rules, builds some tables. + +- variance: variance inference + +- outlives: outlives inference + +- check: walks over function bodies and type checks them, inferring types for + local variables, type parameters, etc as necessary. + +- infer: finds the types to use for each type variable such that + all subtyping and assignment constraints are met. In essence, the check + module specifies the constraints, and the infer module solves them. 
+ +## Note + +This API is completely unstable and subject to change. + +*/ + +#![allow(rustc::potential_query_instability)] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] +#![feature(box_patterns)] +#![feature(control_flow_enum)] +#![feature(drain_filter)] +#![feature(hash_drain_filter)] +#![feature(if_let_guard)] +#![feature(is_sorted)] +#![feature(iter_intersperse)] +#![feature(label_break_value)] +#![feature(let_chains)] +#![feature(let_else)] +#![feature(min_specialization)] +#![feature(never_type)] +#![feature(once_cell)] +#![feature(slice_partition_dedup)] +#![feature(try_blocks)] +#![feature(is_some_with)] +#![recursion_limit = "256"] + +#[macro_use] +extern crate tracing; + +#[macro_use] +extern crate rustc_middle; + +// These are used by Clippy. +pub mod check; +pub mod expr_use_visitor; + +mod astconv; +mod bounds; +mod check_unused; +mod coherence; +mod collect; +mod constrained_generic_params; +mod errors; +pub mod hir_wf_check; +mod impl_wf_check; +mod mem_categorization; +mod outlives; +mod structured_errors; +mod variance; + +use rustc_errors::{struct_span_err, ErrorGuaranteed}; +use rustc_hir as hir; +use rustc_hir::def_id::DefId; +use rustc_hir::{Node, CRATE_HIR_ID}; +use rustc_infer::infer::{InferOk, TyCtxtInferExt}; +use rustc_infer::traits::TraitEngineExt as _; +use rustc_middle::middle; +use rustc_middle::ty::query::Providers; +use rustc_middle::ty::{self, Ty, TyCtxt}; +use rustc_middle::util; +use rustc_session::config::EntryFnType; +use rustc_span::{symbol::sym, Span, DUMMY_SP}; +use rustc_target::spec::abi::Abi; +use rustc_trait_selection::infer::InferCtxtExt; +use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _; +use rustc_trait_selection::traits::{ + self, ObligationCause, ObligationCauseCode, TraitEngine, TraitEngineExt as _, +}; + +use std::iter; + +use astconv::AstConv; +use bounds::Bounds; + +fn require_c_abi_if_c_variadic(tcx: TyCtxt<'_>, decl: &hir::FnDecl<'_>, abi: Abi, span: Span) { + match (decl.c_variadic, abi) { + // The function has the correct calling convention, or isn't a "C-variadic" function. + (false, _) | (true, Abi::C { .. }) | (true, Abi::Cdecl { .. }) => {} + // The function is a "C-variadic" function with an incorrect calling convention. + (true, _) => { + let mut err = struct_span_err!( + tcx.sess, + span, + E0045, + "C-variadic function must have C or cdecl calling convention" + ); + err.span_label(span, "C-variadics require C or cdecl calling convention").emit(); + } + } +} + +fn require_same_types<'tcx>( + tcx: TyCtxt<'tcx>, + cause: &ObligationCause<'tcx>, + expected: Ty<'tcx>, + actual: Ty<'tcx>, +) -> bool { + tcx.infer_ctxt().enter(|ref infcx| { + let param_env = ty::ParamEnv::empty(); + let mut fulfill_cx = >::new(infcx.tcx); + match infcx.at(cause, param_env).eq(expected, actual) { + Ok(InferOk { obligations, .. 
}) => { + fulfill_cx.register_predicate_obligations(infcx, obligations); + } + Err(err) => { + infcx.report_mismatched_types(cause, expected, actual, err).emit(); + return false; + } + } + + match fulfill_cx.select_all_or_error(infcx).as_slice() { + [] => true, + errors => { + infcx.report_fulfillment_errors(errors, None, false); + false + } + } + }) +} + +fn check_main_fn_ty(tcx: TyCtxt<'_>, main_def_id: DefId) { + let main_fnsig = tcx.fn_sig(main_def_id); + let main_span = tcx.def_span(main_def_id); + + fn main_fn_diagnostics_hir_id(tcx: TyCtxt<'_>, def_id: DefId, sp: Span) -> hir::HirId { + if let Some(local_def_id) = def_id.as_local() { + let hir_id = tcx.hir().local_def_id_to_hir_id(local_def_id); + let hir_type = tcx.type_of(local_def_id); + if !matches!(hir_type.kind(), ty::FnDef(..)) { + span_bug!(sp, "main has a non-function type: found `{}`", hir_type); + } + hir_id + } else { + CRATE_HIR_ID + } + } + + fn main_fn_generics_params_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option { + if !def_id.is_local() { + return None; + } + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); + match tcx.hir().find(hir_id) { + Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, ref generics, _), .. })) => { + if !generics.params.is_empty() { + Some(generics.span) + } else { + None + } + } + _ => { + span_bug!(tcx.def_span(def_id), "main has a non-function type"); + } + } + } + + fn main_fn_where_clauses_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option { + if !def_id.is_local() { + return None; + } + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); + match tcx.hir().find(hir_id) { + Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, ref generics, _), .. })) => { + Some(generics.where_clause_span) + } + _ => { + span_bug!(tcx.def_span(def_id), "main has a non-function type"); + } + } + } + + fn main_fn_asyncness_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option { + if !def_id.is_local() { + return None; + } + Some(tcx.def_span(def_id)) + } + + fn main_fn_return_type_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option { + if !def_id.is_local() { + return None; + } + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); + match tcx.hir().find(hir_id) { + Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(ref fn_sig, _, _), .. })) => { + Some(fn_sig.decl.output.span()) + } + _ => { + span_bug!(tcx.def_span(def_id), "main has a non-function type"); + } + } + } + + let mut error = false; + let main_diagnostics_hir_id = main_fn_diagnostics_hir_id(tcx, main_def_id, main_span); + let main_fn_generics = tcx.generics_of(main_def_id); + let main_fn_predicates = tcx.predicates_of(main_def_id); + if main_fn_generics.count() != 0 || !main_fnsig.bound_vars().is_empty() { + let generics_param_span = main_fn_generics_params_span(tcx, main_def_id); + let msg = "`main` function is not allowed to have generic \ + parameters"; + let mut diag = + struct_span_err!(tcx.sess, generics_param_span.unwrap_or(main_span), E0131, "{}", msg); + if let Some(generics_param_span) = generics_param_span { + let label = "`main` cannot have generic parameters"; + diag.span_label(generics_param_span, label); + } + diag.emit(); + error = true; + } else if !main_fn_predicates.predicates.is_empty() { + // generics may bring in implicit predicates, so we skip this check if generics is present. 
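// For instance (an illustrative sketch, not part of the original comment),
// signatures rejected by the previous branch and by this one include:
//
// ```rust,ignore (illustrative sketch; expected to be rejected)
// fn main<T>() {}                    // E0131: generic parameters on `main`
// fn main() where String: Clone {}   // E0646: `where` clause on `main`
// ```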
+ let generics_where_clauses_span = main_fn_where_clauses_span(tcx, main_def_id); + let mut diag = struct_span_err!( + tcx.sess, + generics_where_clauses_span.unwrap_or(main_span), + E0646, + "`main` function is not allowed to have a `where` clause" + ); + if let Some(generics_where_clauses_span) = generics_where_clauses_span { + diag.span_label(generics_where_clauses_span, "`main` cannot have a `where` clause"); + } + diag.emit(); + error = true; + } + + let main_asyncness = tcx.asyncness(main_def_id); + if let hir::IsAsync::Async = main_asyncness { + let mut diag = struct_span_err!( + tcx.sess, + main_span, + E0752, + "`main` function is not allowed to be `async`" + ); + let asyncness_span = main_fn_asyncness_span(tcx, main_def_id); + if let Some(asyncness_span) = asyncness_span { + diag.span_label(asyncness_span, "`main` function is not allowed to be `async`"); + } + diag.emit(); + error = true; + } + + for attr in tcx.get_attrs(main_def_id, sym::track_caller) { + tcx.sess + .struct_span_err(attr.span, "`main` function is not allowed to be `#[track_caller]`") + .span_label(main_span, "`main` function is not allowed to be `#[track_caller]`") + .emit(); + error = true; + } + + if error { + return; + } + + let expected_return_type; + if let Some(term_id) = tcx.lang_items().termination() { + let return_ty = main_fnsig.output(); + let return_ty_span = main_fn_return_type_span(tcx, main_def_id).unwrap_or(main_span); + if !return_ty.bound_vars().is_empty() { + let msg = "`main` function return type is not allowed to have generic \ + parameters"; + struct_span_err!(tcx.sess, return_ty_span, E0131, "{}", msg).emit(); + error = true; + } + let return_ty = return_ty.skip_binder(); + tcx.infer_ctxt().enter(|infcx| { + let cause = traits::ObligationCause::new( + return_ty_span, + main_diagnostics_hir_id, + ObligationCauseCode::MainFunctionType, + ); + let mut fulfillment_cx = traits::FulfillmentContext::new(); + // normalize any potential projections in the return type, then add + // any possible obligations to the fulfillment context. + // HACK(ThePuzzlemaker) this feels symptomatic of a problem within + // checking trait fulfillment, not this here. I'm not sure why it + // works in the example in `fn test()` given in #88609? This also + // probably isn't the best way to do this. 
+ let InferOk { value: norm_return_ty, obligations } = infcx + .partially_normalize_associated_types_in( + cause.clone(), + ty::ParamEnv::empty(), + return_ty, + ); + fulfillment_cx.register_predicate_obligations(&infcx, obligations); + fulfillment_cx.register_bound( + &infcx, + ty::ParamEnv::empty(), + norm_return_ty, + term_id, + cause, + ); + let errors = fulfillment_cx.select_all_or_error(&infcx); + if !errors.is_empty() { + infcx.report_fulfillment_errors(&errors, None, false); + error = true; + } + }); + // now we can take the return type of the given main function + expected_return_type = main_fnsig.output(); + } else { + // standard () main return type + expected_return_type = ty::Binder::dummy(tcx.mk_unit()); + } + + if error { + return; + } + + let se_ty = tcx.mk_fn_ptr(expected_return_type.map_bound(|expected_return_type| { + tcx.mk_fn_sig(iter::empty(), expected_return_type, false, hir::Unsafety::Normal, Abi::Rust) + })); + + require_same_types( + tcx, + &ObligationCause::new( + main_span, + main_diagnostics_hir_id, + ObligationCauseCode::MainFunctionType, + ), + se_ty, + tcx.mk_fn_ptr(main_fnsig), + ); +} +fn check_start_fn_ty(tcx: TyCtxt<'_>, start_def_id: DefId) { + let start_def_id = start_def_id.expect_local(); + let start_id = tcx.hir().local_def_id_to_hir_id(start_def_id); + let start_span = tcx.def_span(start_def_id); + let start_t = tcx.type_of(start_def_id); + match start_t.kind() { + ty::FnDef(..) => { + if let Some(Node::Item(it)) = tcx.hir().find(start_id) { + if let hir::ItemKind::Fn(ref sig, ref generics, _) = it.kind { + let mut error = false; + if !generics.params.is_empty() { + struct_span_err!( + tcx.sess, + generics.span, + E0132, + "start function is not allowed to have type parameters" + ) + .span_label(generics.span, "start function cannot have type parameters") + .emit(); + error = true; + } + if generics.has_where_clause_predicates { + struct_span_err!( + tcx.sess, + generics.where_clause_span, + E0647, + "start function is not allowed to have a `where` clause" + ) + .span_label( + generics.where_clause_span, + "start function cannot have a `where` clause", + ) + .emit(); + error = true; + } + if let hir::IsAsync::Async = sig.header.asyncness { + let span = tcx.def_span(it.def_id); + struct_span_err!( + tcx.sess, + span, + E0752, + "`start` is not allowed to be `async`" + ) + .span_label(span, "`start` is not allowed to be `async`") + .emit(); + error = true; + } + + let attrs = tcx.hir().attrs(start_id); + for attr in attrs { + if attr.has_name(sym::track_caller) { + tcx.sess + .struct_span_err( + attr.span, + "`start` is not allowed to be `#[track_caller]`", + ) + .span_label( + start_span, + "`start` is not allowed to be `#[track_caller]`", + ) + .emit(); + error = true; + } + } + + if error { + return; + } + } + } + + let se_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig( + [tcx.types.isize, tcx.mk_imm_ptr(tcx.mk_imm_ptr(tcx.types.u8))].iter().cloned(), + tcx.types.isize, + false, + hir::Unsafety::Normal, + Abi::Rust, + ))); + + require_same_types( + tcx, + &ObligationCause::new(start_span, start_id, ObligationCauseCode::StartFunctionType), + se_ty, + tcx.mk_fn_ptr(tcx.fn_sig(start_def_id)), + ); + } + _ => { + span_bug!(start_span, "start has a non-function type: found `{}`", start_t); + } + } +} + +fn check_for_entry_fn(tcx: TyCtxt<'_>) { + match tcx.entry_fn(()) { + Some((def_id, EntryFnType::Main)) => check_main_fn_ty(tcx, def_id), + Some((def_id, EntryFnType::Start)) => check_start_fn_ty(tcx, def_id), + _ => {} + } +} + +pub fn 
provide(providers: &mut Providers) { + collect::provide(providers); + coherence::provide(providers); + check::provide(providers); + variance::provide(providers); + outlives::provide(providers); + impl_wf_check::provide(providers); + hir_wf_check::provide(providers); +} + +pub fn check_crate(tcx: TyCtxt<'_>) -> Result<(), ErrorGuaranteed> { + let _prof_timer = tcx.sess.timer("type_check_crate"); + + // this ensures that later parts of type checking can assume that items + // have valid types and not error + // FIXME(matthewjasper) We shouldn't need to use `track_errors`. + tcx.sess.track_errors(|| { + tcx.sess.time("type_collecting", || { + tcx.hir().for_each_module(|module| tcx.ensure().collect_mod_item_types(module)) + }); + })?; + + if tcx.features().rustc_attrs { + tcx.sess.track_errors(|| { + tcx.sess.time("outlives_testing", || outlives::test::test_inferred_outlives(tcx)); + })?; + } + + tcx.sess.track_errors(|| { + tcx.sess.time("impl_wf_inference", || { + tcx.hir().for_each_module(|module| tcx.ensure().check_mod_impl_wf(module)) + }); + })?; + + tcx.sess.track_errors(|| { + tcx.sess.time("coherence_checking", || { + for &trait_def_id in tcx.all_local_trait_impls(()).keys() { + tcx.ensure().coherent_trait(trait_def_id); + } + + // these queries are executed for side-effects (error reporting): + tcx.ensure().crate_inherent_impls(()); + tcx.ensure().crate_inherent_impls_overlap_check(()); + }); + })?; + + if tcx.features().rustc_attrs { + tcx.sess.track_errors(|| { + tcx.sess.time("variance_testing", || variance::test::test_variance(tcx)); + })?; + } + + tcx.sess.track_errors(|| { + tcx.sess.time("wf_checking", || { + tcx.hir().par_for_each_module(|module| tcx.ensure().check_mod_type_wf(module)) + }); + })?; + + // NOTE: This is copy/pasted in librustdoc/core.rs and should be kept in sync. + tcx.sess.time("item_types_checking", || { + tcx.hir().for_each_module(|module| tcx.ensure().check_mod_item_types(module)) + }); + + tcx.sess.time("item_bodies_checking", || tcx.typeck_item_bodies(())); + + check_unused::check_crate(tcx); + check_for_entry_fn(tcx); + + if let Some(reported) = tcx.sess.has_errors() { Err(reported) } else { Ok(()) } +} + +/// A quasi-deprecated helper used in rustdoc and clippy to get +/// the type from a HIR node. +pub fn hir_ty_to_ty<'tcx>(tcx: TyCtxt<'tcx>, hir_ty: &hir::Ty<'_>) -> Ty<'tcx> { + // In case there are any projections, etc., find the "environment" + // def-ID that will be used to determine the traits/predicates in + // scope. This is derived from the enclosing item-like thing. + let env_def_id = tcx.hir().get_parent_item(hir_ty.hir_id); + let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id.to_def_id()); + >::ast_ty_to_ty(&item_cx, hir_ty) +} + +pub fn hir_trait_to_predicates<'tcx>( + tcx: TyCtxt<'tcx>, + hir_trait: &hir::TraitRef<'_>, + self_ty: Ty<'tcx>, +) -> Bounds<'tcx> { + // In case there are any projections, etc., find the "environment" + // def-ID that will be used to determine the traits/predicates in + // scope. This is derived from the enclosing item-like thing. 
+ let env_def_id = tcx.hir().get_parent_item(hir_trait.hir_ref_id); + let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id.to_def_id()); + let mut bounds = Bounds::default(); + let _ = >::instantiate_poly_trait_ref( + &item_cx, + hir_trait, + DUMMY_SP, + ty::BoundConstness::NotConst, + self_ty, + &mut bounds, + true, + ); + + bounds +} diff --git a/compiler/rustc_typeck/src/mem_categorization.rs b/compiler/rustc_typeck/src/mem_categorization.rs new file mode 100644 index 000000000..ced919f66 --- /dev/null +++ b/compiler/rustc_typeck/src/mem_categorization.rs @@ -0,0 +1,786 @@ +//! # Categorization +//! +//! The job of the categorization module is to analyze an expression to +//! determine what kind of memory is used in evaluating it (for example, +//! where dereferences occur and what kind of pointer is dereferenced; +//! whether the memory is mutable, etc.). +//! +//! Categorization effectively transforms all of our expressions into +//! expressions of the following forms (the actual enum has many more +//! possibilities, naturally, but they are all variants of these base +//! forms): +//! ```ignore (not-rust) +//! E = rvalue // some computed rvalue +//! | x // address of a local variable or argument +//! | *E // deref of a ptr +//! | E.comp // access to an interior component +//! ``` +//! Imagine a routine ToAddr(Expr) that evaluates an expression and returns an +//! address where the result is to be found. If Expr is a place, then this +//! is the address of the place. If `Expr` is an rvalue, this is the address of +//! some temporary spot in memory where the result is stored. +//! +//! Now, `cat_expr()` classifies the expression `Expr` and the address `A = ToAddr(Expr)` +//! as follows: +//! +//! - `cat`: what kind of expression was this? This is a subset of the +//! full expression forms which only includes those that we care about +//! for the purpose of the analysis. +//! - `mutbl`: mutability of the address `A`. +//! - `ty`: the type of data found at the address `A`. +//! +//! The resulting categorization tree differs somewhat from the expressions +//! themselves. For example, auto-derefs are explicit. Also, an index `a[b]` is +//! decomposed into two operations: a dereference to reach the array data and +//! then an index to jump forward to the relevant item. +//! +//! ## By-reference upvars +//! +//! One part of the codegen which may be non-obvious is that we translate +//! closure upvars into the dereference of a borrowed pointer; this more closely +//! resembles the runtime codegen. So, for example, if we had: +//! +//! let mut x = 3; +//! let y = 5; +//! let inc = || x += y; +//! +//! Then when we categorize `x` (*within* the closure) we would yield a +//! result of `*x'`, effectively, where `x'` is a `Categorization::Upvar` reference +//! tied to `x`. The type of `x'` will be a borrowed pointer. 
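To make the base-plus-projections shape described above concrete, here is a minimal, self-contained sketch; the `Base`/`Projection`/`Place` names are illustrative stand-ins, not the real `PlaceWithHirId` machinery defined below.

```rust
#![allow(dead_code)]

// Illustrative model only: every expression categorizes as a base
// (an rvalue temporary, a local, or a closure upvar) plus a list of
// projections (derefs, field accesses, indexing).
#[derive(Debug)]
enum Base {
    Rvalue,              // some computed temporary
    Local(&'static str), // a local variable or argument
    Upvar(&'static str), // a captured variable, reached through the closure env
}

#[derive(Debug)]
enum Projection {
    Deref,               // `*E`
    Field(&'static str), // `E.comp`
    Index,               // `E[i]`
}

#[derive(Debug)]
struct Place {
    base: Base,
    projections: Vec<Projection>,
}

fn main() {
    // `(*p).name`, with `p: &Person`, categorizes as the local `p`
    // followed by a deref and then a field projection.
    let place = Place {
        base: Base::Local("p"),
        projections: vec![Projection::Deref, Projection::Field("name")],
    };
    println!("{:?}", place);
}
```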
+ +use rustc_middle::hir::place::*; +use rustc_middle::ty::adjustment; +use rustc_middle::ty::fold::TypeFoldable; +use rustc_middle::ty::visit::TypeVisitable; +use rustc_middle::ty::{self, Ty, TyCtxt}; + +use rustc_data_structures::fx::FxIndexMap; +use rustc_hir as hir; +use rustc_hir::def::{CtorOf, DefKind, Res}; +use rustc_hir::def_id::LocalDefId; +use rustc_hir::pat_util::EnumerateAndAdjustIterator; +use rustc_hir::PatKind; +use rustc_index::vec::Idx; +use rustc_infer::infer::InferCtxt; +use rustc_span::Span; +use rustc_target::abi::VariantIdx; +use rustc_trait_selection::infer::InferCtxtExt; + +pub(crate) trait HirNode { + fn hir_id(&self) -> hir::HirId; + fn span(&self) -> Span; +} + +impl HirNode for hir::Expr<'_> { + fn hir_id(&self) -> hir::HirId { + self.hir_id + } + fn span(&self) -> Span { + self.span + } +} + +impl HirNode for hir::Pat<'_> { + fn hir_id(&self) -> hir::HirId { + self.hir_id + } + fn span(&self) -> Span { + self.span + } +} + +#[derive(Clone)] +pub(crate) struct MemCategorizationContext<'a, 'tcx> { + pub(crate) typeck_results: &'a ty::TypeckResults<'tcx>, + infcx: &'a InferCtxt<'a, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + body_owner: LocalDefId, + upvars: Option<&'tcx FxIndexMap>, +} + +pub(crate) type McResult = Result; + +impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> { + /// Creates a `MemCategorizationContext`. + pub(crate) fn new( + infcx: &'a InferCtxt<'a, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + body_owner: LocalDefId, + typeck_results: &'a ty::TypeckResults<'tcx>, + ) -> MemCategorizationContext<'a, 'tcx> { + MemCategorizationContext { + typeck_results, + infcx, + param_env, + body_owner, + upvars: infcx.tcx.upvars_mentioned(body_owner), + } + } + + pub(crate) fn tcx(&self) -> TyCtxt<'tcx> { + self.infcx.tcx + } + + pub(crate) fn type_is_copy_modulo_regions(&self, ty: Ty<'tcx>, span: Span) -> bool { + self.infcx.type_is_copy_modulo_regions(self.param_env, ty, span) + } + + fn resolve_vars_if_possible(&self, value: T) -> T + where + T: TypeFoldable<'tcx>, + { + self.infcx.resolve_vars_if_possible(value) + } + + fn is_tainted_by_errors(&self) -> bool { + self.infcx.is_tainted_by_errors() + } + + fn resolve_type_vars_or_error( + &self, + id: hir::HirId, + ty: Option>, + ) -> McResult> { + match ty { + Some(ty) => { + let ty = self.resolve_vars_if_possible(ty); + if ty.references_error() || ty.is_ty_var() { + debug!("resolve_type_vars_or_error: error from {:?}", ty); + Err(()) + } else { + Ok(ty) + } + } + // FIXME + None if self.is_tainted_by_errors() => Err(()), + None => { + bug!( + "no type for node {}: {} in mem_categorization", + id, + self.tcx().hir().node_to_string(id) + ); + } + } + } + + pub(crate) fn node_ty(&self, hir_id: hir::HirId) -> McResult> { + self.resolve_type_vars_or_error(hir_id, self.typeck_results.node_type_opt(hir_id)) + } + + fn expr_ty(&self, expr: &hir::Expr<'_>) -> McResult> { + self.resolve_type_vars_or_error(expr.hir_id, self.typeck_results.expr_ty_opt(expr)) + } + + pub(crate) fn expr_ty_adjusted(&self, expr: &hir::Expr<'_>) -> McResult> { + self.resolve_type_vars_or_error(expr.hir_id, self.typeck_results.expr_ty_adjusted_opt(expr)) + } + + /// Returns the type of value that this pattern matches against. + /// Some non-obvious cases: + /// + /// - a `ref x` binding matches against a value of type `T` and gives + /// `x` the type `&T`; we return `T`. 
+ /// - a pattern with implicit derefs (thanks to default binding + /// modes #42640) may look like `Some(x)` but in fact have + /// implicit deref patterns attached (e.g., it is really + /// `&Some(x)`). In that case, we return the "outermost" type + /// (e.g., `&Option). + pub(crate) fn pat_ty_adjusted(&self, pat: &hir::Pat<'_>) -> McResult> { + // Check for implicit `&` types wrapping the pattern; note + // that these are never attached to binding patterns, so + // actually this is somewhat "disjoint" from the code below + // that aims to account for `ref x`. + if let Some(vec) = self.typeck_results.pat_adjustments().get(pat.hir_id) { + if let Some(first_ty) = vec.first() { + debug!("pat_ty(pat={:?}) found adjusted ty `{:?}`", pat, first_ty); + return Ok(*first_ty); + } + } + + self.pat_ty_unadjusted(pat) + } + + /// Like `pat_ty`, but ignores implicit `&` patterns. + fn pat_ty_unadjusted(&self, pat: &hir::Pat<'_>) -> McResult> { + let base_ty = self.node_ty(pat.hir_id)?; + debug!("pat_ty(pat={:?}) base_ty={:?}", pat, base_ty); + + // This code detects whether we are looking at a `ref x`, + // and if so, figures out what the type *being borrowed* is. + let ret_ty = match pat.kind { + PatKind::Binding(..) => { + let bm = *self + .typeck_results + .pat_binding_modes() + .get(pat.hir_id) + .expect("missing binding mode"); + + if let ty::BindByReference(_) = bm { + // a bind-by-ref means that the base_ty will be the type of the ident itself, + // but what we want here is the type of the underlying value being borrowed. + // So peel off one-level, turning the &T into T. + match base_ty.builtin_deref(false) { + Some(t) => t.ty, + None => { + debug!("By-ref binding of non-derefable type {:?}", base_ty); + return Err(()); + } + } + } else { + base_ty + } + } + _ => base_ty, + }; + debug!("pat_ty(pat={:?}) ret_ty={:?}", pat, ret_ty); + + Ok(ret_ty) + } + + pub(crate) fn cat_expr(&self, expr: &hir::Expr<'_>) -> McResult> { + // This recursion helper avoids going through *too many* + // adjustments, since *only* non-overloaded deref recurses. + fn helper<'a, 'tcx>( + mc: &MemCategorizationContext<'a, 'tcx>, + expr: &hir::Expr<'_>, + adjustments: &[adjustment::Adjustment<'tcx>], + ) -> McResult> { + match adjustments.split_last() { + None => mc.cat_expr_unadjusted(expr), + Some((adjustment, previous)) => { + mc.cat_expr_adjusted_with(expr, || helper(mc, expr, previous), adjustment) + } + } + } + + helper(self, expr, self.typeck_results.expr_adjustments(expr)) + } + + pub(crate) fn cat_expr_adjusted( + &self, + expr: &hir::Expr<'_>, + previous: PlaceWithHirId<'tcx>, + adjustment: &adjustment::Adjustment<'tcx>, + ) -> McResult> { + self.cat_expr_adjusted_with(expr, || Ok(previous), adjustment) + } + + fn cat_expr_adjusted_with( + &self, + expr: &hir::Expr<'_>, + previous: F, + adjustment: &adjustment::Adjustment<'tcx>, + ) -> McResult> + where + F: FnOnce() -> McResult>, + { + debug!("cat_expr_adjusted_with({:?}): {:?}", adjustment, expr); + let target = self.resolve_vars_if_possible(adjustment.target); + match adjustment.kind { + adjustment::Adjust::Deref(overloaded) => { + // Equivalent to *expr or something similar. + let base = if let Some(deref) = overloaded { + let ref_ty = self + .tcx() + .mk_ref(deref.region, ty::TypeAndMut { ty: target, mutbl: deref.mutbl }); + self.cat_rvalue(expr.hir_id, expr.span, ref_ty) + } else { + previous()? 
+ }; + self.cat_deref(expr, base) + } + + adjustment::Adjust::NeverToAny + | adjustment::Adjust::Pointer(_) + | adjustment::Adjust::Borrow(_) => { + // Result is an rvalue. + Ok(self.cat_rvalue(expr.hir_id, expr.span, target)) + } + } + } + + pub(crate) fn cat_expr_unadjusted( + &self, + expr: &hir::Expr<'_>, + ) -> McResult> { + debug!("cat_expr: id={} expr={:?}", expr.hir_id, expr); + + let expr_ty = self.expr_ty(expr)?; + match expr.kind { + hir::ExprKind::Unary(hir::UnOp::Deref, ref e_base) => { + if self.typeck_results.is_method_call(expr) { + self.cat_overloaded_place(expr, e_base) + } else { + let base = self.cat_expr(e_base)?; + self.cat_deref(expr, base) + } + } + + hir::ExprKind::Field(ref base, _) => { + let base = self.cat_expr(base)?; + debug!("cat_expr(cat_field): id={} expr={:?} base={:?}", expr.hir_id, expr, base); + + let field_idx = self + .typeck_results + .field_indices() + .get(expr.hir_id) + .cloned() + .expect("Field index not found"); + + Ok(self.cat_projection( + expr, + base, + expr_ty, + ProjectionKind::Field(field_idx as u32, VariantIdx::new(0)), + )) + } + + hir::ExprKind::Index(ref base, _) => { + if self.typeck_results.is_method_call(expr) { + // If this is an index implemented by a method call, then it + // will include an implicit deref of the result. + // The call to index() returns a `&T` value, which + // is an rvalue. That is what we will be + // dereferencing. + self.cat_overloaded_place(expr, base) + } else { + let base = self.cat_expr(base)?; + Ok(self.cat_projection(expr, base, expr_ty, ProjectionKind::Index)) + } + } + + hir::ExprKind::Path(ref qpath) => { + let res = self.typeck_results.qpath_res(qpath, expr.hir_id); + self.cat_res(expr.hir_id, expr.span, expr_ty, res) + } + + hir::ExprKind::Type(ref e, _) => self.cat_expr(e), + + hir::ExprKind::AddrOf(..) + | hir::ExprKind::Call(..) + | hir::ExprKind::Assign(..) + | hir::ExprKind::AssignOp(..) + | hir::ExprKind::Closure { .. } + | hir::ExprKind::Ret(..) + | hir::ExprKind::Unary(..) + | hir::ExprKind::Yield(..) + | hir::ExprKind::MethodCall(..) + | hir::ExprKind::Cast(..) + | hir::ExprKind::DropTemps(..) + | hir::ExprKind::Array(..) + | hir::ExprKind::If(..) + | hir::ExprKind::Tup(..) + | hir::ExprKind::Binary(..) + | hir::ExprKind::Block(..) + | hir::ExprKind::Let(..) + | hir::ExprKind::Loop(..) + | hir::ExprKind::Match(..) + | hir::ExprKind::Lit(..) + | hir::ExprKind::ConstBlock(..) + | hir::ExprKind::Break(..) + | hir::ExprKind::Continue(..) + | hir::ExprKind::Struct(..) + | hir::ExprKind::Repeat(..) + | hir::ExprKind::InlineAsm(..) + | hir::ExprKind::Box(..) + | hir::ExprKind::Err => Ok(self.cat_rvalue(expr.hir_id, expr.span, expr_ty)), + } + } + + pub(crate) fn cat_res( + &self, + hir_id: hir::HirId, + span: Span, + expr_ty: Ty<'tcx>, + res: Res, + ) -> McResult> { + debug!("cat_res: id={:?} expr={:?} def={:?}", hir_id, expr_ty, res); + + match res { + Res::Def( + DefKind::Ctor(..) + | DefKind::Const + | DefKind::ConstParam + | DefKind::AssocConst + | DefKind::Fn + | DefKind::AssocFn, + _, + ) + | Res::SelfCtor(..) 
=> Ok(self.cat_rvalue(hir_id, span, expr_ty)), + + Res::Def(DefKind::Static(_), _) => { + Ok(PlaceWithHirId::new(hir_id, expr_ty, PlaceBase::StaticItem, Vec::new())) + } + + Res::Local(var_id) => { + if self.upvars.map_or(false, |upvars| upvars.contains_key(&var_id)) { + self.cat_upvar(hir_id, var_id) + } else { + Ok(PlaceWithHirId::new(hir_id, expr_ty, PlaceBase::Local(var_id), Vec::new())) + } + } + + def => span_bug!(span, "unexpected definition in memory categorization: {:?}", def), + } + } + + /// Categorize an upvar. + /// + /// Note: the actual upvar access contains invisible derefs of closure + /// environment and upvar reference as appropriate. Only regionck cares + /// about these dereferences, so we let it compute them as needed. + fn cat_upvar(&self, hir_id: hir::HirId, var_id: hir::HirId) -> McResult> { + let closure_expr_def_id = self.body_owner; + + let upvar_id = ty::UpvarId { + var_path: ty::UpvarPath { hir_id: var_id }, + closure_expr_id: closure_expr_def_id, + }; + let var_ty = self.node_ty(var_id)?; + + let ret = PlaceWithHirId::new(hir_id, var_ty, PlaceBase::Upvar(upvar_id), Vec::new()); + + debug!("cat_upvar ret={:?}", ret); + Ok(ret) + } + + pub(crate) fn cat_rvalue( + &self, + hir_id: hir::HirId, + span: Span, + expr_ty: Ty<'tcx>, + ) -> PlaceWithHirId<'tcx> { + debug!("cat_rvalue hir_id={:?}, expr_ty={:?}, span={:?}", hir_id, expr_ty, span); + let ret = PlaceWithHirId::new(hir_id, expr_ty, PlaceBase::Rvalue, Vec::new()); + debug!("cat_rvalue ret={:?}", ret); + ret + } + + pub(crate) fn cat_projection( + &self, + node: &N, + base_place: PlaceWithHirId<'tcx>, + ty: Ty<'tcx>, + kind: ProjectionKind, + ) -> PlaceWithHirId<'tcx> { + let mut projections = base_place.place.projections; + projections.push(Projection { kind, ty }); + let ret = PlaceWithHirId::new( + node.hir_id(), + base_place.place.base_ty, + base_place.place.base, + projections, + ); + debug!("cat_field ret {:?}", ret); + ret + } + + fn cat_overloaded_place( + &self, + expr: &hir::Expr<'_>, + base: &hir::Expr<'_>, + ) -> McResult> { + debug!("cat_overloaded_place(expr={:?}, base={:?})", expr, base); + + // Reconstruct the output assuming it's a reference with the + // same region and mutability as the receiver. This holds for + // `Deref(Mut)::Deref(_mut)` and `Index(Mut)::index(_mut)`. 
+ let place_ty = self.expr_ty(expr)?; + let base_ty = self.expr_ty_adjusted(base)?; + + let ty::Ref(region, _, mutbl) = *base_ty.kind() else { + span_bug!(expr.span, "cat_overloaded_place: base is not a reference"); + }; + let ref_ty = self.tcx().mk_ref(region, ty::TypeAndMut { ty: place_ty, mutbl }); + + let base = self.cat_rvalue(expr.hir_id, expr.span, ref_ty); + self.cat_deref(expr, base) + } + + fn cat_deref( + &self, + node: &impl HirNode, + base_place: PlaceWithHirId<'tcx>, + ) -> McResult> { + debug!("cat_deref: base_place={:?}", base_place); + + let base_curr_ty = base_place.place.ty(); + let deref_ty = match base_curr_ty.builtin_deref(true) { + Some(mt) => mt.ty, + None => { + debug!("explicit deref of non-derefable type: {:?}", base_curr_ty); + return Err(()); + } + }; + let mut projections = base_place.place.projections; + projections.push(Projection { kind: ProjectionKind::Deref, ty: deref_ty }); + + let ret = PlaceWithHirId::new( + node.hir_id(), + base_place.place.base_ty, + base_place.place.base, + projections, + ); + debug!("cat_deref ret {:?}", ret); + Ok(ret) + } + + pub(crate) fn cat_pattern( + &self, + place: PlaceWithHirId<'tcx>, + pat: &hir::Pat<'_>, + mut op: F, + ) -> McResult<()> + where + F: FnMut(&PlaceWithHirId<'tcx>, &hir::Pat<'_>), + { + self.cat_pattern_(place, pat, &mut op) + } + + /// Returns the variant index for an ADT used within a Struct or TupleStruct pattern + /// Here `pat_hir_id` is the HirId of the pattern itself. + fn variant_index_for_adt( + &self, + qpath: &hir::QPath<'_>, + pat_hir_id: hir::HirId, + span: Span, + ) -> McResult { + let res = self.typeck_results.qpath_res(qpath, pat_hir_id); + let ty = self.typeck_results.node_type(pat_hir_id); + let ty::Adt(adt_def, _) = ty.kind() else { + self.tcx() + .sess + .delay_span_bug(span, "struct or tuple struct pattern not applied to an ADT"); + return Err(()); + }; + + match res { + Res::Def(DefKind::Variant, variant_id) => Ok(adt_def.variant_index_with_id(variant_id)), + Res::Def(DefKind::Ctor(CtorOf::Variant, ..), variant_ctor_id) => { + Ok(adt_def.variant_index_with_ctor_id(variant_ctor_id)) + } + Res::Def(DefKind::Ctor(CtorOf::Struct, ..), _) + | Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _) + | Res::SelfCtor(..) + | Res::SelfTy { .. } => { + // Structs and Unions have only have one variant. + Ok(VariantIdx::new(0)) + } + _ => bug!("expected ADT path, found={:?}", res), + } + } + + /// Returns the total number of fields in an ADT variant used within a pattern. + /// Here `pat_hir_id` is the HirId of the pattern itself. + fn total_fields_in_adt_variant( + &self, + pat_hir_id: hir::HirId, + variant_index: VariantIdx, + span: Span, + ) -> McResult { + let ty = self.typeck_results.node_type(pat_hir_id); + match ty.kind() { + ty::Adt(adt_def, _) => Ok(adt_def.variant(variant_index).fields.len()), + _ => { + self.tcx() + .sess + .delay_span_bug(span, "struct or tuple struct pattern not applied to an ADT"); + Err(()) + } + } + } + + /// Returns the total number of fields in a tuple used within a Tuple pattern. + /// Here `pat_hir_id` is the HirId of the pattern itself. 
+ fn total_fields_in_tuple(&self, pat_hir_id: hir::HirId, span: Span) -> McResult { + let ty = self.typeck_results.node_type(pat_hir_id); + match ty.kind() { + ty::Tuple(substs) => Ok(substs.len()), + _ => { + self.tcx().sess.delay_span_bug(span, "tuple pattern not applied to a tuple"); + Err(()) + } + } + } + + // FIXME(#19596) This is a workaround, but there should be a better way to do this + fn cat_pattern_( + &self, + mut place_with_id: PlaceWithHirId<'tcx>, + pat: &hir::Pat<'_>, + op: &mut F, + ) -> McResult<()> + where + F: FnMut(&PlaceWithHirId<'tcx>, &hir::Pat<'_>), + { + // Here, `place` is the `PlaceWithHirId` being matched and pat is the pattern it + // is being matched against. + // + // In general, the way that this works is that we walk down the pattern, + // constructing a `PlaceWithHirId` that represents the path that will be taken + // to reach the value being matched. + + debug!("cat_pattern(pat={:?}, place_with_id={:?})", pat, place_with_id); + + // If (pattern) adjustments are active for this pattern, adjust the `PlaceWithHirId` correspondingly. + // `PlaceWithHirId`s are constructed differently from patterns. For example, in + // + // ``` + // match foo { + // &&Some(x, ) => { ... }, + // _ => { ... }, + // } + // ``` + // + // the pattern `&&Some(x,)` is represented as `Ref { Ref { TupleStruct }}`. To build the + // corresponding `PlaceWithHirId` we start with the `PlaceWithHirId` for `foo`, and then, by traversing the + // pattern, try to answer the question: given the address of `foo`, how is `x` reached? + // + // `&&Some(x,)` `place_foo` + // `&Some(x,)` `deref { place_foo}` + // `Some(x,)` `deref { deref { place_foo }}` + // (x,)` `field0 { deref { deref { place_foo }}}` <- resulting place + // + // The above example has no adjustments. If the code were instead the (after adjustments, + // equivalent) version + // + // ``` + // match foo { + // Some(x, ) => { ... }, + // _ => { ... }, + // } + // ``` + // + // Then we see that to get the same result, we must start with + // `deref { deref { place_foo }}` instead of `place_foo` since the pattern is now `Some(x,)` + // and not `&&Some(x,)`, even though its assigned type is that of `&&Some(x,)`. + for _ in 0..self.typeck_results.pat_adjustments().get(pat.hir_id).map_or(0, |v| v.len()) { + debug!("cat_pattern: applying adjustment to place_with_id={:?}", place_with_id); + place_with_id = self.cat_deref(pat, place_with_id)?; + } + let place_with_id = place_with_id; // lose mutability + debug!("cat_pattern: applied adjustment derefs to get place_with_id={:?}", place_with_id); + + // Invoke the callback, but only now, after the `place_with_id` has adjusted. + // + // To see that this makes sense, consider `match &Some(3) { Some(x) => { ... }}`. In that + // case, the initial `place_with_id` will be that for `&Some(3)` and the pattern is `Some(x)`. We + // don't want to call `op` with these incompatible values. As written, what happens instead + // is that `op` is called with the adjusted place (that for `*&Some(3)`) and the pattern + // `Some(x)` (which matches). Recursing once more, `*&Some(3)` and the pattern `Some(x)` + // result in the place `Downcast(*&Some(3)).0` associated to `x` and invoke `op` with + // that (where the `ref` on `x` is implied). 
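The adjustment walk above is the compiler-side counterpart of "match ergonomics" (default binding modes, #42640). A small stable-Rust illustration of the case the comment describes, where the written pattern is `Some(x)` but the scrutinee sits behind two references:

```rust
fn main() {
    let foo: &&Option<i32> = &&Some(3);

    // The pattern is written as `Some(x)`, but because the scrutinee has type
    // `&&Option<i32>`, two implicit deref adjustments are recorded on the
    // pattern (the `pat_adjustments` consulted above) and `x` binds as `&i32`.
    match foo {
        Some(x) => println!("got {}", x),
        None => println!("nothing"),
    }
}
```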
+ op(&place_with_id, pat); + + match pat.kind { + PatKind::Tuple(subpats, dots_pos) => { + // (p1, ..., pN) + let total_fields = self.total_fields_in_tuple(pat.hir_id, pat.span)?; + + for (i, subpat) in subpats.iter().enumerate_and_adjust(total_fields, dots_pos) { + let subpat_ty = self.pat_ty_adjusted(subpat)?; + let projection_kind = ProjectionKind::Field(i as u32, VariantIdx::new(0)); + let sub_place = + self.cat_projection(pat, place_with_id.clone(), subpat_ty, projection_kind); + self.cat_pattern_(sub_place, subpat, op)?; + } + } + + PatKind::TupleStruct(ref qpath, subpats, dots_pos) => { + // S(p1, ..., pN) + let variant_index = self.variant_index_for_adt(qpath, pat.hir_id, pat.span)?; + let total_fields = + self.total_fields_in_adt_variant(pat.hir_id, variant_index, pat.span)?; + + for (i, subpat) in subpats.iter().enumerate_and_adjust(total_fields, dots_pos) { + let subpat_ty = self.pat_ty_adjusted(subpat)?; + let projection_kind = ProjectionKind::Field(i as u32, variant_index); + let sub_place = + self.cat_projection(pat, place_with_id.clone(), subpat_ty, projection_kind); + self.cat_pattern_(sub_place, subpat, op)?; + } + } + + PatKind::Struct(ref qpath, field_pats, _) => { + // S { f1: p1, ..., fN: pN } + + let variant_index = self.variant_index_for_adt(qpath, pat.hir_id, pat.span)?; + + for fp in field_pats { + let field_ty = self.pat_ty_adjusted(fp.pat)?; + let field_index = self + .typeck_results + .field_indices() + .get(fp.hir_id) + .cloned() + .expect("no index for a field"); + + let field_place = self.cat_projection( + pat, + place_with_id.clone(), + field_ty, + ProjectionKind::Field(field_index as u32, variant_index), + ); + self.cat_pattern_(field_place, fp.pat, op)?; + } + } + + PatKind::Or(pats) => { + for pat in pats { + self.cat_pattern_(place_with_id.clone(), pat, op)?; + } + } + + PatKind::Binding(.., Some(ref subpat)) => { + self.cat_pattern_(place_with_id, subpat, op)?; + } + + PatKind::Box(ref subpat) | PatKind::Ref(ref subpat, _) => { + // box p1, &p1, &mut p1. we can ignore the mutability of + // PatKind::Ref since that information is already contained + // in the type. + let subplace = self.cat_deref(pat, place_with_id)?; + self.cat_pattern_(subplace, subpat, op)?; + } + + PatKind::Slice(before, ref slice, after) => { + let Some(element_ty) = place_with_id.place.ty().builtin_index() else { + debug!("explicit index of non-indexable type {:?}", place_with_id); + return Err(()); + }; + let elt_place = self.cat_projection( + pat, + place_with_id.clone(), + element_ty, + ProjectionKind::Index, + ); + for before_pat in before { + self.cat_pattern_(elt_place.clone(), before_pat, op)?; + } + if let Some(ref slice_pat) = *slice { + let slice_pat_ty = self.pat_ty_adjusted(slice_pat)?; + let slice_place = self.cat_projection( + pat, + place_with_id, + slice_pat_ty, + ProjectionKind::Subslice, + ); + self.cat_pattern_(slice_place, slice_pat, op)?; + } + for after_pat in after { + self.cat_pattern_(elt_place.clone(), after_pat, op)?; + } + } + + PatKind::Path(_) + | PatKind::Binding(.., None) + | PatKind::Lit(..) + | PatKind::Range(..) 
+ | PatKind::Wild => { + // always ok + } + } + + Ok(()) + } +} diff --git a/compiler/rustc_typeck/src/outlives/explicit.rs b/compiler/rustc_typeck/src/outlives/explicit.rs new file mode 100644 index 000000000..7534482cc --- /dev/null +++ b/compiler/rustc_typeck/src/outlives/explicit.rs @@ -0,0 +1,69 @@ +use rustc_data_structures::fx::FxHashMap; +use rustc_hir::def_id::DefId; +use rustc_middle::ty::{self, OutlivesPredicate, TyCtxt}; + +use super::utils::*; + +#[derive(Debug)] +pub struct ExplicitPredicatesMap<'tcx> { + map: FxHashMap>>, +} + +impl<'tcx> ExplicitPredicatesMap<'tcx> { + pub fn new() -> ExplicitPredicatesMap<'tcx> { + ExplicitPredicatesMap { map: FxHashMap::default() } + } + + pub(crate) fn explicit_predicates_of( + &mut self, + tcx: TyCtxt<'tcx>, + def_id: DefId, + ) -> &ty::EarlyBinder> { + self.map.entry(def_id).or_insert_with(|| { + let predicates = if def_id.is_local() { + tcx.explicit_predicates_of(def_id) + } else { + tcx.predicates_of(def_id) + }; + let mut required_predicates = RequiredPredicates::default(); + + // process predicates and convert to `RequiredPredicates` entry, see below + for &(predicate, span) in predicates.predicates { + match predicate.kind().skip_binder() { + ty::PredicateKind::TypeOutlives(OutlivesPredicate(ty, reg)) => { + insert_outlives_predicate( + tcx, + ty.into(), + reg, + span, + &mut required_predicates, + ) + } + + ty::PredicateKind::RegionOutlives(OutlivesPredicate(reg1, reg2)) => { + insert_outlives_predicate( + tcx, + reg1.into(), + reg2, + span, + &mut required_predicates, + ) + } + + ty::PredicateKind::Trait(..) + | ty::PredicateKind::Projection(..) + | ty::PredicateKind::WellFormed(..) + | ty::PredicateKind::ObjectSafe(..) + | ty::PredicateKind::ClosureKind(..) + | ty::PredicateKind::Subtype(..) + | ty::PredicateKind::Coerce(..) + | ty::PredicateKind::ConstEvaluatable(..) + | ty::PredicateKind::ConstEquate(..) + | ty::PredicateKind::TypeWellFormedFromEnv(..) => (), + } + } + + ty::EarlyBinder(required_predicates) + }) + } +} diff --git a/compiler/rustc_typeck/src/outlives/implicit_infer.rs b/compiler/rustc_typeck/src/outlives/implicit_infer.rs new file mode 100644 index 000000000..3b779280e --- /dev/null +++ b/compiler/rustc_typeck/src/outlives/implicit_infer.rs @@ -0,0 +1,300 @@ +use rustc_data_structures::fx::FxHashMap; +use rustc_hir::def::DefKind; +use rustc_hir::def_id::DefId; +use rustc_middle::ty::subst::{GenericArg, GenericArgKind, Subst}; +use rustc_middle::ty::{self, DefIdTree, Ty, TyCtxt}; +use rustc_span::Span; + +use super::explicit::ExplicitPredicatesMap; +use super::utils::*; + +/// Infer predicates for the items in the crate. +/// +/// `global_inferred_outlives`: this is initially the empty map that +/// was generated by walking the items in the crate. This will +/// now be filled with inferred predicates. +pub(super) fn infer_predicates<'tcx>( + tcx: TyCtxt<'tcx>, +) -> FxHashMap>> { + debug!("infer_predicates"); + + let mut explicit_map = ExplicitPredicatesMap::new(); + + let mut global_inferred_outlives = FxHashMap::default(); + + // If new predicates were added then we need to re-calculate + // all crates since there could be new implied predicates. 
+ 'outer: loop { + let mut predicates_added = false; + + // Visit all the crates and infer predicates + for id in tcx.hir().items() { + let item_did = id.def_id; + + debug!("InferVisitor::visit_item(item={:?})", item_did); + + let mut item_required_predicates = RequiredPredicates::default(); + match tcx.def_kind(item_did) { + DefKind::Union | DefKind::Enum | DefKind::Struct => { + let adt_def = tcx.adt_def(item_did.to_def_id()); + + // Iterate over all fields in item_did + for field_def in adt_def.all_fields() { + // Calculating the predicate requirements necessary + // for item_did. + // + // For field of type &'a T (reference) or Adt + // (struct/enum/union) there will be outlive + // requirements for adt_def. + let field_ty = tcx.type_of(field_def.did); + let field_span = tcx.def_span(field_def.did); + insert_required_predicates_to_be_wf( + tcx, + field_ty, + field_span, + &global_inferred_outlives, + &mut item_required_predicates, + &mut explicit_map, + ); + } + } + + _ => {} + }; + + // If new predicates were added (`local_predicate_map` has more + // predicates than the `global_inferred_outlives`), the new predicates + // might result in implied predicates for their parent types. + // Therefore mark `predicates_added` as true and which will ensure + // we walk the crates again and re-calculate predicates for all + // items. + let item_predicates_len: usize = + global_inferred_outlives.get(&item_did.to_def_id()).map_or(0, |p| p.0.len()); + if item_required_predicates.len() > item_predicates_len { + predicates_added = true; + global_inferred_outlives + .insert(item_did.to_def_id(), ty::EarlyBinder(item_required_predicates)); + } + } + + if !predicates_added { + break 'outer; + } + } + + global_inferred_outlives +} + +fn insert_required_predicates_to_be_wf<'tcx>( + tcx: TyCtxt<'tcx>, + field_ty: Ty<'tcx>, + field_span: Span, + global_inferred_outlives: &FxHashMap>>, + required_predicates: &mut RequiredPredicates<'tcx>, + explicit_map: &mut ExplicitPredicatesMap<'tcx>, +) { + for arg in field_ty.walk() { + let ty = match arg.unpack() { + GenericArgKind::Type(ty) => ty, + + // No predicates from lifetimes or constants, except potentially + // constants' types, but `walk` will get to them as well. + GenericArgKind::Lifetime(_) | GenericArgKind::Const(_) => continue, + }; + + match *ty.kind() { + // The field is of type &'a T which means that we will have + // a predicate requirement of T: 'a (T outlives 'a). + // + // We also want to calculate potential predicates for the T + ty::Ref(region, rty, _) => { + debug!("Ref"); + insert_outlives_predicate(tcx, rty.into(), region, field_span, required_predicates); + } + + // For each Adt (struct/enum/union) type `Foo<'a, T>`, we + // can load the current set of inferred and explicit + // predicates from `global_inferred_outlives` and filter the + // ones that are TypeOutlives. + ty::Adt(def, substs) => { + // First check the inferred predicates + // + // Example 1: + // + // struct Foo<'a, T> { + // field1: Bar<'a, T> + // } + // + // struct Bar<'b, U> { + // field2: &'b U + // } + // + // Here, when processing the type of `field1`, we would + // request the set of implicit predicates computed for `Bar` + // thus far. This will initially come back empty, but in next + // round we will get `U: 'b`. We then apply the substitution + // `['b => 'a, U => T]` and thus get the requirement that `T: + // 'a` holds for `Foo`. 
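A stable-Rust example of the two-pass inference the comment walks through; with outlives inference (RFC 2093), neither definition needs a written bound, because `U: 'b` is inferred for `Bar` on the first pass and then propagated to `Foo` as `T: 'a` on the next:

```rust
// Neither struct writes an outlives bound; `U: 'b` and `T: 'a` are inferred.
// Before outlives inference these definitions required the bounds explicitly.
struct Bar<'b, U> {
    field2: &'b U,
}

struct Foo<'a, T> {
    field1: Bar<'a, T>,
}

fn main() {
    let x = 5i32;
    let foo = Foo { field1: Bar { field2: &x } };
    println!("{}", foo.field1.field2);
}
```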
+ debug!("Adt"); + if let Some(unsubstituted_predicates) = global_inferred_outlives.get(&def.did()) { + for (unsubstituted_predicate, &span) in &unsubstituted_predicates.0 { + // `unsubstituted_predicate` is `U: 'b` in the + // example above. So apply the substitution to + // get `T: 'a` (or `predicate`): + let predicate = unsubstituted_predicates + .rebind(*unsubstituted_predicate) + .subst(tcx, substs); + insert_outlives_predicate( + tcx, + predicate.0, + predicate.1, + span, + required_predicates, + ); + } + } + + // Check if the type has any explicit predicates that need + // to be added to `required_predicates` + // let _: () = substs.region_at(0); + check_explicit_predicates( + tcx, + def.did(), + substs, + required_predicates, + explicit_map, + None, + ); + } + + ty::Dynamic(obj, ..) => { + // This corresponds to `dyn Trait<..>`. In this case, we should + // use the explicit predicates as well. + + debug!("Dynamic"); + debug!("field_ty = {}", &field_ty); + debug!("ty in field = {}", &ty); + if let Some(ex_trait_ref) = obj.principal() { + // Here, we are passing the type `usize` as a + // placeholder value with the function + // `with_self_ty`, since there is no concrete type + // `Self` for a `dyn Trait` at this + // stage. Therefore when checking explicit + // predicates in `check_explicit_predicates` we + // need to ignore checking the explicit_map for + // Self type. + let substs = + ex_trait_ref.with_self_ty(tcx, tcx.types.usize).skip_binder().substs; + check_explicit_predicates( + tcx, + ex_trait_ref.skip_binder().def_id, + substs, + required_predicates, + explicit_map, + Some(tcx.types.self_param), + ); + } + } + + ty::Projection(obj) => { + // This corresponds to `>::Bar`. In this case, we should use the + // explicit predicates as well. + debug!("Projection"); + check_explicit_predicates( + tcx, + tcx.parent(obj.item_def_id), + obj.substs, + required_predicates, + explicit_map, + None, + ); + } + + _ => {} + } + } +} + +/// We also have to check the explicit predicates +/// declared on the type. +/// ```ignore (illustrative) +/// struct Foo<'a, T> { +/// field1: Bar +/// } +/// +/// struct Bar where U: 'static, U: Foo { +/// ... +/// } +/// ``` +/// Here, we should fetch the explicit predicates, which +/// will give us `U: 'static` and `U: Foo`. The latter we +/// can ignore, but we will want to process `U: 'static`, +/// applying the substitution as above. +fn check_explicit_predicates<'tcx>( + tcx: TyCtxt<'tcx>, + def_id: DefId, + substs: &[GenericArg<'tcx>], + required_predicates: &mut RequiredPredicates<'tcx>, + explicit_map: &mut ExplicitPredicatesMap<'tcx>, + ignored_self_ty: Option>, +) { + debug!( + "check_explicit_predicates(def_id={:?}, \ + substs={:?}, \ + explicit_map={:?}, \ + required_predicates={:?}, \ + ignored_self_ty={:?})", + def_id, substs, explicit_map, required_predicates, ignored_self_ty, + ); + let explicit_predicates = explicit_map.explicit_predicates_of(tcx, def_id); + + for (outlives_predicate, &span) in &explicit_predicates.0 { + debug!("outlives_predicate = {:?}", &outlives_predicate); + + // Careful: If we are inferring the effects of a `dyn Trait<..>` + // type, then when we look up the predicates for `Trait`, + // we may find some that reference `Self`. e.g., perhaps the + // definition of `Trait` was: + // + // ``` + // trait Trait<'a, T> where Self: 'a { .. } + // ``` + // + // we want to ignore such predicates here, because + // there is no type parameter for them to affect. 
Consider + // a struct containing `dyn Trait`: + // + // ``` + // struct MyStruct<'x, X> { field: Box> } + // ``` + // + // The `where Self: 'a` predicate refers to the *existential, hidden type* + // that is represented by the `dyn Trait`, not to the `X` type parameter + // (or any other generic parameter) declared on `MyStruct`. + // + // Note that we do this check for self **before** applying `substs`. In the + // case that `substs` come from a `dyn Trait` type, our caller will have + // included `Self = usize` as the value for `Self`. If we were + // to apply the substs, and not filter this predicate, we might then falsely + // conclude that e.g., `X: 'x` was a reasonable inferred requirement. + // + // Another similar case is where we have an inferred + // requirement like `::Foo: 'b`. We presently + // ignore such requirements as well (cc #54467)-- though + // conceivably it might be better if we could extract the `Foo + // = X` binding from the object type (there must be such a + // binding) and thus infer an outlives requirement that `X: + // 'b`. + if let Some(self_ty) = ignored_self_ty + && let GenericArgKind::Type(ty) = outlives_predicate.0.unpack() + && ty.walk().any(|arg| arg == self_ty.into()) + { + debug!("skipping self ty = {:?}", &ty); + continue; + } + + let predicate = explicit_predicates.rebind(*outlives_predicate).subst(tcx, substs); + debug!("predicate = {:?}", &predicate); + insert_outlives_predicate(tcx, predicate.0, predicate.1, span, required_predicates); + } +} diff --git a/compiler/rustc_typeck/src/outlives/mod.rs b/compiler/rustc_typeck/src/outlives/mod.rs new file mode 100644 index 000000000..8fa65d51e --- /dev/null +++ b/compiler/rustc_typeck/src/outlives/mod.rs @@ -0,0 +1,130 @@ +use hir::Node; +use rustc_hir as hir; +use rustc_hir::def_id::DefId; +use rustc_middle::ty::query::Providers; +use rustc_middle::ty::subst::GenericArgKind; +use rustc_middle::ty::{self, CratePredicatesMap, ToPredicate, TyCtxt}; +use rustc_span::symbol::sym; +use rustc_span::Span; + +mod explicit; +mod implicit_infer; +pub(crate) mod outlives_bounds; +/// Code to write unit test for outlives. +pub mod test; +mod utils; + +pub fn provide(providers: &mut Providers) { + *providers = Providers { inferred_outlives_of, inferred_outlives_crate, ..*providers }; +} + +fn inferred_outlives_of(tcx: TyCtxt<'_>, item_def_id: DefId) -> &[(ty::Predicate<'_>, Span)] { + let id = tcx.hir().local_def_id_to_hir_id(item_def_id.expect_local()); + + if matches!(tcx.def_kind(item_def_id), hir::def::DefKind::AnonConst) && tcx.lazy_normalization() + { + if tcx.hir().opt_const_param_default_param_hir_id(id).is_some() { + // In `generics_of` we set the generics' parent to be our parent's parent which means that + // we lose out on the predicates of our actual parent if we dont return those predicates here. + // (See comment in `generics_of` for more information on why the parent shenanigans is necessary) + // + // struct Foo<'a, 'b, const N: usize = { ... 
}>(&'a &'b ()); + // ^^^ ^^^^^^^ the def id we are calling + // ^^^ inferred_outlives_of on + // parent item we dont have set as the + // parent of generics returned by `generics_of` + // + // In the above code we want the anon const to have predicates in its param env for `'b: 'a` + let item_def_id = tcx.hir().get_parent_item(id); + // In the above code example we would be calling `inferred_outlives_of(Foo)` here + return tcx.inferred_outlives_of(item_def_id); + } + } + + match tcx.hir().get(id) { + Node::Item(item) => match item.kind { + hir::ItemKind::Struct(..) | hir::ItemKind::Enum(..) | hir::ItemKind::Union(..) => { + let crate_map = tcx.inferred_outlives_crate(()); + + let predicates = crate_map.predicates.get(&item_def_id).copied().unwrap_or(&[]); + + if tcx.has_attr(item_def_id, sym::rustc_outlives) { + let mut pred: Vec = predicates + .iter() + .map(|(out_pred, _)| match out_pred.kind().skip_binder() { + ty::PredicateKind::RegionOutlives(p) => p.to_string(), + ty::PredicateKind::TypeOutlives(p) => p.to_string(), + err => bug!("unexpected predicate {:?}", err), + }) + .collect(); + pred.sort(); + + let span = tcx.def_span(item_def_id); + let mut err = tcx.sess.struct_span_err(span, "rustc_outlives"); + for p in &pred { + err.note(p); + } + err.emit(); + } + + debug!("inferred_outlives_of({:?}) = {:?}", item_def_id, predicates); + + predicates + } + + _ => &[], + }, + + _ => &[], + } +} + +fn inferred_outlives_crate(tcx: TyCtxt<'_>, (): ()) -> CratePredicatesMap<'_> { + // Compute a map from each struct/enum/union S to the **explicit** + // outlives predicates (`T: 'a`, `'a: 'b`) that the user wrote. + // Typically there won't be many of these, except in older code where + // they were mandatory. Nonetheless, we have to ensure that every such + // predicate is satisfied, so they form a kind of base set of requirements + // for the type. + + // Compute the inferred predicates + let global_inferred_outlives = implicit_infer::infer_predicates(tcx); + + // Convert the inferred predicates into the "collected" form the + // global data structure expects. + // + // FIXME -- consider correcting impedance mismatch in some way, + // probably by updating the global data structure. + let predicates = global_inferred_outlives + .iter() + .map(|(&def_id, set)| { + let predicates = &*tcx.arena.alloc_from_iter(set.0.iter().filter_map( + |(ty::OutlivesPredicate(kind1, region2), &span)| { + match kind1.unpack() { + GenericArgKind::Type(ty1) => Some(( + ty::Binder::dummy(ty::PredicateKind::TypeOutlives( + ty::OutlivesPredicate(ty1, *region2), + )) + .to_predicate(tcx), + span, + )), + GenericArgKind::Lifetime(region1) => Some(( + ty::Binder::dummy(ty::PredicateKind::RegionOutlives( + ty::OutlivesPredicate(region1, *region2), + )) + .to_predicate(tcx), + span, + )), + GenericArgKind::Const(_) => { + // Generic consts don't impose any constraints. 
+ None + } + } + }, + )); + (def_id, predicates) + }) + .collect(); + + ty::CratePredicatesMap { predicates } +} diff --git a/compiler/rustc_typeck/src/outlives/outlives_bounds.rs b/compiler/rustc_typeck/src/outlives/outlives_bounds.rs new file mode 100644 index 000000000..229a64650 --- /dev/null +++ b/compiler/rustc_typeck/src/outlives/outlives_bounds.rs @@ -0,0 +1,90 @@ +use rustc_hir as hir; +use rustc_middle::ty::{self, Ty}; +use rustc_trait_selection::infer::InferCtxt; +use rustc_trait_selection::traits::query::type_op::{self, TypeOp, TypeOpOutput}; +use rustc_trait_selection::traits::query::NoSolution; +use rustc_trait_selection::traits::{ObligationCause, TraitEngine, TraitEngineExt}; + +pub use rustc_middle::traits::query::OutlivesBound; + +pub trait InferCtxtExt<'tcx> { + fn implied_outlives_bounds( + &self, + param_env: ty::ParamEnv<'tcx>, + body_id: hir::HirId, + ty: Ty<'tcx>, + ) -> Vec>; +} + +impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> { + /// Implied bounds are region relationships that we deduce + /// automatically. The idea is that (e.g.) a caller must check that a + /// function's argument types are well-formed immediately before + /// calling that fn, and hence the *callee* can assume that its + /// argument types are well-formed. This may imply certain relationships + /// between generic parameters. For example: + /// ``` + /// fn foo<'a,T>(x: &'a T) {} + /// ``` + /// can only be called with a `'a` and `T` such that `&'a T` is WF. + /// For `&'a T` to be WF, `T: 'a` must hold. So we can assume `T: 'a`. + /// + /// # Parameters + /// + /// - `param_env`, the where-clauses in scope + /// - `body_id`, the body-id to use when normalizing assoc types. + /// Note that this may cause outlives obligations to be injected + /// into the inference context with this body-id. + /// - `ty`, the type that we are supposed to assume is WF. + #[instrument(level = "debug", skip(self, param_env, body_id))] + fn implied_outlives_bounds( + &self, + param_env: ty::ParamEnv<'tcx>, + body_id: hir::HirId, + ty: Ty<'tcx>, + ) -> Vec> { + let span = self.tcx.hir().span(body_id); + let result = param_env + .and(type_op::implied_outlives_bounds::ImpliedOutlivesBounds { ty }) + .fully_perform(self); + let result = match result { + Ok(r) => r, + Err(NoSolution) => { + self.tcx.sess.delay_span_bug( + span, + "implied_outlives_bounds failed to solve all obligations", + ); + return vec![]; + } + }; + + let TypeOpOutput { output, constraints, .. } = result; + + if let Some(constraints) = constraints { + // Instantiation may have produced new inference variables and constraints on those + // variables. Process these constraints. 
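A stable-Rust illustration of the implied-bounds behaviour documented above; `Holder` is an illustrative type, not part of this crate:

```rust
// `Holder` demands `T: 'a`, but `foo` never writes that bound: for `foo` to
// be called, its argument type `&'a T` must be well-formed, which already
// implies `T: 'a`, so the callee gets to assume it.
struct Holder<'a, T: 'a>(&'a T);

fn foo<'a, T>(x: &'a T) -> Holder<'a, T> {
    Holder(x)
}

fn main() {
    let n = 1u32;
    let _held = foo(&n);
}
```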
+ let mut fulfill_cx = >::new(self.tcx); + let cause = ObligationCause::misc(span, body_id); + for &constraint in &constraints.outlives { + let obligation = self.query_outlives_constraint_to_obligation( + constraint, + cause.clone(), + param_env, + ); + fulfill_cx.register_predicate_obligation(self, obligation); + } + if !constraints.member_constraints.is_empty() { + span_bug!(span, "{:#?}", constraints.member_constraints); + } + let errors = fulfill_cx.select_all_or_error(self); + if !errors.is_empty() { + self.tcx.sess.delay_span_bug( + span, + "implied_outlives_bounds failed to solve obligations from instantiation", + ); + } + }; + + output + } +} diff --git a/compiler/rustc_typeck/src/outlives/test.rs b/compiler/rustc_typeck/src/outlives/test.rs new file mode 100644 index 000000000..eb0e12034 --- /dev/null +++ b/compiler/rustc_typeck/src/outlives/test.rs @@ -0,0 +1,21 @@ +use rustc_errors::struct_span_err; +use rustc_middle::ty::TyCtxt; +use rustc_span::symbol::sym; + +pub fn test_inferred_outlives(tcx: TyCtxt<'_>) { + for id in tcx.hir().items() { + // For unit testing: check for a special "rustc_outlives" + // attribute and report an error with various results if found. + if tcx.has_attr(id.def_id.to_def_id(), sym::rustc_outlives) { + let inferred_outlives_of = tcx.inferred_outlives_of(id.def_id); + struct_span_err!( + tcx.sess, + tcx.def_span(id.def_id), + E0640, + "{:?}", + inferred_outlives_of + ) + .emit(); + } + } +} diff --git a/compiler/rustc_typeck/src/outlives/utils.rs b/compiler/rustc_typeck/src/outlives/utils.rs new file mode 100644 index 000000000..b718ca942 --- /dev/null +++ b/compiler/rustc_typeck/src/outlives/utils.rs @@ -0,0 +1,175 @@ +use rustc_infer::infer::outlives::components::{push_outlives_components, Component}; +use rustc_middle::ty::subst::{GenericArg, GenericArgKind}; +use rustc_middle::ty::{self, Region, Ty, TyCtxt}; +use rustc_span::Span; +use smallvec::smallvec; +use std::collections::BTreeMap; + +/// Tracks the `T: 'a` or `'a: 'a` predicates that we have inferred +/// must be added to the struct header. +pub(crate) type RequiredPredicates<'tcx> = + BTreeMap, ty::Region<'tcx>>, Span>; + +/// Given a requirement `T: 'a` or `'b: 'a`, deduce the +/// outlives_component and add it to `required_predicates` +pub(crate) fn insert_outlives_predicate<'tcx>( + tcx: TyCtxt<'tcx>, + kind: GenericArg<'tcx>, + outlived_region: Region<'tcx>, + span: Span, + required_predicates: &mut RequiredPredicates<'tcx>, +) { + // If the `'a` region is bound within the field type itself, we + // don't want to propagate this constraint to the header. + if !is_free_region(outlived_region) { + return; + } + + match kind.unpack() { + GenericArgKind::Type(ty) => { + // `T: 'outlived_region` for some type `T` + // But T could be a lot of things: + // e.g., if `T = &'b u32`, then `'b: 'outlived_region` is + // what we want to add. + // + // Or if within `struct Foo` you had `T = Vec`, then + // we would want to add `U: 'outlived_region` + let mut components = smallvec![]; + push_outlives_components(tcx, ty, &mut components); + for component in components { + match component { + Component::Region(r) => { + // This would arise from something like: + // + // ``` + // struct Foo<'a, 'b> { + // x: &'a &'b u32 + // } + // ``` + // + // Here `outlived_region = 'a` and `kind = &'b + // u32`. Decomposing `&'b u32` into + // components would yield `'b`, and we add the + // where clause that `'b: 'a`. 
+ insert_outlives_predicate( + tcx, + r.into(), + outlived_region, + span, + required_predicates, + ); + } + + Component::Param(param_ty) => { + // param_ty: ty::ParamTy + // This would arise from something like: + // + // ``` + // struct Foo<'a, U> { + // x: &'a Vec + // } + // ``` + // + // Here `outlived_region = 'a` and `kind = + // Vec`. Decomposing `Vec` into + // components would yield `U`, and we add the + // where clause that `U: 'a`. + let ty: Ty<'tcx> = param_ty.to_ty(tcx); + required_predicates + .entry(ty::OutlivesPredicate(ty.into(), outlived_region)) + .or_insert(span); + } + + Component::Projection(proj_ty) => { + // This would arise from something like: + // + // ``` + // struct Foo<'a, T: Iterator> { + // x: &'a ::Item + // } + // ``` + // + // Here we want to add an explicit `where ::Item: 'a`. + let ty: Ty<'tcx> = tcx.mk_projection(proj_ty.item_def_id, proj_ty.substs); + required_predicates + .entry(ty::OutlivesPredicate(ty.into(), outlived_region)) + .or_insert(span); + } + + Component::EscapingProjection(_) => { + // As above, but the projection involves + // late-bound regions. Therefore, the WF + // requirement is not checked in type definition + // but at fn call site, so ignore it. + // + // ``` + // struct Foo<'a, T: Iterator> { + // x: for<'b> fn(<&'b T as Iterator>::Item) + // // ^^^^^^^^^^^^^^^^^^^^^^^^^ + // } + // ``` + // + // Since `'b` is not in scope on `Foo`, can't + // do anything here, ignore it. + } + + Component::UnresolvedInferenceVariable(_) => bug!("not using infcx"), + } + } + } + + GenericArgKind::Lifetime(r) => { + if !is_free_region(r) { + return; + } + required_predicates.entry(ty::OutlivesPredicate(kind, outlived_region)).or_insert(span); + } + + GenericArgKind::Const(_) => { + // Generic consts don't impose any constraints. + } + } +} + +fn is_free_region(region: Region<'_>) -> bool { + // First, screen for regions that might appear in a type header. + match *region { + // These correspond to `T: 'a` relationships: + // + // struct Foo<'a, T> { + // field: &'a T, // this would generate a ReEarlyBound referencing `'a` + // } + // + // We care about these, so fall through. + ty::ReEarlyBound(_) => true, + + // These correspond to `T: 'static` relationships which can be + // rather surprising. + // + // struct Foo<'a, T> { + // field: &'static T, // this would generate a ReStatic + // } + ty::ReStatic => false, + + // Late-bound regions can appear in `fn` types: + // + // struct Foo { + // field: for<'b> fn(&'b T) // e.g., 'b here + // } + // + // The type above might generate a `T: 'b` bound, but we can + // ignore it. We can't put it on the struct header anyway. + ty::ReLateBound(..) => false, + + // This can appear in `where Self: ` bounds (#64855): + // + // struct Bar(::Type) where Self: ; + // struct Baz<'a>(&'a Self) where Self: ; + ty::ReEmpty(_) => false, + + // These regions don't appear in types from type declarations: + ty::ReErased | ty::ReVar(..) | ty::RePlaceholder(..) | ty::ReFree(..) 
=> { + bug!("unexpected region in outlives inference: {:?}", region); + } + } +} diff --git a/compiler/rustc_typeck/src/structured_errors.rs b/compiler/rustc_typeck/src/structured_errors.rs new file mode 100644 index 000000000..0b46fce17 --- /dev/null +++ b/compiler/rustc_typeck/src/structured_errors.rs @@ -0,0 +1,42 @@ +mod missing_cast_for_variadic_arg; +mod sized_unsized_cast; +mod wrong_number_of_generic_args; + +pub use self::{ + missing_cast_for_variadic_arg::*, sized_unsized_cast::*, wrong_number_of_generic_args::*, +}; + +use rustc_errors::{DiagnosticBuilder, DiagnosticId, ErrorGuaranteed}; +use rustc_session::Session; + +pub trait StructuredDiagnostic<'tcx> { + fn session(&self) -> &Session; + + fn code(&self) -> DiagnosticId; + + fn diagnostic(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + let err = self.diagnostic_common(); + + if self.session().teach(&self.code()) { + self.diagnostic_extended(err) + } else { + self.diagnostic_regular(err) + } + } + + fn diagnostic_common(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>; + + fn diagnostic_regular( + &self, + err: DiagnosticBuilder<'tcx, ErrorGuaranteed>, + ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + err + } + + fn diagnostic_extended( + &self, + err: DiagnosticBuilder<'tcx, ErrorGuaranteed>, + ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + err + } +} diff --git a/compiler/rustc_typeck/src/structured_errors/missing_cast_for_variadic_arg.rs b/compiler/rustc_typeck/src/structured_errors/missing_cast_for_variadic_arg.rs new file mode 100644 index 000000000..324df313e --- /dev/null +++ b/compiler/rustc_typeck/src/structured_errors/missing_cast_for_variadic_arg.rs @@ -0,0 +1,61 @@ +use crate::structured_errors::StructuredDiagnostic; +use rustc_errors::{Applicability, DiagnosticBuilder, DiagnosticId, ErrorGuaranteed}; +use rustc_middle::ty::{Ty, TypeVisitable}; +use rustc_session::Session; +use rustc_span::Span; + +pub struct MissingCastForVariadicArg<'tcx, 's> { + pub sess: &'tcx Session, + pub span: Span, + pub ty: Ty<'tcx>, + pub cast_ty: &'s str, +} + +impl<'tcx> StructuredDiagnostic<'tcx> for MissingCastForVariadicArg<'tcx, '_> { + fn session(&self) -> &Session { + self.sess + } + + fn code(&self) -> DiagnosticId { + rustc_errors::error_code!(E0617) + } + + fn diagnostic_common(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + let mut err = self.sess.struct_span_err_with_code( + self.span, + &format!("can't pass `{}` to variadic function", self.ty), + self.code(), + ); + + if self.ty.references_error() { + err.downgrade_to_delayed_bug(); + } + + if let Ok(snippet) = self.sess.source_map().span_to_snippet(self.span) { + err.span_suggestion( + self.span, + &format!("cast the value to `{}`", self.cast_ty), + format!("{} as {}", snippet, self.cast_ty), + Applicability::MachineApplicable, + ); + } else { + err.help(&format!("cast the value to `{}`", self.cast_ty)); + } + + err + } + + fn diagnostic_extended( + &self, + mut err: DiagnosticBuilder<'tcx, ErrorGuaranteed>, + ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + err.note(&format!( + "certain types, like `{}`, must be casted before passing them to a \ + variadic function, because of arcane ABI rules dictated by the C \ + standard", + self.ty + )); + + err + } +} diff --git a/compiler/rustc_typeck/src/structured_errors/sized_unsized_cast.rs b/compiler/rustc_typeck/src/structured_errors/sized_unsized_cast.rs new file mode 100644 index 000000000..bb6088054 --- /dev/null +++ b/compiler/rustc_typeck/src/structured_errors/sized_unsized_cast.rs @@ -0,0 +1,62 @@ 
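For context, a sketch of the situation `MissingCastForVariadicArg` (E0617) reports, assuming a target where the platform C library provides `printf` (the usual case when std links libc):

```rust
use std::os::raw::{c_char, c_double, c_int};

extern "C" {
    fn printf(fmt: *const c_char, ...) -> c_int;
}

fn main() {
    let x: f32 = 1.5;
    unsafe {
        // Passing `x` directly is rejected with E0617 ("can't pass `f32` to
        // variadic function"); the diagnostic suggests the cast used below.
        // printf(b"%f\n\0".as_ptr() as *const c_char, x);
        printf(b"%f\n\0".as_ptr() as *const c_char, x as c_double);
    }
}
```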
+use crate::structured_errors::StructuredDiagnostic; +use rustc_errors::{DiagnosticBuilder, DiagnosticId, ErrorGuaranteed}; +use rustc_middle::ty::{Ty, TypeVisitable}; +use rustc_session::Session; +use rustc_span::Span; + +pub struct SizedUnsizedCast<'tcx> { + pub sess: &'tcx Session, + pub span: Span, + pub expr_ty: Ty<'tcx>, + pub cast_ty: String, +} + +impl<'tcx> StructuredDiagnostic<'tcx> for SizedUnsizedCast<'tcx> { + fn session(&self) -> &Session { + self.sess + } + + fn code(&self) -> DiagnosticId { + rustc_errors::error_code!(E0607) + } + + fn diagnostic_common(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + let mut err = self.sess.struct_span_err_with_code( + self.span, + &format!( + "cannot cast thin pointer `{}` to fat pointer `{}`", + self.expr_ty, self.cast_ty + ), + self.code(), + ); + + if self.expr_ty.references_error() { + err.downgrade_to_delayed_bug(); + } + + err + } + + fn diagnostic_extended( + &self, + mut err: DiagnosticBuilder<'tcx, ErrorGuaranteed>, + ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + err.help( + "Thin pointers are \"simple\" pointers: they are purely a reference to a +memory address. + +Fat pointers are pointers referencing \"Dynamically Sized Types\" (also +called DST). DST don't have a statically known size, therefore they can +only exist behind some kind of pointers that contain additional +information. Slices and trait objects are DSTs. In the case of slices, +the additional information the fat pointer holds is their size. + +To fix this error, don't try to cast directly between thin and fat +pointers. + +For more information about casts, take a look at The Book: +https://doc.rust-lang.org/reference/expressions/operator-expr.html#type-cast-expressions", + ); + err + } +} diff --git a/compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs b/compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs new file mode 100644 index 000000000..99729391e --- /dev/null +++ b/compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs @@ -0,0 +1,890 @@ +use crate::structured_errors::StructuredDiagnostic; +use rustc_errors::{ + pluralize, Applicability, Diagnostic, DiagnosticBuilder, DiagnosticId, ErrorGuaranteed, + MultiSpan, +}; +use rustc_hir as hir; +use rustc_middle::hir::map::fn_sig; +use rustc_middle::ty::{self as ty, AssocItems, AssocKind, TyCtxt}; +use rustc_session::Session; +use rustc_span::def_id::DefId; +use std::iter; + +use GenericArgsInfo::*; + +/// Handles the `wrong number of type / lifetime / ... arguments` family of error messages. +pub struct WrongNumberOfGenericArgs<'a, 'tcx> { + pub(crate) tcx: TyCtxt<'tcx>, + + pub(crate) angle_brackets: AngleBrackets, + + pub(crate) gen_args_info: GenericArgsInfo, + + /// Offending path segment + pub(crate) path_segment: &'a hir::PathSegment<'a>, + + /// Generic parameters as expected by type or trait + pub(crate) gen_params: &'a ty::Generics, + + /// Index offset into parameters. Depends on whether `Self` is included and on + /// number of lifetime parameters in case we're processing missing or redundant + /// type or constant arguments. 
+ pub(crate) params_offset: usize, + + /// Generic arguments as provided by user + pub(crate) gen_args: &'a hir::GenericArgs<'a>, + + /// DefId of the generic type + pub(crate) def_id: DefId, +} + +// Provides information about the kind of arguments that were provided for +// the PathSegment, for which missing generic arguments were detected +#[derive(Debug)] +pub(crate) enum AngleBrackets { + // No angle brackets were provided, but generic arguments exist in elided form + Implied, + + // No angle brackets were provided + Missing, + + // Angle brackets are available, but missing some generic arguments + Available, +} + +// Information about the kind of arguments that are either missing or are unexpected +#[derive(Debug)] +pub enum GenericArgsInfo { + MissingLifetimes { + num_missing_args: usize, + }, + ExcessLifetimes { + num_redundant_args: usize, + }, + MissingTypesOrConsts { + num_missing_args: usize, + + // type or const generic arguments can have default values + num_default_params: usize, + + // lifetime arguments precede type and const parameters, this + // field gives the number of generic lifetime arguments to let + // us infer the position of type and const generic arguments + // in the angle brackets + args_offset: usize, + }, + + ExcessTypesOrConsts { + num_redundant_args: usize, + + // type or const generic arguments can have default values + num_default_params: usize, + + // lifetime arguments precede type and const parameters, this + // field gives the number of generic lifetime arguments to let + // us infer the position of type and const generic arguments + // in the angle brackets + args_offset: usize, + + // if synthetic type arguments (e.g. `impl Trait`) are specified + synth_provided: bool, + }, +} + +impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> { + pub fn new( + tcx: TyCtxt<'tcx>, + gen_args_info: GenericArgsInfo, + path_segment: &'a hir::PathSegment<'_>, + gen_params: &'a ty::Generics, + params_offset: usize, + gen_args: &'a hir::GenericArgs<'a>, + def_id: DefId, + ) -> Self { + let angle_brackets = if gen_args.span_ext().is_none() { + if gen_args.is_empty() { AngleBrackets::Missing } else { AngleBrackets::Implied } + } else { + AngleBrackets::Available + }; + + Self { + tcx, + angle_brackets, + gen_args_info, + path_segment, + gen_params, + params_offset, + gen_args, + def_id, + } + } + + fn missing_lifetimes(&self) -> bool { + match self.gen_args_info { + MissingLifetimes { .. } | ExcessLifetimes { .. } => true, + MissingTypesOrConsts { .. } | ExcessTypesOrConsts { .. 
} => false, + } + } + + fn kind(&self) -> &str { + if self.missing_lifetimes() { "lifetime" } else { "generic" } + } + + fn num_provided_args(&self) -> usize { + if self.missing_lifetimes() { + self.num_provided_lifetime_args() + } else { + self.num_provided_type_or_const_args() + } + } + + fn num_provided_lifetime_args(&self) -> usize { + match self.angle_brackets { + AngleBrackets::Missing => 0, + // Only lifetime arguments can be implied + AngleBrackets::Implied => self.gen_args.args.len(), + AngleBrackets::Available => self.gen_args.num_lifetime_params(), + } + } + + fn num_provided_type_or_const_args(&self) -> usize { + match self.angle_brackets { + AngleBrackets::Missing => 0, + // Only lifetime arguments can be implied + AngleBrackets::Implied => 0, + AngleBrackets::Available => self.gen_args.num_generic_params(), + } + } + + fn num_expected_lifetime_args(&self) -> usize { + let num_provided_args = self.num_provided_lifetime_args(); + match self.gen_args_info { + MissingLifetimes { num_missing_args } => num_provided_args + num_missing_args, + ExcessLifetimes { num_redundant_args } => num_provided_args - num_redundant_args, + _ => 0, + } + } + + fn num_expected_type_or_const_args(&self) -> usize { + let num_provided_args = self.num_provided_type_or_const_args(); + match self.gen_args_info { + MissingTypesOrConsts { num_missing_args, .. } => num_provided_args + num_missing_args, + ExcessTypesOrConsts { num_redundant_args, .. } => { + num_provided_args - num_redundant_args + } + _ => 0, + } + } + + // Gives the number of expected arguments taking into account default arguments + fn num_expected_type_or_const_args_including_defaults(&self) -> usize { + let provided_args = self.num_provided_type_or_const_args(); + match self.gen_args_info { + MissingTypesOrConsts { num_missing_args, num_default_params, .. } => { + provided_args + num_missing_args - num_default_params + } + ExcessTypesOrConsts { num_redundant_args, num_default_params, .. } => { + provided_args - num_redundant_args - num_default_params + } + _ => 0, + } + } + + fn num_missing_lifetime_args(&self) -> usize { + let missing_args = self.num_expected_lifetime_args() - self.num_provided_lifetime_args(); + assert!(missing_args > 0); + missing_args + } + + fn num_missing_type_or_const_args(&self) -> usize { + let missing_args = self.num_expected_type_or_const_args_including_defaults() + - self.num_provided_type_or_const_args(); + assert!(missing_args > 0); + missing_args + } + + fn num_excess_lifetime_args(&self) -> usize { + match self.gen_args_info { + ExcessLifetimes { num_redundant_args } => num_redundant_args, + _ => 0, + } + } + + fn num_excess_type_or_const_args(&self) -> usize { + match self.gen_args_info { + ExcessTypesOrConsts { num_redundant_args, .. } => num_redundant_args, + _ => 0, + } + } + + fn too_many_args_provided(&self) -> bool { + match self.gen_args_info { + MissingLifetimes { .. } | MissingTypesOrConsts { .. } => false, + ExcessLifetimes { num_redundant_args } + | ExcessTypesOrConsts { num_redundant_args, .. } => { + assert!(num_redundant_args > 0); + true + } + } + } + + fn not_enough_args_provided(&self) -> bool { + match self.gen_args_info { + MissingLifetimes { num_missing_args } + | MissingTypesOrConsts { num_missing_args, .. } => { + assert!(num_missing_args > 0); + true + } + ExcessLifetimes { .. } | ExcessTypesOrConsts { .. 
} => false, + } + } + + // Helper method to get the index offset in angle brackets, at which type or const arguments + // start appearing + fn get_lifetime_args_offset(&self) -> usize { + match self.gen_args_info { + MissingLifetimes { .. } | ExcessLifetimes { .. } => 0, + MissingTypesOrConsts { args_offset, .. } | ExcessTypesOrConsts { args_offset, .. } => { + args_offset + } + } + } + + fn get_num_default_params(&self) -> usize { + match self.gen_args_info { + MissingTypesOrConsts { num_default_params, .. } + | ExcessTypesOrConsts { num_default_params, .. } => num_default_params, + _ => 0, + } + } + + fn is_synth_provided(&self) -> bool { + match self.gen_args_info { + ExcessTypesOrConsts { synth_provided, .. } => synth_provided, + _ => false, + } + } + + // Helper function to choose a quantifier word for the number of expected arguments + // and to give a bound for the number of expected arguments + fn get_quantifier_and_bound(&self) -> (&'static str, usize) { + if self.get_num_default_params() == 0 { + match self.gen_args_info { + MissingLifetimes { .. } | ExcessLifetimes { .. } => { + ("", self.num_expected_lifetime_args()) + } + MissingTypesOrConsts { .. } | ExcessTypesOrConsts { .. } => { + ("", self.num_expected_type_or_const_args()) + } + } + } else { + match self.gen_args_info { + MissingLifetimes { .. } => ("at least ", self.num_expected_lifetime_args()), + MissingTypesOrConsts { .. } => { + ("at least ", self.num_expected_type_or_const_args_including_defaults()) + } + ExcessLifetimes { .. } => ("at most ", self.num_expected_lifetime_args()), + ExcessTypesOrConsts { .. } => ("at most ", self.num_expected_type_or_const_args()), + } + } + } + + // Creates lifetime name suggestions from the lifetime parameter names + fn get_lifetime_args_suggestions_from_param_names( + &self, + path_hir_id: Option, + num_params_to_take: usize, + ) -> String { + debug!(?path_hir_id); + + if let Some(path_hir_id) = path_hir_id { + let mut ret = Vec::new(); + for (id, node) in self.tcx.hir().parent_iter(path_hir_id) { + debug!(?id); + let params = if let Some(generics) = node.generics() { + generics.params + } else if let hir::Node::Ty(ty) = node + && let hir::TyKind::BareFn(bare_fn) = ty.kind + { + bare_fn.generic_params + } else { + &[] + }; + ret.extend(params.iter().filter_map(|p| { + let hir::GenericParamKind::Lifetime { kind: hir::LifetimeParamKind::Explicit } + = p.kind + else { return None }; + let hir::ParamName::Plain(name) = p.name else { return None }; + Some(name.to_string()) + })); + // Suggest `'static` when in const/static item-like. + if let hir::Node::Item(hir::Item { + kind: hir::ItemKind::Static { .. } | hir::ItemKind::Const { .. }, + .. + }) + | hir::Node::TraitItem(hir::TraitItem { + kind: hir::TraitItemKind::Const { .. }, + .. + }) + | hir::Node::ImplItem(hir::ImplItem { + kind: hir::ImplItemKind::Const { .. }, + .. + }) + | hir::Node::ForeignItem(hir::ForeignItem { + kind: hir::ForeignItemKind::Static { .. }, + .. + }) + | hir::Node::AnonConst(..) = node + { + ret.extend( + std::iter::repeat("'static".to_owned()) + .take(num_params_to_take.saturating_sub(ret.len())), + ); + } + if ret.len() >= num_params_to_take { + return ret[..num_params_to_take].join(", "); + } + // We cannot refer to lifetimes defined in an outer function. + if let hir::Node::Item(_) = node { + break; + } + } + } + + // We could not gather enough lifetime parameters in the scope. + // We use the parameter names from the target type's definition instead. 
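`get_quantifier_and_bound` above is what turns defaulted parameters into an "at least"/"at most" bound in the final message. A standalone illustration with a standard-library type (the alias names are mine and the quoted wording is paraphrased, not taken verbatim from this patch):

```rust
use std::collections::hash_map::RandomState;
use std::collections::HashMap;

// `HashMap<K, V, S = RandomState>` has three type parameters, one defaulted, so
// too few arguments are reported against a lower bound ("at least 2"):
type Counts = HashMap<String, u32>;                 // 2 arguments: fine, hasher defaulted
type Explicit = HashMap<String, u32, RandomState>;  // 3 arguments: also fine
// type TooFew = HashMap<String>;  // E0107: takes at least 2 generic arguments, 1 supplied
// type Bare = HashMap;            // the `AngleBrackets::Missing` case: "missing generics"

fn main() {
    let mut counts: Counts = HashMap::new();
    counts.insert("a".to_string(), 1);
    let explicit: Explicit = HashMap::default();
    println!("{} {}", counts.len(), explicit.len());
}
```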
+ self.gen_params + .params + .iter() + .skip(self.params_offset + self.num_provided_lifetime_args()) + .take(num_params_to_take) + .map(|param| param.name.to_string()) + .collect::>() + .join(", ") + } + + // Creates type or constant name suggestions from the provided parameter names + fn get_type_or_const_args_suggestions_from_param_names( + &self, + num_params_to_take: usize, + ) -> String { + let fn_sig = self.tcx.hir().get_if_local(self.def_id).and_then(fn_sig); + let is_used_in_input = |def_id| { + fn_sig.map_or(false, |fn_sig| { + fn_sig.decl.inputs.iter().any(|ty| match ty.kind { + hir::TyKind::Path(hir::QPath::Resolved( + None, + hir::Path { res: hir::def::Res::Def(_, id), .. }, + )) => *id == def_id, + _ => false, + }) + }) + }; + self.gen_params + .params + .iter() + .skip(self.params_offset + self.num_provided_type_or_const_args()) + .take(num_params_to_take) + .map(|param| match param.kind { + // This is being inferred from the item's inputs, no need to set it. + ty::GenericParamDefKind::Type { .. } if is_used_in_input(param.def_id) => { + "_".to_string() + } + _ => param.name.to_string(), + }) + .collect::>() + .join(", ") + } + + fn get_unbound_associated_types(&self) -> Vec { + if self.tcx.is_trait(self.def_id) { + let items: &AssocItems<'_> = self.tcx.associated_items(self.def_id); + items + .in_definition_order() + .filter(|item| item.kind == AssocKind::Type) + .filter(|item| { + !self.gen_args.bindings.iter().any(|binding| binding.ident.name == item.name) + }) + .map(|item| item.name.to_ident_string()) + .collect() + } else { + Vec::default() + } + } + + fn create_error_message(&self) -> String { + let def_path = self.tcx.def_path_str(self.def_id); + let def_kind = self.tcx.def_kind(self.def_id).descr(self.def_id); + let (quantifier, bound) = self.get_quantifier_and_bound(); + let kind = self.kind(); + let provided_lt_args = self.num_provided_lifetime_args(); + let provided_type_or_const_args = self.num_provided_type_or_const_args(); + + let (provided_args_str, verb) = match self.gen_args_info { + MissingLifetimes { .. } | ExcessLifetimes { .. } => ( + format!("{} lifetime argument{}", provided_lt_args, pluralize!(provided_lt_args)), + pluralize!("was", provided_lt_args), + ), + MissingTypesOrConsts { .. } | ExcessTypesOrConsts { .. } => ( + format!( + "{} generic argument{}", + provided_type_or_const_args, + pluralize!(provided_type_or_const_args) + ), + pluralize!("was", provided_type_or_const_args), + ), + }; + + if self.gen_args.span_ext().is_some() { + format!( + "this {} takes {}{} {} argument{} but {} {} supplied", + def_kind, + quantifier, + bound, + kind, + pluralize!(bound), + provided_args_str.as_str(), + verb + ) + } else { + format!("missing generics for {} `{}`", def_kind, def_path) + } + } + + fn start_diagnostics(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + let span = self.path_segment.ident.span; + let msg = self.create_error_message(); + + self.tcx.sess.struct_span_err_with_code(span, &msg, self.code()) + } + + /// Builds the `expected 1 type argument / supplied 2 type arguments` message. 
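For reference, a stripped-down sketch of the message shape assembled by `create_error_message` above, with a hand-rolled pluralizer standing in for the `pluralize!` macro; the function and parameter names here are mine, not the compiler's:

```rust
fn s(n: usize) -> &'static str {
    if n == 1 { "" } else { "s" }
}

fn wrong_count_message(
    def_kind: &str,   // e.g. "struct", "enum", "function"
    quantifier: &str, // "", "at least ", or "at most "
    bound: usize,     // expected count after accounting for defaults
    kind: &str,       // "lifetime" or "generic"
    provided: usize,
) -> String {
    format!(
        "this {def_kind} takes {quantifier}{bound} {kind} argument{} but {provided} {} supplied",
        s(bound),
        if provided == 1 { "was" } else { "were" },
    )
}

fn main() {
    assert_eq!(
        wrong_count_message("struct", "at least ", 2, "generic", 1),
        "this struct takes at least 2 generic arguments but 1 was supplied"
    );
}
```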
+ fn notify(&self, err: &mut Diagnostic) { + let (quantifier, bound) = self.get_quantifier_and_bound(); + let provided_args = self.num_provided_args(); + + err.span_label( + self.path_segment.ident.span, + format!( + "expected {}{} {} argument{}", + quantifier, + bound, + self.kind(), + pluralize!(bound), + ), + ); + + // When too many arguments were provided, we don't highlight each of them, because it + // would overlap with the suggestion to remove them: + // + // ``` + // type Foo = Bar; + // ----- ----- supplied 2 type arguments + // ^^^^^^^ remove this type argument + // ``` + if self.too_many_args_provided() { + return; + } + + let args = self + .gen_args + .args + .iter() + .skip(self.get_lifetime_args_offset()) + .take(provided_args) + .enumerate(); + + for (i, arg) in args { + err.span_label( + arg.span(), + if i + 1 == provided_args { + format!( + "supplied {} {} argument{}", + provided_args, + self.kind(), + pluralize!(provided_args) + ) + } else { + String::new() + }, + ); + } + } + + fn suggest(&self, err: &mut Diagnostic) { + debug!( + "suggest(self.provided {:?}, self.gen_args.span(): {:?})", + self.num_provided_args(), + self.gen_args.span(), + ); + + match self.angle_brackets { + AngleBrackets::Missing | AngleBrackets::Implied => self.suggest_adding_args(err), + AngleBrackets::Available => { + if self.not_enough_args_provided() { + self.suggest_adding_args(err); + } else if self.too_many_args_provided() { + self.suggest_removing_args_or_generics(err); + } else { + unreachable!(); + } + } + } + } + + /// Suggests to add missing argument(s) when current invocation site already contains some + /// generics: + /// + /// ```text + /// type Map = HashMap; + /// ``` + fn suggest_adding_args(&self, err: &mut Diagnostic) { + if self.gen_args.parenthesized { + return; + } + + match self.gen_args_info { + MissingLifetimes { .. } => { + self.suggest_adding_lifetime_args(err); + } + MissingTypesOrConsts { .. 
} => { + self.suggest_adding_type_and_const_args(err); + } + _ => unreachable!(), + } + } + + fn suggest_adding_lifetime_args(&self, err: &mut Diagnostic) { + debug!("suggest_adding_lifetime_args(path_segment: {:?})", self.path_segment); + let num_missing_args = self.num_missing_lifetime_args(); + let num_params_to_take = num_missing_args; + let msg = format!("add missing {} argument{}", self.kind(), pluralize!(num_missing_args)); + + let suggested_args = self.get_lifetime_args_suggestions_from_param_names( + self.path_segment.hir_id, + num_params_to_take, + ); + debug!("suggested_args: {:?}", &suggested_args); + + match self.angle_brackets { + AngleBrackets::Missing => { + let span = self.path_segment.ident.span; + + // insert a suggestion of the form "Y<'a, 'b>" + let ident = self.path_segment.ident.name.to_ident_string(); + let sugg = format!("{}<{}>", ident, suggested_args); + debug!("sugg: {:?}", sugg); + + err.span_suggestion_verbose(span, &msg, sugg, Applicability::HasPlaceholders); + } + + AngleBrackets::Available => { + let (sugg_span, is_first) = if self.num_provided_lifetime_args() == 0 { + (self.gen_args.span().unwrap().shrink_to_lo(), true) + } else { + let last_lt = &self.gen_args.args[self.num_provided_lifetime_args() - 1]; + (last_lt.span().shrink_to_hi(), false) + }; + let has_non_lt_args = self.num_provided_type_or_const_args() != 0; + let has_bindings = !self.gen_args.bindings.is_empty(); + + let sugg_prefix = if is_first { "" } else { ", " }; + let sugg_suffix = + if is_first && (has_non_lt_args || has_bindings) { ", " } else { "" }; + + let sugg = format!("{}{}{}", sugg_prefix, suggested_args, sugg_suffix); + debug!("sugg: {:?}", sugg); + + err.span_suggestion_verbose(sugg_span, &msg, sugg, Applicability::HasPlaceholders); + } + AngleBrackets::Implied => { + // We never encounter missing lifetimes in situations in which lifetimes are elided + unreachable!(); + } + } + } + + fn suggest_adding_type_and_const_args(&self, err: &mut Diagnostic) { + let num_missing_args = self.num_missing_type_or_const_args(); + let msg = format!("add missing {} argument{}", self.kind(), pluralize!(num_missing_args)); + + let suggested_args = + self.get_type_or_const_args_suggestions_from_param_names(num_missing_args); + debug!("suggested_args: {:?}", suggested_args); + + match self.angle_brackets { + AngleBrackets::Missing | AngleBrackets::Implied => { + let span = self.path_segment.ident.span; + + // insert a suggestion of the form "Y" + let ident = self.path_segment.ident.name.to_ident_string(); + let sugg = format!("{}<{}>", ident, suggested_args); + debug!("sugg: {:?}", sugg); + + err.span_suggestion_verbose(span, &msg, sugg, Applicability::HasPlaceholders); + } + AngleBrackets::Available => { + let gen_args_span = self.gen_args.span().unwrap(); + let sugg_offset = + self.get_lifetime_args_offset() + self.num_provided_type_or_const_args(); + + let (sugg_span, is_first) = if sugg_offset == 0 { + (gen_args_span.shrink_to_lo(), true) + } else { + let arg_span = self.gen_args.args[sugg_offset - 1].span(); + // If we came here then inferred lifetime's spans can only point + // to either the opening bracket or to the space right after. + // Both of these spans have an `hi` lower than or equal to the span + // of the generics excluding the brackets. + // This allows us to check if `arg_span` is the artificial span of + // an inferred lifetime, in which case the generic we're suggesting to + // add will be the first visible, even if it isn't the actual first generic. 
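`suggest_adding_lifetime_args` above covers cases like the following, where angle brackets exist but a lifetime argument is missing. The `Borrowed`/`Good` names are illustrative only, and the quoted diagnostic wording is approximate:

```rust
struct Borrowed<'k, 'v> {
    key: &'k str,
    value: &'v str,
}

// Supplying one of the two lifetime arguments leaves the other "missing":
// type Bad<'k> = Borrowed<'k>;   // E0107: this struct takes 2 lifetime arguments
//                                //        but 1 lifetime argument was supplied,
//                                //        plus a suggestion to insert the second one
type Good<'k, 'v> = Borrowed<'k, 'v>;

fn main() {
    let g: Good<'_, '_> = Borrowed { key: "a", value: "b" };
    println!("{} {}", g.key, g.value);
}
```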
+ (arg_span.shrink_to_hi(), arg_span.hi() <= gen_args_span.lo()) + }; + + let sugg_prefix = if is_first { "" } else { ", " }; + let sugg_suffix = + if is_first && !self.gen_args.bindings.is_empty() { ", " } else { "" }; + + let sugg = format!("{}{}{}", sugg_prefix, suggested_args, sugg_suffix); + debug!("sugg: {:?}", sugg); + + err.span_suggestion_verbose(sugg_span, &msg, sugg, Applicability::HasPlaceholders); + } + } + } + + /// Suggests to remove redundant argument(s): + /// + /// ```text + /// type Map = HashMap; + /// ``` + fn suggest_removing_args_or_generics(&self, err: &mut Diagnostic) { + let num_provided_lt_args = self.num_provided_lifetime_args(); + let num_provided_type_const_args = self.num_provided_type_or_const_args(); + let unbound_types = self.get_unbound_associated_types(); + let num_provided_args = num_provided_lt_args + num_provided_type_const_args; + assert!(num_provided_args > 0); + + let num_redundant_lt_args = self.num_excess_lifetime_args(); + let num_redundant_type_or_const_args = self.num_excess_type_or_const_args(); + let num_redundant_args = num_redundant_lt_args + num_redundant_type_or_const_args; + + let redundant_lifetime_args = num_redundant_lt_args > 0; + let redundant_type_or_const_args = num_redundant_type_or_const_args > 0; + + let remove_entire_generics = num_redundant_args >= self.gen_args.args.len(); + let provided_args_matches_unbound_traits = + unbound_types.len() == num_redundant_type_or_const_args; + + let remove_lifetime_args = |err: &mut Diagnostic| { + let mut lt_arg_spans = Vec::new(); + let mut found_redundant = false; + for arg in self.gen_args.args { + if let hir::GenericArg::Lifetime(_) = arg { + lt_arg_spans.push(arg.span()); + if lt_arg_spans.len() > self.num_expected_lifetime_args() { + found_redundant = true; + } + } else if found_redundant { + // Argument which is redundant and separated like this `'c` + // is not included to avoid including `Bar` in span. 
+ // ``` + // type Foo<'a, T> = &'a T; + // let _: Foo<'a, 'b, Bar, 'c>; + // ``` + break; + } + } + + let span_lo_redundant_lt_args = lt_arg_spans[self.num_expected_lifetime_args()]; + let span_hi_redundant_lt_args = lt_arg_spans[lt_arg_spans.len() - 1]; + + let span_redundant_lt_args = span_lo_redundant_lt_args.to(span_hi_redundant_lt_args); + debug!("span_redundant_lt_args: {:?}", span_redundant_lt_args); + + let num_redundant_lt_args = lt_arg_spans.len() - self.num_expected_lifetime_args(); + let msg_lifetimes = format!( + "remove {these} lifetime argument{s}", + these = pluralize!("this", num_redundant_lt_args), + s = pluralize!(num_redundant_lt_args), + ); + + err.span_suggestion( + span_redundant_lt_args, + &msg_lifetimes, + "", + Applicability::MaybeIncorrect, + ); + }; + + let remove_type_or_const_args = |err: &mut Diagnostic| { + let mut gen_arg_spans = Vec::new(); + let mut found_redundant = false; + for arg in self.gen_args.args { + match arg { + hir::GenericArg::Type(_) + | hir::GenericArg::Const(_) + | hir::GenericArg::Infer(_) => { + gen_arg_spans.push(arg.span()); + if gen_arg_spans.len() > self.num_expected_type_or_const_args() { + found_redundant = true; + } + } + _ if found_redundant => break, + _ => {} + } + } + + let span_lo_redundant_type_or_const_args = + gen_arg_spans[self.num_expected_type_or_const_args()]; + let span_hi_redundant_type_or_const_args = gen_arg_spans[gen_arg_spans.len() - 1]; + + let span_redundant_type_or_const_args = + span_lo_redundant_type_or_const_args.to(span_hi_redundant_type_or_const_args); + debug!("span_redundant_type_or_const_args: {:?}", span_redundant_type_or_const_args); + + let num_redundant_gen_args = + gen_arg_spans.len() - self.num_expected_type_or_const_args(); + let msg_types_or_consts = format!( + "remove {these} generic argument{s}", + these = pluralize!("this", num_redundant_gen_args), + s = pluralize!(num_redundant_gen_args), + ); + + err.span_suggestion( + span_redundant_type_or_const_args, + &msg_types_or_consts, + "", + Applicability::MaybeIncorrect, + ); + }; + + // If there is a single unbound associated type and a single excess generic param + // suggest replacing the generic param with the associated type bound + if provided_args_matches_unbound_traits && !unbound_types.is_empty() { + let mut suggestions = vec![]; + let unused_generics = &self.gen_args.args[self.num_expected_type_or_const_args()..]; + for (potential, name) in iter::zip(unused_generics, &unbound_types) { + if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(potential.span()) { + suggestions.push((potential.span(), format!("{} = {}", name, snippet))); + } + } + + if !suggestions.is_empty() { + err.multipart_suggestion( + &format!( + "replace the generic bound{s} with the associated type{s}", + s = pluralize!(unbound_types.len()) + ), + suggestions, + Applicability::MaybeIncorrect, + ); + } + } else if remove_entire_generics { + let span = self + .path_segment + .args + .unwrap() + .span_ext() + .unwrap() + .with_lo(self.path_segment.ident.span.hi()); + + let msg = format!( + "remove these {}generics", + if self.gen_args.parenthesized { "parenthetical " } else { "" }, + ); + + err.span_suggestion(span, &msg, "", Applicability::MaybeIncorrect); + } else if redundant_lifetime_args && redundant_type_or_const_args { + remove_lifetime_args(err); + remove_type_or_const_args(err); + } else if redundant_lifetime_args { + remove_lifetime_args(err); + } else { + assert!(redundant_type_or_const_args); + remove_type_or_const_args(err); + } + } + + /// 
Builds the `type defined here` message. + fn show_definition(&self, err: &mut Diagnostic) { + let mut spans: MultiSpan = if let Some(def_span) = self.tcx.def_ident_span(self.def_id) { + if self.tcx.sess.source_map().is_span_accessible(def_span) { + def_span.into() + } else { + return; + } + } else { + return; + }; + + let msg = { + let def_kind = self.tcx.def_kind(self.def_id).descr(self.def_id); + let (quantifier, bound) = self.get_quantifier_and_bound(); + + let params = if bound == 0 { + String::new() + } else { + let params = self + .gen_params + .params + .iter() + .skip(self.params_offset) + .take(bound) + .map(|param| { + let span = self.tcx.def_span(param.def_id); + spans.push_span_label(span, ""); + param + }) + .map(|param| format!("`{}`", param.name)) + .collect::>() + .join(", "); + + format!(": {}", params) + }; + + format!( + "{} defined here, with {}{} {} parameter{}{}", + def_kind, + quantifier, + bound, + self.kind(), + pluralize!(bound), + params, + ) + }; + + err.span_note(spans, &msg); + } + + /// Add note if `impl Trait` is explicitly specified. + fn note_synth_provided(&self, err: &mut Diagnostic) { + if !self.is_synth_provided() { + return; + } + + err.note("`impl Trait` cannot be explicitly specified as a generic argument"); + } +} + +impl<'tcx> StructuredDiagnostic<'tcx> for WrongNumberOfGenericArgs<'_, 'tcx> { + fn session(&self) -> &Session { + self.tcx.sess + } + + fn code(&self) -> DiagnosticId { + rustc_errors::error_code!(E0107) + } + + fn diagnostic_common(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { + let mut err = self.start_diagnostics(); + + self.notify(&mut err); + self.suggest(&mut err); + self.show_definition(&mut err); + self.note_synth_provided(&mut err); + + err + } +} diff --git a/compiler/rustc_typeck/src/variance/constraints.rs b/compiler/rustc_typeck/src/variance/constraints.rs new file mode 100644 index 000000000..d79450e1a --- /dev/null +++ b/compiler/rustc_typeck/src/variance/constraints.rs @@ -0,0 +1,449 @@ +//! Constraint construction and representation +//! +//! The second pass over the AST determines the set of constraints. +//! We walk the set of items and, for each member, generate new constraints. + +use hir::def_id::{DefId, LocalDefId}; +use rustc_hir as hir; +use rustc_hir::def::DefKind; +use rustc_middle::ty::subst::{GenericArgKind, SubstsRef}; +use rustc_middle::ty::{self, Ty, TyCtxt}; + +use super::terms::VarianceTerm::*; +use super::terms::*; + +pub struct ConstraintContext<'a, 'tcx> { + pub terms_cx: TermsContext<'a, 'tcx>, + + // These are pointers to common `ConstantTerm` instances + covariant: VarianceTermPtr<'a>, + contravariant: VarianceTermPtr<'a>, + invariant: VarianceTermPtr<'a>, + bivariant: VarianceTermPtr<'a>, + + pub constraints: Vec>, +} + +/// Declares that the variable `decl_id` appears in a location with +/// variance `variance`. +#[derive(Copy, Clone)] +pub struct Constraint<'a> { + pub inferred: InferredIndex, + pub variance: &'a VarianceTerm<'a>, +} + +/// To build constraints, we visit one item (type, trait) at a time +/// and look at its contents. So e.g., if we have +/// ```ignore (illustrative) +/// struct Foo { +/// b: Bar +/// } +/// ``` +/// then while we are visiting `Bar`, the `CurrentItem` would have +/// the `DefId` and the start of `Foo`'s inferreds. 
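One branch of `suggest_removing_args_or_generics` shown earlier replaces an excess generic argument with an associated-type binding when the counts line up. A common user-level instance (the `total` function is made up, and the exact suggestion text may differ from what is sketched in the comments):

```rust
// Writing the associated type as if it were a generic parameter is the classic trigger:
// fn total(it: impl Iterator<u32>) -> u32 { it.sum() }
//     error[E0107]: this trait takes 0 generic arguments but 1 generic argument was supplied
//     help: replace the generic bound with the associated type: `Item = u32`
fn total(it: impl Iterator<Item = u32>) -> u32 {
    it.sum()
}

fn main() {
    println!("{}", total(vec![1u32, 2, 3].into_iter()));
}
```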
+pub struct CurrentItem { + inferred_start: InferredIndex, +} + +pub fn add_constraints_from_crate<'a, 'tcx>( + terms_cx: TermsContext<'a, 'tcx>, +) -> ConstraintContext<'a, 'tcx> { + let tcx = terms_cx.tcx; + let covariant = terms_cx.arena.alloc(ConstantTerm(ty::Covariant)); + let contravariant = terms_cx.arena.alloc(ConstantTerm(ty::Contravariant)); + let invariant = terms_cx.arena.alloc(ConstantTerm(ty::Invariant)); + let bivariant = terms_cx.arena.alloc(ConstantTerm(ty::Bivariant)); + let mut constraint_cx = ConstraintContext { + terms_cx, + covariant, + contravariant, + invariant, + bivariant, + constraints: Vec::new(), + }; + + let crate_items = tcx.hir_crate_items(()); + + for def_id in crate_items.definitions() { + let def_kind = tcx.def_kind(def_id); + match def_kind { + DefKind::Struct | DefKind::Union | DefKind::Enum => { + constraint_cx.build_constraints_for_item(def_id); + + let adt = tcx.adt_def(def_id); + for variant in adt.variants() { + if let Some(ctor) = variant.ctor_def_id { + constraint_cx.build_constraints_for_item(ctor.expect_local()); + } + } + } + DefKind::Fn | DefKind::AssocFn => constraint_cx.build_constraints_for_item(def_id), + _ => {} + } + } + + constraint_cx +} + +impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { + fn tcx(&self) -> TyCtxt<'tcx> { + self.terms_cx.tcx + } + + fn build_constraints_for_item(&mut self, def_id: LocalDefId) { + let tcx = self.tcx(); + debug!("build_constraints_for_item({})", tcx.def_path_str(def_id.to_def_id())); + + // Skip items with no generics - there's nothing to infer in them. + if tcx.generics_of(def_id).count() == 0 { + return; + } + + let inferred_start = self.terms_cx.inferred_starts[&def_id]; + let current_item = &CurrentItem { inferred_start }; + match tcx.type_of(def_id).kind() { + ty::Adt(def, _) => { + // Not entirely obvious: constraints on structs/enums do not + // affect the variance of their type parameters. See discussion + // in comment at top of module. + // + // self.add_constraints_from_generics(generics); + + for field in def.all_fields() { + self.add_constraints_from_ty( + current_item, + tcx.type_of(field.did), + self.covariant, + ); + } + } + + ty::FnDef(..) 
=> { + self.add_constraints_from_sig(current_item, tcx.fn_sig(def_id), self.covariant); + } + + ty::Error(_) => {} + _ => { + span_bug!( + tcx.def_span(def_id), + "`build_constraints_for_item` unsupported for this item" + ); + } + } + } + + fn add_constraint(&mut self, current: &CurrentItem, index: u32, variance: VarianceTermPtr<'a>) { + debug!("add_constraint(index={}, variance={:?})", index, variance); + self.constraints.push(Constraint { + inferred: InferredIndex(current.inferred_start.0 + index as usize), + variance, + }); + } + + fn contravariant(&mut self, variance: VarianceTermPtr<'a>) -> VarianceTermPtr<'a> { + self.xform(variance, self.contravariant) + } + + fn invariant(&mut self, variance: VarianceTermPtr<'a>) -> VarianceTermPtr<'a> { + self.xform(variance, self.invariant) + } + + fn constant_term(&self, v: ty::Variance) -> VarianceTermPtr<'a> { + match v { + ty::Covariant => self.covariant, + ty::Invariant => self.invariant, + ty::Contravariant => self.contravariant, + ty::Bivariant => self.bivariant, + } + } + + fn xform(&mut self, v1: VarianceTermPtr<'a>, v2: VarianceTermPtr<'a>) -> VarianceTermPtr<'a> { + match (*v1, *v2) { + (_, ConstantTerm(ty::Covariant)) => { + // Applying a "covariant" transform is always a no-op + v1 + } + + (ConstantTerm(c1), ConstantTerm(c2)) => self.constant_term(c1.xform(c2)), + + _ => &*self.terms_cx.arena.alloc(TransformTerm(v1, v2)), + } + } + + #[instrument(level = "debug", skip(self, current))] + fn add_constraints_from_invariant_substs( + &mut self, + current: &CurrentItem, + substs: SubstsRef<'tcx>, + variance: VarianceTermPtr<'a>, + ) { + // Trait are always invariant so we can take advantage of that. + let variance_i = self.invariant(variance); + + for k in substs { + match k.unpack() { + GenericArgKind::Lifetime(lt) => { + self.add_constraints_from_region(current, lt, variance_i) + } + GenericArgKind::Type(ty) => self.add_constraints_from_ty(current, ty, variance_i), + GenericArgKind::Const(val) => { + self.add_constraints_from_const(current, val, variance_i) + } + } + } + } + + /// Adds constraints appropriate for an instance of `ty` appearing + /// in a context with the generics defined in `generics` and + /// ambient variance `variance` + fn add_constraints_from_ty( + &mut self, + current: &CurrentItem, + ty: Ty<'tcx>, + variance: VarianceTermPtr<'a>, + ) { + debug!("add_constraints_from_ty(ty={:?}, variance={:?})", ty, variance); + + match *ty.kind() { + ty::Bool + | ty::Char + | ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Str + | ty::Never + | ty::Foreign(..) => { + // leaf type -- noop + } + + ty::FnDef(..) | ty::Generator(..) | ty::Closure(..) 
=> { + bug!("Unexpected closure type in variance computation"); + } + + ty::Ref(region, ty, mutbl) => { + let contra = self.contravariant(variance); + self.add_constraints_from_region(current, region, contra); + self.add_constraints_from_mt(current, &ty::TypeAndMut { ty, mutbl }, variance); + } + + ty::Array(typ, len) => { + self.add_constraints_from_const(current, len, variance); + self.add_constraints_from_ty(current, typ, variance); + } + + ty::Slice(typ) => { + self.add_constraints_from_ty(current, typ, variance); + } + + ty::RawPtr(ref mt) => { + self.add_constraints_from_mt(current, mt, variance); + } + + ty::Tuple(subtys) => { + for subty in subtys { + self.add_constraints_from_ty(current, subty, variance); + } + } + + ty::Adt(def, substs) => { + self.add_constraints_from_substs(current, def.did(), substs, variance); + } + + ty::Projection(ref data) => { + self.add_constraints_from_invariant_substs(current, data.substs, variance); + } + + ty::Opaque(_, substs) => { + self.add_constraints_from_invariant_substs(current, substs, variance); + } + + ty::Dynamic(data, r) => { + // The type `Foo` is contravariant w/r/t `'a`: + let contra = self.contravariant(variance); + self.add_constraints_from_region(current, r, contra); + + if let Some(poly_trait_ref) = data.principal() { + self.add_constraints_from_invariant_substs( + current, + poly_trait_ref.skip_binder().substs, + variance, + ); + } + + for projection in data.projection_bounds() { + match projection.skip_binder().term { + ty::Term::Ty(ty) => { + self.add_constraints_from_ty(current, ty, self.invariant); + } + ty::Term::Const(c) => { + self.add_constraints_from_const(current, c, self.invariant) + } + } + } + } + + ty::Param(ref data) => { + self.add_constraint(current, data.index, variance); + } + + ty::FnPtr(sig) => { + self.add_constraints_from_sig(current, sig, variance); + } + + ty::Error(_) => { + // we encounter this when walking the trait references for object + // types, where we use Error as the Self type + } + + ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Bound(..) | ty::Infer(..) => { + bug!( + "unexpected type encountered in \ + variance inference: {}", + ty + ); + } + } + } + + /// Adds constraints appropriate for a nominal type (enum, struct, + /// object, etc) appearing in a context with ambient variance `variance` + fn add_constraints_from_substs( + &mut self, + current: &CurrentItem, + def_id: DefId, + substs: SubstsRef<'tcx>, + variance: VarianceTermPtr<'a>, + ) { + debug!( + "add_constraints_from_substs(def_id={:?}, substs={:?}, variance={:?})", + def_id, substs, variance + ); + + // We don't record `inferred_starts` entries for empty generics. + if substs.is_empty() { + return; + } + + let (local, remote) = if let Some(def_id) = def_id.as_local() { + (Some(self.terms_cx.inferred_starts[&def_id]), None) + } else { + (None, Some(self.tcx().variances_of(def_id))) + }; + + for (i, k) in substs.iter().enumerate() { + let variance_decl = if let Some(InferredIndex(start)) = local { + // Parameter on an item defined within current crate: + // variance not yet inferred, so return a symbolic + // variance. + self.terms_cx.inferred_terms[start + i] + } else { + // Parameter on an item defined within another crate: + // variance already inferred, just look it up. 
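The `ty::Ref` arm above, together with the `add_constraints_from_mt` helper that follows, is what makes shared references covariant in their referent and mutable references invariant. A standalone demonstration (`shorten`/`shorten_mut` are illustrative names, not compiler code):

```rust
// Covariance through `&T`: a `&'static str` referent may weaken to `&'a str`.
fn shorten<'a>(x: &'a &'static str) -> &'a &'a str {
    x
}

// Invariance through `&mut T`: the same weakening is rejected, because the callee
// could write a shorter-lived value back through the reference.
// fn shorten_mut<'a>(x: &'a mut &'static str) -> &'a mut &'a str { x }   // does not compile

fn main() {
    let s: &'static str = "hi";
    let r = &s;
    println!("{}", shorten(r));
}
```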
+ self.constant_term(remote.as_ref().unwrap()[i]) + }; + let variance_i = self.xform(variance, variance_decl); + debug!( + "add_constraints_from_substs: variance_decl={:?} variance_i={:?}", + variance_decl, variance_i + ); + match k.unpack() { + GenericArgKind::Lifetime(lt) => { + self.add_constraints_from_region(current, lt, variance_i) + } + GenericArgKind::Type(ty) => self.add_constraints_from_ty(current, ty, variance_i), + GenericArgKind::Const(val) => { + self.add_constraints_from_const(current, val, variance) + } + } + } + } + + /// Adds constraints appropriate for a const expression `val` + /// in a context with ambient variance `variance` + fn add_constraints_from_const( + &mut self, + current: &CurrentItem, + c: ty::Const<'tcx>, + variance: VarianceTermPtr<'a>, + ) { + debug!("add_constraints_from_const(c={:?}, variance={:?})", c, variance); + + match &c.kind() { + ty::ConstKind::Unevaluated(uv) => { + self.add_constraints_from_invariant_substs(current, uv.substs, variance); + } + _ => {} + } + } + + /// Adds constraints appropriate for a function with signature + /// `sig` appearing in a context with ambient variance `variance` + fn add_constraints_from_sig( + &mut self, + current: &CurrentItem, + sig: ty::PolyFnSig<'tcx>, + variance: VarianceTermPtr<'a>, + ) { + let contra = self.contravariant(variance); + for &input in sig.skip_binder().inputs() { + self.add_constraints_from_ty(current, input, contra); + } + self.add_constraints_from_ty(current, sig.skip_binder().output(), variance); + } + + /// Adds constraints appropriate for a region appearing in a + /// context with ambient variance `variance` + fn add_constraints_from_region( + &mut self, + current: &CurrentItem, + region: ty::Region<'tcx>, + variance: VarianceTermPtr<'a>, + ) { + match *region { + ty::ReEarlyBound(ref data) => { + self.add_constraint(current, data.index, variance); + } + + ty::ReStatic => {} + + ty::ReLateBound(..) => { + // Late-bound regions do not get substituted the same + // way early-bound regions do, so we skip them here. + } + + ty::ReFree(..) + | ty::ReVar(..) + | ty::RePlaceholder(..) + | ty::ReEmpty(_) + | ty::ReErased => { + // We don't expect to see anything but 'static or bound + // regions when visiting member types or method types. + bug!( + "unexpected region encountered in variance \ + inference: {:?}", + region + ); + } + } + } + + /// Adds constraints appropriate for a mutability-type pair + /// appearing in a context with ambient variance `variance` + fn add_constraints_from_mt( + &mut self, + current: &CurrentItem, + mt: &ty::TypeAndMut<'tcx>, + variance: VarianceTermPtr<'a>, + ) { + match mt.mutbl { + hir::Mutability::Mut => { + let invar = self.invariant(variance); + self.add_constraints_from_ty(current, mt.ty, invar); + } + + hir::Mutability::Not => { + self.add_constraints_from_ty(current, mt.ty, variance); + } + } + } +} diff --git a/compiler/rustc_typeck/src/variance/mod.rs b/compiler/rustc_typeck/src/variance/mod.rs new file mode 100644 index 000000000..82103c5a0 --- /dev/null +++ b/compiler/rustc_typeck/src/variance/mod.rs @@ -0,0 +1,63 @@ +//! Module for inferring the variance of type and lifetime parameters. See the [rustc dev guide] +//! chapter for more info. +//! +//! 
[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/variance.html + +use rustc_arena::DroplessArena; +use rustc_hir::def::DefKind; +use rustc_hir::def_id::DefId; +use rustc_middle::ty::query::Providers; +use rustc_middle::ty::{self, CrateVariancesMap, TyCtxt}; + +/// Defines the `TermsContext` basically houses an arena where we can +/// allocate terms. +mod terms; + +/// Code to gather up constraints. +mod constraints; + +/// Code to solve constraints and write out the results. +mod solve; + +/// Code to write unit tests of variance. +pub mod test; + +/// Code for transforming variances. +mod xform; + +pub fn provide(providers: &mut Providers) { + *providers = Providers { variances_of, crate_variances, ..*providers }; +} + +fn crate_variances(tcx: TyCtxt<'_>, (): ()) -> CrateVariancesMap<'_> { + let arena = DroplessArena::default(); + let terms_cx = terms::determine_parameters_to_be_inferred(tcx, &arena); + let constraints_cx = constraints::add_constraints_from_crate(terms_cx); + solve::solve_constraints(constraints_cx) +} + +fn variances_of(tcx: TyCtxt<'_>, item_def_id: DefId) -> &[ty::Variance] { + // Skip items with no generics - there's nothing to infer in them. + if tcx.generics_of(item_def_id).count() == 0 { + return &[]; + } + + match tcx.def_kind(item_def_id) { + DefKind::Fn + | DefKind::AssocFn + | DefKind::Enum + | DefKind::Struct + | DefKind::Union + | DefKind::Variant + | DefKind::Ctor(..) => {} + _ => { + // Variance not relevant. + span_bug!(tcx.def_span(item_def_id), "asked to compute variance for wrong kind of item") + } + } + + // Everything else must be inferred. + + let crate_map = tcx.crate_variances(()); + crate_map.variances.get(&item_def_id).copied().unwrap_or(&[]) +} diff --git a/compiler/rustc_typeck/src/variance/solve.rs b/compiler/rustc_typeck/src/variance/solve.rs new file mode 100644 index 000000000..97aca621a --- /dev/null +++ b/compiler/rustc_typeck/src/variance/solve.rs @@ -0,0 +1,135 @@ +//! Constraint solving +//! +//! The final phase iterates over the constraints, refining the variance +//! for each inferred until a fixed point is reached. This will be the +//! optimal solution to the constraints. The final variance for each +//! inferred is then written into the `variance_map` in the tcx. + +use rustc_data_structures::fx::FxHashMap; +use rustc_hir::def_id::DefId; +use rustc_middle::ty; + +use super::constraints::*; +use super::terms::VarianceTerm::*; +use super::terms::*; +use super::xform::*; + +struct SolveContext<'a, 'tcx> { + terms_cx: TermsContext<'a, 'tcx>, + constraints: Vec>, + + // Maps from an InferredIndex to the inferred value for that variable. + solutions: Vec, +} + +pub fn solve_constraints<'tcx>( + constraints_cx: ConstraintContext<'_, 'tcx>, +) -> ty::CrateVariancesMap<'tcx> { + let ConstraintContext { terms_cx, constraints, .. } = constraints_cx; + + let mut solutions = vec![ty::Bivariant; terms_cx.inferred_terms.len()]; + for &(id, ref variances) in &terms_cx.lang_items { + let InferredIndex(start) = terms_cx.inferred_starts[&id]; + for (i, &variance) in variances.iter().enumerate() { + solutions[start + i] = variance; + } + } + + let mut solutions_cx = SolveContext { terms_cx, constraints, solutions }; + solutions_cx.solve(); + let variances = solutions_cx.create_map(); + + ty::CrateVariancesMap { variances } +} + +impl<'a, 'tcx> SolveContext<'a, 'tcx> { + fn solve(&mut self) { + // Propagate constraints until a fixed point is reached. 
Note + // that the maximum number of iterations is 2C where C is the + // number of constraints (each variable can change values at most + // twice). Since number of constraints is linear in size of the + // input, so is the inference process. + let mut changed = true; + while changed { + changed = false; + + for constraint in &self.constraints { + let Constraint { inferred, variance: term } = *constraint; + let InferredIndex(inferred) = inferred; + let variance = self.evaluate(term); + let old_value = self.solutions[inferred]; + let new_value = glb(variance, old_value); + if old_value != new_value { + debug!( + "updating inferred {} \ + from {:?} to {:?} due to {:?}", + inferred, old_value, new_value, term + ); + + self.solutions[inferred] = new_value; + changed = true; + } + } + } + } + + fn enforce_const_invariance(&self, generics: &ty::Generics, variances: &mut [ty::Variance]) { + let tcx = self.terms_cx.tcx; + + // Make all const parameters invariant. + for param in generics.params.iter() { + if let ty::GenericParamDefKind::Const { .. } = param.kind { + variances[param.index as usize] = ty::Invariant; + } + } + + // Make all the const parameters in the parent invariant (recursively). + if let Some(def_id) = generics.parent { + self.enforce_const_invariance(tcx.generics_of(def_id), variances); + } + } + + fn create_map(&self) -> FxHashMap { + let tcx = self.terms_cx.tcx; + + let solutions = &self.solutions; + self.terms_cx + .inferred_starts + .iter() + .map(|(&def_id, &InferredIndex(start))| { + let generics = tcx.generics_of(def_id); + let count = generics.count(); + + let variances = tcx.arena.alloc_slice(&solutions[start..(start + count)]); + + // Const parameters are always invariant. + self.enforce_const_invariance(generics, variances); + + // Functions are permitted to have unused generic parameters: make those invariant. + if let ty::FnDef(..) = tcx.type_of(def_id).kind() { + for variance in variances.iter_mut() { + if *variance == ty::Bivariant { + *variance = ty::Invariant; + } + } + } + + (def_id.to_def_id(), &*variances) + }) + .collect() + } + + fn evaluate(&self, term: VarianceTermPtr<'a>) -> ty::Variance { + match *term { + ConstantTerm(v) => v, + + TransformTerm(t1, t2) => { + let v1 = self.evaluate(t1); + let v2 = self.evaluate(t2); + v1.xform(v2) + } + + InferredTerm(InferredIndex(index)) => self.solutions[index], + } + } +} diff --git a/compiler/rustc_typeck/src/variance/terms.rs b/compiler/rustc_typeck/src/variance/terms.rs new file mode 100644 index 000000000..1f763011e --- /dev/null +++ b/compiler/rustc_typeck/src/variance/terms.rs @@ -0,0 +1,145 @@ +// Representing terms +// +// Terms are structured as a straightforward tree. Rather than rely on +// GC, we allocate terms out of a bounded arena (the lifetime of this +// arena is the lifetime 'a that is threaded around). +// +// We assign a unique index to each type/region parameter whose variance +// is to be inferred. We refer to such variables as "inferreds". An +// `InferredIndex` is a newtype'd int representing the index of such +// a variable. 
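A self-contained miniature of the fixed-point loop above: every inferred starts at `Bivariant` (the top of the lattice) and is repeatedly met (`glb`) with the variance of each position it occurs in. This sketch models constant terms only; the real solver also evaluates `TransformTerm`/`InferredTerm` nodes, and the enum and function names here are mine:

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum Variance {
    Covariant,
    Contravariant,
    Invariant,
    Bivariant,
}

// Greatest lower bound on the variance lattice (mirrors `glb` in xform.rs below).
fn glb(a: Variance, b: Variance) -> Variance {
    use Variance::*;
    match (a, b) {
        (Invariant, _) | (_, Invariant) => Invariant,
        (Covariant, Contravariant) | (Contravariant, Covariant) => Invariant,
        (Covariant, Covariant) => Covariant,
        (Contravariant, Contravariant) => Contravariant,
        (x, Bivariant) | (Bivariant, x) => x,
    }
}

// Each constraint reads: "inferred #i occurs in a position with this variance".
fn solve(num_inferred: usize, constraints: &[(usize, Variance)]) -> Vec<Variance> {
    let mut solutions = vec![Variance::Bivariant; num_inferred];
    let mut changed = true;
    while changed {
        changed = false;
        for &(i, v) in constraints {
            let new = glb(v, solutions[i]);
            if new != solutions[i] {
                solutions[i] = new;
                changed = true;
            }
        }
    }
    solutions
}

fn main() {
    use Variance::*;
    // Parameter 0 is used both co- and contravariantly (-> invariant), parameter 1
    // only covariantly, and parameter 2 not at all (-> stays bivariant).
    let solved = solve(3, &[(0, Covariant), (0, Contravariant), (1, Covariant)]);
    assert_eq!(solved, vec![Invariant, Covariant, Bivariant]);
    println!("{:?}", solved);
}
```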
+ +use rustc_arena::DroplessArena; +use rustc_hir::def::DefKind; +use rustc_hir::def_id::{LocalDefId, LocalDefIdMap}; +use rustc_middle::ty::{self, TyCtxt}; +use std::fmt; + +use self::VarianceTerm::*; + +pub type VarianceTermPtr<'a> = &'a VarianceTerm<'a>; + +#[derive(Copy, Clone, Debug)] +pub struct InferredIndex(pub usize); + +#[derive(Copy, Clone)] +pub enum VarianceTerm<'a> { + ConstantTerm(ty::Variance), + TransformTerm(VarianceTermPtr<'a>, VarianceTermPtr<'a>), + InferredTerm(InferredIndex), +} + +impl<'a> fmt::Debug for VarianceTerm<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + ConstantTerm(c1) => write!(f, "{:?}", c1), + TransformTerm(v1, v2) => write!(f, "({:?} \u{00D7} {:?})", v1, v2), + InferredTerm(id) => write!(f, "[{}]", { + let InferredIndex(i) = id; + i + }), + } + } +} + +// The first pass over the crate simply builds up the set of inferreds. + +pub struct TermsContext<'a, 'tcx> { + pub tcx: TyCtxt<'tcx>, + pub arena: &'a DroplessArena, + + // For marker types, UnsafeCell, and other lang items where + // variance is hardcoded, records the item-id and the hardcoded + // variance. + pub lang_items: Vec<(LocalDefId, Vec)>, + + // Maps from the node id of an item to the first inferred index + // used for its type & region parameters. + pub inferred_starts: LocalDefIdMap, + + // Maps from an InferredIndex to the term for that variable. + pub inferred_terms: Vec>, +} + +pub fn determine_parameters_to_be_inferred<'a, 'tcx>( + tcx: TyCtxt<'tcx>, + arena: &'a DroplessArena, +) -> TermsContext<'a, 'tcx> { + let mut terms_cx = TermsContext { + tcx, + arena, + inferred_starts: Default::default(), + inferred_terms: vec![], + + lang_items: lang_items(tcx), + }; + + // See the following for a discussion on dep-graph management. + // + // - https://rustc-dev-guide.rust-lang.org/query.html + // - https://rustc-dev-guide.rust-lang.org/variance.html + let crate_items = tcx.hir_crate_items(()); + + for def_id in crate_items.definitions() { + debug!("add_inferreds for item {:?}", def_id); + + let def_kind = tcx.def_kind(def_id); + + match def_kind { + DefKind::Struct | DefKind::Union | DefKind::Enum => { + terms_cx.add_inferreds_for_item(def_id); + + let adt = tcx.adt_def(def_id); + for variant in adt.variants() { + if let Some(ctor) = variant.ctor_def_id { + terms_cx.add_inferreds_for_item(ctor.expect_local()); + } + } + } + DefKind::Fn | DefKind::AssocFn => terms_cx.add_inferreds_for_item(def_id), + _ => {} + } + } + + terms_cx +} + +fn lang_items(tcx: TyCtxt<'_>) -> Vec<(LocalDefId, Vec)> { + let lang_items = tcx.lang_items(); + let all = [ + (lang_items.phantom_data(), vec![ty::Covariant]), + (lang_items.unsafe_cell_type(), vec![ty::Invariant]), + ]; + + all.into_iter() // iterating over (Option, Variance) + .filter_map(|(d, v)| { + let def_id = d?.as_local()?; // LocalDefId + Some((def_id, v)) + }) + .collect() +} + +impl<'a, 'tcx> TermsContext<'a, 'tcx> { + fn add_inferreds_for_item(&mut self, def_id: LocalDefId) { + let tcx = self.tcx; + let count = tcx.generics_of(def_id).count(); + + if count == 0 { + return; + } + + // Record the start of this item's inferreds. + let start = self.inferred_terms.len(); + let newly_added = self.inferred_starts.insert(def_id, InferredIndex(start)).is_none(); + assert!(newly_added); + + // N.B., in the code below for writing the results back into the + // `CrateVariancesMap`, we rely on the fact that all inferreds + // for a particular item are assigned continuous indices. 
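The `lang_items` function above hard-codes the variance of `PhantomData` (covariant) and `UnsafeCell` (invariant) rather than inferring it, and the effect is directly visible in which coercions generic wrappers permit. A standalone sketch with made-up wrapper names:

```rust
use std::cell::UnsafeCell;
use std::marker::PhantomData;

struct Tag<T>(PhantomData<T>);
struct Slot<T>(UnsafeCell<T>);

// `PhantomData<T>` is covariant, so the lifetime inside may shrink:
fn shrink_tag<'a>(t: Tag<&'static str>) -> Tag<&'a str> {
    t
}

// `UnsafeCell<T>` is invariant, so the equivalent function is rejected:
// fn shrink_slot<'a>(s: Slot<&'static str>) -> Slot<&'a str> { s }   // does not compile

fn main() {
    let _t: Tag<&str> = shrink_tag(Tag(PhantomData));
    let _s = Slot(UnsafeCell::new("kept invariant"));
}
```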
+ + let arena = self.arena; + self.inferred_terms.extend( + (start..(start + count)).map(|i| &*arena.alloc(InferredTerm(InferredIndex(i)))), + ); + } +} diff --git a/compiler/rustc_typeck/src/variance/test.rs b/compiler/rustc_typeck/src/variance/test.rs new file mode 100644 index 000000000..2ba87db88 --- /dev/null +++ b/compiler/rustc_typeck/src/variance/test.rs @@ -0,0 +1,14 @@ +use rustc_errors::struct_span_err; +use rustc_middle::ty::TyCtxt; +use rustc_span::symbol::sym; + +pub fn test_variance(tcx: TyCtxt<'_>) { + // For unit testing: check for a special "rustc_variance" + // attribute and report an error with various results if found. + for id in tcx.hir().items() { + if tcx.has_attr(id.def_id.to_def_id(), sym::rustc_variance) { + let variances_of = tcx.variances_of(id.def_id); + struct_span_err!(tcx.sess, tcx.def_span(id.def_id), E0208, "{:?}", variances_of).emit(); + } + } +} diff --git a/compiler/rustc_typeck/src/variance/xform.rs b/compiler/rustc_typeck/src/variance/xform.rs new file mode 100644 index 000000000..027f0859f --- /dev/null +++ b/compiler/rustc_typeck/src/variance/xform.rs @@ -0,0 +1,22 @@ +use rustc_middle::ty; + +pub fn glb(v1: ty::Variance, v2: ty::Variance) -> ty::Variance { + // Greatest lower bound of the variance lattice as + // defined in The Paper: + // + // * + // - + + // o + match (v1, v2) { + (ty::Invariant, _) | (_, ty::Invariant) => ty::Invariant, + + (ty::Covariant, ty::Contravariant) => ty::Invariant, + (ty::Contravariant, ty::Covariant) => ty::Invariant, + + (ty::Covariant, ty::Covariant) => ty::Covariant, + + (ty::Contravariant, ty::Contravariant) => ty::Contravariant, + + (x, ty::Bivariant) | (ty::Bivariant, x) => x, + } +} -- cgit v1.2.3
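For completeness, this is roughly how `test_variance` above gets exercised by the compiler's own UI tests: a nightly-only `#[rustc_variance]` attribute makes the pass report what it inferred via the E0208 stub error. The `[+, o]`-style rendering and the `//~ ERROR` annotation syntax are from memory of those tests, not from this patch, and the snippet intentionally does not compile cleanly, since producing the E0208 report is its whole purpose:

```rust
// Nightly-only: enables internal `rustc_*` attributes used by compiler UI tests.
#![feature(rustc_attrs)]

#[rustc_variance]
struct Positions<T, U> { //~ ERROR [+, o]
    // `T` only appears in a covariant position (a function return type) -> `+`.
    produced: fn() -> T,
    // `U` sits inside `Cell` (an `UnsafeCell` wrapper), which is invariant -> `o`.
    cell: std::cell::Cell<U>,
}

fn main() {}
```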