author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:11:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:13:23 +0000
commit     20431706a863f92cb37dc512fef6e48d192aaf2c (patch)
tree       2867f13f5fd5437ba628c67d7f87309ccadcd286 /compiler/rustc_hir_analysis/src/check
parent     Releasing progress-linux version 1.65.0+dfsg1-2~progress7.99u1. (diff)
Merging upstream version 1.66.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_hir_analysis/src/check')
-rw-r--r--  compiler/rustc_hir_analysis/src/check/check.rs            1443
-rw-r--r--  compiler/rustc_hir_analysis/src/check/compare_method.rs   1825
-rw-r--r--  compiler/rustc_hir_analysis/src/check/dropck.rs            323
-rw-r--r--  compiler/rustc_hir_analysis/src/check/intrinsic.rs         549
-rw-r--r--  compiler/rustc_hir_analysis/src/check/intrinsicck.rs       437
-rw-r--r--  compiler/rustc_hir_analysis/src/check/mod.rs               515
-rw-r--r--  compiler/rustc_hir_analysis/src/check/region.rs            856
-rw-r--r--  compiler/rustc_hir_analysis/src/check/wfcheck.rs          1990
8 files changed, 7938 insertions, 0 deletions
diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs
new file mode 100644
index 000000000..b70ac0205
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/check.rs
@@ -0,0 +1,1443 @@
+use crate::check::intrinsicck::InlineAsmCtxt;
+
+use super::compare_method::check_type_bounds;
+use super::compare_method::{compare_impl_method, compare_ty_impl};
+use super::*;
+use rustc_attr as attr;
+use rustc_errors::{Applicability, ErrorGuaranteed, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::{ItemKind, Node, PathSegment};
+use rustc_infer::infer::outlives::env::OutlivesEnvironment;
+use rustc_infer::infer::{DefiningAnchor, RegionVariableOrigin, TyCtxtInferExt};
+use rustc_infer::traits::Obligation;
+use rustc_lint::builtin::REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::middle::stability::EvalResult;
+use rustc_middle::ty::layout::{LayoutError, MAX_SIMD_LANES};
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::util::{Discr, IntTypeExt};
+use rustc_middle::ty::{
+ self, ParamEnv, ToPredicate, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable,
+};
+use rustc_session::lint::builtin::{UNINHABITED_STATIC, UNSUPPORTED_CALLING_CONVENTIONS};
+use rustc_span::symbol::sym;
+use rustc_span::{self, Span};
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
+use rustc_trait_selection::traits::{self, ObligationCtxt};
+
+use std::ops::ControlFlow;
+
+pub fn check_abi(tcx: TyCtxt<'_>, hir_id: hir::HirId, span: Span, abi: Abi) {
+ match tcx.sess.target.is_abi_supported(abi) {
+ Some(true) => (),
+ Some(false) => {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0570,
+ "`{abi}` is not a supported ABI for the current target",
+ )
+ .emit();
+ }
+ None => {
+ tcx.struct_span_lint_hir(
+ UNSUPPORTED_CALLING_CONVENTIONS,
+ hir_id,
+ span,
+ "use of calling convention not supported on this target",
+ |lint| lint,
+ );
+ }
+ }
+
+ // This ABI is only allowed on function pointers
+ if abi == Abi::CCmseNonSecureCall {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0781,
+ "the `\"C-cmse-nonsecure-call\"` ABI is only allowed on function pointers"
+ )
+ .emit();
+ }
+}
+
+fn check_struct(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+ let def = tcx.adt_def(def_id);
+ let span = tcx.def_span(def_id);
+ def.destructor(tcx); // force the destructor to be evaluated
+
+ if def.repr().simd() {
+ check_simd(tcx, span, def_id);
+ }
+
+ check_transparent(tcx, span, def);
+ check_packed(tcx, span, def);
+}
+
+fn check_union(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+ let def = tcx.adt_def(def_id);
+ let span = tcx.def_span(def_id);
+ def.destructor(tcx); // force the destructor to be evaluated
+ check_transparent(tcx, span, def);
+ check_union_fields(tcx, span, def_id);
+ check_packed(tcx, span, def);
+}
+
+/// Check that the fields of the `union` do not need dropping.
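+///
+/// For example, under the rules implemented below, a field whose type may need
+/// dropping is rejected unless it is wrapped in `ManuallyDrop`:
+///
+/// ```ignore (illustrative)
+/// union Rejected {
+///     s: String, // error[E0740]: unions cannot contain fields that may need dropping
+/// }
+///
+/// union Accepted {
+///     s: std::mem::ManuallyDrop<String>, // ok: never dropped implicitly
+/// }
+/// ```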
+fn check_union_fields(tcx: TyCtxt<'_>, span: Span, item_def_id: LocalDefId) -> bool {
+ let item_type = tcx.type_of(item_def_id);
+ if let ty::Adt(def, substs) = item_type.kind() {
+ assert!(def.is_union());
+
+ fn allowed_union_field<'tcx>(
+ ty: Ty<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ span: Span,
+ ) -> bool {
+ // We don't just accept all !needs_drop fields, due to semver concerns.
+ match ty.kind() {
+ ty::Ref(..) => true, // references never drop (even mutable refs, which are non-Copy and hence fail the later check)
+ ty::Tuple(tys) => {
+ // allow tuples of allowed types
+ tys.iter().all(|ty| allowed_union_field(ty, tcx, param_env, span))
+ }
+ ty::Array(elem, _len) => {
+ // Like `Copy`, we do *not* special-case length 0.
+ allowed_union_field(*elem, tcx, param_env, span)
+ }
+ _ => {
+ // Fallback case: allow `ManuallyDrop` and things that are `Copy`.
+ ty.ty_adt_def().is_some_and(|adt_def| adt_def.is_manually_drop())
+ || ty.is_copy_modulo_regions(tcx, param_env)
+ }
+ }
+ }
+
+ let param_env = tcx.param_env(item_def_id);
+ for field in &def.non_enum_variant().fields {
+ let field_ty = field.ty(tcx, substs);
+
+ if !allowed_union_field(field_ty, tcx, param_env, span) {
+ let (field_span, ty_span) = match tcx.hir().get_if_local(field.did) {
+ // We are currently checking the type this field came from, so it must be local.
+ Some(Node::Field(field)) => (field.span, field.ty.span),
+ _ => unreachable!("mir field has to correspond to hir field"),
+ };
+ struct_span_err!(
+ tcx.sess,
+ field_span,
+ E0740,
+ "unions cannot contain fields that may need dropping"
+ )
+ .note(
+ "a type is guaranteed not to need dropping \
+ when it implements `Copy`, or when it is the special `ManuallyDrop<_>` type",
+ )
+ .multipart_suggestion_verbose(
+ "when the type does not implement `Copy`, \
+ wrap it inside a `ManuallyDrop<_>` and ensure it is manually dropped",
+ vec![
+ (ty_span.shrink_to_lo(), "std::mem::ManuallyDrop<".into()),
+ (ty_span.shrink_to_hi(), ">".into()),
+ ],
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ return false;
+ } else if field_ty.needs_drop(tcx, param_env) {
+ // This should never happen. But we can get here e.g. in case of name resolution errors.
+ tcx.sess.delay_span_bug(span, "we should never accept maybe-dropping union fields");
+ }
+ }
+ } else {
+ span_bug!(span, "unions must be ty::Adt, but got {:?}", item_type.kind());
+ }
+ true
+}
+
+/// Check that a `static` is inhabited.
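+///
+/// The main case this guards against is an `extern` static of an uninhabited
+/// type, which triggers the `UNINHABITED_STATIC` lint emitted below
+/// (illustrative sketch):
+///
+/// ```ignore (illustrative)
+/// enum Void {}
+/// extern "C" {
+///     static NEVER: Void; // lint: static of uninhabited type
+/// }
+/// ```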
+fn check_static_inhabited<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) {
+ // Make sure statics are inhabited.
+ // Other parts of the compiler assume that there are no uninhabited places. In principle it
+ // would be enough to check this for `extern` statics, as statics with an initializer will
+ // have UB during initialization if they are uninhabited, but there also seems to be no good
+ // reason to allow any statics to be uninhabited.
+ let ty = tcx.type_of(def_id);
+ let span = tcx.def_span(def_id);
+ let layout = match tcx.layout_of(ParamEnv::reveal_all().and(ty)) {
+ Ok(l) => l,
+ // Foreign statics that overflow their allowed size should emit an error
+ Err(LayoutError::SizeOverflow(_))
+ if {
+ let node = tcx.hir().get_by_def_id(def_id);
+ matches!(
+ node,
+ hir::Node::ForeignItem(hir::ForeignItem {
+ kind: hir::ForeignItemKind::Static(..),
+ ..
+ })
+ )
+ } =>
+ {
+ tcx.sess
+ .struct_span_err(span, "extern static is too large for the current architecture")
+ .emit();
+ return;
+ }
+ // Generic statics are rejected, but we still reach this case.
+ Err(e) => {
+ tcx.sess.delay_span_bug(span, &e.to_string());
+ return;
+ }
+ };
+ if layout.abi.is_uninhabited() {
+ tcx.struct_span_lint_hir(
+ UNINHABITED_STATIC,
+ tcx.hir().local_def_id_to_hir_id(def_id),
+ span,
+ "static of uninhabited type",
+ |lint| {
+ lint
+ .note("uninhabited statics cannot be initialized, and any access would be an immediate error")
+ },
+ );
+ }
+}
+
+/// Checks that an opaque type does not contain cycles and does not use `Self` or `T::Foo`
+/// projections that would result in "inheriting lifetimes".
+fn check_opaque<'tcx>(tcx: TyCtxt<'tcx>, id: hir::ItemId) {
+ let item = tcx.hir().item(id);
+ let hir::ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) = item.kind else {
+ tcx.sess.delay_span_bug(tcx.hir().span(id.hir_id()), "expected opaque item");
+ return;
+ };
+
+ // HACK(jynelson): trying to infer the type of `impl trait` breaks documenting
+ // `async-std` (and `pub async fn` in general).
+ // Since rustdoc doesn't care about the concrete type behind `impl Trait`, just don't look at it!
+ // See https://github.com/rust-lang/rust/issues/75100
+ if tcx.sess.opts.actually_rustdoc {
+ return;
+ }
+
+ let substs = InternalSubsts::identity_for_item(tcx, item.owner_id.to_def_id());
+ let span = tcx.def_span(item.owner_id.def_id);
+
+ check_opaque_for_inheriting_lifetimes(tcx, item.owner_id.def_id, span);
+ if tcx.type_of(item.owner_id.def_id).references_error() {
+ return;
+ }
+ if check_opaque_for_cycles(tcx, item.owner_id.def_id, substs, span, &origin).is_err() {
+ return;
+ }
+ check_opaque_meets_bounds(tcx, item.owner_id.def_id, substs, span, &origin);
+}
+/// Checks that an opaque type does not use `Self` or `T::Foo` projections that would result
+/// in "inheriting lifetimes".
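+///
+/// Roughly the shape rejected here (illustrative sketch):
+///
+/// ```ignore (illustrative)
+/// struct S<'a>(&'a i32);
+///
+/// impl<'a> S<'a> {
+///     async fn new(i: &'a i32) -> Self {
+///         // error[E0760]: `async fn` return type cannot contain a projection
+///         // or `Self` that references lifetimes from a parent scope
+///         S(i)
+///     }
+/// }
+/// ```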
+#[instrument(level = "debug", skip(tcx, span))]
+pub(super) fn check_opaque_for_inheriting_lifetimes<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ span: Span,
+) {
+ let item = tcx.hir().expect_item(def_id);
+ debug!(?item, ?span);
+
+ struct FoundParentLifetime;
+ struct FindParentLifetimeVisitor<'tcx>(&'tcx ty::Generics);
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for FindParentLifetimeVisitor<'tcx> {
+ type BreakTy = FoundParentLifetime;
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ debug!("FindParentLifetimeVisitor: r={:?}", r);
+ if let ty::ReEarlyBound(ty::EarlyBoundRegion { index, .. }) = *r {
+ if index < self.0.parent_count as u32 {
+ return ControlFlow::Break(FoundParentLifetime);
+ } else {
+ return ControlFlow::CONTINUE;
+ }
+ }
+
+ r.super_visit_with(self)
+ }
+
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::ConstKind::Unevaluated(..) = c.kind() {
+ // FIXME(#72219) We currently don't detect lifetimes within substs
+ // which would violate this check. Even though the particular substitution is not used
+ // within the const, this should still be fixed.
+ return ControlFlow::CONTINUE;
+ }
+ c.super_visit_with(self)
+ }
+ }
+
+ struct ProhibitOpaqueVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ opaque_identity_ty: Ty<'tcx>,
+ generics: &'tcx ty::Generics,
+ selftys: Vec<(Span, Option<String>)>,
+ }
+
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for ProhibitOpaqueVisitor<'tcx> {
+ type BreakTy = Ty<'tcx>;
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ debug!("check_opaque_for_inheriting_lifetimes: (visit_ty) t={:?}", t);
+ if t == self.opaque_identity_ty {
+ ControlFlow::CONTINUE
+ } else {
+ t.super_visit_with(&mut FindParentLifetimeVisitor(self.generics))
+ .map_break(|FoundParentLifetime| t)
+ }
+ }
+ }
+
+ impl<'tcx> Visitor<'tcx> for ProhibitOpaqueVisitor<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_ty(&mut self, arg: &'tcx hir::Ty<'tcx>) {
+ match arg.kind {
+ hir::TyKind::Path(hir::QPath::Resolved(None, path)) => match &path.segments {
+ [PathSegment { res: Res::SelfTyParam { .. }, .. }] => {
+ let impl_ty_name = None;
+ self.selftys.push((path.span, impl_ty_name));
+ }
+ [PathSegment { res: Res::SelfTyAlias { alias_to: def_id, .. }, .. }] => {
+ let impl_ty_name = Some(self.tcx.def_path_str(*def_id));
+ self.selftys.push((path.span, impl_ty_name));
+ }
+ _ => {}
+ },
+ _ => {}
+ }
+ hir::intravisit::walk_ty(self, arg);
+ }
+ }
+
+ if let ItemKind::OpaqueTy(hir::OpaqueTy {
+ origin: hir::OpaqueTyOrigin::AsyncFn(..) | hir::OpaqueTyOrigin::FnReturn(..),
+ ..
+ }) = item.kind
+ {
+ let mut visitor = ProhibitOpaqueVisitor {
+ opaque_identity_ty: tcx.mk_opaque(
+ def_id.to_def_id(),
+ InternalSubsts::identity_for_item(tcx, def_id.to_def_id()),
+ ),
+ generics: tcx.generics_of(def_id),
+ tcx,
+ selftys: vec![],
+ };
+ let prohibit_opaque = tcx
+ .explicit_item_bounds(def_id)
+ .iter()
+ .try_for_each(|(predicate, _)| predicate.visit_with(&mut visitor));
+ debug!(
+ "check_opaque_for_inheriting_lifetimes: prohibit_opaque={:?}, visitor.opaque_identity_ty={:?}, visitor.generics={:?}",
+ prohibit_opaque, visitor.opaque_identity_ty, visitor.generics
+ );
+
+ if let Some(ty) = prohibit_opaque.break_value() {
+ visitor.visit_item(&item);
+ let is_async = match item.kind {
+ ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => {
+ matches!(origin, hir::OpaqueTyOrigin::AsyncFn(..))
+ }
+ _ => unreachable!(),
+ };
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0760,
+ "`{}` return type cannot contain a projection or `Self` that references lifetimes from \
+ a parent scope",
+ if is_async { "async fn" } else { "impl Trait" },
+ );
+
+ for (span, name) in visitor.selftys {
+ err.span_suggestion(
+ span,
+ "consider spelling out the type instead",
+ name.unwrap_or_else(|| format!("{:?}", ty)),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+ }
+ }
+}
+
+/// Checks that an opaque type does not contain cycles.
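+///
+/// The simplest instance is a directly recursive `async fn`, whose opaque
+/// future would contain itself (illustrative sketch):
+///
+/// ```ignore (illustrative)
+/// async fn rec() {
+///     rec().await; // error[E0733]: recursion in an `async fn` requires boxing
+/// }
+/// ```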
+pub(super) fn check_opaque_for_cycles<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ substs: SubstsRef<'tcx>,
+ span: Span,
+ origin: &hir::OpaqueTyOrigin,
+) -> Result<(), ErrorGuaranteed> {
+ if tcx.try_expand_impl_trait_type(def_id.to_def_id(), substs).is_err() {
+ let reported = match origin {
+ hir::OpaqueTyOrigin::AsyncFn(..) => async_opaque_type_cycle_error(tcx, span),
+ _ => opaque_type_cycle_error(tcx, def_id, span),
+ };
+ Err(reported)
+ } else {
+ Ok(())
+ }
+}
+
+/// Check that the concrete type behind `impl Trait` actually implements `Trait`.
+///
+/// This is mostly checked at the places that specify the opaque type, but we
+/// check those cases in the `param_env` of that function, which may have
+/// bounds not on this opaque type:
+///
+/// ```ignore (illustrative)
+/// type X<T> = impl Clone;
+/// fn f<T: Clone>(t: T) -> X<T> {
+/// t
+/// }
+/// ```
+///
+/// Without this check the above code is incorrectly accepted: we would ICE if
+/// someone tried, for example, to clone an `Option<X<&mut ()>>`.
+#[instrument(level = "debug", skip(tcx))]
+fn check_opaque_meets_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ substs: SubstsRef<'tcx>,
+ span: Span,
+ origin: &hir::OpaqueTyOrigin,
+) {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let defining_use_anchor = match *origin {
+ hir::OpaqueTyOrigin::FnReturn(did) | hir::OpaqueTyOrigin::AsyncFn(did) => did,
+ hir::OpaqueTyOrigin::TyAlias => def_id,
+ };
+ let param_env = tcx.param_env(defining_use_anchor);
+
+ let infcx = tcx
+ .infer_ctxt()
+ .with_opaque_type_inference(DefiningAnchor::Bind(defining_use_anchor))
+ .build();
+ let ocx = ObligationCtxt::new(&infcx);
+ let opaque_ty = tcx.mk_opaque(def_id.to_def_id(), substs);
+
+ // `ReErased` regions appear in the "parent_substs" of closures/generators.
+ // We're ignoring them here and replacing them with fresh region variables.
+ // See tests in ui/type-alias-impl-trait/closure_{parent_substs,wf_outlives}.rs.
+ //
+ // FIXME: Consider wrapping the hidden type in an existential `Binder` and instantiating it
+ // here rather than using ReErased.
+ let hidden_ty = tcx.bound_type_of(def_id.to_def_id()).subst(tcx, substs);
+ let hidden_ty = tcx.fold_regions(hidden_ty, |re, _dbi| match re.kind() {
+ ty::ReErased => infcx.next_region_var(RegionVariableOrigin::MiscVariable(span)),
+ _ => re,
+ });
+
+ let misc_cause = traits::ObligationCause::misc(span, hir_id);
+
+ match infcx.at(&misc_cause, param_env).eq(opaque_ty, hidden_ty) {
+ Ok(infer_ok) => ocx.register_infer_ok_obligations(infer_ok),
+ Err(ty_err) => {
+ tcx.sess.delay_span_bug(
+ span,
+ &format!("could not unify `{hidden_ty}` with revealed type:\n{ty_err}"),
+ );
+ }
+ }
+
+ // Additionally require the hidden type to be well-formed with only the generics of the opaque type.
+ // Defining use functions may have more bounds than the opaque type, which is ok, as long as the
+ // hidden type is well formed even without those bounds.
+ let predicate =
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(hidden_ty.into())).to_predicate(tcx);
+ ocx.register_obligation(Obligation::new(misc_cause, param_env, predicate));
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ }
+ match origin {
+ // Checked when type checking the function containing them.
+ hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..) => {}
+ // Can have different predicates to their defining use
+ hir::OpaqueTyOrigin::TyAlias => {
+ let outlives_environment = OutlivesEnvironment::new(param_env);
+ infcx.check_region_obligations_and_report_errors(
+ defining_use_anchor,
+ &outlives_environment,
+ );
+ }
+ }
+ // Clean up after ourselves
+ let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+}
+
+fn check_item_type<'tcx>(tcx: TyCtxt<'tcx>, id: hir::ItemId) {
+ debug!(
+ "check_item_type(it.def_id={:?}, it.name={})",
+ id.owner_id,
+ tcx.def_path_str(id.owner_id.to_def_id())
+ );
+ let _indenter = indenter();
+ match tcx.def_kind(id.owner_id) {
+ DefKind::Static(..) => {
+ tcx.ensure().typeck(id.owner_id.def_id);
+ maybe_check_static_with_link_section(tcx, id.owner_id.def_id);
+ check_static_inhabited(tcx, id.owner_id.def_id);
+ }
+ DefKind::Const => {
+ tcx.ensure().typeck(id.owner_id.def_id);
+ }
+ DefKind::Enum => {
+ let item = tcx.hir().item(id);
+ let hir::ItemKind::Enum(ref enum_definition, _) = item.kind else {
+ return;
+ };
+ check_enum(tcx, &enum_definition.variants, item.owner_id.def_id);
+ }
+ DefKind::Fn => {} // entirely within check_item_body
+ DefKind::Impl => {
+ let it = tcx.hir().item(id);
+ let hir::ItemKind::Impl(ref impl_) = it.kind else {
+ return;
+ };
+ debug!("ItemKind::Impl {} with id {:?}", it.ident, it.owner_id);
+ if let Some(impl_trait_ref) = tcx.impl_trait_ref(it.owner_id) {
+ check_impl_items_against_trait(
+ tcx,
+ it.span,
+ it.owner_id.def_id,
+ impl_trait_ref,
+ &impl_.items,
+ );
+ check_on_unimplemented(tcx, it);
+ }
+ }
+ DefKind::Trait => {
+ let it = tcx.hir().item(id);
+ let hir::ItemKind::Trait(_, _, _, _, ref items) = it.kind else {
+ return;
+ };
+ check_on_unimplemented(tcx, it);
+
+ for item in items.iter() {
+ let item = tcx.hir().trait_item(item.id);
+ match item.kind {
+ hir::TraitItemKind::Fn(ref sig, _) => {
+ let abi = sig.header.abi;
+ fn_maybe_err(tcx, item.ident.span, abi);
+ }
+ hir::TraitItemKind::Type(.., Some(default)) => {
+ let assoc_item = tcx.associated_item(item.owner_id);
+ let trait_substs =
+ InternalSubsts::identity_for_item(tcx, it.owner_id.to_def_id());
+ let _: Result<_, rustc_errors::ErrorGuaranteed> = check_type_bounds(
+ tcx,
+ assoc_item,
+ assoc_item,
+ default.span,
+ ty::TraitRef { def_id: it.owner_id.to_def_id(), substs: trait_substs },
+ );
+ }
+ _ => {}
+ }
+ }
+ }
+ DefKind::Struct => {
+ check_struct(tcx, id.owner_id.def_id);
+ }
+ DefKind::Union => {
+ check_union(tcx, id.owner_id.def_id);
+ }
+ DefKind::OpaqueTy => {
+ check_opaque(tcx, id);
+ }
+ DefKind::ImplTraitPlaceholder => {
+ let parent = tcx.impl_trait_in_trait_parent(id.owner_id.to_def_id());
+ // Only check the validity of this opaque type if the function has a default body
+ if let hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(_)),
+ ..
+ }) = tcx.hir().get_by_def_id(parent.expect_local())
+ {
+ check_opaque(tcx, id);
+ }
+ }
+ DefKind::TyAlias => {
+ let pty_ty = tcx.type_of(id.owner_id);
+ let generics = tcx.generics_of(id.owner_id);
+ check_type_params_are_used(tcx, &generics, pty_ty);
+ }
+ DefKind::ForeignMod => {
+ let it = tcx.hir().item(id);
+ let hir::ItemKind::ForeignMod { abi, items } = it.kind else {
+ return;
+ };
+ check_abi(tcx, it.hir_id(), it.span, abi);
+
+ if abi == Abi::RustIntrinsic {
+ for item in items {
+ let item = tcx.hir().foreign_item(item.id);
+ intrinsic::check_intrinsic_type(tcx, item);
+ }
+ } else if abi == Abi::PlatformIntrinsic {
+ for item in items {
+ let item = tcx.hir().foreign_item(item.id);
+ intrinsic::check_platform_intrinsic_type(tcx, item);
+ }
+ } else {
+ for item in items {
+ let def_id = item.id.owner_id.def_id;
+ let generics = tcx.generics_of(def_id);
+ let own_counts = generics.own_counts();
+ if generics.params.len() - own_counts.lifetimes != 0 {
+ let (kinds, kinds_pl, egs) = match (own_counts.types, own_counts.consts) {
+ (_, 0) => ("type", "types", Some("u32")),
+ // We don't specify an example value, because we can't generate
+ // a valid value for any type.
+ (0, _) => ("const", "consts", None),
+ _ => ("type or const", "types or consts", None),
+ };
+ struct_span_err!(
+ tcx.sess,
+ item.span,
+ E0044,
+ "foreign items may not have {kinds} parameters",
+ )
+ .span_label(item.span, &format!("can't have {kinds} parameters"))
+ .help(
+ // FIXME: once we start storing spans for type arguments, turn this
+ // into a suggestion.
+ &format!(
+ "replace the {} parameters with concrete {}{}",
+ kinds,
+ kinds_pl,
+ egs.map(|egs| format!(" like `{}`", egs)).unwrap_or_default(),
+ ),
+ )
+ .emit();
+ }
+
+ let item = tcx.hir().foreign_item(item.id);
+ match item.kind {
+ hir::ForeignItemKind::Fn(ref fn_decl, _, _) => {
+ require_c_abi_if_c_variadic(tcx, fn_decl, abi, item.span);
+ }
+ hir::ForeignItemKind::Static(..) => {
+ check_static_inhabited(tcx, def_id);
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+ DefKind::GlobalAsm => {
+ let it = tcx.hir().item(id);
+ let hir::ItemKind::GlobalAsm(asm) = it.kind else { span_bug!(it.span, "DefKind::GlobalAsm but got {:#?}", it) };
+ InlineAsmCtxt::new_global_asm(tcx).check_asm(asm, id.hir_id());
+ }
+ _ => {}
+ }
+}
+
+pub(super) fn check_on_unimplemented(tcx: TyCtxt<'_>, item: &hir::Item<'_>) {
+ // an error would be reported if this fails.
+ let _ = traits::OnUnimplementedDirective::of_item(tcx, item.owner_id.to_def_id());
+}
+
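+/// Checks that an impl item only specializes a trait item that its parent impl
+/// marks as `default`. Illustrative sketch, assuming `#![feature(specialization)]`:
+///
+/// ```ignore (illustrative)
+/// trait Example { fn call(&self); }
+/// impl<T> Example for T { fn call(&self) {} } // item not marked `default`
+/// impl Example for i32 {
+///     fn call(&self) {} // error[E0520]: `call` specializes an item from a
+///                       // parent impl, but that item is not marked `default`
+/// }
+/// ```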
+pub(super) fn check_specialization_validity<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_def: &ty::TraitDef,
+ trait_item: &ty::AssocItem,
+ impl_id: DefId,
+ impl_item: &hir::ImplItemRef,
+) {
+ let Ok(ancestors) = trait_def.ancestors(tcx, impl_id) else { return };
+ let mut ancestor_impls = ancestors.skip(1).filter_map(|parent| {
+ if parent.is_from_trait() {
+ None
+ } else {
+ Some((parent, parent.item(tcx, trait_item.def_id)))
+ }
+ });
+
+ let opt_result = ancestor_impls.find_map(|(parent_impl, parent_item)| {
+ match parent_item {
+ // Parent impl exists, and contains the parent item we're trying to specialize, but
+ // doesn't mark it `default`.
+ Some(parent_item) if traits::impl_item_is_final(tcx, &parent_item) => {
+ Some(Err(parent_impl.def_id()))
+ }
+
+ // Parent impl contains item and makes it specializable.
+ Some(_) => Some(Ok(())),
+
+ // Parent impl doesn't mention the item. This means it's inherited from the
+ // grandparent. In that case, if parent is a `default impl`, inherited items use the
+ // "defaultness" from the grandparent, else they are final.
+ None => {
+ if tcx.impl_defaultness(parent_impl.def_id()).is_default() {
+ None
+ } else {
+ Some(Err(parent_impl.def_id()))
+ }
+ }
+ }
+ });
+
+ // If `opt_result` is `None`, we have only encountered `default impl`s that don't contain the
+ // item. This is allowed, the item isn't actually getting specialized here.
+ let result = opt_result.unwrap_or(Ok(()));
+
+ if let Err(parent_impl) = result {
+ report_forbidden_specialization(tcx, impl_item, parent_impl);
+ }
+}
+
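+/// Checks the items of an `impl Trait for Type` block against the trait
+/// declaration: each impl item is compared with its trait counterpart, and any
+/// required item left unimplemented is reported (illustrative sketch):
+///
+/// ```ignore (illustrative)
+/// trait Greet {
+///     fn hello(&self);
+/// }
+/// struct S;
+/// impl Greet for S {} // error[E0046]: not all trait items implemented, missing: `hello`
+/// ```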
+fn check_impl_items_against_trait<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ full_impl_span: Span,
+ impl_id: LocalDefId,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+ impl_item_refs: &[hir::ImplItemRef],
+) {
+ // If the trait reference itself is erroneous (so the compilation is going
+ // to fail), skip checking the items here -- the `impl_item` table in `tcx`
+ // isn't populated for such impls.
+ if impl_trait_ref.references_error() {
+ return;
+ }
+
+ // Negative impls are not expected to have any items
+ match tcx.impl_polarity(impl_id) {
+ ty::ImplPolarity::Reservation | ty::ImplPolarity::Positive => {}
+ ty::ImplPolarity::Negative => {
+ if let [first_item_ref, ..] = impl_item_refs {
+ let first_item_span = tcx.hir().impl_item(first_item_ref.id).span;
+ struct_span_err!(
+ tcx.sess,
+ first_item_span,
+ E0749,
+ "negative impls cannot have any items"
+ )
+ .emit();
+ }
+ return;
+ }
+ }
+
+ let trait_def = tcx.trait_def(impl_trait_ref.def_id);
+
+ for impl_item in impl_item_refs {
+ let ty_impl_item = tcx.associated_item(impl_item.id.owner_id);
+ let ty_trait_item = if let Some(trait_item_id) = ty_impl_item.trait_item_def_id {
+ tcx.associated_item(trait_item_id)
+ } else {
+ // Checked in `associated_item`.
+ tcx.sess.delay_span_bug(impl_item.span, "missing associated item in trait");
+ continue;
+ };
+ let impl_item_full = tcx.hir().impl_item(impl_item.id);
+ match impl_item_full.kind {
+ hir::ImplItemKind::Const(..) => {
+ let _ = tcx.compare_assoc_const_impl_item_with_trait_item((
+ impl_item.id.owner_id.def_id,
+ ty_impl_item.trait_item_def_id.unwrap(),
+ ));
+ }
+ hir::ImplItemKind::Fn(..) => {
+ let opt_trait_span = tcx.hir().span_if_local(ty_trait_item.def_id);
+ compare_impl_method(
+ tcx,
+ &ty_impl_item,
+ &ty_trait_item,
+ impl_trait_ref,
+ opt_trait_span,
+ );
+ }
+ hir::ImplItemKind::Type(impl_ty) => {
+ let opt_trait_span = tcx.hir().span_if_local(ty_trait_item.def_id);
+ compare_ty_impl(
+ tcx,
+ &ty_impl_item,
+ impl_ty.span,
+ &ty_trait_item,
+ impl_trait_ref,
+ opt_trait_span,
+ );
+ }
+ }
+
+ check_specialization_validity(
+ tcx,
+ trait_def,
+ &ty_trait_item,
+ impl_id.to_def_id(),
+ impl_item,
+ );
+ }
+
+ if let Ok(ancestors) = trait_def.ancestors(tcx, impl_id.to_def_id()) {
+ // Check for missing items from trait
+ let mut missing_items = Vec::new();
+
+ let mut must_implement_one_of: Option<&[Ident]> =
+ trait_def.must_implement_one_of.as_deref();
+
+ for &trait_item_id in tcx.associated_item_def_ids(impl_trait_ref.def_id) {
+ let is_implemented = ancestors
+ .leaf_def(tcx, trait_item_id)
+ .map_or(false, |node_item| node_item.item.defaultness(tcx).has_value());
+
+ if !is_implemented && tcx.impl_defaultness(impl_id).is_final() {
+ missing_items.push(tcx.associated_item(trait_item_id));
+ }
+
+ // true if this item is specifically implemented in this impl
+ let is_implemented_here = ancestors
+ .leaf_def(tcx, trait_item_id)
+ .map_or(false, |node_item| !node_item.defining_node.is_from_trait());
+
+ if !is_implemented_here {
+ match tcx.eval_default_body_stability(trait_item_id, full_impl_span) {
+ EvalResult::Deny { feature, reason, issue, .. } => default_body_is_unstable(
+ tcx,
+ full_impl_span,
+ trait_item_id,
+ feature,
+ reason,
+ issue,
+ ),
+
+ // Unmarked default bodies are considered stable (at least for now).
+ EvalResult::Allow | EvalResult::Unmarked => {}
+ }
+ }
+
+ if let Some(required_items) = &must_implement_one_of {
+ if is_implemented_here {
+ let trait_item = tcx.associated_item(trait_item_id);
+ if required_items.contains(&trait_item.ident(tcx)) {
+ must_implement_one_of = None;
+ }
+ }
+ }
+ }
+
+ if !missing_items.is_empty() {
+ missing_items_err(tcx, tcx.def_span(impl_id), &missing_items, full_impl_span);
+ }
+
+ if let Some(missing_items) = must_implement_one_of {
+ let attr_span = tcx
+ .get_attr(impl_trait_ref.def_id, sym::rustc_must_implement_one_of)
+ .map(|attr| attr.span);
+
+ missing_items_must_implement_one_of_err(
+ tcx,
+ tcx.def_span(impl_id),
+ missing_items,
+ attr_span,
+ );
+ }
+ }
+}
+
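+/// Validates a `#[repr(simd)]` struct: it must be non-empty, homogeneous, no
+/// wider than `MAX_SIMD_LANES`, and built from primitive scalar elements.
+/// Illustrative sketch, assuming `#![feature(repr_simd)]`:
+///
+/// ```ignore (illustrative)
+/// #[repr(simd)]
+/// struct Ok4(f32, f32, f32, f32);
+///
+/// #[repr(simd)]
+/// struct Mixed(f32, i32); // error[E0076]: SIMD vector should be homogeneous
+/// ```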
+pub fn check_simd(tcx: TyCtxt<'_>, sp: Span, def_id: LocalDefId) {
+ let t = tcx.type_of(def_id);
+ if let ty::Adt(def, substs) = t.kind()
+ && def.is_struct()
+ {
+ let fields = &def.non_enum_variant().fields;
+ if fields.is_empty() {
+ struct_span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty").emit();
+ return;
+ }
+ let e = fields[0].ty(tcx, substs);
+ if !fields.iter().all(|f| f.ty(tcx, substs) == e) {
+ struct_span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous")
+ .span_label(sp, "SIMD elements must have the same type")
+ .emit();
+ return;
+ }
+
+ let len = if let ty::Array(_ty, c) = e.kind() {
+ c.try_eval_usize(tcx, tcx.param_env(def.did()))
+ } else {
+ Some(fields.len() as u64)
+ };
+ if let Some(len) = len {
+ if len == 0 {
+ struct_span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty").emit();
+ return;
+ } else if len > MAX_SIMD_LANES {
+ struct_span_err!(
+ tcx.sess,
+ sp,
+ E0075,
+ "SIMD vector cannot have more than {MAX_SIMD_LANES} elements",
+ )
+ .emit();
+ return;
+ }
+ }
+
+ // Check that we use types valid for use in the lanes of a SIMD "vector register"
+ // These are scalar types which directly match a "machine" type
+ // Yes: Integers, floats, "thin" pointers
+ // No: char, "fat" pointers, compound types
+ match e.kind() {
+ ty::Param(_) => (), // pass struct<T>(T, T, T, T) through, let monomorphization catch errors
+ ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::RawPtr(_) => (), // struct(u8, u8, u8, u8) is ok
+ ty::Array(t, _) if matches!(t.kind(), ty::Param(_)) => (), // pass struct<T>([T; N]) through, let monomorphization catch errors
+ ty::Array(t, _clen)
+ if matches!(
+ t.kind(),
+ ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::RawPtr(_)
+ ) =>
+ { /* struct([f32; 4]) is ok */ }
+ _ => {
+ struct_span_err!(
+ tcx.sess,
+ sp,
+ E0077,
+ "SIMD vector element type should be a \
+ primitive scalar (integer/float/pointer) type"
+ )
+ .emit();
+ return;
+ }
+ }
+ }
+}
+
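+/// Validates `#[repr(packed)]` types: conflicting packing hints, combining
+/// `packed` with `align`, and transitively containing a `#[repr(align)]` type
+/// are all rejected (illustrative sketch):
+///
+/// ```ignore (illustrative)
+/// #[repr(packed, align(8))]
+/// struct Conflicting(i32); // error[E0587]: conflicting packed and align representation hints
+///
+/// #[repr(align(16))]
+/// struct Aligned(i32);
+///
+/// #[repr(packed)]
+/// struct Outer(Aligned); // error[E0588]: packed type cannot transitively
+///                        // contain a `#[repr(align)]` type
+/// ```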
+pub(super) fn check_packed(tcx: TyCtxt<'_>, sp: Span, def: ty::AdtDef<'_>) {
+ let repr = def.repr();
+ if repr.packed() {
+ for attr in tcx.get_attrs(def.did(), sym::repr) {
+ for r in attr::parse_repr_attr(&tcx.sess, attr) {
+ if let attr::ReprPacked(pack) = r
+ && let Some(repr_pack) = repr.pack
+ && pack as u64 != repr_pack.bytes()
+ {
+ struct_span_err!(
+ tcx.sess,
+ sp,
+ E0634,
+ "type has conflicting packed representation hints"
+ )
+ .emit();
+ }
+ }
+ }
+ if repr.align.is_some() {
+ struct_span_err!(
+ tcx.sess,
+ sp,
+ E0587,
+ "type has conflicting packed and align representation hints"
+ )
+ .emit();
+ } else {
+ if let Some(def_spans) = check_packed_inner(tcx, def.did(), &mut vec![]) {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ sp,
+ E0588,
+ "packed type cannot transitively contain a `#[repr(align)]` type"
+ );
+
+ err.span_note(
+ tcx.def_span(def_spans[0].0),
+ &format!(
+ "`{}` has a `#[repr(align)]` attribute",
+ tcx.item_name(def_spans[0].0)
+ ),
+ );
+
+ if def_spans.len() > 2 {
+ let mut first = true;
+ for (adt_def, span) in def_spans.iter().skip(1).rev() {
+ let ident = tcx.item_name(*adt_def);
+ err.span_note(
+ *span,
+ &if first {
+ format!(
+ "`{}` contains a field of type `{}`",
+ tcx.type_of(def.did()),
+ ident
+ )
+ } else {
+ format!("...which contains a field of type `{ident}`")
+ },
+ );
+ first = false;
+ }
+ }
+
+ err.emit();
+ }
+ }
+ }
+}
+
+pub(super) fn check_packed_inner(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+ stack: &mut Vec<DefId>,
+) -> Option<Vec<(DefId, Span)>> {
+ if let ty::Adt(def, substs) = tcx.type_of(def_id).kind() {
+ if def.is_struct() || def.is_union() {
+ if def.repr().align.is_some() {
+ return Some(vec![(def.did(), DUMMY_SP)]);
+ }
+
+ stack.push(def_id);
+ for field in &def.non_enum_variant().fields {
+ if let ty::Adt(def, _) = field.ty(tcx, substs).kind()
+ && !stack.contains(&def.did())
+ && let Some(mut defs) = check_packed_inner(tcx, def.did(), stack)
+ {
+ defs.push((def.did(), field.ident(tcx).span));
+ return Some(defs);
+ }
+ }
+ stack.pop();
+ }
+ }
+
+ None
+}
+
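+/// Validates `#[repr(transparent)]` types: besides allowing at most one
+/// non-zero-sized field, every zero-sized field must have alignment 1
+/// (illustrative sketch):
+///
+/// ```ignore (illustrative)
+/// #[repr(transparent)]
+/// struct Wrapper(f32, [u32; 0]); // error[E0691]: zero-sized field in transparent
+///                                // struct has alignment larger than 1
+/// ```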
+pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, sp: Span, adt: ty::AdtDef<'tcx>) {
+ if !adt.repr().transparent() {
+ return;
+ }
+
+ if adt.is_union() && !tcx.features().transparent_unions {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::transparent_unions,
+ sp,
+ "transparent unions are unstable",
+ )
+ .emit();
+ }
+
+ if adt.variants().len() != 1 {
+ bad_variant_count(tcx, adt, sp, adt.did());
+ if adt.variants().is_empty() {
+ // Don't bother checking the fields. No variants (and thus no fields) exist.
+ return;
+ }
+ }
+
+ // For each field, figure out if it's known to be a ZST and align(1), with "known"
+ // respecting #[non_exhaustive] attributes.
+ let field_infos = adt.all_fields().map(|field| {
+ let ty = field.ty(tcx, InternalSubsts::identity_for_item(tcx, field.did));
+ let param_env = tcx.param_env(field.did);
+ let layout = tcx.layout_of(param_env.and(ty));
+ // We are currently checking the type this field came from, so it must be local
+ let span = tcx.hir().span_if_local(field.did).unwrap();
+ let zst = layout.map_or(false, |layout| layout.is_zst());
+ let align1 = layout.map_or(false, |layout| layout.align.abi.bytes() == 1);
+ if !zst {
+ return (span, zst, align1, None);
+ }
+
+ fn check_non_exhaustive<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ t: Ty<'tcx>,
+ ) -> ControlFlow<(&'static str, DefId, SubstsRef<'tcx>, bool)> {
+ match t.kind() {
+ ty::Tuple(list) => list.iter().try_for_each(|t| check_non_exhaustive(tcx, t)),
+ ty::Array(ty, _) => check_non_exhaustive(tcx, *ty),
+ ty::Adt(def, subst) => {
+ if !def.did().is_local() {
+ let non_exhaustive = def.is_variant_list_non_exhaustive()
+ || def
+ .variants()
+ .iter()
+ .any(ty::VariantDef::is_field_list_non_exhaustive);
+ let has_priv = def.all_fields().any(|f| !f.vis.is_public());
+ if non_exhaustive || has_priv {
+ return ControlFlow::Break((
+ def.descr(),
+ def.did(),
+ subst,
+ non_exhaustive,
+ ));
+ }
+ }
+ def.all_fields()
+ .map(|field| field.ty(tcx, subst))
+ .try_for_each(|t| check_non_exhaustive(tcx, t))
+ }
+ _ => ControlFlow::Continue(()),
+ }
+ }
+
+ (span, zst, align1, check_non_exhaustive(tcx, ty).break_value())
+ });
+
+ let non_zst_fields = field_infos
+ .clone()
+ .filter_map(|(span, zst, _align1, _non_exhaustive)| if !zst { Some(span) } else { None });
+ let non_zst_count = non_zst_fields.clone().count();
+ if non_zst_count >= 2 {
+ bad_non_zero_sized_fields(tcx, adt, non_zst_count, non_zst_fields, sp);
+ }
+ let incompatible_zst_fields =
+ field_infos.clone().filter(|(_, _, _, opt)| opt.is_some()).count();
+ let incompat = incompatible_zst_fields + non_zst_count >= 2 && non_zst_count < 2;
+ for (span, zst, align1, non_exhaustive) in field_infos {
+ if zst && !align1 {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0691,
+ "zero-sized field in transparent {} has alignment larger than 1",
+ adt.descr(),
+ )
+ .span_label(span, "has alignment larger than 1")
+ .emit();
+ }
+ if incompat && let Some((descr, def_id, substs, non_exhaustive)) = non_exhaustive {
+ tcx.struct_span_lint_hir(
+ REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS,
+ tcx.hir().local_def_id_to_hir_id(adt.did().expect_local()),
+ span,
+ "zero-sized fields in `repr(transparent)` cannot contain external non-exhaustive types",
+ |lint| {
+ let note = if non_exhaustive {
+ "is marked with `#[non_exhaustive]`"
+ } else {
+ "contains private fields"
+ };
+ let field_ty = tcx.def_path_str_with_substs(def_id, substs);
+ lint
+ .note(format!("this {descr} contains `{field_ty}`, which {note}, \
+ and makes it not a breaking change to become non-zero-sized in the future."))
+ },
+ )
+ }
+ }
+}
+
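+/// Performs the enum-specific checks: an explicit representation on a
+/// zero-variant enum, unstable 128-bit discriminant types, missing
+/// `#[repr(inttype)]` on mixed explicit discriminants, and duplicate
+/// discriminants are all reported (illustrative sketch):
+///
+/// ```ignore (illustrative)
+/// #[repr(i32)]
+/// enum Empty {} // error[E0084]: unsupported representation for zero-variant enum
+/// ```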
+#[allow(trivial_numeric_casts)]
+fn check_enum<'tcx>(tcx: TyCtxt<'tcx>, vs: &'tcx [hir::Variant<'tcx>], def_id: LocalDefId) {
+ let def = tcx.adt_def(def_id);
+ let sp = tcx.def_span(def_id);
+ def.destructor(tcx); // force the destructor to be evaluated
+
+ if vs.is_empty() {
+ if let Some(attr) = tcx.get_attrs(def_id.to_def_id(), sym::repr).next() {
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0084,
+ "unsupported representation for zero-variant enum"
+ )
+ .span_label(sp, "zero-variant enum")
+ .emit();
+ }
+ }
+
+ let repr_type_ty = def.repr().discr_type().to_ty(tcx);
+ if repr_type_ty == tcx.types.i128 || repr_type_ty == tcx.types.u128 {
+ if !tcx.features().repr128 {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::repr128,
+ sp,
+ "repr with 128-bit type is unstable",
+ )
+ .emit();
+ }
+ }
+
+ for v in vs {
+ if let Some(ref e) = v.disr_expr {
+ tcx.ensure().typeck(tcx.hir().local_def_id(e.hir_id));
+ }
+ }
+
+ if tcx.adt_def(def_id).repr().int.is_none() {
+ let is_unit = |var: &hir::Variant<'_>| matches!(var.data, hir::VariantData::Unit(..));
+
+ let has_disr = |var: &hir::Variant<'_>| var.disr_expr.is_some();
+ let has_non_units = vs.iter().any(|var| !is_unit(var));
+ let disr_units = vs.iter().any(|var| is_unit(&var) && has_disr(&var));
+ let disr_non_unit = vs.iter().any(|var| !is_unit(&var) && has_disr(&var));
+
+ if disr_non_unit || (disr_units && has_non_units) {
+ let mut err =
+ struct_span_err!(tcx.sess, sp, E0732, "`#[repr(inttype)]` must be specified");
+ err.emit();
+ }
+ }
+
+ detect_discriminant_duplicate(tcx, def.discriminants(tcx).collect(), vs, sp);
+
+ check_transparent(tcx, sp, def);
+}
+
+/// Part of enum check. Given the discriminants of an enum, errors if two or more discriminants are equal
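+///
+/// Illustrative sketch:
+///
+/// ```ignore (illustrative)
+/// enum Enum {
+///     P = 3,
+///     X = 3, // error[E0081]: discriminant value `3` assigned more than once
+///     Y = 5,
+/// }
+/// ```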
+fn detect_discriminant_duplicate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mut discrs: Vec<(VariantIdx, Discr<'tcx>)>,
+ vs: &'tcx [hir::Variant<'tcx>],
+ self_span: Span,
+) {
+    // Helper closure to reduce duplicate code. This gets called every time we detect a duplicate.
+    // Here `idx` refers to the order in which the discriminant appears, and to its index in `vs`.
+ let report = |dis: Discr<'tcx>, idx: usize, err: &mut Diagnostic| {
+ let var = &vs[idx]; // HIR for the duplicate discriminant
+ let (span, display_discr) = match var.disr_expr {
+ Some(ref expr) => {
+ // In the case the discriminant is both a duplicate and overflowed, let the user know
+ if let hir::ExprKind::Lit(lit) = &tcx.hir().body(expr.body).value.kind
+ && let rustc_ast::LitKind::Int(lit_value, _int_kind) = &lit.node
+ && *lit_value != dis.val
+ {
+ (tcx.hir().span(expr.hir_id), format!("`{dis}` (overflowed from `{lit_value}`)"))
+ // Otherwise, format the value as-is
+ } else {
+ (tcx.hir().span(expr.hir_id), format!("`{dis}`"))
+ }
+ }
+ None => {
+                // At this point we know this discriminant is a duplicate, and was not explicitly
+                // assigned by the user. Here we iterate backwards to fetch the HIR for the last
+                // explicitly assigned discriminant, letting the user know that this was the
+                // starting point of the increment and how many steps from there led to the duplicate.
+ if let Some((n, hir::Variant { span, ident, .. })) =
+ vs[..idx].iter().rev().enumerate().find(|v| v.1.disr_expr.is_some())
+ {
+ let ve_ident = var.ident;
+ let n = n + 1;
+ let sp = if n > 1 { "variants" } else { "variant" };
+
+ err.span_label(
+ *span,
+ format!("discriminant for `{ve_ident}` incremented from this startpoint (`{ident}` + {n} {sp} later => `{ve_ident}` = {dis})"),
+ );
+ }
+
+ (vs[idx].span, format!("`{dis}`"))
+ }
+ };
+
+ err.span_label(span, format!("{display_discr} assigned here"));
+ };
+
+ // Here we loop through the discriminants, comparing each discriminant to another.
+ // When a duplicate is detected, we instantiate an error and point to both
+ // initial and duplicate value. The duplicate discriminant is then discarded by swapping
+ // it with the last element and decrementing the `vec.len` (which is why we have to evaluate
+ // `discrs.len()` anew every iteration, and why this could be tricky to do in a functional
+ // style as we are mutating `discrs` on the fly).
+ let mut i = 0;
+ while i < discrs.len() {
+ let hir_var_i_idx = discrs[i].0.index();
+ let mut error: Option<DiagnosticBuilder<'_, _>> = None;
+
+ let mut o = i + 1;
+ while o < discrs.len() {
+ let hir_var_o_idx = discrs[o].0.index();
+
+ if discrs[i].1.val == discrs[o].1.val {
+ let err = error.get_or_insert_with(|| {
+ let mut ret = struct_span_err!(
+ tcx.sess,
+ self_span,
+ E0081,
+ "discriminant value `{}` assigned more than once",
+ discrs[i].1,
+ );
+
+ report(discrs[i].1, hir_var_i_idx, &mut ret);
+
+ ret
+ });
+
+ report(discrs[o].1, hir_var_o_idx, err);
+
+ // Safe to unwrap here, as we wouldn't reach this point if `discrs` was empty
+ discrs[o] = *discrs.last().unwrap();
+ discrs.pop();
+ } else {
+ o += 1;
+ }
+ }
+
+ if let Some(mut e) = error {
+ e.emit();
+ }
+
+ i += 1;
+ }
+}
+
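+/// Checks that every type parameter of a type alias is actually used in the
+/// aliased type (illustrative sketch):
+///
+/// ```ignore (illustrative)
+/// type Alias<T> = u32; // error[E0091]: type parameter `T` is unused
+/// ```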
+pub(super) fn check_type_params_are_used<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ generics: &ty::Generics,
+ ty: Ty<'tcx>,
+) {
+ debug!("check_type_params_are_used(generics={:?}, ty={:?})", generics, ty);
+
+ assert_eq!(generics.parent, None);
+
+ if generics.own_counts().types == 0 {
+ return;
+ }
+
+ let mut params_used = BitSet::new_empty(generics.params.len());
+
+ if ty.references_error() {
+ // If there is already another error, do not emit
+ // an error for not using a type parameter.
+ assert!(tcx.sess.has_errors().is_some());
+ return;
+ }
+
+ for leaf in ty.walk() {
+ if let GenericArgKind::Type(leaf_ty) = leaf.unpack()
+ && let ty::Param(param) = leaf_ty.kind()
+ {
+ debug!("found use of ty param {:?}", param);
+ params_used.insert(param.index);
+ }
+ }
+
+ for param in &generics.params {
+ if !params_used.contains(param.index)
+ && let ty::GenericParamDefKind::Type { .. } = param.kind
+ {
+ let span = tcx.def_span(param.def_id);
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0091,
+ "type parameter `{}` is unused",
+ param.name,
+ )
+ .span_label(span, "unused type parameter")
+ .emit();
+ }
+ }
+}
+
+pub(super) fn check_mod_item_types(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ let module = tcx.hir_module_items(module_def_id);
+ for id in module.items() {
+ check_item_type(tcx, id);
+ }
+}
+
+fn async_opaque_type_cycle_error(tcx: TyCtxt<'_>, span: Span) -> ErrorGuaranteed {
+ struct_span_err!(tcx.sess, span, E0733, "recursion in an `async fn` requires boxing")
+ .span_label(span, "recursive `async fn`")
+ .note("a recursive `async fn` must be rewritten to return a boxed `dyn Future`")
+ .note(
+ "consider using the `async_recursion` crate: https://crates.io/crates/async_recursion",
+ )
+ .emit()
+}
+
+/// Emit an error for recursive opaque types.
+///
+/// If this is a return `impl Trait`, find the item's return expressions and point at them. For
+/// direct recursion this is enough, but for indirect recursion also point at the last intermediary
+/// `impl Trait`.
+///
+/// If all the return expressions evaluate to `!`, then we explain that the error will go away
+/// after changing it. This can happen when a user uses `panic!()` or similar as a placeholder.
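+///
+/// The direct-recursion case looks roughly like this (illustrative sketch):
+///
+/// ```ignore (illustrative)
+/// fn infinite() -> impl Copy {
+///     infinite() // error[E0720]: cannot resolve opaque type
+/// }
+/// ```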
+fn opaque_type_cycle_error(tcx: TyCtxt<'_>, def_id: LocalDefId, span: Span) -> ErrorGuaranteed {
+ let mut err = struct_span_err!(tcx.sess, span, E0720, "cannot resolve opaque type");
+
+ let mut label = false;
+ if let Some((def_id, visitor)) = get_owner_return_paths(tcx, def_id) {
+ let typeck_results = tcx.typeck(def_id);
+ if visitor
+ .returns
+ .iter()
+ .filter_map(|expr| typeck_results.node_type_opt(expr.hir_id))
+ .all(|ty| matches!(ty.kind(), ty::Never))
+ {
+ let spans = visitor
+ .returns
+ .iter()
+ .filter(|expr| typeck_results.node_type_opt(expr.hir_id).is_some())
+ .map(|expr| expr.span)
+ .collect::<Vec<Span>>();
+ let span_len = spans.len();
+ if span_len == 1 {
+ err.span_label(spans[0], "this returned value is of `!` type");
+ } else {
+ let mut multispan: MultiSpan = spans.clone().into();
+ for span in spans {
+ multispan.push_span_label(span, "this returned value is of `!` type");
+ }
+ err.span_note(multispan, "these returned values have a concrete \"never\" type");
+ }
+ err.help("this error will resolve once the item's body returns a concrete type");
+ } else {
+ let mut seen = FxHashSet::default();
+ seen.insert(span);
+ err.span_label(span, "recursive opaque type");
+ label = true;
+ for (sp, ty) in visitor
+ .returns
+ .iter()
+ .filter_map(|e| typeck_results.node_type_opt(e.hir_id).map(|t| (e.span, t)))
+ .filter(|(_, ty)| !matches!(ty.kind(), ty::Never))
+ {
+ struct OpaqueTypeCollector(Vec<DefId>);
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for OpaqueTypeCollector {
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *t.kind() {
+ ty::Opaque(def, _) => {
+ self.0.push(def);
+ ControlFlow::CONTINUE
+ }
+ _ => t.super_visit_with(self),
+ }
+ }
+ }
+ let mut visitor = OpaqueTypeCollector(vec![]);
+ ty.visit_with(&mut visitor);
+ for def_id in visitor.0 {
+ let ty_span = tcx.def_span(def_id);
+ if !seen.contains(&ty_span) {
+ err.span_label(ty_span, &format!("returning this opaque type `{ty}`"));
+ seen.insert(ty_span);
+ }
+ err.span_label(sp, &format!("returning here with type `{ty}`"));
+ }
+ }
+ }
+ }
+ if !label {
+ err.span_label(span, "cannot resolve opaque type");
+ }
+ err.emit()
+}
diff --git a/compiler/rustc_hir_analysis/src/check/compare_method.rs b/compiler/rustc_hir_analysis/src/check/compare_method.rs
new file mode 100644
index 000000000..32f66b06f
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/compare_method.rs
@@ -0,0 +1,1825 @@
+use super::potentially_plural_count;
+use crate::errors::LifetimesOrBoundsMismatchOnTrait;
+use hir::def_id::{DefId, LocalDefId};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticId, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::intravisit;
+use rustc_hir::{GenericParamKind, ImplItemKind, TraitItemKind};
+use rustc_infer::infer::outlives::env::OutlivesEnvironment;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::{self, TyCtxtInferExt};
+use rustc_infer::traits::util;
+use rustc_middle::ty::error::{ExpectedFound, TypeError};
+use rustc_middle::ty::util::ExplicitSelf;
+use rustc_middle::ty::InternalSubsts;
+use rustc_middle::ty::{
+ self, AssocItem, DefIdTree, Ty, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitable,
+};
+use rustc_middle::ty::{GenericParamDefKind, ToPredicate, TyCtxt};
+use rustc_span::Span;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt;
+use rustc_trait_selection::traits::outlives_bounds::InferCtxtExt as _;
+use rustc_trait_selection::traits::{
+ self, ObligationCause, ObligationCauseCode, ObligationCtxt, Reveal,
+};
+use std::iter;
+
+/// Checks that a method from an impl conforms to the signature of
+/// the same method as declared in the trait.
+///
+/// # Parameters
+///
+/// - `impl_m`: type of the method we are checking
+/// - `impl_m_span`: span to use for reporting errors
+/// - `trait_m`: the method in the trait
+/// - `impl_trait_ref`: the TraitRef corresponding to the trait implementation
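+///
+/// A mismatch this catches, roughly (illustrative sketch):
+///
+/// ```ignore (illustrative)
+/// trait Trait {
+///     fn id(&self, x: u16) -> u16;
+/// }
+/// impl Trait for () {
+///     fn id(&self, x: i16) -> u16 { x as u16 }
+///     // error[E0053]: method `id` has an incompatible type for trait
+/// }
+/// ```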
+pub(crate) fn compare_impl_method<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ trait_m: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+ trait_item_span: Option<Span>,
+) {
+ debug!("compare_impl_method(impl_trait_ref={:?})", impl_trait_ref);
+
+ let impl_m_span = tcx.def_span(impl_m.def_id);
+
+ if let Err(_) = compare_self_type(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref) {
+ return;
+ }
+
+ if let Err(_) = compare_number_of_generics(tcx, impl_m, impl_m_span, trait_m, trait_item_span) {
+ return;
+ }
+
+ if let Err(_) = compare_generic_param_kinds(tcx, impl_m, trait_m) {
+ return;
+ }
+
+ if let Err(_) =
+ compare_number_of_method_arguments(tcx, impl_m, impl_m_span, trait_m, trait_item_span)
+ {
+ return;
+ }
+
+ if let Err(_) = compare_synthetic_generics(tcx, impl_m, trait_m) {
+ return;
+ }
+
+ if let Err(_) = compare_predicate_entailment(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref)
+ {
+ return;
+ }
+}
+
+/// This function is best explained by example. Consider a trait:
+///
+/// trait Trait<'t, T> {
+/// // `trait_m`
+/// fn method<'a, M>(t: &'t T, m: &'a M) -> Self;
+/// }
+///
+/// And an impl:
+///
+/// impl<'i, 'j, U> Trait<'j, &'i U> for Foo {
+/// // `impl_m`
+/// fn method<'b, N>(t: &'j &'i U, m: &'b N) -> Foo;
+/// }
+///
+/// We wish to decide if those two method types are compatible.
+/// For this we have to show that, assuming the bounds of the impl hold, the
+/// bounds of `trait_m` imply the bounds of `impl_m`.
+///
+/// We start out with `trait_to_impl_substs`, that maps the trait
+/// type parameters to impl type parameters. This is taken from the
+/// impl trait reference:
+///
+/// trait_to_impl_substs = {'t => 'j, T => &'i U, Self => Foo}
+///
+/// We create a mapping `dummy_substs` that maps from the impl type
+/// parameters to fresh types and regions. For type parameters,
+/// this is the identity transform, but we could as well use any
+/// placeholder types. For regions, we convert from bound to free
+/// regions (Note: but only early-bound regions, i.e., those
+/// declared on the impl or used in type parameter bounds).
+///
+/// impl_to_placeholder_substs = {'i => 'i0, U => U0, N => N0 }
+///
+/// Now we can apply `placeholder_substs` to the type of the impl method
+/// to yield a new function type in terms of our fresh, placeholder
+/// types:
+///
+/// <'b> fn(t: &'i0 U0, m: &'b) -> Foo
+///
+/// We now want to extract and substitute the type of the *trait*
+/// method and compare it. To do so, we must create a compound
+/// substitution by combining `trait_to_impl_substs` and
+/// `impl_to_placeholder_substs`, and also adding a mapping for the method
+/// type parameters. We extend the mapping to also include
+/// the method parameters.
+///
+/// trait_to_placeholder_substs = { T => &'i0 U0, Self => Foo, M => N0 }
+///
+/// Applying this to the trait method type yields:
+///
+/// <'a> fn(t: &'i0 U0, m: &'a) -> Foo
+///
+/// This type is also the same, except for the name of the bound region (`'a`
+/// vs `'b`). However, the normal subtyping rules on fn types handle
+/// this kind of equivalency just fine.
+///
+/// We now use these substitutions to ensure that all declared bounds are
+/// satisfied by the implementation's method.
+///
+/// We do this by creating a parameter environment which contains a
+/// substitution corresponding to `impl_to_placeholder_substs`. We then build
+/// `trait_to_placeholder_substs` and use it to convert the predicates contained
+/// in the `trait_m` generics to the placeholder form.
+///
+/// Finally we register each of these predicates as an obligation and check that
+/// they hold.
+#[instrument(level = "debug", skip(tcx, impl_m_span, impl_trait_ref))]
+fn compare_predicate_entailment<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &AssocItem,
+ impl_m_span: Span,
+ trait_m: &AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ let trait_to_impl_substs = impl_trait_ref.substs;
+
+ // This node-id should be used for the `body_id` field on each
+ // `ObligationCause` (and the `FnCtxt`).
+ //
+ // FIXME(@lcnr): remove that after removing `cause.body_id` from
+ // obligations.
+ let impl_m_hir_id = tcx.hir().local_def_id_to_hir_id(impl_m.def_id.expect_local());
+ // We sometimes modify the span further down.
+ let mut cause = ObligationCause::new(
+ impl_m_span,
+ impl_m_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_m.def_id.expect_local(),
+ trait_item_def_id: trait_m.def_id,
+ kind: impl_m.kind,
+ },
+ );
+
+ // Create mapping from impl to placeholder.
+ let impl_to_placeholder_substs = InternalSubsts::identity_for_item(tcx, impl_m.def_id);
+
+ // Create mapping from trait to placeholder.
+ let trait_to_placeholder_substs =
+ impl_to_placeholder_substs.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_substs);
+ debug!("compare_impl_method: trait_to_placeholder_substs={:?}", trait_to_placeholder_substs);
+
+ let impl_m_generics = tcx.generics_of(impl_m.def_id);
+ let trait_m_generics = tcx.generics_of(trait_m.def_id);
+ let impl_m_predicates = tcx.predicates_of(impl_m.def_id);
+ let trait_m_predicates = tcx.predicates_of(trait_m.def_id);
+
+ // Check region bounds.
+ check_region_bounds_on_impl_item(tcx, impl_m, trait_m, &trait_m_generics, &impl_m_generics)?;
+
+ // Create obligations for each predicate declared by the impl
+ // definition in the context of the trait's parameter
+ // environment. We can't just use `impl_env.caller_bounds`,
+ // however, because we want to replace all late-bound regions with
+ // region variables.
+ let impl_predicates = tcx.predicates_of(impl_m_predicates.parent.unwrap());
+ let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
+
+ debug!("compare_impl_method: impl_bounds={:?}", hybrid_preds);
+
+    // This is the only tricky bit of the new way we check implementation methods.
+ // We need to build a set of predicates where only the method-level bounds
+ // are from the trait and we assume all other bounds from the implementation
+ // to be previously satisfied.
+ //
+ // We then register the obligations from the impl_m and check to see
+ // if all constraints hold.
+ hybrid_preds
+ .predicates
+ .extend(trait_m_predicates.instantiate_own(tcx, trait_to_placeholder_substs).predicates);
+
+ // Construct trait parameter environment and then shift it into the placeholder viewpoint.
+ // The key step here is to update the caller_bounds's predicates to be
+ // the new hybrid bounds we computed.
+ let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_hir_id);
+ let param_env = ty::ParamEnv::new(
+ tcx.intern_predicates(&hybrid_preds.predicates),
+ Reveal::UserFacing,
+ hir::Constness::NotConst,
+ );
+ let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
+
+ let infcx = &tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(infcx);
+
+ debug!("compare_impl_method: caller_bounds={:?}", param_env.caller_bounds());
+
+ let mut selcx = traits::SelectionContext::new(&infcx);
+ let impl_m_own_bounds = impl_m_predicates.instantiate_own(tcx, impl_to_placeholder_substs);
+ for (predicate, span) in iter::zip(impl_m_own_bounds.predicates, impl_m_own_bounds.spans) {
+ let normalize_cause = traits::ObligationCause::misc(span, impl_m_hir_id);
+ let traits::Normalized { value: predicate, obligations } =
+ traits::normalize(&mut selcx, param_env, normalize_cause, predicate);
+
+ ocx.register_obligations(obligations);
+ let cause = ObligationCause::new(
+ span,
+ impl_m_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_m.def_id.expect_local(),
+ trait_item_def_id: trait_m.def_id,
+ kind: impl_m.kind,
+ },
+ );
+ ocx.register_obligation(traits::Obligation::new(cause, param_env, predicate));
+ }
+
+ // We now need to check that the signature of the impl method is
+ // compatible with that of the trait method. We do this by
+ // checking that `impl_fty <: trait_fty`.
+ //
+ // FIXME. Unfortunately, this doesn't quite work right now because
+ // associated type normalization is not integrated into subtype
+ // checks. For the comparison to be valid, we need to
+ // normalize the associated types in the impl/trait methods
+ // first. However, because function types bind regions, just
+ // calling `normalize_associated_types_in` would have no effect on
+ // any associated types appearing in the fn arguments or return
+ // type.
+
+ // Compute placeholder form of impl and trait method tys.
+ let tcx = infcx.tcx;
+
+ let mut wf_tys = FxHashSet::default();
+
+ let impl_sig = infcx.replace_bound_vars_with_fresh_vars(
+ impl_m_span,
+ infer::HigherRankedType,
+ tcx.fn_sig(impl_m.def_id),
+ );
+
+ let norm_cause = ObligationCause::misc(impl_m_span, impl_m_hir_id);
+ let impl_sig = ocx.normalize(norm_cause.clone(), param_env, impl_sig);
+ let impl_fty = tcx.mk_fn_ptr(ty::Binder::dummy(impl_sig));
+ debug!("compare_impl_method: impl_fty={:?}", impl_fty);
+
+ let trait_sig = tcx.bound_fn_sig(trait_m.def_id).subst(tcx, trait_to_placeholder_substs);
+ let trait_sig = tcx.liberate_late_bound_regions(impl_m.def_id, trait_sig);
+
+ // Next, add all inputs and output as well-formed tys. Importantly,
+ // we have to do this before normalization, since the normalized ty may
+ // not contain the input parameters. See issue #87748.
+ wf_tys.extend(trait_sig.inputs_and_output.iter());
+ let trait_sig = ocx.normalize(norm_cause, param_env, trait_sig);
+ // We also have to add the normalized trait signature
+ // as we don't normalize during implied bounds computation.
+ wf_tys.extend(trait_sig.inputs_and_output.iter());
+ let trait_fty = tcx.mk_fn_ptr(ty::Binder::dummy(trait_sig));
+
+ debug!("compare_impl_method: trait_fty={:?}", trait_fty);
+
+ // FIXME: We'd want to keep more accurate spans than "the method signature" when
+ // processing the comparison between the trait and impl fn, but we sadly lose them
+ // and point at the whole signature when a trait bound or specific input or output
+ // type would be more appropriate. In other places we have a `Vec<Span>`
+ // corresponding to their `Vec<Predicate>`, but we don't have that here.
+ // Fixing this would improve the output of test `issue-83765.rs`.
+ let mut result = infcx
+ .at(&cause, param_env)
+ .sup(trait_fty, impl_fty)
+ .map(|infer_ok| ocx.register_infer_ok_obligations(infer_ok));
+
+ // HACK(RPITIT): #101614. When we are trying to infer the hidden types for
+ // RPITITs, we need to equate the output tys instead of just subtyping. If
+ // we just use `sup` above, we'll end up `&'static str <: _#1t`, which causes
+ // us to infer `_#1t = &'_#2r str`, where `'_#2r` is unconstrained, which gets
+ // fixed up to `ReEmpty`, and which is certainly not what we want.
+ if trait_fty.has_infer_types() {
+ result = result.and_then(|()| {
+ infcx
+ .at(&cause, param_env)
+ .eq(trait_sig.output(), impl_sig.output())
+ .map(|infer_ok| ocx.register_infer_ok_obligations(infer_ok))
+ });
+ }
+
+ if let Err(terr) = result {
+ debug!("sub_types failed: impl ty {:?}, trait ty {:?}", impl_fty, trait_fty);
+
+ let (impl_err_span, trait_err_span) =
+ extract_spans_for_error_reporting(&infcx, terr, &cause, impl_m, trait_m);
+
+ cause.span = impl_err_span;
+
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ cause.span(),
+ E0053,
+ "method `{}` has an incompatible type for trait",
+ trait_m.name
+ );
+ match &terr {
+ TypeError::ArgumentMutability(0) | TypeError::ArgumentSorts(_, 0)
+ if trait_m.fn_has_self_parameter =>
+ {
+ let ty = trait_sig.inputs()[0];
+ let sugg = match ExplicitSelf::determine(ty, |_| ty == impl_trait_ref.self_ty()) {
+ ExplicitSelf::ByValue => "self".to_owned(),
+ ExplicitSelf::ByReference(_, hir::Mutability::Not) => "&self".to_owned(),
+ ExplicitSelf::ByReference(_, hir::Mutability::Mut) => "&mut self".to_owned(),
+ _ => format!("self: {ty}"),
+ };
+
+ // When the `impl` receiver is an arbitrary self type, like `self: Box<Self>`, the
+ // span points only at the type `Box<Self>`, but we want to cover the whole
+ // argument pattern and type.
+ let span = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
+ ImplItemKind::Fn(ref sig, body) => tcx
+ .hir()
+ .body_param_names(body)
+ .zip(sig.decl.inputs.iter())
+ .map(|(param, ty)| param.span.to(ty.span))
+ .next()
+ .unwrap_or(impl_err_span),
+ _ => bug!("{:?} is not a method", impl_m),
+ };
+
+ diag.span_suggestion(
+ span,
+ "change the self-receiver type to match the trait",
+ sugg,
+ Applicability::MachineApplicable,
+ );
+ }
+ TypeError::ArgumentMutability(i) | TypeError::ArgumentSorts(_, i) => {
+ if trait_sig.inputs().len() == *i {
+ // Suggestion to change output type. We do not suggest in `async` functions
+ // to avoid complex logic or incorrect output.
+ match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
+ ImplItemKind::Fn(ref sig, _)
+ if sig.header.asyncness == hir::IsAsync::NotAsync =>
+ {
+ let msg = "change the output type to match the trait";
+ let ap = Applicability::MachineApplicable;
+ match sig.decl.output {
+ hir::FnRetTy::DefaultReturn(sp) => {
+ let sugg = format!("-> {} ", trait_sig.output());
+ diag.span_suggestion_verbose(sp, msg, sugg, ap);
+ }
+ hir::FnRetTy::Return(hir_ty) => {
+ let sugg = trait_sig.output();
+ diag.span_suggestion(hir_ty.span, msg, sugg, ap);
+ }
+ };
+ }
+ _ => {}
+ };
+ } else if let Some(trait_ty) = trait_sig.inputs().get(*i) {
+ diag.span_suggestion(
+ impl_err_span,
+ "change the parameter type to match the trait",
+ trait_ty,
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ _ => {}
+ }
+
+ infcx.err_ctxt().note_type_err(
+ &mut diag,
+ &cause,
+ trait_err_span.map(|sp| (sp, "type in trait".to_owned())),
+ Some(infer::ValuePairs::Terms(ExpectedFound {
+ expected: trait_fty.into(),
+ found: impl_fty.into(),
+ })),
+ terr,
+ false,
+ false,
+ );
+
+ return Err(diag.emit());
+ }
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ let reported = infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
+ }
+
+ // Finally, resolve all regions. This catches wily misuses of
+ // lifetime parameters.
+ let outlives_environment = OutlivesEnvironment::with_bounds(
+ param_env,
+ Some(infcx),
+ infcx.implied_bounds_tys(param_env, impl_m_hir_id, wf_tys),
+ );
+ infcx.check_region_obligations_and_report_errors(
+ impl_m.def_id.expect_local(),
+ &outlives_environment,
+ );
+
+ Ok(())
+}
+
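+/// Collects the concrete types that the return-position `impl Trait`s (RPITITs)
+/// of a trait method end up having in a particular impl, by equating the trait
+/// and impl signatures. A minimal, illustrative sketch (the trait and method
+/// names are made up, and this requires the unstable
+/// `return_position_impl_trait_in_trait` feature):
+///
+/// ```ignore (illustrative)
+/// trait Container {
+///     fn items(&self) -> impl Iterator<Item = u32>;
+/// }
+///
+/// impl Container for Vec<u32> {
+///     // The hidden type collected for the trait's RPITIT is the concrete
+///     // return type written here.
+///     fn items(&self) -> std::iter::Copied<std::slice::Iter<'_, u32>> {
+///         self.iter().copied()
+///     }
+/// }
+/// ```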
+pub fn collect_trait_impl_trait_tys<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+) -> Result<&'tcx FxHashMap<DefId, Ty<'tcx>>, ErrorGuaranteed> {
+ let impl_m = tcx.opt_associated_item(def_id).unwrap();
+ let trait_m = tcx.opt_associated_item(impl_m.trait_item_def_id.unwrap()).unwrap();
+ let impl_trait_ref = tcx.impl_trait_ref(impl_m.impl_container(tcx).unwrap()).unwrap();
+ let param_env = tcx.param_env(def_id);
+
+ let trait_to_impl_substs = impl_trait_ref.substs;
+
+ let impl_m_hir_id = tcx.hir().local_def_id_to_hir_id(impl_m.def_id.expect_local());
+ let return_span = tcx.hir().fn_decl_by_hir_id(impl_m_hir_id).unwrap().output.span();
+ let cause = ObligationCause::new(
+ return_span,
+ impl_m_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_m.def_id.expect_local(),
+ trait_item_def_id: trait_m.def_id,
+ kind: impl_m.kind,
+ },
+ );
+
+ // Create mapping from impl to placeholder.
+ let impl_to_placeholder_substs = InternalSubsts::identity_for_item(tcx, impl_m.def_id);
+
+ // Create mapping from trait to placeholder.
+ let trait_to_placeholder_substs =
+ impl_to_placeholder_substs.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_substs);
+
+ let infcx = &tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(infcx);
+
+ let norm_cause = ObligationCause::misc(return_span, impl_m_hir_id);
+ let impl_sig = ocx.normalize(
+ norm_cause.clone(),
+ param_env,
+ infcx.replace_bound_vars_with_fresh_vars(
+ return_span,
+ infer::HigherRankedType,
+ tcx.fn_sig(impl_m.def_id),
+ ),
+ );
+ let impl_return_ty = impl_sig.output();
+
+ let mut collector = ImplTraitInTraitCollector::new(&ocx, return_span, param_env, impl_m_hir_id);
+ let unnormalized_trait_sig = tcx
+ .liberate_late_bound_regions(
+ impl_m.def_id,
+ tcx.bound_fn_sig(trait_m.def_id).subst(tcx, trait_to_placeholder_substs),
+ )
+ .fold_with(&mut collector);
+ let trait_sig = ocx.normalize(norm_cause.clone(), param_env, unnormalized_trait_sig);
+ let trait_return_ty = trait_sig.output();
+
+ let wf_tys = FxHashSet::from_iter(
+ unnormalized_trait_sig.inputs_and_output.iter().chain(trait_sig.inputs_and_output.iter()),
+ );
+
+ match infcx.at(&cause, param_env).eq(trait_return_ty, impl_return_ty) {
+ Ok(infer::InferOk { value: (), obligations }) => {
+ ocx.register_obligations(obligations);
+ }
+ Err(terr) => {
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ cause.span(),
+ E0053,
+ "method `{}` has an incompatible return type for trait",
+ trait_m.name
+ );
+ let hir = tcx.hir();
+ infcx.err_ctxt().note_type_err(
+ &mut diag,
+ &cause,
+ hir.get_if_local(impl_m.def_id)
+ .and_then(|node| node.fn_decl())
+ .map(|decl| (decl.output.span(), "return type in trait".to_owned())),
+ Some(infer::ValuePairs::Terms(ExpectedFound {
+ expected: trait_return_ty.into(),
+ found: impl_return_ty.into(),
+ })),
+ terr,
+ false,
+ false,
+ );
+ return Err(diag.emit());
+ }
+ }
+
+ // Unify the whole function signature. We need to do this to fully infer
+ // the lifetimes of the return type, but do this after unifying just the
+ // return types, since we want to avoid duplicating errors from
+ // `compare_predicate_entailment`.
+ match infcx
+ .at(&cause, param_env)
+ .eq(tcx.mk_fn_ptr(ty::Binder::dummy(trait_sig)), tcx.mk_fn_ptr(ty::Binder::dummy(impl_sig)))
+ {
+ Ok(infer::InferOk { value: (), obligations }) => {
+ ocx.register_obligations(obligations);
+ }
+ Err(terr) => {
+ let guar = tcx.sess.delay_span_bug(
+ return_span,
+ format!("could not unify `{trait_sig}` and `{impl_sig}`: {terr:?}"),
+ );
+ return Err(guar);
+ }
+ }
+
+ // Check that all obligations are satisfied by the implementation's
+ // RPITs.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ let reported = infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
+ }
+
+ // Finally, resolve all regions. This catches wily misuses of
+ // lifetime parameters.
+ let outlives_environment = OutlivesEnvironment::with_bounds(
+ param_env,
+ Some(infcx),
+ infcx.implied_bounds_tys(param_env, impl_m_hir_id, wf_tys),
+ );
+ infcx.check_region_obligations_and_report_errors(
+ impl_m.def_id.expect_local(),
+ &outlives_environment,
+ );
+
+ let mut collected_tys = FxHashMap::default();
+ for (def_id, (ty, substs)) in collector.types {
+ match infcx.fully_resolve(ty) {
+ Ok(ty) => {
+ // `ty` contains free regions that we created earlier while liberating the
+ // trait fn signature. However, projection normalization expects `ty` to
+ // contain `def_id`'s early-bound regions.
+ let id_substs = InternalSubsts::identity_for_item(tcx, def_id);
+ debug!(?id_substs, ?substs);
+ let map: FxHashMap<ty::GenericArg<'tcx>, ty::GenericArg<'tcx>> =
+ std::iter::zip(substs, id_substs).collect();
+ debug!(?map);
+
+ // NOTE(compiler-errors): RPITITs, like all other RPITs, have early-bound
+ // region substs that are synthesized during AST lowering. These are substs
+ // that are appended to the parent substs (trait and trait method). However,
+ // we're trying to infer the unsubstituted type value of the RPITIT inside
+ // the *impl*, so we can later use the impl's method substs to normalize
+ // an RPITIT to a concrete type (`confirm_impl_trait_in_trait_candidate`).
+ //
+ // Due to the design of RPITITs, during AST lowering, we have no idea that
+ // an impl method corresponds to a trait method with RPITITs in it. Therefore,
+ // we don't have a list of early-bound region substs for the RPITIT in the impl.
+ // Since early region parameters are index-based, we can't just rebase these
+ // (trait method) early-bound region substs onto the impl, and there's no
+ // guarantee that the indices from the trait substs and impl substs line up.
+ // So to fix this, we subtract the number of trait substs and add the number of
+ // impl substs to *renumber* these early-bound regions to their corresponding
+ // indices in the impl's substitutions list.
+ //
+ // Also, we only need to account for a difference in trait and impl substs,
+ // since we have previously enforced that the trait method and impl method have the
+ // same generics.
+ let num_trait_substs = trait_to_impl_substs.len();
+ let num_impl_substs = tcx.generics_of(impl_m.container_id(tcx)).params.len();
+ let ty = tcx.fold_regions(ty, |region, _| {
+ let (ty::ReFree(_) | ty::ReEarlyBound(_)) = region.kind() else { return region; };
+ let Some(ty::ReEarlyBound(e)) = map.get(&region.into()).map(|r| r.expect_region().kind())
+ else {
+ tcx
+ .sess
+ .delay_span_bug(
+ return_span,
+ "expected ReFree to map to ReEarlyBound"
+ );
+ return tcx.lifetimes.re_static;
+ };
+ tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion {
+ def_id: e.def_id,
+ name: e.name,
+ index: (e.index as usize - num_trait_substs + num_impl_substs) as u32,
+ }))
+ });
+ debug!(%ty);
+ collected_tys.insert(def_id, ty);
+ }
+ Err(err) => {
+ tcx.sess.delay_span_bug(
+ return_span,
+ format!("could not fully resolve: {ty} => {err:?}"),
+ );
+ collected_tys.insert(def_id, tcx.ty_error());
+ }
+ }
+ }
+
+ Ok(&*tcx.arena.alloc(collected_tys))
+}
+
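+/// Type folder that replaces each return-position `impl Trait` in trait (RPITIT)
+/// projection it encounters with a fresh inference variable, recording the
+/// mapping from the RPITIT's `DefId` to that variable (and its substs) and
+/// registering the RPITIT's item bounds as obligations.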
+struct ImplTraitInTraitCollector<'a, 'tcx> {
+ ocx: &'a ObligationCtxt<'a, 'tcx>,
+ types: FxHashMap<DefId, (Ty<'tcx>, ty::SubstsRef<'tcx>)>,
+ span: Span,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+}
+
+impl<'a, 'tcx> ImplTraitInTraitCollector<'a, 'tcx> {
+ fn new(
+ ocx: &'a ObligationCtxt<'a, 'tcx>,
+ span: Span,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ ) -> Self {
+ ImplTraitInTraitCollector { ocx, types: FxHashMap::default(), span, param_env, body_id }
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for ImplTraitInTraitCollector<'_, 'tcx> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+ self.ocx.infcx.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if let ty::Projection(proj) = ty.kind()
+ && self.tcx().def_kind(proj.item_def_id) == DefKind::ImplTraitPlaceholder
+ {
+ if let Some((ty, _)) = self.types.get(&proj.item_def_id) {
+ return *ty;
+ }
+ // FIXME(RPITIT): Deny nested RPITIT in substs too
+ if proj.substs.has_escaping_bound_vars() {
+ bug!("FIXME(RPITIT): error here");
+ }
+ // Replace with infer var
+ let infer_ty = self.ocx.infcx.next_ty_var(TypeVariableOrigin {
+ span: self.span,
+ kind: TypeVariableOriginKind::MiscVariable,
+ });
+ self.types.insert(proj.item_def_id, (infer_ty, proj.substs));
+ // Recurse into bounds
+ for (pred, pred_span) in self.tcx().bound_explicit_item_bounds(proj.item_def_id).subst_iter_copied(self.tcx(), proj.substs) {
+ let pred = pred.fold_with(self);
+ let pred = self.ocx.normalize(
+ ObligationCause::misc(self.span, self.body_id),
+ self.param_env,
+ pred,
+ );
+
+ self.ocx.register_obligation(traits::Obligation::new(
+ ObligationCause::new(
+ self.span,
+ self.body_id,
+ ObligationCauseCode::BindingObligation(proj.item_def_id, pred_span),
+ ),
+ self.param_env,
+ pred,
+ ));
+ }
+ infer_ty
+ } else {
+ ty.super_fold_with(self)
+ }
+ }
+}
+
+fn check_region_bounds_on_impl_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ trait_m: &ty::AssocItem,
+ trait_generics: &ty::Generics,
+ impl_generics: &ty::Generics,
+) -> Result<(), ErrorGuaranteed> {
+ let trait_params = trait_generics.own_counts().lifetimes;
+ let impl_params = impl_generics.own_counts().lifetimes;
+
+ debug!(
+ "check_region_bounds_on_impl_item: \
+ trait_generics={:?} \
+ impl_generics={:?}",
+ trait_generics, impl_generics
+ );
+
+ // Must have same number of early-bound lifetime parameters.
+ // Unfortunately, if the user screws up the bounds, then this
+ // will change classification between early and late. E.g.,
+ // if in trait we have `<'a,'b:'a>`, and in impl we just have
+ // `<'a,'b>`, then we have 2 early-bound lifetime parameters
+ // in trait but 0 in the impl. But if we report "expected 2
+ // but found 0" it's confusing, because it looks like there
+ // are zero. Since I don't quite know how to phrase things at
+ // the moment, give a kind of vague error message.
+ if trait_params != impl_params {
+ let span = tcx
+ .hir()
+ .get_generics(impl_m.def_id.expect_local())
+ .expect("expected impl item to have generics or else we can't compare them")
+ .span;
+ let generics_span = if let Some(local_def_id) = trait_m.def_id.as_local() {
+ Some(
+ tcx.hir()
+ .get_generics(local_def_id)
+ .expect("expected trait item to have generics or else we can't compare them")
+ .span,
+ )
+ } else {
+ None
+ };
+
+ let reported = tcx.sess.emit_err(LifetimesOrBoundsMismatchOnTrait {
+ span,
+ item_kind: assoc_item_kind_str(impl_m),
+ ident: impl_m.ident(tcx),
+ generics_span,
+ });
+ return Err(reported);
+ }
+
+ Ok(())
+}
+
+#[instrument(level = "debug", skip(infcx))]
+fn extract_spans_for_error_reporting<'tcx>(
+ infcx: &infer::InferCtxt<'tcx>,
+ terr: TypeError<'_>,
+ cause: &ObligationCause<'tcx>,
+ impl_m: &ty::AssocItem,
+ trait_m: &ty::AssocItem,
+) -> (Span, Option<Span>) {
+ let tcx = infcx.tcx;
+ let mut impl_args = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
+ ImplItemKind::Fn(ref sig, _) => {
+ sig.decl.inputs.iter().map(|t| t.span).chain(iter::once(sig.decl.output.span()))
+ }
+ _ => bug!("{:?} is not a method", impl_m),
+ };
+ let trait_args =
+ trait_m.def_id.as_local().map(|def_id| match tcx.hir().expect_trait_item(def_id).kind {
+ TraitItemKind::Fn(ref sig, _) => {
+ sig.decl.inputs.iter().map(|t| t.span).chain(iter::once(sig.decl.output.span()))
+ }
+ _ => bug!("{:?} is not a TraitItemKind::Fn", trait_m),
+ });
+
+ match terr {
+ TypeError::ArgumentMutability(i) => {
+ (impl_args.nth(i).unwrap(), trait_args.and_then(|mut args| args.nth(i)))
+ }
+ TypeError::ArgumentSorts(ExpectedFound { .. }, i) => {
+ (impl_args.nth(i).unwrap(), trait_args.and_then(|mut args| args.nth(i)))
+ }
+ _ => (cause.span(), tcx.hir().span_if_local(trait_m.def_id)),
+ }
+}
+
+fn compare_self_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ impl_m_span: Span,
+ trait_m: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ // Try to give more informative error messages about self typing
+ // mismatches. Note that any mismatch will also be detected
+ // below, where we construct a canonical function type that
+ // includes the self parameter as a normal parameter. It's just
+ // that the error messages you get out of this code are a bit more
+ // inscrutable, particularly for cases where one method has no
+ // self.
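+ //
+ // For example (illustrative): a trait declaring `fn m(&self);` implemented
+ // as `fn m() {}` hits the `(true, false)` arm below and is reported as
+ // E0186, while an impl that adds a `self` receiver the trait does not
+ // declare hits `(false, true)` and is reported as E0185.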
+
+ let self_string = |method: &ty::AssocItem| {
+ let untransformed_self_ty = match method.container {
+ ty::ImplContainer => impl_trait_ref.self_ty(),
+ ty::TraitContainer => tcx.types.self_param,
+ };
+ let self_arg_ty = tcx.fn_sig(method.def_id).input(0);
+ let param_env = ty::ParamEnv::reveal_all();
+
+ let infcx = tcx.infer_ctxt().build();
+ let self_arg_ty = tcx.liberate_late_bound_regions(method.def_id, self_arg_ty);
+ let can_eq_self = |ty| infcx.can_eq(param_env, untransformed_self_ty, ty).is_ok();
+ match ExplicitSelf::determine(self_arg_ty, can_eq_self) {
+ ExplicitSelf::ByValue => "self".to_owned(),
+ ExplicitSelf::ByReference(_, hir::Mutability::Not) => "&self".to_owned(),
+ ExplicitSelf::ByReference(_, hir::Mutability::Mut) => "&mut self".to_owned(),
+ _ => format!("self: {self_arg_ty}"),
+ }
+ };
+
+ match (trait_m.fn_has_self_parameter, impl_m.fn_has_self_parameter) {
+ (false, false) | (true, true) => {}
+
+ (false, true) => {
+ let self_descr = self_string(impl_m);
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_m_span,
+ E0185,
+ "method `{}` has a `{}` declaration in the impl, but not in the trait",
+ trait_m.name,
+ self_descr
+ );
+ err.span_label(impl_m_span, format!("`{self_descr}` used in impl"));
+ if let Some(span) = tcx.hir().span_if_local(trait_m.def_id) {
+ err.span_label(span, format!("trait method declared without `{self_descr}`"));
+ } else {
+ err.note_trait_signature(trait_m.name, trait_m.signature(tcx));
+ }
+ let reported = err.emit();
+ return Err(reported);
+ }
+
+ (true, false) => {
+ let self_descr = self_string(trait_m);
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_m_span,
+ E0186,
+ "method `{}` has a `{}` declaration in the trait, but not in the impl",
+ trait_m.name,
+ self_descr
+ );
+ err.span_label(impl_m_span, format!("expected `{self_descr}` in impl"));
+ if let Some(span) = tcx.hir().span_if_local(trait_m.def_id) {
+ err.span_label(span, format!("`{self_descr}` used in trait"));
+ } else {
+ err.note_trait_signature(trait_m.name, trait_m.signature(tcx));
+ }
+ let reported = err.emit();
+ return Err(reported);
+ }
+ }
+
+ Ok(())
+}
+
+/// Checks that the number of generics on a given assoc item in a trait impl is the same
+/// as the number of generics on the respective assoc item in the trait definition.
+///
+/// For example this code emits the errors in the following code:
+/// ```
+/// trait Trait {
+/// fn foo();
+/// type Assoc<T>;
+/// }
+///
+/// impl Trait for () {
+/// fn foo<T>() {}
+/// //~^ error
+/// type Assoc = u32;
+/// //~^ error
+/// }
+/// ```
+///
+/// Notably this does not error on `foo<T>` implemented as `foo<const N: u8>` or
+/// `foo<const N: u8>` implemented as `foo<const N: u32>`. This is handled in
+/// [`compare_generic_param_kinds`]. This function also does not handle lifetime parameters.
+fn compare_number_of_generics<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_: &ty::AssocItem,
+ _impl_span: Span,
+ trait_: &ty::AssocItem,
+ trait_span: Option<Span>,
+) -> Result<(), ErrorGuaranteed> {
+ let trait_own_counts = tcx.generics_of(trait_.def_id).own_counts();
+ let impl_own_counts = tcx.generics_of(impl_.def_id).own_counts();
+
+ // This avoids us erroring on `foo<T>` implemented as `foo<const N: u8>` as this is implemented
+ // in `compare_generic_param_kinds` which will give a nicer error message than something like:
+ // "expected 1 type parameter, found 0 type parameters"
+ if (trait_own_counts.types + trait_own_counts.consts)
+ == (impl_own_counts.types + impl_own_counts.consts)
+ {
+ return Ok(());
+ }
+
+ let matchings = [
+ ("type", trait_own_counts.types, impl_own_counts.types),
+ ("const", trait_own_counts.consts, impl_own_counts.consts),
+ ];
+
+ let item_kind = assoc_item_kind_str(impl_);
+
+ let mut err_occurred = None;
+ for (kind, trait_count, impl_count) in matchings {
+ if impl_count != trait_count {
+ let arg_spans = |kind: ty::AssocKind, generics: &hir::Generics<'_>| {
+ let mut spans = generics
+ .params
+ .iter()
+ .filter(|p| match p.kind {
+ hir::GenericParamKind::Lifetime {
+ kind: hir::LifetimeParamKind::Elided,
+ } => {
+ // A fn can have an arbitrary number of extra elided lifetimes for the
+ // same signature.
+ !matches!(kind, ty::AssocKind::Fn)
+ }
+ _ => true,
+ })
+ .map(|p| p.span)
+ .collect::<Vec<Span>>();
+ if spans.is_empty() {
+ spans = vec![generics.span]
+ }
+ spans
+ };
+ let (trait_spans, impl_trait_spans) = if let Some(def_id) = trait_.def_id.as_local() {
+ let trait_item = tcx.hir().expect_trait_item(def_id);
+ let arg_spans: Vec<Span> = arg_spans(trait_.kind, trait_item.generics);
+ let impl_trait_spans: Vec<Span> = trait_item
+ .generics
+ .params
+ .iter()
+ .filter_map(|p| match p.kind {
+ GenericParamKind::Type { synthetic: true, .. } => Some(p.span),
+ _ => None,
+ })
+ .collect();
+ (Some(arg_spans), impl_trait_spans)
+ } else {
+ (trait_span.map(|s| vec![s]), vec![])
+ };
+
+ let impl_item = tcx.hir().expect_impl_item(impl_.def_id.expect_local());
+ let impl_item_impl_trait_spans: Vec<Span> = impl_item
+ .generics
+ .params
+ .iter()
+ .filter_map(|p| match p.kind {
+ GenericParamKind::Type { synthetic: true, .. } => Some(p.span),
+ _ => None,
+ })
+ .collect();
+ let spans = arg_spans(impl_.kind, impl_item.generics);
+ let span = spans.first().copied();
+
+ let mut err = tcx.sess.struct_span_err_with_code(
+ spans,
+ &format!(
+ "{} `{}` has {} {kind} parameter{} but its trait \
+ declaration has {} {kind} parameter{}",
+ item_kind,
+ trait_.name,
+ impl_count,
+ pluralize!(impl_count),
+ trait_count,
+ pluralize!(trait_count),
+ kind = kind,
+ ),
+ DiagnosticId::Error("E0049".into()),
+ );
+
+ let mut suffix = None;
+
+ if let Some(spans) = trait_spans {
+ let mut spans = spans.iter();
+ if let Some(span) = spans.next() {
+ err.span_label(
+ *span,
+ format!(
+ "expected {} {} parameter{}",
+ trait_count,
+ kind,
+ pluralize!(trait_count),
+ ),
+ );
+ }
+ for span in spans {
+ err.span_label(*span, "");
+ }
+ } else {
+ suffix = Some(format!(", expected {trait_count}"));
+ }
+
+ if let Some(span) = span {
+ err.span_label(
+ span,
+ format!(
+ "found {} {} parameter{}{}",
+ impl_count,
+ kind,
+ pluralize!(impl_count),
+ suffix.unwrap_or_else(String::new),
+ ),
+ );
+ }
+
+ for span in impl_trait_spans.iter().chain(impl_item_impl_trait_spans.iter()) {
+ err.span_label(*span, "`impl Trait` introduces an implicit type parameter");
+ }
+
+ let reported = err.emit();
+ err_occurred = Some(reported);
+ }
+ }
+
+ if let Some(reported) = err_occurred { Err(reported) } else { Ok(()) }
+}
+
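+/// Checks that the impl method takes the same number of arguments as the
+/// declaration in the trait. For example (illustrative), a trait method
+/// `fn f(&self, x: u8);` implemented as `fn f(&self) {}` is rejected with
+/// E0050 below.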
+fn compare_number_of_method_arguments<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ impl_m_span: Span,
+ trait_m: &ty::AssocItem,
+ trait_item_span: Option<Span>,
+) -> Result<(), ErrorGuaranteed> {
+ let impl_m_fty = tcx.fn_sig(impl_m.def_id);
+ let trait_m_fty = tcx.fn_sig(trait_m.def_id);
+ let trait_number_args = trait_m_fty.inputs().skip_binder().len();
+ let impl_number_args = impl_m_fty.inputs().skip_binder().len();
+ if trait_number_args != impl_number_args {
+ let trait_span = if let Some(def_id) = trait_m.def_id.as_local() {
+ match tcx.hir().expect_trait_item(def_id).kind {
+ TraitItemKind::Fn(ref trait_m_sig, _) => {
+ let pos = if trait_number_args > 0 { trait_number_args - 1 } else { 0 };
+ if let Some(arg) = trait_m_sig.decl.inputs.get(pos) {
+ Some(if pos == 0 {
+ arg.span
+ } else {
+ arg.span.with_lo(trait_m_sig.decl.inputs[0].span.lo())
+ })
+ } else {
+ trait_item_span
+ }
+ }
+ _ => bug!("{:?} is not a method", impl_m),
+ }
+ } else {
+ trait_item_span
+ };
+ let impl_span = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
+ ImplItemKind::Fn(ref impl_m_sig, _) => {
+ let pos = if impl_number_args > 0 { impl_number_args - 1 } else { 0 };
+ if let Some(arg) = impl_m_sig.decl.inputs.get(pos) {
+ if pos == 0 {
+ arg.span
+ } else {
+ arg.span.with_lo(impl_m_sig.decl.inputs[0].span.lo())
+ }
+ } else {
+ impl_m_span
+ }
+ }
+ _ => bug!("{:?} is not a method", impl_m),
+ };
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0050,
+ "method `{}` has {} but the declaration in trait `{}` has {}",
+ trait_m.name,
+ potentially_plural_count(impl_number_args, "parameter"),
+ tcx.def_path_str(trait_m.def_id),
+ trait_number_args
+ );
+ if let Some(trait_span) = trait_span {
+ err.span_label(
+ trait_span,
+ format!(
+ "trait requires {}",
+ potentially_plural_count(trait_number_args, "parameter")
+ ),
+ );
+ } else {
+ err.note_trait_signature(trait_m.name, trait_m.signature(tcx));
+ }
+ err.span_label(
+ impl_span,
+ format!(
+ "expected {}, found {}",
+ potentially_plural_count(trait_number_args, "parameter"),
+ impl_number_args
+ ),
+ );
+ let reported = err.emit();
+ return Err(reported);
+ }
+
+ Ok(())
+}
+
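+/// Checks that argument-position `impl Trait` (a "synthetic" type parameter) is
+/// used consistently between the trait and the impl. For example (illustrative),
+/// a trait method `fn foo(x: impl std::fmt::Debug);` implemented as
+/// `fn foo<T: std::fmt::Debug>(x: T) {}` (or vice versa) is rejected with E0643
+/// below, together with a suggestion to match the trait's form.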
+fn compare_synthetic_generics<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ trait_m: &ty::AssocItem,
+) -> Result<(), ErrorGuaranteed> {
+ // FIXME(chrisvittal) Clean up this function, list of FIXME items:
+ // 1. Better messages for the span labels
+ // 2. Explanation as to what is going on
+ // If we get here, we already have the same number of generics, so the zip will
+ // be okay.
+ let mut error_found = None;
+ let impl_m_generics = tcx.generics_of(impl_m.def_id);
+ let trait_m_generics = tcx.generics_of(trait_m.def_id);
+ let impl_m_type_params = impl_m_generics.params.iter().filter_map(|param| match param.kind {
+ GenericParamDefKind::Type { synthetic, .. } => Some((param.def_id, synthetic)),
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => None,
+ });
+ let trait_m_type_params = trait_m_generics.params.iter().filter_map(|param| match param.kind {
+ GenericParamDefKind::Type { synthetic, .. } => Some((param.def_id, synthetic)),
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => None,
+ });
+ for ((impl_def_id, impl_synthetic), (trait_def_id, trait_synthetic)) in
+ iter::zip(impl_m_type_params, trait_m_type_params)
+ {
+ if impl_synthetic != trait_synthetic {
+ let impl_def_id = impl_def_id.expect_local();
+ let impl_span = tcx.def_span(impl_def_id);
+ let trait_span = tcx.def_span(trait_def_id);
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0643,
+ "method `{}` has incompatible signature for trait",
+ trait_m.name
+ );
+ err.span_label(trait_span, "declaration in trait here");
+ match (impl_synthetic, trait_synthetic) {
+ // The case where the impl method uses `impl Trait` but the trait method uses
+ // explicit generics
+ (true, false) => {
+ err.span_label(impl_span, "expected generic parameter, found `impl Trait`");
+ (|| {
+ // try taking the name from the trait impl
+ // FIXME: this is obviously suboptimal since the name can already be used
+ // as another generic argument
+ let new_name = tcx.opt_item_name(trait_def_id)?;
+ let trait_m = trait_m.def_id.as_local()?;
+ let trait_m = tcx.hir().expect_trait_item(trait_m);
+
+ let impl_m = impl_m.def_id.as_local()?;
+ let impl_m = tcx.hir().expect_impl_item(impl_m);
+
+ // in case there are no generics, take the spot between the function name
+ // and the opening paren of the argument list
+ let new_generics_span = tcx.def_ident_span(impl_def_id)?.shrink_to_hi();
+ // in case there are generics, just replace them
+ let generics_span =
+ impl_m.generics.span.substitute_dummy(new_generics_span);
+ // replace with the generics from the trait
+ let new_generics =
+ tcx.sess.source_map().span_to_snippet(trait_m.generics.span).ok()?;
+
+ err.multipart_suggestion(
+ "try changing the `impl Trait` argument to a generic parameter",
+ vec![
+ // replace `impl Trait` with `T`
+ (impl_span, new_name.to_string()),
+ // replace impl method generics with trait method generics
+ // This isn't quite right, as users might have changed the names
+ // of the generics, but it works for the common case
+ (generics_span, new_generics),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ Some(())
+ })();
+ }
+ // The case where the trait method uses `impl Trait`, but the impl method uses
+ // explicit generics.
+ (false, true) => {
+ err.span_label(impl_span, "expected `impl Trait`, found generic parameter");
+ (|| {
+ let impl_m = impl_m.def_id.as_local()?;
+ let impl_m = tcx.hir().expect_impl_item(impl_m);
+ let input_tys = match impl_m.kind {
+ hir::ImplItemKind::Fn(ref sig, _) => sig.decl.inputs,
+ _ => unreachable!(),
+ };
+ struct Visitor(Option<Span>, hir::def_id::LocalDefId);
+ impl<'v> intravisit::Visitor<'v> for Visitor {
+ fn visit_ty(&mut self, ty: &'v hir::Ty<'v>) {
+ intravisit::walk_ty(self, ty);
+ if let hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) =
+ ty.kind
+ && let Res::Def(DefKind::TyParam, def_id) = path.res
+ && def_id == self.1.to_def_id()
+ {
+ self.0 = Some(ty.span);
+ }
+ }
+ }
+ let mut visitor = Visitor(None, impl_def_id);
+ for ty in input_tys {
+ intravisit::Visitor::visit_ty(&mut visitor, ty);
+ }
+ let span = visitor.0?;
+
+ let bounds = impl_m.generics.bounds_for_param(impl_def_id).next()?.bounds;
+ let bounds = bounds.first()?.span().to(bounds.last()?.span());
+ let bounds = tcx.sess.source_map().span_to_snippet(bounds).ok()?;
+
+ err.multipart_suggestion(
+ "try removing the generic parameter and using `impl Trait` instead",
+ vec![
+ // delete generic parameters
+ (impl_m.generics.span, String::new()),
+ // replace param usage with `impl Trait`
+ (span, format!("impl {bounds}")),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ Some(())
+ })();
+ }
+ _ => unreachable!(),
+ }
+ let reported = err.emit();
+ error_found = Some(reported);
+ }
+ }
+ if let Some(reported) = error_found { Err(reported) } else { Ok(()) }
+}
+
+/// Checks that all parameters in the generics of a given assoc item in a trait impl have
+/// the same kind as the respective generic parameter in the trait def.
+///
+/// For example all 4 errors in the following code are emitted here:
+/// ```
+/// trait Foo {
+/// fn foo<const N: u8>();
+/// type bar<const N: u8>;
+/// fn baz<const N: u32>();
+/// type blah<T>;
+/// }
+///
+/// impl Foo for () {
+/// fn foo<const N: u64>() {}
+/// //~^ error
+/// type bar<const N: u64> {}
+/// //~^ error
+/// fn baz<T>() {}
+/// //~^ error
+/// type blah<const N: i64> = u32;
+/// //~^ error
+/// }
+/// ```
+///
+/// This function does not handle lifetime parameters.
+fn compare_generic_param_kinds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_item: &ty::AssocItem,
+ trait_item: &ty::AssocItem,
+) -> Result<(), ErrorGuaranteed> {
+ assert_eq!(impl_item.kind, trait_item.kind);
+
+ let ty_const_params_of = |def_id| {
+ tcx.generics_of(def_id).params.iter().filter(|param| {
+ matches!(
+ param.kind,
+ GenericParamDefKind::Const { .. } | GenericParamDefKind::Type { .. }
+ )
+ })
+ };
+
+ for (param_impl, param_trait) in
+ iter::zip(ty_const_params_of(impl_item.def_id), ty_const_params_of(trait_item.def_id))
+ {
+ use GenericParamDefKind::*;
+ if match (&param_impl.kind, &param_trait.kind) {
+ (Const { .. }, Const { .. })
+ if tcx.type_of(param_impl.def_id) != tcx.type_of(param_trait.def_id) =>
+ {
+ true
+ }
+ (Const { .. }, Type { .. }) | (Type { .. }, Const { .. }) => true,
+ // this is exhaustive so that anyone adding new generic param kinds knows
+ // to make sure this error is reported for them.
+ (Const { .. }, Const { .. }) | (Type { .. }, Type { .. }) => false,
+ (Lifetime { .. }, _) | (_, Lifetime { .. }) => unreachable!(),
+ } {
+ let param_impl_span = tcx.def_span(param_impl.def_id);
+ let param_trait_span = tcx.def_span(param_trait.def_id);
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ param_impl_span,
+ E0053,
+ "{} `{}` has an incompatible generic parameter for trait `{}`",
+ assoc_item_kind_str(&impl_item),
+ trait_item.name,
+ &tcx.def_path_str(tcx.parent(trait_item.def_id))
+ );
+
+ let make_param_message = |prefix: &str, param: &ty::GenericParamDef| match param.kind {
+ Const { .. } => {
+ format!("{} const parameter of type `{}`", prefix, tcx.type_of(param.def_id))
+ }
+ Type { .. } => format!("{} type parameter", prefix),
+ Lifetime { .. } => unreachable!(),
+ };
+
+ let trait_header_span = tcx.def_ident_span(tcx.parent(trait_item.def_id)).unwrap();
+ err.span_label(trait_header_span, "");
+ err.span_label(param_trait_span, make_param_message("expected", param_trait));
+
+ let impl_header_span = tcx.def_span(tcx.parent(impl_item.def_id));
+ err.span_label(impl_header_span, "");
+ err.span_label(param_impl_span, make_param_message("found", param_impl));
+
+ let reported = err.emit();
+ return Err(reported);
+ }
+ }
+
+ Ok(())
+}
+
+/// Use `tcx.compare_assoc_const_impl_item_with_trait_item` instead
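+///
+/// Checks that an associated const in an impl has a type compatible with its
+/// declaration in the trait. For example (illustrative), a trait declaring
+/// `const N: u32;` implemented as `const N: i32 = 0;` is rejected with E0326
+/// below.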
+pub(crate) fn raw_compare_const_impl<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (impl_const_item_def, trait_const_item_def): (LocalDefId, DefId),
+) -> Result<(), ErrorGuaranteed> {
+ let impl_const_item = tcx.associated_item(impl_const_item_def);
+ let trait_const_item = tcx.associated_item(trait_const_item_def);
+ let impl_trait_ref = tcx.impl_trait_ref(impl_const_item.container_id(tcx)).unwrap();
+ debug!("compare_const_impl(impl_trait_ref={:?})", impl_trait_ref);
+
+ let impl_c_span = tcx.def_span(impl_const_item_def.to_def_id());
+
+ let infcx = tcx.infer_ctxt().build();
+ let param_env = tcx.param_env(impl_const_item_def.to_def_id());
+ let ocx = ObligationCtxt::new(&infcx);
+
+ // The below is for the most part highly similar to the procedure
+ // for methods above. It is simpler in many respects, especially
+ // because we shouldn't really have to deal with lifetimes or
+ // predicates. In fact some of this should probably be put into
+ // shared functions because of DRY violations...
+ let trait_to_impl_substs = impl_trait_ref.substs;
+
+ // Create a parameter environment that represents the implementation's
+ // method.
+ let impl_c_hir_id = tcx.hir().local_def_id_to_hir_id(impl_const_item_def);
+
+ // Compute placeholder form of impl and trait const tys.
+ let impl_ty = tcx.type_of(impl_const_item_def.to_def_id());
+ let trait_ty = tcx.bound_type_of(trait_const_item_def).subst(tcx, trait_to_impl_substs);
+ let mut cause = ObligationCause::new(
+ impl_c_span,
+ impl_c_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_const_item_def,
+ trait_item_def_id: trait_const_item_def,
+ kind: impl_const_item.kind,
+ },
+ );
+
+ // There is no "body" here, so just pass dummy id.
+ let impl_ty = ocx.normalize(cause.clone(), param_env, impl_ty);
+
+ debug!("compare_const_impl: impl_ty={:?}", impl_ty);
+
+ let trait_ty = ocx.normalize(cause.clone(), param_env, trait_ty);
+
+ debug!("compare_const_impl: trait_ty={:?}", trait_ty);
+
+ let err = infcx
+ .at(&cause, param_env)
+ .sup(trait_ty, impl_ty)
+ .map(|ok| ocx.register_infer_ok_obligations(ok));
+
+ if let Err(terr) = err {
+ debug!(
+ "checking associated const for compatibility: impl ty {:?}, trait ty {:?}",
+ impl_ty, trait_ty
+ );
+
+ // Locate the Span containing just the type of the offending impl
+ match tcx.hir().expect_impl_item(impl_const_item_def).kind {
+ ImplItemKind::Const(ref ty, _) => cause.span = ty.span,
+ _ => bug!("{:?} is not a impl const", impl_const_item),
+ }
+
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ cause.span,
+ E0326,
+ "implemented const `{}` has an incompatible type for trait",
+ trait_const_item.name
+ );
+
+ let trait_c_span = trait_const_item_def.as_local().map(|trait_c_def_id| {
+ // Add a label to the Span containing just the type of the const
+ match tcx.hir().expect_trait_item(trait_c_def_id).kind {
+ TraitItemKind::Const(ref ty, _) => ty.span,
+ _ => bug!("{:?} is not a trait const", trait_const_item),
+ }
+ });
+
+ infcx.err_ctxt().note_type_err(
+ &mut diag,
+ &cause,
+ trait_c_span.map(|span| (span, "type in trait".to_owned())),
+ Some(infer::ValuePairs::Terms(ExpectedFound {
+ expected: trait_ty.into(),
+ found: impl_ty.into(),
+ })),
+ terr,
+ false,
+ false,
+ );
+ return Err(diag.emit());
+ };
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ return Err(infcx.err_ctxt().report_fulfillment_errors(&errors, None, false));
+ }
+
+ // FIXME return `ErrorReported` if region obligations error?
+ let outlives_environment = OutlivesEnvironment::new(param_env);
+ infcx.check_region_obligations_and_report_errors(impl_const_item_def, &outlives_environment);
+ Ok(())
+}
+
+pub(crate) fn compare_ty_impl<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_ty: &ty::AssocItem,
+ impl_ty_span: Span,
+ trait_ty: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+ trait_item_span: Option<Span>,
+) {
+ debug!("compare_impl_type(impl_trait_ref={:?})", impl_trait_ref);
+
+ let _: Result<(), ErrorGuaranteed> = (|| {
+ compare_number_of_generics(tcx, impl_ty, impl_ty_span, trait_ty, trait_item_span)?;
+
+ compare_generic_param_kinds(tcx, impl_ty, trait_ty)?;
+
+ let sp = tcx.def_span(impl_ty.def_id);
+ compare_type_predicate_entailment(tcx, impl_ty, sp, trait_ty, impl_trait_ref)?;
+
+ check_type_bounds(tcx, trait_ty, impl_ty, impl_ty_span, impl_trait_ref)
+ })();
+}
+
+/// The equivalent of [`compare_predicate_entailment`], but for associated types
+/// instead of associated functions.
+fn compare_type_predicate_entailment<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_ty: &ty::AssocItem,
+ impl_ty_span: Span,
+ trait_ty: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ let impl_substs = InternalSubsts::identity_for_item(tcx, impl_ty.def_id);
+ let trait_to_impl_substs =
+ impl_substs.rebase_onto(tcx, impl_ty.container_id(tcx), impl_trait_ref.substs);
+
+ let impl_ty_generics = tcx.generics_of(impl_ty.def_id);
+ let trait_ty_generics = tcx.generics_of(trait_ty.def_id);
+ let impl_ty_predicates = tcx.predicates_of(impl_ty.def_id);
+ let trait_ty_predicates = tcx.predicates_of(trait_ty.def_id);
+
+ check_region_bounds_on_impl_item(
+ tcx,
+ impl_ty,
+ trait_ty,
+ &trait_ty_generics,
+ &impl_ty_generics,
+ )?;
+
+ let impl_ty_own_bounds = impl_ty_predicates.instantiate_own(tcx, impl_substs);
+
+ if impl_ty_own_bounds.is_empty() {
+ // Nothing to check.
+ return Ok(());
+ }
+
+ // This `HirId` should be used for the `body_id` field on each
+ // `ObligationCause` (and the `FnCtxt`). This is what
+ // `regionck_item` expects.
+ let impl_ty_hir_id = tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local());
+ debug!("compare_type_predicate_entailment: trait_to_impl_substs={:?}", trait_to_impl_substs);
+
+ // The predicates declared by the impl definition, the trait and the
+ // associated type in the trait are assumed.
+ let impl_predicates = tcx.predicates_of(impl_ty_predicates.parent.unwrap());
+ let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
+ hybrid_preds
+ .predicates
+ .extend(trait_ty_predicates.instantiate_own(tcx, trait_to_impl_substs).predicates);
+
+ debug!("compare_type_predicate_entailment: bounds={:?}", hybrid_preds);
+
+ let normalize_cause = traits::ObligationCause::misc(impl_ty_span, impl_ty_hir_id);
+ let param_env = ty::ParamEnv::new(
+ tcx.intern_predicates(&hybrid_preds.predicates),
+ Reveal::UserFacing,
+ hir::Constness::NotConst,
+ );
+ let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
+ let infcx = tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(&infcx);
+
+ debug!("compare_type_predicate_entailment: caller_bounds={:?}", param_env.caller_bounds());
+
+ let mut selcx = traits::SelectionContext::new(&infcx);
+
+ assert_eq!(impl_ty_own_bounds.predicates.len(), impl_ty_own_bounds.spans.len());
+ for (span, predicate) in std::iter::zip(impl_ty_own_bounds.spans, impl_ty_own_bounds.predicates)
+ {
+ let cause = ObligationCause::misc(span, impl_ty_hir_id);
+ let traits::Normalized { value: predicate, obligations } =
+ traits::normalize(&mut selcx, param_env, cause, predicate);
+
+ let cause = ObligationCause::new(
+ span,
+ impl_ty_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_ty.def_id.expect_local(),
+ trait_item_def_id: trait_ty.def_id,
+ kind: impl_ty.kind,
+ },
+ );
+ ocx.register_obligations(obligations);
+ ocx.register_obligation(traits::Obligation::new(cause, param_env, predicate));
+ }
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ let reported = infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
+ }
+
+ // Finally, resolve all regions. This catches wily misuses of
+ // lifetime parameters.
+ let outlives_environment = OutlivesEnvironment::new(param_env);
+ infcx.check_region_obligations_and_report_errors(
+ impl_ty.def_id.expect_local(),
+ &outlives_environment,
+ );
+
+ Ok(())
+}
+
+/// Validate that `ProjectionCandidate`s created for this associated type will
+/// be valid.
+///
+/// Usually given
+///
+/// trait X { type Y: Copy } impl X for T { type Y = S; }
+///
+/// We are able to normalize `<T as X>::Y` to `S`, and so when we check the
+/// impl is well-formed we have to prove `S: Copy`.
+///
+/// For default associated types the normalization is not possible (the value
+/// from the impl could be overridden). We also can't normalize generic
+/// associated types (yet) because they contain bound parameters.
+#[instrument(level = "debug", skip(tcx))]
+pub fn check_type_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ty: &ty::AssocItem,
+ impl_ty: &ty::AssocItem,
+ impl_ty_span: Span,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ // Given
+ //
+ // impl<A, B> Foo<u32> for (A, B) {
+ // type Bar<C> =...
+ // }
+ //
+ // - `impl_trait_ref` would be `<(A, B) as Foo<u32>>
+ // - `impl_ty_substs` would be `[A, B, ^0.0]` (`^0.0` here is the bound var with db 0 and index 0)
+ // - `rebased_substs` would be `[(A, B), u32, ^0.0]`, combining the substs from
+ // the *trait* with the generic associated type parameters (as bound vars).
+ //
+ // A note regarding the use of bound vars here:
+ // Imagine as an example
+ // ```
+ // trait Family {
+ // type Member<C: Eq>;
+ // }
+ //
+ // impl Family for VecFamily {
+ // type Member<C: Eq> = i32;
+ // }
+ // ```
+ // Here, we would generate
+ // ```notrust
+ // forall<C> { Normalize(<VecFamily as Family>::Member<C> => i32) }
+ // ```
+ // when we really would like to generate
+ // ```notrust
+ // forall<C> { Normalize(<VecFamily as Family>::Member<C> => i32) :- Implemented(C: Eq) }
+ // ```
+ // But, this is probably fine, because although the first clause can be used with types C that
+ // do not implement Eq, for it to cause some kind of problem, there would have to be a
+ // VecFamily::Member<X> for some type X where !(X: Eq), that appears in the value of type
+ // Member<C: Eq> = .... That type would fail a well-formedness check that we ought to be doing
+ // elsewhere, which would check that any <T as Family>::Member<X> meets the bounds declared in
+ // the trait (notably, that X: Eq and T: Family).
+ let defs: &ty::Generics = tcx.generics_of(impl_ty.def_id);
+ let mut substs = smallvec::SmallVec::with_capacity(defs.count());
+ if let Some(def_id) = defs.parent {
+ let parent_defs = tcx.generics_of(def_id);
+ InternalSubsts::fill_item(&mut substs, tcx, parent_defs, &mut |param, _| {
+ tcx.mk_param_from_def(param)
+ });
+ }
+ let mut bound_vars: smallvec::SmallVec<[ty::BoundVariableKind; 8]> =
+ smallvec::SmallVec::with_capacity(defs.count());
+ InternalSubsts::fill_single(&mut substs, defs, &mut |param, _| match param.kind {
+ GenericParamDefKind::Type { .. } => {
+ let kind = ty::BoundTyKind::Param(param.name);
+ let bound_var = ty::BoundVariableKind::Ty(kind);
+ bound_vars.push(bound_var);
+ tcx.mk_ty(ty::Bound(
+ ty::INNERMOST,
+ ty::BoundTy { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind },
+ ))
+ .into()
+ }
+ GenericParamDefKind::Lifetime => {
+ let kind = ty::BoundRegionKind::BrNamed(param.def_id, param.name);
+ let bound_var = ty::BoundVariableKind::Region(kind);
+ bound_vars.push(bound_var);
+ tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind },
+ ))
+ .into()
+ }
+ GenericParamDefKind::Const { .. } => {
+ let bound_var = ty::BoundVariableKind::Const;
+ bound_vars.push(bound_var);
+ tcx.mk_const(ty::ConstS {
+ ty: tcx.type_of(param.def_id),
+ kind: ty::ConstKind::Bound(
+ ty::INNERMOST,
+ ty::BoundVar::from_usize(bound_vars.len() - 1),
+ ),
+ })
+ .into()
+ }
+ });
+ let bound_vars = tcx.mk_bound_variable_kinds(bound_vars.into_iter());
+ let impl_ty_substs = tcx.intern_substs(&substs);
+ let container_id = impl_ty.container_id(tcx);
+
+ let rebased_substs = impl_ty_substs.rebase_onto(tcx, container_id, impl_trait_ref.substs);
+ let impl_ty_value = tcx.type_of(impl_ty.def_id);
+
+ let param_env = tcx.param_env(impl_ty.def_id);
+
+ // When checking something like
+ //
+ // trait X { type Y: PartialEq<<Self as X>::Y> }
+ // impl X for T { default type Y = S; }
+ //
+ // We will have to prove the bound S: PartialEq<<T as X>::Y>. In this case
+ // we want <T as X>::Y to normalize to S. This is valid because we are
+ // checking the default value specifically here. Add this equality to the
+ // ParamEnv for normalization specifically.
+ let normalize_param_env = {
+ let mut predicates = param_env.caller_bounds().iter().collect::<Vec<_>>();
+ match impl_ty_value.kind() {
+ ty::Projection(proj)
+ if proj.item_def_id == trait_ty.def_id && proj.substs == rebased_substs =>
+ {
+ // Don't include this predicate if the projected type is
+ // exactly the same as the projection. This can occur in
+ // (somewhat dubious) code like this:
+ //
+ // impl<T> X for T where T: X { type Y = <T as X>::Y; }
+ }
+ _ => predicates.push(
+ ty::Binder::bind_with_vars(
+ ty::ProjectionPredicate {
+ projection_ty: ty::ProjectionTy {
+ item_def_id: trait_ty.def_id,
+ substs: rebased_substs,
+ },
+ term: impl_ty_value.into(),
+ },
+ bound_vars,
+ )
+ .to_predicate(tcx),
+ ),
+ };
+ ty::ParamEnv::new(
+ tcx.intern_predicates(&predicates),
+ Reveal::UserFacing,
+ param_env.constness(),
+ )
+ };
+ debug!(?normalize_param_env);
+
+ let impl_ty_hir_id = tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local());
+ let impl_ty_substs = InternalSubsts::identity_for_item(tcx, impl_ty.def_id);
+ let rebased_substs = impl_ty_substs.rebase_onto(tcx, container_id, impl_trait_ref.substs);
+
+ let infcx = tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(&infcx);
+
+ let assumed_wf_types =
+ ocx.assumed_wf_types(param_env, impl_ty_span, impl_ty.def_id.expect_local());
+
+ let mut selcx = traits::SelectionContext::new(&infcx);
+ let normalize_cause = ObligationCause::new(
+ impl_ty_span,
+ impl_ty_hir_id,
+ ObligationCauseCode::CheckAssociatedTypeBounds {
+ impl_item_def_id: impl_ty.def_id.expect_local(),
+ trait_item_def_id: trait_ty.def_id,
+ },
+ );
+ let mk_cause = |span: Span| {
+ let code = if span.is_dummy() {
+ traits::ItemObligation(trait_ty.def_id)
+ } else {
+ traits::BindingObligation(trait_ty.def_id, span)
+ };
+ ObligationCause::new(impl_ty_span, impl_ty_hir_id, code)
+ };
+
+ let obligations = tcx
+ .bound_explicit_item_bounds(trait_ty.def_id)
+ .subst_iter_copied(tcx, rebased_substs)
+ .map(|(concrete_ty_bound, span)| {
+ debug!("check_type_bounds: concrete_ty_bound = {:?}", concrete_ty_bound);
+ traits::Obligation::new(mk_cause(span), param_env, concrete_ty_bound)
+ })
+ .collect();
+ debug!("check_type_bounds: item_bounds={:?}", obligations);
+
+ for mut obligation in util::elaborate_obligations(tcx, obligations) {
+ let traits::Normalized { value: normalized_predicate, obligations } = traits::normalize(
+ &mut selcx,
+ normalize_param_env,
+ normalize_cause.clone(),
+ obligation.predicate,
+ );
+ debug!("compare_projection_bounds: normalized predicate = {:?}", normalized_predicate);
+ obligation.predicate = normalized_predicate;
+
+ ocx.register_obligations(obligations);
+ ocx.register_obligation(obligation);
+ }
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ let reported = infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
+ }
+
+ // Finally, resolve all regions. This catches wily misuses of
+ // lifetime parameters.
+ let implied_bounds = infcx.implied_bounds_tys(param_env, impl_ty_hir_id, assumed_wf_types);
+ let outlives_environment =
+ OutlivesEnvironment::with_bounds(param_env, Some(&infcx), implied_bounds);
+
+ infcx.check_region_obligations_and_report_errors(
+ impl_ty.def_id.expect_local(),
+ &outlives_environment,
+ );
+
+ let constraints = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+ for (key, value) in constraints {
+ infcx
+ .err_ctxt()
+ .report_mismatched_types(
+ &ObligationCause::misc(
+ value.hidden_type.span,
+ tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local()),
+ ),
+ tcx.mk_opaque(key.def_id.to_def_id(), key.substs),
+ value.hidden_type.ty,
+ TypeError::Mismatch,
+ )
+ .emit();
+ }
+
+ Ok(())
+}
+
+fn assoc_item_kind_str(impl_item: &ty::AssocItem) -> &'static str {
+ match impl_item.kind {
+ ty::AssocKind::Const => "const",
+ ty::AssocKind::Fn => "method",
+ ty::AssocKind::Type => "type",
+ }
+}
diff --git a/compiler/rustc_hir_analysis/src/check/dropck.rs b/compiler/rustc_hir_analysis/src/check/dropck.rs
new file mode 100644
index 000000000..a74016e22
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/dropck.rs
@@ -0,0 +1,323 @@
+// FIXME(@lcnr): Move this module out of `rustc_hir_analysis`.
+//
+// We don't do any drop checking during hir typeck.
+use crate::hir::def_id::{DefId, LocalDefId};
+use rustc_errors::{struct_span_err, ErrorGuaranteed};
+use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::relate::{Relate, RelateResult, TypeRelation};
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::util::IgnoreRegions;
+use rustc_middle::ty::{self, Predicate, Ty, TyCtxt};
+
+/// This function confirms that the `Drop` implementation identified by
+/// `drop_impl_did` is not any more specialized than the type it is
+/// attached to (Issue #8142).
+///
+/// This means:
+///
+/// 1. The self type must be nominal (this is already checked during
+/// coherence),
+///
+/// 2. The generic region/type parameters of the impl's self type must
+/// all be parameters of the Drop impl itself (i.e., no
+/// specialization like `impl Drop for Foo<i32>`), and,
+///
+/// 3. Any bounds on the generic parameters must be reflected in the
+/// struct/enum definition for the nominal type itself (i.e.
+/// cannot do `struct S<T>; impl<T:Clone> Drop for S<T> { ... }`).
+///
+pub fn check_drop_impl(tcx: TyCtxt<'_>, drop_impl_did: DefId) -> Result<(), ErrorGuaranteed> {
+ let dtor_self_type = tcx.type_of(drop_impl_did);
+ let dtor_predicates = tcx.predicates_of(drop_impl_did);
+ match dtor_self_type.kind() {
+ ty::Adt(adt_def, self_to_impl_substs) => {
+ ensure_drop_params_and_item_params_correspond(
+ tcx,
+ drop_impl_did.expect_local(),
+ adt_def.did(),
+ self_to_impl_substs,
+ )?;
+
+ ensure_drop_predicates_are_implied_by_item_defn(
+ tcx,
+ dtor_predicates,
+ adt_def.did().expect_local(),
+ self_to_impl_substs,
+ )
+ }
+ _ => {
+ // Destructors only work on nominal types. This was
+ // already checked by coherence, but compilation may
+ // not have been terminated.
+ let span = tcx.def_span(drop_impl_did);
+ let reported = tcx.sess.delay_span_bug(
+ span,
+ &format!("should have been rejected by coherence check: {dtor_self_type}"),
+ );
+ Err(reported)
+ }
+ }
+}
+
+fn ensure_drop_params_and_item_params_correspond<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ drop_impl_did: LocalDefId,
+ self_type_did: DefId,
+ drop_impl_substs: SubstsRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ let Err(arg) = tcx.uses_unique_generic_params(drop_impl_substs, IgnoreRegions::No) else {
+ return Ok(())
+ };
+
+ let drop_impl_span = tcx.def_span(drop_impl_did);
+ let item_span = tcx.def_span(self_type_did);
+ let self_descr = tcx.def_kind(self_type_did).descr(self_type_did);
+ let mut err =
+ struct_span_err!(tcx.sess, drop_impl_span, E0366, "`Drop` impls cannot be specialized");
+ match arg {
+ ty::util::NotUniqueParam::DuplicateParam(arg) => {
+ err.note(&format!("`{arg}` is mentioned multiple times"))
+ }
+ ty::util::NotUniqueParam::NotParam(arg) => {
+ err.note(&format!("`{arg}` is not a generic parameter"))
+ }
+ };
+ err.span_note(
+ item_span,
+ &format!(
+ "use the same sequence of generic lifetime, type and const parameters \
+ as the {self_descr} definition",
+ ),
+ );
+ Err(err.emit())
+}
+
+/// Confirms that every predicate imposed by dtor_predicates is
+/// implied by assuming the predicates attached to self_type_did.
+fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ dtor_predicates: ty::GenericPredicates<'tcx>,
+ self_type_did: LocalDefId,
+ self_to_impl_substs: SubstsRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ let mut result = Ok(());
+
+ // Here is an example, analogous to that from
+ // `compare_impl_method`.
+ //
+ // Consider a struct type:
+ //
+ // struct Type<'c, 'b:'c, 'a> {
+ // x: &'a Contents // (contents are irrelevant;
+ // y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.)
+ // }
+ //
+ // and a Drop impl:
+ //
+ // impl<'z, 'y:'z, 'x:'y> Drop for P<'z, 'y, 'x> {
+ // fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y)
+ // }
+ //
+ // We start out with self_to_impl_substs, that maps the generic
+ // parameters of Type to that of the Drop impl.
+ //
+ // self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x}
+ //
+ // Applying this to the predicates (i.e., assumptions) provided by the item
+ // definition yields the instantiated assumptions:
+ //
+ // ['y : 'z]
+ //
+ // We then check all of the predicates of the Drop impl:
+ //
+ // ['y:'z, 'x:'y]
+ //
+ // and ensure each is in the list of instantiated
+ // assumptions. Here, `'y:'z` is present, but `'x:'y` is
+ // absent. So we report an error that the Drop impl injected a
+ // predicate that is not present on the struct definition.
+
+ // We can assume the predicates attached to struct/enum definition
+ // hold.
+ let generic_assumptions = tcx.predicates_of(self_type_did);
+
+ let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs);
+ let assumptions_in_impl_context = assumptions_in_impl_context.predicates;
+
+ debug!(?assumptions_in_impl_context, ?dtor_predicates.predicates);
+
+ let self_param_env = tcx.param_env(self_type_did);
+
+ // An earlier version of this code attempted to do this checking
+ // via the traits::fulfill machinery. However, it ran into trouble
+ // since the fulfill machinery merely turns outlives-predicates
+ // 'a:'b and T:'b into region inference constraints. It is simpler
+ // just to look for all the predicates directly.
+
+ assert_eq!(dtor_predicates.parent, None);
+ for &(predicate, predicate_sp) in dtor_predicates.predicates {
+ // (We do not need to worry about deep analysis of type
+ // expressions etc because the Drop impls are already forced
+ // to take on a structure that is roughly an alpha-renaming of
+ // the generic parameters of the item definition.)
+
+ // This path now just checks *all* predicates via an instantiation of
+ // the `SimpleEqRelation`, which simply forwards to the `relate` machinery
+ // after taking care of anonymizing late bound regions.
+ //
+ // However, it may be more efficient in the future to batch
+ // the analysis together via the fulfill (see comment above regarding
+ // the usage of the fulfill machinery), rather than the
+ // repeated `.iter().any(..)` calls.
+
+ // This closure is a more robust way to check `Predicate` equality
+ // than the simple `==` checks used by the previous implementation.
+ // It relies on `ty::relate` for `TraitPredicate`, `ProjectionPredicate`,
+ // `ConstEvaluatable` and `TypeOutlives` (which implement the Relate trait),
+ // while falling back to simple equality for the other `Predicate` kinds.
+ // This implementation solves issues #59497 and #58311.
+ // It is unclear whether the `relate`-based approach could easily be
+ // extended to the remaining `Predicate` kinds as well.
+ let predicate_matches_closure = |p: Predicate<'tcx>| {
+ let mut relator: SimpleEqRelation<'tcx> = SimpleEqRelation::new(tcx, self_param_env);
+ let predicate = predicate.kind();
+ let p = p.kind();
+ match (predicate.skip_binder(), p.skip_binder()) {
+ (ty::PredicateKind::Trait(a), ty::PredicateKind::Trait(b)) => {
+ relator.relate(predicate.rebind(a), p.rebind(b)).is_ok()
+ }
+ (ty::PredicateKind::Projection(a), ty::PredicateKind::Projection(b)) => {
+ relator.relate(predicate.rebind(a), p.rebind(b)).is_ok()
+ }
+ (
+ ty::PredicateKind::ConstEvaluatable(a),
+ ty::PredicateKind::ConstEvaluatable(b),
+ ) => relator.relate(predicate.rebind(a), p.rebind(b)).is_ok(),
+ (
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_a, lt_a)),
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_b, lt_b)),
+ ) => {
+ relator.relate(predicate.rebind(ty_a), p.rebind(ty_b)).is_ok()
+ && relator.relate(predicate.rebind(lt_a), p.rebind(lt_b)).is_ok()
+ }
+ (ty::PredicateKind::WellFormed(arg_a), ty::PredicateKind::WellFormed(arg_b)) => {
+ relator.relate(predicate.rebind(arg_a), p.rebind(arg_b)).is_ok()
+ }
+ _ => predicate == p,
+ }
+ };
+
+ if !assumptions_in_impl_context.iter().copied().any(predicate_matches_closure) {
+ let item_span = tcx.def_span(self_type_did);
+ let self_descr = tcx.def_kind(self_type_did).descr(self_type_did.to_def_id());
+ let reported = struct_span_err!(
+ tcx.sess,
+ predicate_sp,
+ E0367,
+ "`Drop` impl requires `{predicate}` but the {self_descr} it is implemented for does not",
+ )
+ .span_note(item_span, "the implementor must specify the same requirement")
+ .emit();
+ result = Err(reported);
+ }
+ }
+
+ result
+}
+
+// This is an implementation of the `TypeRelation` trait with the
+// aim of simply comparing for equality (without side-effects).
+// It is not intended to be used anywhere other than here.
+pub(crate) struct SimpleEqRelation<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+}
+
+impl<'tcx> SimpleEqRelation<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> SimpleEqRelation<'tcx> {
+ SimpleEqRelation { tcx, param_env }
+ }
+}
+
+impl<'tcx> TypeRelation<'tcx> for SimpleEqRelation<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+
+ fn tag(&self) -> &'static str {
+ "dropck::SimpleEqRelation"
+ }
+
+ fn a_is_expected(&self) -> bool {
+ true
+ }
+
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ _: ty::Variance,
+ _info: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T> {
+ // Here we ignore variance because we require the drop impl's types
+ // to be *exactly* the same as the ones in the struct definition.
+ self.relate(a, b)
+ }
+
+ fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ debug!("SimpleEqRelation::tys(a={:?}, b={:?})", a, b);
+ ty::relate::super_relate_tys(self, a, b)
+ }
+
+ fn regions(
+ &mut self,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ debug!("SimpleEqRelation::regions(a={:?}, b={:?})", a, b);
+
+ // We can just equate the regions because LBRs have already
+ // been anonymized.
+ if a == b {
+ Ok(a)
+ } else {
+ // It is not clear whether this `TypeError` is the right one, but
+ // it should not matter, as it won't be checked (the dropck
+ // will emit its own, more informative and higher-level errors
+ // in case anything goes wrong).
+ Err(TypeError::RegionsPlaceholderMismatch)
+ }
+ }
+
+ fn consts(
+ &mut self,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ debug!("SimpleEqRelation::consts(a={:?}, b={:?})", a, b);
+ ty::relate::super_relate_consts(self, a, b)
+ }
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>,
+ {
+ debug!("SimpleEqRelation::binders({:?}: {:?}", a, b);
+
+ // Anonymizing the LBRs is necessary to solve (Issue #59497).
+ // After we do so, it should be totally fine to skip the binders.
+ let anon_a = self.tcx.anonymize_bound_vars(a);
+ let anon_b = self.tcx.anonymize_bound_vars(b);
+ self.relate(anon_a.skip_binder(), anon_b.skip_binder())?;
+
+ Ok(a)
+ }
+}
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
new file mode 100644
index 000000000..609095c9c
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -0,0 +1,549 @@
+//! Type-checking for the rust-intrinsic and platform-intrinsic
+//! intrinsics that the compiler exposes.
+
+use crate::errors::{
+ UnrecognizedAtomicOperation, UnrecognizedIntrinsicFunction,
+ WrongNumberOfGenericArgumentsToIntrinsic,
+};
+use crate::require_same_types;
+
+use hir::def_id::DefId;
+use rustc_errors::{struct_span_err, DiagnosticMessage};
+use rustc_hir as hir;
+use rustc_middle::traits::{ObligationCause, ObligationCauseCode};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_target::spec::abi::Abi;
+
+use std::iter;
+
+fn equate_intrinsic_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ it: &hir::ForeignItem<'_>,
+ n_tps: usize,
+ n_lts: usize,
+ sig: ty::PolyFnSig<'tcx>,
+) {
+ let (own_counts, span) = match &it.kind {
+ hir::ForeignItemKind::Fn(.., generics) => {
+ let own_counts = tcx.generics_of(it.owner_id.to_def_id()).own_counts();
+ (own_counts, generics.span)
+ }
+ _ => {
+ struct_span_err!(tcx.sess, it.span, E0622, "intrinsic must be a function")
+ .span_label(it.span, "expected a function")
+ .emit();
+ return;
+ }
+ };
+
+ let gen_count_ok = |found: usize, expected: usize, descr: &str| -> bool {
+ if found != expected {
+ tcx.sess.emit_err(WrongNumberOfGenericArgumentsToIntrinsic {
+ span,
+ found,
+ expected,
+ descr,
+ });
+ false
+ } else {
+ true
+ }
+ };
+
+ if gen_count_ok(own_counts.lifetimes, n_lts, "lifetime")
+ && gen_count_ok(own_counts.types, n_tps, "type")
+ && gen_count_ok(own_counts.consts, 0, "const")
+ {
+ let fty = tcx.mk_fn_ptr(sig);
+ let cause = ObligationCause::new(it.span, it.hir_id(), ObligationCauseCode::IntrinsicType);
+ require_same_types(tcx, &cause, tcx.mk_fn_ptr(tcx.fn_sig(it.owner_id)), fty);
+ }
+}
+
+/// Returns the unsafety of the given intrinsic.
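+///
+/// This cross-checks the presence of the `#[rustc_safe_intrinsic]` attribute on the
+/// intrinsic against the allow-list below, and reports a mismatch between the two.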
+pub fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: DefId) -> hir::Unsafety {
+ let has_safe_attr = match tcx.has_attr(intrinsic_id, sym::rustc_safe_intrinsic) {
+ true => hir::Unsafety::Normal,
+ false => hir::Unsafety::Unsafe,
+ };
+ let is_in_list = match tcx.item_name(intrinsic_id) {
+ // When adding a new intrinsic to this list,
+ // it's usually worth updating that intrinsic's documentation
+ // to note that it's safe to call, since
+ // safe extern fns are otherwise unprecedented.
+ sym::abort
+ | sym::assert_inhabited
+ | sym::assert_zero_valid
+ | sym::assert_uninit_valid
+ | sym::size_of
+ | sym::min_align_of
+ | sym::needs_drop
+ | sym::caller_location
+ | sym::add_with_overflow
+ | sym::sub_with_overflow
+ | sym::mul_with_overflow
+ | sym::wrapping_add
+ | sym::wrapping_sub
+ | sym::wrapping_mul
+ | sym::saturating_add
+ | sym::saturating_sub
+ | sym::rotate_left
+ | sym::rotate_right
+ | sym::ctpop
+ | sym::ctlz
+ | sym::cttz
+ | sym::bswap
+ | sym::bitreverse
+ | sym::discriminant_value
+ | sym::type_id
+ | sym::likely
+ | sym::unlikely
+ | sym::ptr_guaranteed_cmp
+ | sym::minnumf32
+ | sym::minnumf64
+ | sym::maxnumf32
+ | sym::rustc_peek
+ | sym::maxnumf64
+ | sym::type_name
+ | sym::forget
+ | sym::black_box
+ | sym::variant_count
+ | sym::ptr_mask => hir::Unsafety::Normal,
+ _ => hir::Unsafety::Unsafe,
+ };
+
+ if has_safe_attr != is_in_list {
+ tcx.sess.struct_span_err(
+ tcx.def_span(intrinsic_id),
+ DiagnosticMessage::Str(format!(
+ "intrinsic safety mismatch between list of intrinsics within the compiler and core library intrinsics for intrinsic `{}`",
+ tcx.item_name(intrinsic_id)
+ ))).emit();
+ }
+
+ is_in_list
+}
+
+/// Remember to add all intrinsics here, in `compiler/rustc_codegen_llvm/src/intrinsic.rs`,
+/// and in `library/core/src/intrinsics.rs`.
+pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
+ let param = |n| tcx.mk_ty_param(n, Symbol::intern(&format!("P{}", n)));
+ let intrinsic_id = it.owner_id.to_def_id();
+ let intrinsic_name = tcx.item_name(intrinsic_id);
+ let name_str = intrinsic_name.as_str();
+
+ let bound_vars = tcx.mk_bound_variable_kinds(
+ [ty::BoundVariableKind::Region(ty::BrAnon(0)), ty::BoundVariableKind::Region(ty::BrEnv)]
+ .iter()
+ .copied(),
+ );
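+ // (For reference: the closure below builds, roughly, a reference to the `va_list`
+ // lang item type (`VaListImpl` in core) with the requested mutability, together
+ // with the underlying `va_list` type itself, reusing the late-bound regions above.)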
+ let mk_va_list_ty = |mutbl| {
+ tcx.lang_items().va_list().map(|did| {
+ let region = tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(0) },
+ ));
+ let env_region = tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: ty::BoundVar::from_u32(1), kind: ty::BrEnv },
+ ));
+ let va_list_ty = tcx.bound_type_of(did).subst(tcx, &[region.into()]);
+ (tcx.mk_ref(env_region, ty::TypeAndMut { ty: va_list_ty, mutbl }), va_list_ty)
+ })
+ };
+
+ let (n_tps, n_lts, inputs, output, unsafety) = if name_str.starts_with("atomic_") {
+ let split: Vec<&str> = name_str.split('_').collect();
+ assert!(split.len() >= 2, "Atomic intrinsic in an incorrect format");
+
+ // We only care about the operation here.
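+ // (e.g., for an intrinsic named along the lines of `atomic_xadd_relaxed`, only
+ // the `xadd` part is matched here; any ordering suffix is handled later,
+ // during codegen.)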
+ let (n_tps, inputs, output) = match split[1] {
+ "cxchg" | "cxchgweak" => (
+ 1,
+ vec![tcx.mk_mut_ptr(param(0)), param(0), param(0)],
+ tcx.intern_tup(&[param(0), tcx.types.bool]),
+ ),
+ "load" => (1, vec![tcx.mk_imm_ptr(param(0))], param(0)),
+ "store" => (1, vec![tcx.mk_mut_ptr(param(0)), param(0)], tcx.mk_unit()),
+
+ "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | "min" | "umax"
+ | "umin" => (1, vec![tcx.mk_mut_ptr(param(0)), param(0)], param(0)),
+ "fence" | "singlethreadfence" => (0, Vec::new(), tcx.mk_unit()),
+ op => {
+ tcx.sess.emit_err(UnrecognizedAtomicOperation { span: it.span, op });
+ return;
+ }
+ };
+ (n_tps, 0, inputs, output, hir::Unsafety::Unsafe)
+ } else {
+ let unsafety = intrinsic_operation_unsafety(tcx, intrinsic_id);
+ let (n_tps, inputs, output) = match intrinsic_name {
+ sym::abort => (0, Vec::new(), tcx.types.never),
+ sym::unreachable => (0, Vec::new(), tcx.types.never),
+ sym::breakpoint => (0, Vec::new(), tcx.mk_unit()),
+ sym::size_of | sym::pref_align_of | sym::min_align_of | sym::variant_count => {
+ (1, Vec::new(), tcx.types.usize)
+ }
+ sym::size_of_val | sym::min_align_of_val => {
+ (1, vec![tcx.mk_imm_ptr(param(0))], tcx.types.usize)
+ }
+ sym::rustc_peek => (1, vec![param(0)], param(0)),
+ sym::caller_location => (0, vec![], tcx.caller_location_ty()),
+ sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
+ (1, Vec::new(), tcx.mk_unit())
+ }
+ sym::forget => (1, vec![param(0)], tcx.mk_unit()),
+ sym::transmute => (2, vec![param(0)], param(1)),
+ sym::prefetch_read_data
+ | sym::prefetch_write_data
+ | sym::prefetch_read_instruction
+ | sym::prefetch_write_instruction => (
+ 1,
+ vec![
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ tcx.types.i32,
+ ],
+ tcx.mk_unit(),
+ ),
+ sym::drop_in_place => (1, vec![tcx.mk_mut_ptr(param(0))], tcx.mk_unit()),
+ sym::needs_drop => (1, Vec::new(), tcx.types.bool),
+
+ sym::type_name => (1, Vec::new(), tcx.mk_static_str()),
+ sym::type_id => (1, Vec::new(), tcx.types.u64),
+ sym::offset | sym::arith_offset => (
+ 1,
+ vec![
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ tcx.types.isize,
+ ],
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ ),
+ sym::ptr_mask => (
+ 1,
+ vec![
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ tcx.types.usize,
+ ],
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ ),
+
+ sym::copy | sym::copy_nonoverlapping => (
+ 1,
+ vec![
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Mut }),
+ tcx.types.usize,
+ ],
+ tcx.mk_unit(),
+ ),
+ sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => (
+ 1,
+ vec![
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Mut }),
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ tcx.types.usize,
+ ],
+ tcx.mk_unit(),
+ ),
+ sym::write_bytes | sym::volatile_set_memory => (
+ 1,
+ vec![
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Mut }),
+ tcx.types.u8,
+ tcx.types.usize,
+ ],
+ tcx.mk_unit(),
+ ),
+ sym::sqrtf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::sqrtf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::powif32 => (0, vec![tcx.types.f32, tcx.types.i32], tcx.types.f32),
+ sym::powif64 => (0, vec![tcx.types.f64, tcx.types.i32], tcx.types.f64),
+ sym::sinf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::sinf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::cosf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::cosf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::powf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+ sym::powf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+ sym::expf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::expf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::exp2f32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::exp2f64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::logf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::logf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::log10f32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::log10f64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::log2f32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::log2f64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::fmaf32 => (0, vec![tcx.types.f32, tcx.types.f32, tcx.types.f32], tcx.types.f32),
+ sym::fmaf64 => (0, vec![tcx.types.f64, tcx.types.f64, tcx.types.f64], tcx.types.f64),
+ sym::fabsf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::fabsf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::minnumf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+ sym::minnumf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+ sym::maxnumf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+ sym::maxnumf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+ sym::copysignf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+ sym::copysignf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+ sym::floorf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::floorf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::ceilf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::ceilf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::truncf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::truncf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::rintf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::rintf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::nearbyintf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::nearbyintf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::roundf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::roundf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+
+ sym::volatile_load | sym::unaligned_volatile_load => {
+ (1, vec![tcx.mk_imm_ptr(param(0))], param(0))
+ }
+ sym::volatile_store | sym::unaligned_volatile_store => {
+ (1, vec![tcx.mk_mut_ptr(param(0)), param(0)], tcx.mk_unit())
+ }
+
+ sym::ctpop
+ | sym::ctlz
+ | sym::ctlz_nonzero
+ | sym::cttz
+ | sym::cttz_nonzero
+ | sym::bswap
+ | sym::bitreverse => (1, vec![param(0)], param(0)),
+
+ sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
+ (1, vec![param(0), param(0)], tcx.intern_tup(&[param(0), tcx.types.bool]))
+ }
+
+ sym::ptr_guaranteed_cmp => {
+ (1, vec![tcx.mk_imm_ptr(param(0)), tcx.mk_imm_ptr(param(0))], tcx.types.u8)
+ }
+
+ sym::const_allocate => {
+ (0, vec![tcx.types.usize, tcx.types.usize], tcx.mk_mut_ptr(tcx.types.u8))
+ }
+ sym::const_deallocate => (
+ 0,
+ vec![tcx.mk_mut_ptr(tcx.types.u8), tcx.types.usize, tcx.types.usize],
+ tcx.mk_unit(),
+ ),
+
+ sym::ptr_offset_from => {
+ (1, vec![tcx.mk_imm_ptr(param(0)), tcx.mk_imm_ptr(param(0))], tcx.types.isize)
+ }
+ sym::ptr_offset_from_unsigned => {
+ (1, vec![tcx.mk_imm_ptr(param(0)), tcx.mk_imm_ptr(param(0))], tcx.types.usize)
+ }
+ sym::unchecked_div | sym::unchecked_rem | sym::exact_div => {
+ (1, vec![param(0), param(0)], param(0))
+ }
+ sym::unchecked_shl | sym::unchecked_shr | sym::rotate_left | sym::rotate_right => {
+ (1, vec![param(0), param(0)], param(0))
+ }
+ sym::unchecked_add | sym::unchecked_sub | sym::unchecked_mul => {
+ (1, vec![param(0), param(0)], param(0))
+ }
+ sym::wrapping_add | sym::wrapping_sub | sym::wrapping_mul => {
+ (1, vec![param(0), param(0)], param(0))
+ }
+ sym::saturating_add | sym::saturating_sub => (1, vec![param(0), param(0)], param(0)),
+ sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
+ (1, vec![param(0), param(0)], param(0))
+ }
+ sym::float_to_int_unchecked => (2, vec![param(0)], param(1)),
+
+ sym::assume => (0, vec![tcx.types.bool], tcx.mk_unit()),
+ sym::likely => (0, vec![tcx.types.bool], tcx.types.bool),
+ sym::unlikely => (0, vec![tcx.types.bool], tcx.types.bool),
+
+ sym::discriminant_value => {
+ let assoc_items = tcx.associated_item_def_ids(
+ tcx.require_lang_item(hir::LangItem::DiscriminantKind, None),
+ );
+ let discriminant_def_id = assoc_items[0];
+
+ let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(0) };
+ (
+ 1,
+ vec![
+ tcx.mk_imm_ref(tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)), param(0)),
+ ],
+ tcx.mk_projection(discriminant_def_id, tcx.mk_substs([param(0).into()].iter())),
+ )
+ }
+
+ kw::Try => {
+ let mut_u8 = tcx.mk_mut_ptr(tcx.types.u8);
+ let try_fn_ty = ty::Binder::dummy(tcx.mk_fn_sig(
+ iter::once(mut_u8),
+ tcx.mk_unit(),
+ false,
+ hir::Unsafety::Normal,
+ Abi::Rust,
+ ));
+ let catch_fn_ty = ty::Binder::dummy(tcx.mk_fn_sig(
+ [mut_u8, mut_u8].iter().cloned(),
+ tcx.mk_unit(),
+ false,
+ hir::Unsafety::Normal,
+ Abi::Rust,
+ ));
+ (
+ 0,
+ vec![tcx.mk_fn_ptr(try_fn_ty), mut_u8, tcx.mk_fn_ptr(catch_fn_ty)],
+ tcx.types.i32,
+ )
+ }
+
+ sym::va_start | sym::va_end => match mk_va_list_ty(hir::Mutability::Mut) {
+ Some((va_list_ref_ty, _)) => (0, vec![va_list_ref_ty], tcx.mk_unit()),
+ None => bug!("`va_list` language item needed for C-variadic intrinsics"),
+ },
+
+ sym::va_copy => match mk_va_list_ty(hir::Mutability::Not) {
+ Some((va_list_ref_ty, va_list_ty)) => {
+ let va_list_ptr_ty = tcx.mk_mut_ptr(va_list_ty);
+ (0, vec![va_list_ptr_ty, va_list_ref_ty], tcx.mk_unit())
+ }
+ None => bug!("`va_list` language item needed for C-variadic intrinsics"),
+ },
+
+ sym::va_arg => match mk_va_list_ty(hir::Mutability::Mut) {
+ Some((va_list_ref_ty, _)) => (1, vec![va_list_ref_ty], param(0)),
+ None => bug!("`va_list` language item needed for C-variadic intrinsics"),
+ },
+
+ sym::nontemporal_store => (1, vec![tcx.mk_mut_ptr(param(0)), param(0)], tcx.mk_unit()),
+
+ sym::raw_eq => {
+ let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(0) };
+ let param_ty =
+ tcx.mk_imm_ref(tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)), param(0));
+ (1, vec![param_ty; 2], tcx.types.bool)
+ }
+
+ sym::black_box => (1, vec![param(0)], param(0)),
+
+ sym::const_eval_select => (4, vec![param(0), param(1), param(2)], param(3)),
+
+ sym::vtable_size | sym::vtable_align => {
+ (0, vec![tcx.mk_imm_ptr(tcx.mk_unit())], tcx.types.usize)
+ }
+
+ other => {
+ tcx.sess.emit_err(UnrecognizedIntrinsicFunction { span: it.span, name: other });
+ return;
+ }
+ };
+ (n_tps, 0, inputs, output, unsafety)
+ };
+ let sig = tcx.mk_fn_sig(inputs.into_iter(), output, false, unsafety, Abi::RustIntrinsic);
+ let sig = ty::Binder::bind_with_vars(sig, bound_vars);
+ equate_intrinsic_type(tcx, it, n_tps, n_lts, sig)
+}
+
+/// Type-check `extern "platform-intrinsic" { ... }` functions.
+pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
+ let param = |n| {
+ let name = Symbol::intern(&format!("P{}", n));
+ tcx.mk_ty_param(n, name)
+ };
+
+ let name = it.ident.name;
+
+ let (n_tps, inputs, output) = match name {
+ sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
+ (2, vec![param(0), param(0)], param(1))
+ }
+ sym::simd_add
+ | sym::simd_sub
+ | sym::simd_mul
+ | sym::simd_rem
+ | sym::simd_div
+ | sym::simd_shl
+ | sym::simd_shr
+ | sym::simd_and
+ | sym::simd_or
+ | sym::simd_xor
+ | sym::simd_fmin
+ | sym::simd_fmax
+ | sym::simd_fpow
+ | sym::simd_saturating_add
+ | sym::simd_saturating_sub => (1, vec![param(0), param(0)], param(0)),
+ sym::simd_arith_offset => (2, vec![param(0), param(1)], param(0)),
+ sym::simd_neg
+ | sym::simd_fsqrt
+ | sym::simd_fsin
+ | sym::simd_fcos
+ | sym::simd_fexp
+ | sym::simd_fexp2
+ | sym::simd_flog2
+ | sym::simd_flog10
+ | sym::simd_flog
+ | sym::simd_fabs
+ | sym::simd_ceil
+ | sym::simd_floor
+ | sym::simd_round
+ | sym::simd_trunc => (1, vec![param(0)], param(0)),
+ sym::simd_fpowi => (1, vec![param(0), tcx.types.i32], param(0)),
+ sym::simd_fma => (1, vec![param(0), param(0), param(0)], param(0)),
+ sym::simd_gather => (3, vec![param(0), param(1), param(2)], param(0)),
+ sym::simd_scatter => (3, vec![param(0), param(1), param(2)], tcx.mk_unit()),
+ sym::simd_insert => (2, vec![param(0), tcx.types.u32, param(1)], param(0)),
+ sym::simd_extract => (2, vec![param(0), tcx.types.u32], param(1)),
+ sym::simd_cast
+ | sym::simd_as
+ | sym::simd_cast_ptr
+ | sym::simd_expose_addr
+ | sym::simd_from_exposed_addr => (2, vec![param(0)], param(1)),
+ sym::simd_bitmask => (2, vec![param(0)], param(1)),
+ sym::simd_select | sym::simd_select_bitmask => {
+ (2, vec![param(0), param(1), param(1)], param(1))
+ }
+ sym::simd_reduce_all | sym::simd_reduce_any => (1, vec![param(0)], tcx.types.bool),
+ sym::simd_reduce_add_ordered | sym::simd_reduce_mul_ordered => {
+ (2, vec![param(0), param(1)], param(1))
+ }
+ sym::simd_reduce_add_unordered
+ | sym::simd_reduce_mul_unordered
+ | sym::simd_reduce_and
+ | sym::simd_reduce_or
+ | sym::simd_reduce_xor
+ | sym::simd_reduce_min
+ | sym::simd_reduce_max
+ | sym::simd_reduce_min_nanless
+ | sym::simd_reduce_max_nanless => (2, vec![param(0)], param(1)),
+ sym::simd_shuffle => (3, vec![param(0), param(0), param(1)], param(2)),
+ name if name.as_str().starts_with("simd_shuffle") => {
+ match name.as_str()["simd_shuffle".len()..].parse() {
+ Ok(n) => {
+ let params = vec![param(0), param(0), tcx.mk_array(tcx.types.u32, n)];
+ (2, params, param(1))
+ }
+ Err(_) => {
+ let msg =
+ format!("unrecognized platform-specific intrinsic function: `{name}`");
+ tcx.sess.struct_span_err(it.span, &msg).emit();
+ return;
+ }
+ }
+ }
+ _ => {
+ let msg = format!("unrecognized platform-specific intrinsic function: `{name}`");
+ tcx.sess.struct_span_err(it.span, &msg).emit();
+ return;
+ }
+ };
+
+ let sig = tcx.mk_fn_sig(
+ inputs.into_iter(),
+ output,
+ false,
+ hir::Unsafety::Unsafe,
+ Abi::PlatformIntrinsic,
+ );
+ let sig = ty::Binder::dummy(sig);
+ equate_intrinsic_type(tcx, it, n_tps, 0, sig)
+}
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsicck.rs b/compiler/rustc_hir_analysis/src/check/intrinsicck.rs
new file mode 100644
index 000000000..17c4d0d48
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/intrinsicck.rs
@@ -0,0 +1,437 @@
+use rustc_ast::InlineAsmTemplatePiece;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_middle::ty::{self, Article, FloatTy, IntTy, Ty, TyCtxt, TypeVisitable, UintTy};
+use rustc_session::lint;
+use rustc_span::{Symbol, DUMMY_SP};
+use rustc_target::asm::{InlineAsmReg, InlineAsmRegClass, InlineAsmRegOrRegClass, InlineAsmType};
+
+pub struct InlineAsmCtxt<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ get_operand_ty: Box<dyn Fn(&'tcx hir::Expr<'tcx>) -> Ty<'tcx> + 'a>,
+}
+
+impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
+ pub fn new_global_asm(tcx: TyCtxt<'tcx>) -> Self {
+ InlineAsmCtxt {
+ tcx,
+ param_env: ty::ParamEnv::empty(),
+ get_operand_ty: Box::new(|e| bug!("asm operand in global asm: {e:?}")),
+ }
+ }
+
+ pub fn new_in_fn(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ get_operand_ty: impl Fn(&'tcx hir::Expr<'tcx>) -> Ty<'tcx> + 'a,
+ ) -> Self {
+ InlineAsmCtxt { tcx, param_env, get_operand_ty: Box::new(get_operand_ty) }
+ }
+
+ // FIXME(compiler-errors): This could use `<$ty as Pointee>::Metadata == ()`
+ fn is_thin_ptr_ty(&self, ty: Ty<'tcx>) -> bool {
+ // Type still may have region variables, but `Sized` does not depend
+ // on those, so just erase them before querying.
+ if ty.is_sized(self.tcx, self.param_env) {
+ return true;
+ }
+ if let ty::Foreign(..) = ty.kind() {
+ return true;
+ }
+ false
+ }
+
+ fn check_asm_operand_type(
+ &self,
+ idx: usize,
+ reg: InlineAsmRegOrRegClass,
+ expr: &'tcx hir::Expr<'tcx>,
+ template: &[InlineAsmTemplatePiece],
+ is_input: bool,
+ tied_input: Option<(&'tcx hir::Expr<'tcx>, Option<InlineAsmType>)>,
+ target_features: &FxHashSet<Symbol>,
+ ) -> Option<InlineAsmType> {
+ let ty = (self.get_operand_ty)(expr);
+ if ty.has_non_region_infer() {
+ bug!("inference variable in asm operand ty: {:?} {:?}", expr, ty);
+ }
+ let asm_ty_isize = match self.tcx.sess.target.pointer_width {
+ 16 => InlineAsmType::I16,
+ 32 => InlineAsmType::I32,
+ 64 => InlineAsmType::I64,
+ _ => unreachable!(),
+ };
+
+ let asm_ty = match *ty.kind() {
+ // `!` is allowed for input but not for output (issue #87802)
+ ty::Never if is_input => return None,
+ ty::Error(_) => return None,
+ ty::Int(IntTy::I8) | ty::Uint(UintTy::U8) => Some(InlineAsmType::I8),
+ ty::Int(IntTy::I16) | ty::Uint(UintTy::U16) => Some(InlineAsmType::I16),
+ ty::Int(IntTy::I32) | ty::Uint(UintTy::U32) => Some(InlineAsmType::I32),
+ ty::Int(IntTy::I64) | ty::Uint(UintTy::U64) => Some(InlineAsmType::I64),
+ ty::Int(IntTy::I128) | ty::Uint(UintTy::U128) => Some(InlineAsmType::I128),
+ ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => Some(asm_ty_isize),
+ ty::Float(FloatTy::F32) => Some(InlineAsmType::F32),
+ ty::Float(FloatTy::F64) => Some(InlineAsmType::F64),
+ ty::FnPtr(_) => Some(asm_ty_isize),
+ ty::RawPtr(ty::TypeAndMut { ty, mutbl: _ }) if self.is_thin_ptr_ty(ty) => {
+ Some(asm_ty_isize)
+ }
+ ty::Adt(adt, substs) if adt.repr().simd() => {
+ let fields = &adt.non_enum_variant().fields;
+ let elem_ty = fields[0].ty(self.tcx, substs);
+ match elem_ty.kind() {
+ ty::Never | ty::Error(_) => return None,
+ ty::Int(IntTy::I8) | ty::Uint(UintTy::U8) => {
+ Some(InlineAsmType::VecI8(fields.len() as u64))
+ }
+ ty::Int(IntTy::I16) | ty::Uint(UintTy::U16) => {
+ Some(InlineAsmType::VecI16(fields.len() as u64))
+ }
+ ty::Int(IntTy::I32) | ty::Uint(UintTy::U32) => {
+ Some(InlineAsmType::VecI32(fields.len() as u64))
+ }
+ ty::Int(IntTy::I64) | ty::Uint(UintTy::U64) => {
+ Some(InlineAsmType::VecI64(fields.len() as u64))
+ }
+ ty::Int(IntTy::I128) | ty::Uint(UintTy::U128) => {
+ Some(InlineAsmType::VecI128(fields.len() as u64))
+ }
+ ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => {
+ Some(match self.tcx.sess.target.pointer_width {
+ 16 => InlineAsmType::VecI16(fields.len() as u64),
+ 32 => InlineAsmType::VecI32(fields.len() as u64),
+ 64 => InlineAsmType::VecI64(fields.len() as u64),
+ _ => unreachable!(),
+ })
+ }
+ ty::Float(FloatTy::F32) => Some(InlineAsmType::VecF32(fields.len() as u64)),
+ ty::Float(FloatTy::F64) => Some(InlineAsmType::VecF64(fields.len() as u64)),
+ _ => None,
+ }
+ }
+ ty::Infer(_) => unreachable!(),
+ _ => None,
+ };
+ let Some(asm_ty) = asm_ty else {
+ let msg = &format!("cannot use value of type `{ty}` for inline assembly");
+ let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
+ err.note(
+ "only integers, floats, SIMD vectors, pointers and function pointers \
+ can be used as arguments for inline assembly",
+ );
+ err.emit();
+ return None;
+ };
+
+ // Check that the type implements Copy. The only case where this can
+ // possibly fail is for SIMD types which don't #[derive(Copy)].
+ if !ty.is_copy_modulo_regions(self.tcx, self.param_env) {
+ let msg = "arguments for inline assembly must be copyable";
+ let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
+ err.note(&format!("`{ty}` does not implement the Copy trait"));
+ err.emit();
+ }
+
+ // Ideally we wouldn't need to do this, but LLVM's register allocator
+ // really doesn't like it when tied operands have different types.
+ //
+ // This is purely an LLVM limitation, but we have to live with it since
+ // there is no way to hide this with implicit conversions.
+ //
+ // For the purposes of this check we only look at the `InlineAsmType`,
+ // which means that pointers and integers are treated as identical (modulo
+ // size).
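+ // For instance (purely illustrative), `asm!("...", inout(reg) x => y)` with
+ // `x: u64` and `y: u32` is rejected here, whereas a `u64`/`*mut u8` pair is
+ // accepted on a 64-bit target since both lower to the same asm type.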
+ if let Some((in_expr, Some(in_asm_ty))) = tied_input {
+ if in_asm_ty != asm_ty {
+ let msg = "incompatible types for asm inout argument";
+ let mut err = self.tcx.sess.struct_span_err(vec![in_expr.span, expr.span], msg);
+
+ let in_expr_ty = (self.get_operand_ty)(in_expr);
+ err.span_label(in_expr.span, &format!("type `{in_expr_ty}`"));
+ err.span_label(expr.span, &format!("type `{ty}`"));
+ err.note(
+ "asm inout arguments must have the same type, \
+ unless they are both pointers or integers of the same size",
+ );
+ err.emit();
+ }
+
+ // All of the later checks have already been done on the input, so
+ // let's not emit errors and warnings twice.
+ return Some(asm_ty);
+ }
+
+ // Check the type against the list of types supported by the selected
+ // register class.
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+ let reg_class = reg.reg_class();
+ let supported_tys = reg_class.supported_types(asm_arch);
+ let Some((_, feature)) = supported_tys.iter().find(|&&(t, _)| t == asm_ty) else {
+ let msg = &format!("type `{ty}` cannot be used with this register class");
+ let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
+ let supported_tys: Vec<_> =
+ supported_tys.iter().map(|(t, _)| t.to_string()).collect();
+ err.note(&format!(
+ "register class `{}` supports these types: {}",
+ reg_class.name(),
+ supported_tys.join(", "),
+ ));
+ if let Some(suggest) = reg_class.suggest_class(asm_arch, asm_ty) {
+ err.help(&format!(
+ "consider using the `{}` register class instead",
+ suggest.name()
+ ));
+ }
+ err.emit();
+ return Some(asm_ty);
+ };
+
+ // Check whether the selected type requires a target feature. Note that
+ // this is different from the feature check we did earlier. While the
+ // earlier check verified that this register class is usable at all
+ // with the currently enabled features, some types may only be usable
+ // with a register class when a certain feature is enabled. We check
+ // this here since it depends on the results of typeck.
+ //
+ // Also note that this check isn't run when the operand type is never
+ // (!). In that case we still need the earlier check to verify that the
+ // register class is usable at all.
+ if let Some(feature) = feature {
+ if !target_features.contains(&feature) {
+ let msg = &format!("`{}` target feature is not enabled", feature);
+ let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
+ err.note(&format!(
+ "this is required to use type `{}` with register class `{}`",
+ ty,
+ reg_class.name(),
+ ));
+ err.emit();
+ return Some(asm_ty);
+ }
+ }
+
+ // Check whether a modifier is suggested for using this type.
+ if let Some((suggested_modifier, suggested_result)) =
+ reg_class.suggest_modifier(asm_arch, asm_ty)
+ {
+ // Search for any use of this operand without a modifier and emit
+ // the suggestion for them.
+ let mut spans = vec![];
+ for piece in template {
+ if let &InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span } = piece
+ {
+ if operand_idx == idx && modifier.is_none() {
+ spans.push(span);
+ }
+ }
+ }
+ if !spans.is_empty() {
+ let (default_modifier, default_result) =
+ reg_class.default_modifier(asm_arch).unwrap();
+ self.tcx.struct_span_lint_hir(
+ lint::builtin::ASM_SUB_REGISTER,
+ expr.hir_id,
+ spans,
+ "formatting may not be suitable for sub-register argument",
+ |lint| {
+ lint.span_label(expr.span, "for this argument");
+ lint.help(&format!(
+ "use `{{{idx}:{suggested_modifier}}}` to have the register formatted as `{suggested_result}`",
+ ));
+ lint.help(&format!(
+ "or use `{{{idx}:{default_modifier}}}` to keep the default formatting of `{default_result}`",
+ ));
+ lint
+ },
+ );
+ }
+ }
+
+ Some(asm_ty)
+ }
+
+ pub fn check_asm(&self, asm: &hir::InlineAsm<'tcx>, enclosing_id: hir::HirId) {
+ let hir = self.tcx.hir();
+ let enclosing_def_id = hir.local_def_id(enclosing_id).to_def_id();
+ let target_features = self.tcx.asm_target_features(enclosing_def_id);
+ let Some(asm_arch) = self.tcx.sess.asm_arch else {
+ self.tcx.sess.delay_span_bug(DUMMY_SP, "target architecture does not support asm");
+ return;
+ };
+ for (idx, (op, op_sp)) in asm.operands.iter().enumerate() {
+ // Validate register classes against currently enabled target
+ // features. We check that at least one type is available for
+ // the enabled features.
+ //
+ // We ignore target feature requirements for clobbers: if the
+ // feature is disabled then the compiler doesn't care what we
+ // do with the registers.
+ //
+ // Note that this is only possible for explicit register
+ // operands, which cannot be used in the asm string.
+ if let Some(reg) = op.reg() {
+ // Some explicit registers cannot be used depending on the
+ // target. Reject those here.
+ if let InlineAsmRegOrRegClass::Reg(reg) = reg {
+ if let InlineAsmReg::Err = reg {
+ // `validate` will panic on `Err`, as an error must
+ // already have been reported.
+ continue;
+ }
+ if let Err(msg) = reg.validate(
+ asm_arch,
+ self.tcx.sess.relocation_model(),
+ &target_features,
+ &self.tcx.sess.target,
+ op.is_clobber(),
+ ) {
+ let msg = format!("cannot use register `{}`: {}", reg.name(), msg);
+ self.tcx.sess.struct_span_err(*op_sp, &msg).emit();
+ continue;
+ }
+ }
+
+ if !op.is_clobber() {
+ let mut missing_required_features = vec![];
+ let reg_class = reg.reg_class();
+ if let InlineAsmRegClass::Err = reg_class {
+ continue;
+ }
+ for &(_, feature) in reg_class.supported_types(asm_arch) {
+ match feature {
+ Some(feature) => {
+ if target_features.contains(&feature) {
+ missing_required_features.clear();
+ break;
+ } else {
+ missing_required_features.push(feature);
+ }
+ }
+ None => {
+ missing_required_features.clear();
+ break;
+ }
+ }
+ }
+
+ // We are sorting primitive strs here, so we can use an unstable sort.
+ missing_required_features.sort_unstable();
+ missing_required_features.dedup();
+ match &missing_required_features[..] {
+ [] => {}
+ [feature] => {
+ let msg = format!(
+ "register class `{}` requires the `{}` target feature",
+ reg_class.name(),
+ feature
+ );
+ self.tcx.sess.struct_span_err(*op_sp, &msg).emit();
+ // register isn't enabled, don't do more checks
+ continue;
+ }
+ features => {
+ let msg = format!(
+ "register class `{}` requires at least one of the following target features: {}",
+ reg_class.name(),
+ features
+ .iter()
+ .map(|f| f.as_str())
+ .intersperse(", ")
+ .collect::<String>(),
+ );
+ self.tcx.sess.struct_span_err(*op_sp, &msg).emit();
+ // register isn't enabled, don't do more checks
+ continue;
+ }
+ }
+ }
+ }
+
+ match *op {
+ hir::InlineAsmOperand::In { reg, ref expr } => {
+ self.check_asm_operand_type(
+ idx,
+ reg,
+ expr,
+ asm.template,
+ true,
+ None,
+ &target_features,
+ );
+ }
+ hir::InlineAsmOperand::Out { reg, late: _, ref expr } => {
+ if let Some(expr) = expr {
+ self.check_asm_operand_type(
+ idx,
+ reg,
+ expr,
+ asm.template,
+ false,
+ None,
+ &target_features,
+ );
+ }
+ }
+ hir::InlineAsmOperand::InOut { reg, late: _, ref expr } => {
+ self.check_asm_operand_type(
+ idx,
+ reg,
+ expr,
+ asm.template,
+ false,
+ None,
+ &target_features,
+ );
+ }
+ hir::InlineAsmOperand::SplitInOut { reg, late: _, ref in_expr, ref out_expr } => {
+ let in_ty = self.check_asm_operand_type(
+ idx,
+ reg,
+ in_expr,
+ asm.template,
+ true,
+ None,
+ &target_features,
+ );
+ if let Some(out_expr) = out_expr {
+ self.check_asm_operand_type(
+ idx,
+ reg,
+ out_expr,
+ asm.template,
+ false,
+ Some((in_expr, in_ty)),
+ &target_features,
+ );
+ }
+ }
+ // No special checking is needed for these:
+ // - Typeck has checked that Const operands are integers.
+ // - AST lowering guarantees that SymStatic points to a static.
+ hir::InlineAsmOperand::Const { .. } | hir::InlineAsmOperand::SymStatic { .. } => {}
+ // Check that sym actually points to a function. Later passes
+ // depend on this.
+ hir::InlineAsmOperand::SymFn { anon_const } => {
+ let ty = self.tcx.typeck_body(anon_const.body).node_type(anon_const.hir_id);
+ match ty.kind() {
+ ty::Never | ty::Error(_) => {}
+ ty::FnDef(..) => {}
+ _ => {
+ let mut err =
+ self.tcx.sess.struct_span_err(*op_sp, "invalid `sym` operand");
+ err.span_label(
+ self.tcx.hir().span(anon_const.body.hir_id),
+ &format!("is {} `{}`", ty.kind().article(), ty),
+ );
+ err.help("`sym` operands must refer to either a function or a static");
+ err.emit();
+ }
+ };
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_hir_analysis/src/check/mod.rs b/compiler/rustc_hir_analysis/src/check/mod.rs
new file mode 100644
index 000000000..2e7b10257
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/mod.rs
@@ -0,0 +1,515 @@
+/*!
+
+# typeck: check phase
+
+Within the check phase of type check, we check each item one at a time
+(bodies of function expressions are checked as part of the containing
+function). Inference is used to supply types wherever they are unknown.
+
+By far the most complex case is checking the body of a function. This
+can be broken down into several distinct phases:
+
+- gather: creates type variables to represent the type of each local
+ variable and pattern binding.
+
+- main: the main pass does the lion's share of the work: it
+ determines the types of all expressions, resolves
+ methods, checks for most invalid conditions, and so forth. In
+ some cases, where a type is unknown, it may create a type or region
+ variable and use that as the type of an expression.
+
+ In the process of checking, various constraints will be placed on
+ these type variables through the subtyping relationships requested
+ by the `demand` module. The `infer` module is in charge
+ of resolving those constraints.
+
+- regionck: after main is complete, the regionck pass goes over all
+ types looking for regions and making sure that they did not escape
+ into places where they are not in scope. This may also influence the
+ final assignments of the various region variables if there is some
+ flexibility.
+
+- writeback: writes the final types within a function body, replacing
+ type variables with their final inferred types. These final types
+ are written into the `tcx.node_types` table, which should *never* contain
+ any reference to a type variable.
+
+## Intermediate types
+
+While type checking a function, the intermediate types for the
+expressions, blocks, and so forth contained within the function are
+stored in `fcx.node_types` and `fcx.node_substs`. These types
+may contain unresolved type variables. After type checking is
+complete, the functions in the writeback module are used to take the
+types from this table, resolve them, and then write them into their
+permanent home in the type context `tcx`.
+
+This means that during inferencing you should use `fcx.write_ty()`
+and `fcx.expr_ty()` / `fcx.node_ty()` to write/obtain the types of
+nodes within the function.
+
+The types of top-level items, which never contain unbound type
+variables, are stored directly into the `tcx` typeck_results.
+
+N.B., a type variable is not the same thing as a type parameter. A
+type variable is an instance of a type parameter. That is,
+given a generic function `fn foo<T>(t: T)`, while checking the
+function `foo`, the type `ty_param(0)` refers to the type `T`, which
+is treated abstractly. However, when `foo()` is called, `T` will be
+replaced by a fresh type variable `N`. This variable will
+eventually be resolved to some concrete type (which might itself be
+a type parameter).
+
+*/
+
+mod check;
+mod compare_method;
+pub mod dropck;
+pub mod intrinsic;
+pub mod intrinsicck;
+mod region;
+pub mod wfcheck;
+
+pub use check::check_abi;
+
+use check::check_mod_item_types;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit::Visitor;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{InternalSubsts, SubstsRef};
+use rustc_session::parse::feature_err;
+use rustc_span::source_map::DUMMY_SP;
+use rustc_span::symbol::{kw, Ident};
+use rustc_span::{self, BytePos, Span, Symbol};
+use rustc_target::abi::VariantIdx;
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits::error_reporting::suggestions::ReturnsVisitor;
+use std::num::NonZeroU32;
+
+use crate::require_c_abi_if_c_variadic;
+use crate::util::common::indenter;
+
+use self::compare_method::collect_trait_impl_trait_tys;
+use self::region::region_scope_tree;
+
+pub fn provide(providers: &mut Providers) {
+ wfcheck::provide(providers);
+ *providers = Providers {
+ adt_destructor,
+ check_mod_item_types,
+ region_scope_tree,
+ collect_trait_impl_trait_tys,
+ compare_assoc_const_impl_item_with_trait_item: compare_method::raw_compare_const_impl,
+ ..*providers
+ };
+}
+
+fn adt_destructor(tcx: TyCtxt<'_>, def_id: DefId) -> Option<ty::Destructor> {
+ tcx.calculate_dtor(def_id, dropck::check_drop_impl)
+}
+
+/// Given a `DefId` for an opaque type in return position, find its parent item's return
+/// expressions.
+fn get_owner_return_paths<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+) -> Option<(LocalDefId, ReturnsVisitor<'tcx>)> {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let parent_id = tcx.hir().get_parent_item(hir_id).def_id;
+ tcx.hir().find_by_def_id(parent_id).and_then(|node| node.body_id()).map(|body_id| {
+ let body = tcx.hir().body(body_id);
+ let mut visitor = ReturnsVisitor::default();
+ visitor.visit_body(body);
+ (parent_id, visitor)
+ })
+}
+
+/// Forbid defining intrinsics in Rust code,
+/// as they must always be defined by the compiler.
+// FIXME: Move this to a more appropriate place.
+pub fn fn_maybe_err(tcx: TyCtxt<'_>, sp: Span, abi: Abi) {
+ if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = abi {
+ tcx.sess.span_err(sp, "intrinsic must be in `extern \"rust-intrinsic\" { ... }` block");
+ }
+}
+
+fn maybe_check_static_with_link_section(tcx: TyCtxt<'_>, id: LocalDefId) {
+ // Only restricted on wasm target for now
+ if !tcx.sess.target.is_like_wasm {
+ return;
+ }
+
+ // If `#[link_section]` is missing, then nothing to verify
+ let attrs = tcx.codegen_fn_attrs(id);
+ if attrs.link_section.is_none() {
+ return;
+ }
+
+ // For the wasm32 target, statics with `#[link_section]` are placed into custom
+ // sections of the final output file, but this isn't like custom sections of
+ // other executable formats. Namely, we can only embed a list of bytes,
+ // nothing with provenance (pointers to anything else). If any provenance
+ // shows up, reject it here.
+ // `#[link_section]` may contain arbitrary, or even undefined, bytes, but it is
+ // the consumer's responsibility to ensure all bytes that have been read
+ // have defined values.
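+ // For example (purely illustrative): `#[link_section = "foo"] static A: [u8; 4] = *b"abcd";`
+ // is fine, while `#[link_section = "foo"] static B: &[u8] = b"abcd";` stores a
+ // pointer (provenance) and is rejected below.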
+ if let Ok(alloc) = tcx.eval_static_initializer(id.to_def_id())
+ && alloc.inner().provenance().len() != 0
+ {
+ let msg = "statics with a custom `#[link_section]` must be a \
+ simple list of bytes on the wasm target with no \
+ extra levels of indirection such as references";
+ tcx.sess.span_err(tcx.def_span(id), msg);
+ }
+}
+
+fn report_forbidden_specialization(
+ tcx: TyCtxt<'_>,
+ impl_item: &hir::ImplItemRef,
+ parent_impl: DefId,
+) {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_item.span,
+ E0520,
+ "`{}` specializes an item from a parent `impl`, but \
+ that item is not marked `default`",
+ impl_item.ident
+ );
+ err.span_label(impl_item.span, format!("cannot specialize default item `{}`", impl_item.ident));
+
+ match tcx.span_of_impl(parent_impl) {
+ Ok(span) => {
+ err.span_label(span, "parent `impl` is here");
+ err.note(&format!(
+ "to specialize, `{}` in the parent `impl` must be marked `default`",
+ impl_item.ident
+ ));
+ }
+ Err(cname) => {
+ err.note(&format!("parent implementation is in crate `{cname}`"));
+ }
+ }
+
+ err.emit();
+}
+
+fn missing_items_err(
+ tcx: TyCtxt<'_>,
+ impl_span: Span,
+ missing_items: &[&ty::AssocItem],
+ full_impl_span: Span,
+) {
+ let missing_items_msg = missing_items
+ .iter()
+ .map(|trait_item| trait_item.name.to_string())
+ .collect::<Vec<_>>()
+ .join("`, `");
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0046,
+ "not all trait items implemented, missing: `{missing_items_msg}`",
+ );
+ err.span_label(impl_span, format!("missing `{missing_items_msg}` in implementation"));
+
+ // `Span` before impl block closing brace.
+ let hi = full_impl_span.hi() - BytePos(1);
+ // Point at the place right before the closing brace of the relevant `impl` to suggest
+ // adding the associated item at the end of its body.
+ let sugg_sp = full_impl_span.with_lo(hi).with_hi(hi);
+ // Obtain the level of indentation ending in `sugg_sp`.
+ let padding =
+ tcx.sess.source_map().indentation_before(sugg_sp).unwrap_or_else(|| String::new());
+
+ for trait_item in missing_items {
+ let snippet = suggestion_signature(trait_item, tcx);
+ let code = format!("{}{}\n{}", padding, snippet, padding);
+ let msg = format!("implement the missing item: `{snippet}`");
+ let appl = Applicability::HasPlaceholders;
+ if let Some(span) = tcx.hir().span_if_local(trait_item.def_id) {
+ err.span_label(span, format!("`{}` from trait", trait_item.name));
+ err.tool_only_span_suggestion(sugg_sp, &msg, code, appl);
+ } else {
+ err.span_suggestion_hidden(sugg_sp, &msg, code, appl);
+ }
+ }
+ err.emit();
+}
+
+fn missing_items_must_implement_one_of_err(
+ tcx: TyCtxt<'_>,
+ impl_span: Span,
+ missing_items: &[Ident],
+ annotation_span: Option<Span>,
+) {
+ let missing_items_msg =
+ missing_items.iter().map(Ident::to_string).collect::<Vec<_>>().join("`, `");
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0046,
+ "not all trait items implemented, missing one of: `{missing_items_msg}`",
+ );
+ err.span_label(impl_span, format!("missing one of `{missing_items_msg}` in implementation"));
+
+ if let Some(annotation_span) = annotation_span {
+ err.span_note(annotation_span, "required because of this annotation");
+ }
+
+ err.emit();
+}
+
+fn default_body_is_unstable(
+ tcx: TyCtxt<'_>,
+ impl_span: Span,
+ item_did: DefId,
+ feature: Symbol,
+ reason: Option<Symbol>,
+ issue: Option<NonZeroU32>,
+) {
+ let missing_item_name = &tcx.associated_item(item_did).name;
+ let use_of_unstable_library_feature_note = match reason {
+ Some(r) => format!("use of unstable library feature '{feature}': {r}"),
+ None => format!("use of unstable library feature '{feature}'"),
+ };
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0046,
+ "not all trait items implemented, missing: `{missing_item_name}`",
+ );
+ err.note(format!("default implementation of `{missing_item_name}` is unstable"));
+ err.note(use_of_unstable_library_feature_note);
+ rustc_session::parse::add_feature_diagnostics_for_issue(
+ &mut err,
+ &tcx.sess.parse_sess,
+ feature,
+ rustc_feature::GateIssue::Library(issue),
+ );
+ err.emit();
+}
+
+/// Re-sugar `ty::GenericPredicates` in a way suitable to be used in structured suggestions.
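+///
+/// For instance (an illustrative sketch), a bound `T: Clone` contributes `<T>` to the
+/// generics string and a `T: Clone`-style bound to the where-clause string, while a
+/// projection such as `<T as Iterator>::Item == u32` is rendered as an `Item = u32`-style
+/// equality bound (see the FIXME in the body below).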
+fn bounds_from_generic_predicates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ predicates: ty::GenericPredicates<'tcx>,
+) -> (String, String) {
+ let mut types: FxHashMap<Ty<'tcx>, Vec<DefId>> = FxHashMap::default();
+ let mut projections = vec![];
+ for (predicate, _) in predicates.predicates {
+ debug!("predicate {:?}", predicate);
+ let bound_predicate = predicate.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(trait_predicate) => {
+ let entry = types.entry(trait_predicate.self_ty()).or_default();
+ let def_id = trait_predicate.def_id();
+ if Some(def_id) != tcx.lang_items().sized_trait() {
+ // Type params are `Sized` by default, so do not add that restriction
+ // to the list if it is a positive requirement.
+ entry.push(trait_predicate.def_id());
+ }
+ }
+ ty::PredicateKind::Projection(projection_pred) => {
+ projections.push(bound_predicate.rebind(projection_pred));
+ }
+ _ => {}
+ }
+ }
+ let generics = if types.is_empty() {
+ "".to_string()
+ } else {
+ format!(
+ "<{}>",
+ types
+ .keys()
+ .filter_map(|t| match t.kind() {
+ ty::Param(_) => Some(t.to_string()),
+ // Avoid suggesting the following:
+ // fn foo<T, <T as Trait>::Bar>(_: T) where T: Trait, <T as Trait>::Bar: Other {}
+ _ => None,
+ })
+ .collect::<Vec<_>>()
+ .join(", ")
+ )
+ };
+ let mut where_clauses = vec![];
+ for (ty, bounds) in types {
+ where_clauses
+ .extend(bounds.into_iter().map(|bound| format!("{}: {}", ty, tcx.def_path_str(bound))));
+ }
+ for projection in &projections {
+ let p = projection.skip_binder();
+ // FIXME: this is not currently supported syntax; we should be looking at `types` and
+ // inserting the associated types where they correspond, but for now let's be "lazy" and
+ // propose this instead of the following valid resugaring:
+ // `T: Trait, Trait::Assoc = K` → `T: Trait<Assoc = K>`
+ where_clauses.push(format!(
+ "{} = {}",
+ tcx.def_path_str(p.projection_ty.item_def_id),
+ p.term,
+ ));
+ }
+ let where_clauses = if where_clauses.is_empty() {
+ String::new()
+ } else {
+ format!(" where {}", where_clauses.join(", "))
+ };
+ (generics, where_clauses)
+}
+
+/// Return placeholder code for the given function.
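+///
+/// For example (illustrative only), a missing trait method declared as
+/// `fn get(&self, key: u32) -> u32` is rendered roughly as
+/// `fn get(&self, _: u32) -> u32 { todo!() }`.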
+fn fn_sig_suggestion<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ sig: ty::FnSig<'tcx>,
+ ident: Ident,
+ predicates: ty::GenericPredicates<'tcx>,
+ assoc: &ty::AssocItem,
+) -> String {
+ let args = sig
+ .inputs()
+ .iter()
+ .enumerate()
+ .map(|(i, ty)| {
+ Some(match ty.kind() {
+ ty::Param(_) if assoc.fn_has_self_parameter && i == 0 => "self".to_string(),
+ ty::Ref(reg, ref_ty, mutability) if i == 0 => {
+ let reg = format!("{reg} ");
+ let reg = match &reg[..] {
+ "'_ " | " " => "",
+ reg => reg,
+ };
+ if assoc.fn_has_self_parameter {
+ match ref_ty.kind() {
+ ty::Param(param) if param.name == kw::SelfUpper => {
+ format!("&{}{}self", reg, mutability.prefix_str())
+ }
+
+ _ => format!("self: {ty}"),
+ }
+ } else {
+ format!("_: {ty}")
+ }
+ }
+ _ => {
+ if assoc.fn_has_self_parameter && i == 0 {
+ format!("self: {ty}")
+ } else {
+ format!("_: {ty}")
+ }
+ }
+ })
+ })
+ .chain(std::iter::once(if sig.c_variadic { Some("...".to_string()) } else { None }))
+ .flatten()
+ .collect::<Vec<String>>()
+ .join(", ");
+ let output = sig.output();
+ let output = if !output.is_unit() { format!(" -> {output}") } else { String::new() };
+
+ let unsafety = sig.unsafety.prefix_str();
+ let (generics, where_clauses) = bounds_from_generic_predicates(tcx, predicates);
+
+ // FIXME: this is not entirely correct, as the lifetimes from borrowed params will
+ // not be present in the `fn` definition, nor will we account for renamed
+ // lifetimes between the `impl` and the `trait`, but this should be good enough to
+ // fill in a significant portion of the missing code, and other subsequent
+ // suggestions can help the user fix the code.
+ format!("{unsafety}fn {ident}{generics}({args}){output}{where_clauses} {{ todo!() }}")
+}
+
+pub fn ty_kind_suggestion(ty: Ty<'_>) -> Option<&'static str> {
+ Some(match ty.kind() {
+ ty::Bool => "true",
+ ty::Char => "'a'",
+ ty::Int(_) | ty::Uint(_) => "42",
+ ty::Float(_) => "3.14159",
+ ty::Error(_) | ty::Never => return None,
+ _ => "value",
+ })
+}
+
+/// Return placeholder code for the given associated item.
+/// Similar to `ty::AssocItem::suggestion`, but appropriate for use as the code snippet of a
+/// structured suggestion.
+fn suggestion_signature(assoc: &ty::AssocItem, tcx: TyCtxt<'_>) -> String {
+ match assoc.kind {
+ ty::AssocKind::Fn => {
+ // We skip the binder here because the binder would deanonymize all
+ // late-bound regions, and we don't want method signatures to show up
+ // as `for<'r> fn(&'r MyType)`. Pretty-printing handles late-bound
+ // regions just fine, showing `fn(&MyType)`.
+ fn_sig_suggestion(
+ tcx,
+ tcx.fn_sig(assoc.def_id).skip_binder(),
+ assoc.ident(tcx),
+ tcx.predicates_of(assoc.def_id),
+ assoc,
+ )
+ }
+ ty::AssocKind::Type => format!("type {} = Type;", assoc.name),
+ ty::AssocKind::Const => {
+ let ty = tcx.type_of(assoc.def_id);
+ let val = ty_kind_suggestion(ty).unwrap_or("value");
+ format!("const {}: {} = {};", assoc.name, ty, val)
+ }
+ }
+}
+
+/// Emit an error when encountering two or more variants in a transparent enum.
+fn bad_variant_count<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>, sp: Span, did: DefId) {
+ let variant_spans: Vec<_> = adt
+ .variants()
+ .iter()
+ .map(|variant| tcx.hir().span_if_local(variant.def_id).unwrap())
+ .collect();
+ let msg = format!("needs exactly one variant, but has {}", adt.variants().len(),);
+ let mut err = struct_span_err!(tcx.sess, sp, E0731, "transparent enum {msg}");
+ err.span_label(sp, &msg);
+ if let [start @ .., end] = &*variant_spans {
+ for variant_span in start {
+ err.span_label(*variant_span, "");
+ }
+ err.span_label(*end, &format!("too many variants in `{}`", tcx.def_path_str(did)));
+ }
+ err.emit();
+}
+
+/// Emit an error when encountering two or more non-zero-sized fields in a transparent
+/// enum.
+fn bad_non_zero_sized_fields<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ adt: ty::AdtDef<'tcx>,
+ field_count: usize,
+ field_spans: impl Iterator<Item = Span>,
+ sp: Span,
+) {
+ let msg = format!("needs at most one non-zero-sized field, but has {field_count}");
+ let mut err = struct_span_err!(
+ tcx.sess,
+ sp,
+ E0690,
+ "{}transparent {} {}",
+ if adt.is_enum() { "the variant of a " } else { "" },
+ adt.descr(),
+ msg,
+ );
+ err.span_label(sp, &msg);
+ for sp in field_spans {
+ err.span_label(sp, "this field is non-zero-sized");
+ }
+ err.emit();
+}
+
+// FIXME: Consider moving this method to a more fitting place.
+pub fn potentially_plural_count(count: usize, word: &str) -> String {
+ format!("{} {}{}", count, word, pluralize!(count))
+}
diff --git a/compiler/rustc_hir_analysis/src/check/region.rs b/compiler/rustc_hir_analysis/src/check/region.rs
new file mode 100644
index 000000000..ff32329e4
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/region.rs
@@ -0,0 +1,856 @@
+//! This file builds up the `ScopeTree`, which describes
+//! the parent links in the region hierarchy.
+//!
+//! For more information about how MIR-based region-checking works,
+//! see the [rustc dev guide].
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/borrow_check.html
+
+use rustc_ast::walk_list;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{Arm, Block, Expr, Local, Pat, PatKind, Stmt};
+use rustc_index::vec::Idx;
+use rustc_middle::middle::region::*;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::source_map;
+use rustc_span::Span;
+
+use std::mem;
+
+#[derive(Debug, Copy, Clone)]
+pub struct Context {
+ /// The scope that contains any new variables declared, plus its depth in
+ /// the scope tree.
+ var_parent: Option<(Scope, ScopeDepth)>,
+
+ /// Region parent of expressions, etc., plus its depth in the scope tree.
+ parent: Option<(Scope, ScopeDepth)>,
+}
+
+struct RegionResolutionVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ // The number of expressions and patterns visited in the current body.
+ expr_and_pat_count: usize,
+ // When this is `true`, we record the `Scopes` we encounter
+ // when processing a Yield expression. This allows us to fix
+ // up their indices.
+ pessimistic_yield: bool,
+ // Stores scopes when `pessimistic_yield` is `true`.
+ fixup_scopes: Vec<Scope>,
+ // The generated scope tree.
+ scope_tree: ScopeTree,
+
+ cx: Context,
+
+ /// `terminating_scopes` is a set containing the ids of each
+ /// statement, or conditional/repeating expression. These scopes
+ /// are calling "terminating scopes" because, when attempting to
+ /// find the scope of a temporary, by default we search up the
+ /// enclosing scopes until we encounter the terminating scope. A
+ /// conditional/repeating expression is one which is not
+ /// guaranteed to execute exactly once upon entering the parent
+ /// scope. This could be because the expression only executes
+ /// conditionally, such as the expression `b` in `a && b`, or
+ /// because the expression may execute many times, such as a loop
+ /// body. The reason that we distinguish such expressions is that,
+ /// upon exiting the parent scope, we cannot statically know how
+ /// many times the expression executed, and thus if the expression
+ /// creates temporaries we cannot know statically how many such
+/// temporaries we would have to clean up. Therefore, we ensure that
+ /// the temporaries never outlast the conditional/repeating
+ /// expression, preventing the need for dynamic checks and/or
+ /// arbitrary amounts of stack space. Terminating scopes end
+ /// up being contained in a DestructionScope that contains the
+ /// destructor's execution.
+ terminating_scopes: FxHashSet<hir::ItemLocalId>,
+}
+
+/// Records the lifetime of a local variable as `cx.var_parent`
+fn record_var_lifetime(
+ visitor: &mut RegionResolutionVisitor<'_>,
+ var_id: hir::ItemLocalId,
+ _sp: Span,
+) {
+ match visitor.cx.var_parent {
+ None => {
+ // this can happen in extern fn declarations like
+ //
+ // extern fn isalnum(c: c_int) -> c_int
+ }
+ Some((parent_scope, _)) => visitor.scope_tree.record_var_scope(var_id, parent_scope),
+ }
+}
+
+fn resolve_block<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, blk: &'tcx hir::Block<'tcx>) {
+ debug!("resolve_block(blk.hir_id={:?})", blk.hir_id);
+
+ let prev_cx = visitor.cx;
+
+ // We treat the tail expression in the block (if any) somewhat
+ // differently from the statements. The issue has to do with
+ // temporary lifetimes. Consider the following:
+ //
+ // quux({
+ // let inner = ... (&bar()) ...;
+ //
+ // (... (&foo()) ...) // (the tail expression)
+ // }, other_argument());
+ //
+ // Each of the statements within the block is a terminating
+ // scope, and thus a temporary (e.g., the result of calling
+ // `bar()` in the initializer expression for `let inner = ...;`)
+ // will be cleaned up immediately after its corresponding
+ // statement (i.e., `let inner = ...;`) executes.
+ //
+ // On the other hand, temporaries associated with evaluating the
+ // tail expression for the block are assigned lifetimes so that
+ // they will be cleaned up as part of the terminating scope
+ // *surrounding* the block expression. Here, the terminating
+ // scope for the block expression is the `quux(..)` call; so
+ // those temporaries will only be cleaned up *after* both
+ // `other_argument()` has run and also the call to `quux(..)`
+ // itself has returned.
+
+ visitor.enter_node_scope_with_dtor(blk.hir_id.local_id);
+ visitor.cx.var_parent = visitor.cx.parent;
+
+ {
+ // This block should be kept approximately in sync with
+ // `intravisit::walk_block`. (We manually walk the block, rather
+ // than call `walk_block`, in order to maintain precise
+ // index information.)
+
+ for (i, statement) in blk.stmts.iter().enumerate() {
+ match statement.kind {
+ hir::StmtKind::Local(hir::Local { els: Some(els), .. }) => {
+ // Let-else has a special lexical structure for variables.
+ // First we take a checkpoint of the current scope context here.
+ let mut prev_cx = visitor.cx;
+
+ visitor.enter_scope(Scope {
+ id: blk.hir_id.local_id,
+ data: ScopeData::Remainder(FirstStatementIndex::new(i)),
+ });
+ visitor.cx.var_parent = visitor.cx.parent;
+ visitor.visit_stmt(statement);
+ // We need to back out temporarily to the last enclosing scope
+ // for the `else` block, so that even the temporaries receiving
+ // extended lifetime will be dropped inside this block.
+ // We are visiting the `else` block in this order so that
+ // the sequence of visits agrees with the order in the default
+ // `hir::intravisit` visitor.
+ mem::swap(&mut prev_cx, &mut visitor.cx);
+ visitor.terminating_scopes.insert(els.hir_id.local_id);
+ visitor.visit_block(els);
+ // From now on, we continue normally.
+ visitor.cx = prev_cx;
+ }
+ hir::StmtKind::Local(..) | hir::StmtKind::Item(..) => {
+ // Each declaration introduces a subscope for bindings
+ // introduced by the declaration; this subscope covers a
+ // suffix of the block. Each subscope in a block has the
+ // previous subscope in the block as a parent, except for
+ // the first such subscope, which has the block itself as a
+ // parent.
+ visitor.enter_scope(Scope {
+ id: blk.hir_id.local_id,
+ data: ScopeData::Remainder(FirstStatementIndex::new(i)),
+ });
+ visitor.cx.var_parent = visitor.cx.parent;
+ visitor.visit_stmt(statement)
+ }
+ hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => visitor.visit_stmt(statement),
+ }
+ }
+ walk_list!(visitor, visit_expr, &blk.expr);
+ }
+
+ visitor.cx = prev_cx;
+}
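+// Rough illustration of the tail-expression rule described above: in
+// `f({ let a = g(); &h() })`, the temporary created for `&h()` in the block's
+// tail expression is expected to live until the enclosing `f(..)` call
+// finishes, whereas temporaries from `g()` are dropped at the end of the
+// `let` statement.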
+
+fn resolve_arm<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, arm: &'tcx hir::Arm<'tcx>) {
+ let prev_cx = visitor.cx;
+
+ visitor.enter_scope(Scope { id: arm.hir_id.local_id, data: ScopeData::Node });
+ visitor.cx.var_parent = visitor.cx.parent;
+
+ visitor.terminating_scopes.insert(arm.body.hir_id.local_id);
+
+ if let Some(hir::Guard::If(ref expr)) = arm.guard {
+ visitor.terminating_scopes.insert(expr.hir_id.local_id);
+ }
+
+ intravisit::walk_arm(visitor, arm);
+
+ visitor.cx = prev_cx;
+}
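+// Illustrative note: for an arm such as `pat if guard() => body()`, both the
+// guard expression and the arm body are marked as terminating scopes above,
+// so temporaries created in either are expected to be dropped once that
+// sub-expression finishes evaluating.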
+
+fn resolve_pat<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, pat: &'tcx hir::Pat<'tcx>) {
+ visitor.record_child_scope(Scope { id: pat.hir_id.local_id, data: ScopeData::Node });
+
+ // If this is a binding then record the lifetime of that binding.
+ if let PatKind::Binding(..) = pat.kind {
+ record_var_lifetime(visitor, pat.hir_id.local_id, pat.span);
+ }
+
+ debug!("resolve_pat - pre-increment {} pat = {:?}", visitor.expr_and_pat_count, pat);
+
+ intravisit::walk_pat(visitor, pat);
+
+ visitor.expr_and_pat_count += 1;
+
+ debug!("resolve_pat - post-increment {} pat = {:?}", visitor.expr_and_pat_count, pat);
+}
+
+fn resolve_stmt<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, stmt: &'tcx hir::Stmt<'tcx>) {
+ let stmt_id = stmt.hir_id.local_id;
+ debug!("resolve_stmt(stmt.id={:?})", stmt_id);
+
+ // Every statement will clean up the temporaries created during
+ // execution of that statement. Therefore each statement has an
+ // associated destruction scope that represents the scope of the
+ // statement plus its destructors, and thus the scope for which
+ // regions referenced by the destructors need to survive.
+ visitor.terminating_scopes.insert(stmt_id);
+
+ let prev_parent = visitor.cx.parent;
+ visitor.enter_node_scope_with_dtor(stmt_id);
+
+ intravisit::walk_stmt(visitor, stmt);
+
+ visitor.cx.parent = prev_parent;
+}
+
+fn resolve_expr<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, expr: &'tcx hir::Expr<'tcx>) {
+ debug!("resolve_expr - pre-increment {} expr = {:?}", visitor.expr_and_pat_count, expr);
+
+ let prev_cx = visitor.cx;
+ visitor.enter_node_scope_with_dtor(expr.hir_id.local_id);
+
+ {
+ let terminating_scopes = &mut visitor.terminating_scopes;
+ let mut terminating = |id: hir::ItemLocalId| {
+ terminating_scopes.insert(id);
+ };
+ match expr.kind {
+ // Conditional or repeating scopes are always terminating
+ // scopes, meaning that temporaries cannot outlive them.
+ // This ensures fixed size stacks.
+ hir::ExprKind::Binary(
+ source_map::Spanned { node: hir::BinOpKind::And, .. },
+ _,
+ ref r,
+ )
+ | hir::ExprKind::Binary(
+ source_map::Spanned { node: hir::BinOpKind::Or, .. },
+ _,
+ ref r,
+ ) => {
+ // For short-circuiting operators, mark the RHS as a terminating
+ // scope since it only executes conditionally.
+
+ // `Let` expressions (in a let-chain) shouldn't be terminating, as their temporaries
+ // should live beyond the immediate expression
+ if !matches!(r.kind, hir::ExprKind::Let(_)) {
+ terminating(r.hir_id.local_id);
+ }
+ }
+ hir::ExprKind::If(_, ref then, Some(ref otherwise)) => {
+ terminating(then.hir_id.local_id);
+ terminating(otherwise.hir_id.local_id);
+ }
+
+ hir::ExprKind::If(_, ref then, None) => {
+ terminating(then.hir_id.local_id);
+ }
+
+ hir::ExprKind::Loop(ref body, _, _, _) => {
+ terminating(body.hir_id.local_id);
+ }
+
+ hir::ExprKind::DropTemps(ref expr) => {
+ // `DropTemps(expr)` does not denote a conditional scope.
+ // Rather, we want to achieve the same behavior as `{ let _t = expr; _t }`.
+ terminating(expr.hir_id.local_id);
+ }
+
+ hir::ExprKind::AssignOp(..)
+ | hir::ExprKind::Index(..)
+ | hir::ExprKind::Unary(..)
+ | hir::ExprKind::Call(..)
+ | hir::ExprKind::MethodCall(..) => {
+ // FIXME(https://github.com/rust-lang/rfcs/issues/811) Nested method calls
+ //
+ // The lifetimes for a call or method call look as follows:
+ //
+ // call.id
+ // - arg0.id
+ // - ...
+ // - argN.id
+ // - call.callee_id
+ //
+ // The idea is that call.callee_id represents *the time when
+ // the invoked function is actually running* and call.id
+ // represents *the time to prepare the arguments and make the
+ // call*. See the section "Borrows in Calls" in borrowck/README.md
+ // for an extended explanation of why this distinction is
+ // important.
+ //
+ // record_superlifetime(new_cx, expr.callee_id);
+ }
+
+ _ => {}
+ }
+ }
+
+ let prev_pessimistic = visitor.pessimistic_yield;
+
+ // Ordinarily, we can rely on the visit order of HIR intravisit
+ // to correspond to the actual execution order of statements.
+ // However, there's a weird corner case with compound assignment
+ // operators (e.g. `a += b`). The evaluation order depends on whether
+ // or not the operator is overloaded (e.g. whether or not a trait
+ // like AddAssign is implemented).
+
+ // For primitive types (which, despite having a trait impl, don't actually
+ // end up calling it), the evaluation order is right-to-left. For example,
+ // the following code snippet:
+ //
+ // let y = &mut 0;
+ // *{println!("LHS!"); y} += {println!("RHS!"); 1};
+ //
+ // will print:
+ //
+ // RHS!
+ // LHS!
+ //
+ // However, if the operator is used on a non-primitive type,
+ // the evaluation order will be left-to-right, since the operator
+ // actually gets desugared to a method call. For example, this
+ // nearly identical code snippet:
+ //
+ // let y = &mut String::new();
+ // *{println!("LHS String"); y} += {println!("RHS String"); "hi"};
+ //
+ // will print:
+ // LHS String
+ // RHS String
+ //
+ // To determine the actual execution order, we need to perform
+ // trait resolution. Unfortunately, we need to be able to compute
+ // yield_in_scope before type checking is even done, as it gets
+ // used by AST borrowcheck.
+ //
+ // Fortunately, we don't need to know the actual execution order.
+ // It suffices to know the 'worst case' order with respect to yields.
+ // Specifically, we need to know the highest 'expr_and_pat_count'
+ // that we could assign to the yield expression. To do this,
+ // we pick the greater of the two values from the left-hand
+ // and right-hand expressions. This makes us overly conservative
+ // about what types could possibly live across yield points,
+ // but we will never fail to detect that a type does actually
+ // live across a yield point. The latter part is critical -
+ // we're already overly conservative about what types will live
+ // across yield points, as the generated MIR will determine
+ // when things are actually live. However, for typecheck to work
+ // properly, we can't miss any types.
+
+ match expr.kind {
+ // Manually recurse over closures and inline consts, because they are the only
+ // case of nested bodies that share the parent environment.
+ hir::ExprKind::Closure(&hir::Closure { body, .. })
+ | hir::ExprKind::ConstBlock(hir::AnonConst { body, .. }) => {
+ let body = visitor.tcx.hir().body(body);
+ visitor.visit_body(body);
+ }
+ hir::ExprKind::AssignOp(_, ref left_expr, ref right_expr) => {
+ debug!(
+ "resolve_expr - enabling pessimistic_yield, was previously {}",
+ prev_pessimistic
+ );
+
+ let start_point = visitor.fixup_scopes.len();
+ visitor.pessimistic_yield = true;
+
+ // If the actual execution order turns out to be right-to-left,
+ // then we're fine. However, if the actual execution order is left-to-right,
+ // then we'll assign too low a count to any `yield` expressions
+ // we encounter in 'right_expression' - they should really occur after all of the
+ // expressions in 'left_expression'.
+ visitor.visit_expr(&right_expr);
+ visitor.pessimistic_yield = prev_pessimistic;
+
+ debug!("resolve_expr - restoring pessimistic_yield to {}", prev_pessimistic);
+ visitor.visit_expr(&left_expr);
+ debug!("resolve_expr - fixing up counts to {}", visitor.expr_and_pat_count);
+
+ // Remove and process any scopes pushed by the visitor
+ let target_scopes = visitor.fixup_scopes.drain(start_point..);
+
+ for scope in target_scopes {
+ let mut yield_data =
+ visitor.scope_tree.yield_in_scope.get_mut(&scope).unwrap().last_mut().unwrap();
+ let count = yield_data.expr_and_pat_count;
+ let span = yield_data.span;
+
+ // expr_and_pat_count never decreases. Since we recorded counts in yield_in_scope
+ // before walking the left-hand side, it should be impossible for the recorded
+ // count to be greater than the left-hand side count.
+ if count > visitor.expr_and_pat_count {
+ bug!(
+ "Encountered greater count {} at span {:?} - expected no greater than {}",
+ count,
+ span,
+ visitor.expr_and_pat_count
+ );
+ }
+ let new_count = visitor.expr_and_pat_count;
+ debug!(
+ "resolve_expr - increasing count for scope {:?} from {} to {} at span {:?}",
+ scope, count, new_count, span
+ );
+
+ yield_data.expr_and_pat_count = new_count;
+ }
+ }
+
+ hir::ExprKind::If(ref cond, ref then, Some(ref otherwise)) => {
+ let expr_cx = visitor.cx;
+ visitor.enter_scope(Scope { id: then.hir_id.local_id, data: ScopeData::IfThen });
+ visitor.cx.var_parent = visitor.cx.parent;
+ visitor.visit_expr(cond);
+ visitor.visit_expr(then);
+ visitor.cx = expr_cx;
+ visitor.visit_expr(otherwise);
+ }
+
+ hir::ExprKind::If(ref cond, ref then, None) => {
+ let expr_cx = visitor.cx;
+ visitor.enter_scope(Scope { id: then.hir_id.local_id, data: ScopeData::IfThen });
+ visitor.cx.var_parent = visitor.cx.parent;
+ visitor.visit_expr(cond);
+ visitor.visit_expr(then);
+ visitor.cx = expr_cx;
+ }
+
+ _ => intravisit::walk_expr(visitor, expr),
+ }
+
+ visitor.expr_and_pat_count += 1;
+
+ debug!("resolve_expr post-increment {}, expr = {:?}", visitor.expr_and_pat_count, expr);
+
+ if let hir::ExprKind::Yield(_, source) = &expr.kind {
+ // Mark this expr's scope and all parent scopes as containing `yield`.
+ let mut scope = Scope { id: expr.hir_id.local_id, data: ScopeData::Node };
+ loop {
+ let span = match expr.kind {
+ hir::ExprKind::Yield(expr, hir::YieldSource::Await { .. }) => {
+ expr.span.shrink_to_hi().to(expr.span)
+ }
+ _ => expr.span,
+ };
+ let data =
+ YieldData { span, expr_and_pat_count: visitor.expr_and_pat_count, source: *source };
+ match visitor.scope_tree.yield_in_scope.get_mut(&scope) {
+ Some(yields) => yields.push(data),
+ None => {
+ visitor.scope_tree.yield_in_scope.insert(scope, vec![data]);
+ }
+ }
+
+ if visitor.pessimistic_yield {
+ debug!("resolve_expr in pessimistic_yield - marking scope {:?} for fixup", scope);
+ visitor.fixup_scopes.push(scope);
+ }
+
+ // Keep traversing up while we can.
+ match visitor.scope_tree.parent_map.get(&scope) {
+ // Don't cross from closure bodies to their parent.
+ Some(&(superscope, _)) => match superscope.data {
+ ScopeData::CallSite => break,
+ _ => scope = superscope,
+ },
+ None => break,
+ }
+ }
+ }
+
+ visitor.cx = prev_cx;
+}
+
+fn resolve_local<'tcx>(
+ visitor: &mut RegionResolutionVisitor<'tcx>,
+ pat: Option<&'tcx hir::Pat<'tcx>>,
+ init: Option<&'tcx hir::Expr<'tcx>>,
+) {
+ debug!("resolve_local(pat={:?}, init={:?})", pat, init);
+
+ let blk_scope = visitor.cx.var_parent.map(|(p, _)| p);
+
+ // As an exception to the normal rules governing temporary
+ // lifetimes, initializers in a let have a temporary lifetime
+ // of the enclosing block. This means that e.g., a program
+ // like the following is legal:
+ //
+ // let ref x = HashMap::new();
+ //
+ // Because the hash map will be freed in the enclosing block.
+ //
+ // We express the rules more formally based on 3 grammars (defined
+ // fully in the helpers below that implement them):
+ //
+ // 1. `E&`, which matches expressions like `&<rvalue>` that
+ // own a pointer into the stack.
+ //
+ // 2. `P&`, which matches patterns like `ref x` or `(ref x, ref
+ // y)` that produce ref bindings into the value they are
+ // matched against or something (at least partially) owned by
+ // the value they are matched against. (By partially owned,
+ // I mean that creating a binding into a ref-counted or managed value
+ // would still count.)
+ //
+ // 3. `ET`, which matches both rvalues like `foo()` as well as places
+ // based on rvalues like `foo().x[2].y`.
+ //
+ // A subexpression `<rvalue>` that appears in a let initializer
+ // `let pat [: ty] = expr` has an extended temporary lifetime if
+ // any of the following conditions are met:
+ //
+ // A. `pat` matches `P&` and `expr` matches `ET`
+ // (covers cases where `pat` creates ref bindings into an rvalue
+ // produced by `expr`)
+ // B. `ty` is a borrowed pointer and `expr` matches `ET`
+ // (covers cases where coercion creates a borrow)
+ // C. `expr` matches `E&`
+ // (covers cases `expr` borrows an rvalue that is then assigned
+ // to memory (at least partially) owned by the binding)
+ //
+ // Here are some examples hopefully giving an intuition where each
+ // rule comes into play and why:
+ //
+ // Rule A. `let (ref x, ref y) = (foo().x, 44)`. The rvalue `(foo().x, 44)`
+ // would have an extended lifetime, but not `foo()`.
+ //
+ // Rule B. `let x = &foo().x`. The rvalue `foo()` would have extended
+ // lifetime.
+ //
+ // In some cases, multiple rules may apply (though not to the same
+ // rvalue). For example:
+ //
+ // let ref x = [&a(), &b()];
+ //
+ // Here, the expression `[...]` has an extended lifetime due to rule
+ // A, but the inner rvalues `a()` and `b()` have an extended lifetime
+ // due to rule C.
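+ //
+ // As a further rough illustration of rule C on its own: in `let x = &make();`,
+ // the expression `&make()` matches `E&`, so the `make()` rvalue is expected
+ // to be given the lifetime of the enclosing block.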
+
+ if let Some(expr) = init {
+ record_rvalue_scope_if_borrow_expr(visitor, &expr, blk_scope);
+
+ if let Some(pat) = pat {
+ if is_binding_pat(pat) {
+ visitor.scope_tree.record_rvalue_candidate(
+ expr.hir_id,
+ RvalueCandidateType::Pattern {
+ target: expr.hir_id.local_id,
+ lifetime: blk_scope,
+ },
+ );
+ }
+ }
+ }
+
+ // Make sure we visit the initializer first, so expr_and_pat_count remains correct.
+ // The correct order, as shared between generator_interior, drop_ranges and intravisitor,
+ // is to walk initializer, followed by pattern bindings, finally followed by the `else` block.
+ if let Some(expr) = init {
+ visitor.visit_expr(expr);
+ }
+ if let Some(pat) = pat {
+ visitor.visit_pat(pat);
+ }
+
+ /// Returns `true` if `pat` matches the `P&` non-terminal.
+ ///
+ /// ```text
+ /// P& = ref X
+ /// | StructName { ..., P&, ... }
+ /// | VariantName(..., P&, ...)
+ /// | [ ..., P&, ... ]
+ /// | ( ..., P&, ... )
+ /// | ... "|" P& "|" ...
+ /// | box P&
+ /// ```
+ fn is_binding_pat(pat: &hir::Pat<'_>) -> bool {
+ // Note that the code below looks for *explicit* refs only, that is, it won't
+ // know about *implicit* refs as introduced in #42640.
+ //
+ // This is not a problem. For example, consider
+ //
+ // let (ref x, ref y) = (Foo { .. }, Bar { .. });
+ //
+ // Due to the explicit refs on the left hand side, the below code would signal
+ // that the temporary value on the right hand side should live until the end of
+ // the enclosing block (as opposed to being dropped after the let is complete).
+ //
+ // To create an implicit ref, however, you must have a borrowed value on the RHS
+ // already, as in this example (which won't compile before #42640):
+ //
+ // let Foo { x, .. } = &Foo { x: ..., ... };
+ //
+ // in place of
+ //
+ // let Foo { ref x, .. } = Foo { ... };
+ //
+ // In the former case (the implicit ref version), the temporary is created by the
+ // & expression, and its lifetime would be extended to the end of the block (due
+ // to a different rule, not the below code).
+ match pat.kind {
+ PatKind::Binding(hir::BindingAnnotation(hir::ByRef::Yes, _), ..) => true,
+
+ PatKind::Struct(_, ref field_pats, _) => {
+ field_pats.iter().any(|fp| is_binding_pat(&fp.pat))
+ }
+
+ PatKind::Slice(ref pats1, ref pats2, ref pats3) => {
+ pats1.iter().any(|p| is_binding_pat(&p))
+ || pats2.iter().any(|p| is_binding_pat(&p))
+ || pats3.iter().any(|p| is_binding_pat(&p))
+ }
+
+ PatKind::Or(ref subpats)
+ | PatKind::TupleStruct(_, ref subpats, _)
+ | PatKind::Tuple(ref subpats, _) => subpats.iter().any(|p| is_binding_pat(&p)),
+
+ PatKind::Box(ref subpat) => is_binding_pat(&subpat),
+
+ PatKind::Ref(_, _)
+ | PatKind::Binding(hir::BindingAnnotation(hir::ByRef::No, _), ..)
+ | PatKind::Wild
+ | PatKind::Path(_)
+ | PatKind::Lit(_)
+ | PatKind::Range(_, _, _) => false,
+ }
+ }
+
+ /// If `expr` matches the `E&` grammar, then records an extended rvalue scope as appropriate:
+ ///
+ /// ```text
+ /// E& = & ET
+ /// | StructName { ..., f: E&, ... }
+ /// | [ ..., E&, ... ]
+ /// | ( ..., E&, ... )
+ /// | {...; E&}
+ /// | box E&
+ /// | E& as ...
+ /// | ( E& )
+ /// ```
+ fn record_rvalue_scope_if_borrow_expr<'tcx>(
+ visitor: &mut RegionResolutionVisitor<'tcx>,
+ expr: &hir::Expr<'_>,
+ blk_id: Option<Scope>,
+ ) {
+ match expr.kind {
+ hir::ExprKind::AddrOf(_, _, subexpr) => {
+ record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id);
+ visitor.scope_tree.record_rvalue_candidate(
+ subexpr.hir_id,
+ RvalueCandidateType::Borrow {
+ target: subexpr.hir_id.local_id,
+ lifetime: blk_id,
+ },
+ );
+ }
+ hir::ExprKind::Struct(_, fields, _) => {
+ for field in fields {
+ record_rvalue_scope_if_borrow_expr(visitor, &field.expr, blk_id);
+ }
+ }
+ hir::ExprKind::Array(subexprs) | hir::ExprKind::Tup(subexprs) => {
+ for subexpr in subexprs {
+ record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id);
+ }
+ }
+ hir::ExprKind::Cast(ref subexpr, _) => {
+ record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id)
+ }
+ hir::ExprKind::Block(ref block, _) => {
+ if let Some(ref subexpr) = block.expr {
+ record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id);
+ }
+ }
+ hir::ExprKind::Call(..) | hir::ExprKind::MethodCall(..) => {
+ // FIXME(@dingxiangfei2009): choose call arguments here
+ // for candidacy for extended parameter rule application
+ }
+ hir::ExprKind::Index(..) => {
+ // FIXME(@dingxiangfei2009): select the indices
+ // as candidate for rvalue scope rules
+ }
+ _ => {}
+ }
+ }
+}
+
+impl<'tcx> RegionResolutionVisitor<'tcx> {
+ /// Records the current parent (if any) as the parent of `child_scope`.
+ /// Returns the depth of `child_scope`.
+ fn record_child_scope(&mut self, child_scope: Scope) -> ScopeDepth {
+ let parent = self.cx.parent;
+ self.scope_tree.record_scope_parent(child_scope, parent);
+ // If `child_scope` has no parent, it must be the root node, and so has
+ // a depth of 1. Otherwise, its depth is one more than its parent's.
+ parent.map_or(1, |(_p, d)| d + 1)
+ }
+
+ /// Records the current parent (if any) as the parent of `child_scope`,
+ /// and sets `child_scope` as the new current parent.
+ fn enter_scope(&mut self, child_scope: Scope) {
+ let child_depth = self.record_child_scope(child_scope);
+ self.cx.parent = Some((child_scope, child_depth));
+ }
+
+ fn enter_node_scope_with_dtor(&mut self, id: hir::ItemLocalId) {
+ // If the node was previously marked as a terminating scope during the
+ // recursive visit of its parent node in the AST, then we need to
+ // account for the destruction scope representing the scope of
+ // the destructors that run immediately after it completes.
+ if self.terminating_scopes.contains(&id) {
+ self.enter_scope(Scope { id, data: ScopeData::Destruction });
+ }
+ self.enter_scope(Scope { id, data: ScopeData::Node });
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for RegionResolutionVisitor<'tcx> {
+ fn visit_block(&mut self, b: &'tcx Block<'tcx>) {
+ resolve_block(self, b);
+ }
+
+ fn visit_body(&mut self, body: &'tcx hir::Body<'tcx>) {
+ let body_id = body.id();
+ let owner_id = self.tcx.hir().body_owner_def_id(body_id);
+
+ debug!(
+ "visit_body(id={:?}, span={:?}, body.id={:?}, cx.parent={:?})",
+ owner_id,
+ self.tcx.sess.source_map().span_to_diagnostic_string(body.value.span),
+ body_id,
+ self.cx.parent
+ );
+
+ // Save all state that is specific to the outer function
+ // body. These will be restored down below, once we've
+ // visited the body.
+ let outer_ec = mem::replace(&mut self.expr_and_pat_count, 0);
+ let outer_cx = self.cx;
+ let outer_ts = mem::take(&mut self.terminating_scopes);
+ // The 'pessimistic yield' flag is set to true when we are
+ // processing a `+=` statement and have to make pessimistic
+ // control flow assumptions. This doesn't apply to nested
+ // bodies within the `+=` statements. See #69307.
+ let outer_pessimistic_yield = mem::replace(&mut self.pessimistic_yield, false);
+ self.terminating_scopes.insert(body.value.hir_id.local_id);
+
+ self.enter_scope(Scope { id: body.value.hir_id.local_id, data: ScopeData::CallSite });
+ self.enter_scope(Scope { id: body.value.hir_id.local_id, data: ScopeData::Arguments });
+
+ // The arguments and `self` are parented to the fn.
+ self.cx.var_parent = self.cx.parent.take();
+ for param in body.params {
+ self.visit_pat(&param.pat);
+ }
+
+ // The body of every fn is a root scope.
+ self.cx.parent = self.cx.var_parent;
+ if self.tcx.hir().body_owner_kind(owner_id).is_fn_or_closure() {
+ self.visit_expr(&body.value)
+ } else {
+ // Only functions have an outer terminating (drop) scope, while
+ // temporaries in constant initializers may be 'static, but only
+ // according to rvalue lifetime semantics, using the same
+ // syntactical rules used for let initializers.
+ //
+ // e.g., in `let x = &f();`, the temporary holding the result from
+ // the `f()` call lives for the entirety of the surrounding block.
+ //
+ // Similarly, `const X: ... = &f();` would have the result of `f()`
+ // live for `'static`, implying (if Drop restrictions on constants
+ // ever get lifted) that the value *could* have a destructor, but
+ // it'd get leaked instead of the destructor running during the
+ // evaluation of `X` (if at all allowed by CTFE).
+ //
+ // However, `const Y: ... = g(&f());`, like `let y = g(&f());`,
+ // would *not* let the `f()` temporary escape into an outer scope
+ // (i.e., `'static`), which means that after `g` returns, it drops,
+ // and all the associated destruction scope rules apply.
+ self.cx.var_parent = None;
+ resolve_local(self, None, Some(&body.value));
+ }
+
+ if body.generator_kind.is_some() {
+ self.scope_tree.body_expr_count.insert(body_id, self.expr_and_pat_count);
+ }
+
+ // Restore context we had at the start.
+ self.expr_and_pat_count = outer_ec;
+ self.cx = outer_cx;
+ self.terminating_scopes = outer_ts;
+ self.pessimistic_yield = outer_pessimistic_yield;
+ }
+
+ fn visit_arm(&mut self, a: &'tcx Arm<'tcx>) {
+ resolve_arm(self, a);
+ }
+ fn visit_pat(&mut self, p: &'tcx Pat<'tcx>) {
+ resolve_pat(self, p);
+ }
+ fn visit_stmt(&mut self, s: &'tcx Stmt<'tcx>) {
+ resolve_stmt(self, s);
+ }
+ fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
+ resolve_expr(self, ex);
+ }
+ fn visit_local(&mut self, l: &'tcx Local<'tcx>) {
+ resolve_local(self, Some(&l.pat), l.init)
+ }
+}
+
+/// Per-body `region::ScopeTree`. The `DefId` should be the owner `DefId` for the body;
+/// in the case of closures, this will be redirected to the enclosing function.
+///
+/// Performance: This is a query rather than a simple function to enable
+/// re-use in incremental scenarios. We may sometimes need to rerun the
+/// type checker even when the HIR hasn't changed, and in those cases
+/// we can avoid reconstructing the region scope tree.
+pub fn region_scope_tree(tcx: TyCtxt<'_>, def_id: DefId) -> &ScopeTree {
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id);
+ if typeck_root_def_id != def_id {
+ return tcx.region_scope_tree(typeck_root_def_id);
+ }
+
+ let scope_tree = if let Some(body_id) = tcx.hir().maybe_body_owned_by(def_id.expect_local()) {
+ let mut visitor = RegionResolutionVisitor {
+ tcx,
+ scope_tree: ScopeTree::default(),
+ expr_and_pat_count: 0,
+ cx: Context { parent: None, var_parent: None },
+ terminating_scopes: Default::default(),
+ pessimistic_yield: false,
+ fixup_scopes: vec![],
+ };
+
+ let body = tcx.hir().body(body_id);
+ visitor.scope_tree.root_body = Some(body.value.hir_id);
+ visitor.visit_body(body);
+ visitor.scope_tree
+ } else {
+ ScopeTree::default()
+ };
+
+ tcx.arena.alloc(scope_tree)
+}
diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
new file mode 100644
index 000000000..a23575004
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
@@ -0,0 +1,1990 @@
+use crate::constrained_generic_params::{identify_constrained_generic_params, Parameter};
+use hir::def::DefKind;
+use rustc_ast as ast;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::ItemKind;
+use rustc_infer::infer::outlives::env::{OutlivesEnvironment, RegionBoundPairs};
+use rustc_infer::infer::outlives::obligations::TypeOutlives;
+use rustc_infer::infer::{self, InferCtxt, TyCtxtInferExt};
+use rustc_middle::mir::ConstraintCategory;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::trait_def::TraitSpecializationKind;
+use rustc_middle::ty::{
+ self, AdtKind, DefIdTree, GenericParamDefKind, ToPredicate, Ty, TyCtxt, TypeFoldable,
+ TypeSuperVisitable, TypeVisitable, TypeVisitor,
+};
+use rustc_middle::ty::{GenericArgKind, InternalSubsts};
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_trait_selection::autoderef::Autoderef;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt;
+use rustc_trait_selection::traits::outlives_bounds::InferCtxtExt as _;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
+use rustc_trait_selection::traits::{
+ self, ObligationCause, ObligationCauseCode, ObligationCtxt, WellFormedLoc,
+};
+
+use std::cell::LazyCell;
+use std::convert::TryInto;
+use std::iter;
+use std::ops::{ControlFlow, Deref};
+
+pub(super) struct WfCheckingCtxt<'a, 'tcx> {
+ pub(super) ocx: ObligationCtxt<'a, 'tcx>,
+ span: Span,
+ body_id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+}
+impl<'a, 'tcx> Deref for WfCheckingCtxt<'a, 'tcx> {
+ type Target = ObligationCtxt<'a, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.ocx
+ }
+}
+
+impl<'tcx> WfCheckingCtxt<'_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.ocx.infcx.tcx
+ }
+
+ fn normalize<T>(&self, span: Span, loc: Option<WellFormedLoc>, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.ocx.normalize(
+ ObligationCause::new(span, self.body_id, ObligationCauseCode::WellFormed(loc)),
+ self.param_env,
+ value,
+ )
+ }
+
+ fn register_wf_obligation(
+ &self,
+ span: Span,
+ loc: Option<WellFormedLoc>,
+ arg: ty::GenericArg<'tcx>,
+ ) {
+ let cause =
+ traits::ObligationCause::new(span, self.body_id, ObligationCauseCode::WellFormed(loc));
+ // For a type to be WF, we do not need to check that its const trait predicates hold.
+ let param_env = self.param_env.without_const();
+ self.ocx.register_obligation(traits::Obligation::new(
+ cause,
+ param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(arg)).to_predicate(self.tcx()),
+ ));
+ }
+}
+
+pub(super) fn enter_wf_checking_ctxt<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ body_def_id: LocalDefId,
+ f: F,
+) where
+ F: for<'a> FnOnce(&WfCheckingCtxt<'a, 'tcx>),
+{
+ let param_env = tcx.param_env(body_def_id);
+ let body_id = tcx.hir().local_def_id_to_hir_id(body_def_id);
+ let infcx = &tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(infcx);
+
+ let assumed_wf_types = ocx.assumed_wf_types(param_env, span, body_def_id);
+
+ let mut wfcx = WfCheckingCtxt { ocx, span, body_id, param_env };
+
+ if !tcx.features().trivial_bounds {
+ wfcx.check_false_global_bounds()
+ }
+ f(&mut wfcx);
+ let errors = wfcx.select_all_or_error();
+ if !errors.is_empty() {
+ infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ return;
+ }
+
+ let implied_bounds = infcx.implied_bounds_tys(param_env, body_id, assumed_wf_types);
+ let outlives_environment =
+ OutlivesEnvironment::with_bounds(param_env, Some(infcx), implied_bounds);
+
+ infcx.check_region_obligations_and_report_errors(body_def_id, &outlives_environment);
+}
+
+fn check_well_formed(tcx: TyCtxt<'_>, def_id: hir::OwnerId) {
+ let node = tcx.hir().expect_owner(def_id);
+ match node {
+ hir::OwnerNode::Crate(_) => {}
+ hir::OwnerNode::Item(item) => check_item(tcx, item),
+ hir::OwnerNode::TraitItem(item) => check_trait_item(tcx, item),
+ hir::OwnerNode::ImplItem(item) => check_impl_item(tcx, item),
+ hir::OwnerNode::ForeignItem(item) => check_foreign_item(tcx, item),
+ }
+
+ if let Some(generics) = node.generics() {
+ for param in generics.params {
+ check_param_wf(tcx, param)
+ }
+ }
+}
+
+/// Checks that the field types (in a struct def'n) or argument types (in an enum def'n) are
+/// well-formed, meaning that they do not require any constraints not declared in the struct
+/// definition itself. For example, this definition would be illegal:
+///
+/// ```rust
+/// struct Ref<'a, T> { x: &'a T }
+/// ```
+///
+/// because the type did not declare that `T: 'a`.
+///
+/// We do this check as a pre-pass before checking fn bodies because if these constraints are
+/// not included it frequently leads to confusing errors in fn bodies. So it's better to check
+/// the types first.
+#[instrument(skip(tcx), level = "debug")]
+fn check_item<'tcx>(tcx: TyCtxt<'tcx>, item: &'tcx hir::Item<'tcx>) {
+ let def_id = item.owner_id.def_id;
+
+ debug!(
+ ?item.owner_id,
+ item.name = ? tcx.def_path_str(def_id.to_def_id())
+ );
+
+ match item.kind {
+ // Right now we check that every default trait implementation
+ // has an implementation of itself. Basically, a case like:
+ //
+ // impl Trait for T {}
+ //
+ // has a requirement of `T: Trait` which was required for default
+ // method implementations. Although this could be improved now that
+ // there's a better infrastructure in place for this, it's being left
+ // for a follow-up work.
+ //
+ // Since there's such a requirement, we need to check *just* positive
+ // implementations, otherwise things like:
+ //
+ // impl !Send for T {}
+ //
+ // won't be allowed unless there's an *explicit* implementation of `Send`
+ // for `T`
+ hir::ItemKind::Impl(ref impl_) => {
+ let is_auto = tcx
+ .impl_trait_ref(def_id)
+ .map_or(false, |trait_ref| tcx.trait_is_auto(trait_ref.def_id));
+ if let (hir::Defaultness::Default { .. }, true) = (impl_.defaultness, is_auto) {
+ let sp = impl_.of_trait.as_ref().map_or(item.span, |t| t.path.span);
+ let mut err =
+ tcx.sess.struct_span_err(sp, "impls of auto traits cannot be default");
+ err.span_labels(impl_.defaultness_span, "default because of this");
+ err.span_label(sp, "auto trait");
+ err.emit();
+ }
+ // We match on both `ty::ImplPolarity` and `ast::ImplPolarity` just to get the `!` span.
+ match (tcx.impl_polarity(def_id), impl_.polarity) {
+ (ty::ImplPolarity::Positive, _) => {
+ check_impl(tcx, item, impl_.self_ty, &impl_.of_trait, impl_.constness);
+ }
+ (ty::ImplPolarity::Negative, ast::ImplPolarity::Negative(span)) => {
+ // FIXME(#27579): what amount of WF checking do we need for neg impls?
+ if let hir::Defaultness::Default { .. } = impl_.defaultness {
+ let mut spans = vec![span];
+ spans.extend(impl_.defaultness_span);
+ struct_span_err!(
+ tcx.sess,
+ spans,
+ E0750,
+ "negative impls cannot be default impls"
+ )
+ .emit();
+ }
+ }
+ (ty::ImplPolarity::Reservation, _) => {
+ // FIXME: what amount of WF checking do we need for reservation impls?
+ }
+ _ => unreachable!(),
+ }
+ }
+ hir::ItemKind::Fn(ref sig, ..) => {
+ check_item_fn(tcx, def_id, item.ident, item.span, sig.decl);
+ }
+ hir::ItemKind::Static(ty, ..) => {
+ check_item_type(tcx, def_id, ty.span, false);
+ }
+ hir::ItemKind::Const(ty, ..) => {
+ check_item_type(tcx, def_id, ty.span, false);
+ }
+ hir::ItemKind::Struct(ref struct_def, ref ast_generics) => {
+ check_type_defn(tcx, item, false, |wfcx| vec![wfcx.non_enum_variant(struct_def)]);
+
+ check_variances_for_type_defn(tcx, item, ast_generics);
+ }
+ hir::ItemKind::Union(ref struct_def, ref ast_generics) => {
+ check_type_defn(tcx, item, true, |wfcx| vec![wfcx.non_enum_variant(struct_def)]);
+
+ check_variances_for_type_defn(tcx, item, ast_generics);
+ }
+ hir::ItemKind::Enum(ref enum_def, ref ast_generics) => {
+ check_type_defn(tcx, item, true, |wfcx| wfcx.enum_variants(enum_def));
+
+ check_variances_for_type_defn(tcx, item, ast_generics);
+ }
+ hir::ItemKind::Trait(..) => {
+ check_trait(tcx, item);
+ }
+ hir::ItemKind::TraitAlias(..) => {
+ check_trait(tcx, item);
+ }
+ // `ForeignItem`s are handled separately.
+ hir::ItemKind::ForeignMod { .. } => {}
+ _ => {}
+ }
+}
+
+fn check_foreign_item(tcx: TyCtxt<'_>, item: &hir::ForeignItem<'_>) {
+ let def_id = item.owner_id.def_id;
+
+ debug!(
+ ?item.owner_id,
+ item.name = ? tcx.def_path_str(def_id.to_def_id())
+ );
+
+ match item.kind {
+ hir::ForeignItemKind::Fn(decl, ..) => {
+ check_item_fn(tcx, def_id, item.ident, item.span, decl)
+ }
+ hir::ForeignItemKind::Static(ty, ..) => check_item_type(tcx, def_id, ty.span, true),
+ hir::ForeignItemKind::Type => (),
+ }
+}
+
+fn check_trait_item(tcx: TyCtxt<'_>, trait_item: &hir::TraitItem<'_>) {
+ let def_id = trait_item.owner_id.def_id;
+
+ let (method_sig, span) = match trait_item.kind {
+ hir::TraitItemKind::Fn(ref sig, _) => (Some(sig), trait_item.span),
+ hir::TraitItemKind::Type(_bounds, Some(ty)) => (None, ty.span),
+ _ => (None, trait_item.span),
+ };
+ check_object_unsafe_self_trait_by_name(tcx, trait_item);
+ check_associated_item(tcx, def_id, span, method_sig);
+
+ let encl_trait_def_id = tcx.local_parent(def_id);
+ let encl_trait = tcx.hir().expect_item(encl_trait_def_id);
+ let encl_trait_def_id = encl_trait.owner_id.to_def_id();
+ let fn_lang_item_name = if Some(encl_trait_def_id) == tcx.lang_items().fn_trait() {
+ Some("fn")
+ } else if Some(encl_trait_def_id) == tcx.lang_items().fn_mut_trait() {
+ Some("fn_mut")
+ } else {
+ None
+ };
+
+ if let (Some(fn_lang_item_name), "call") =
+ (fn_lang_item_name, trait_item.ident.name.to_ident_string().as_str())
+ {
+ // We are looking at the `call` function of the `fn` or `fn_mut` lang item.
+ // Do some rudimentary sanity checking to avoid an ICE later (issue #83471).
+ if let Some(hir::FnSig { decl, span, .. }) = method_sig {
+ if let [self_ty, _] = decl.inputs {
+ if !matches!(self_ty.kind, hir::TyKind::Rptr(_, _)) {
+ tcx.sess
+ .struct_span_err(
+ self_ty.span,
+ &format!(
+ "first argument of `call` in `{fn_lang_item_name}` lang item must be a reference",
+ ),
+ )
+ .emit();
+ }
+ } else {
+ tcx.sess
+ .struct_span_err(
+ *span,
+ &format!(
+ "`call` function in `{fn_lang_item_name}` lang item takes exactly two arguments",
+ ),
+ )
+ .emit();
+ }
+ } else {
+ tcx.sess
+ .struct_span_err(
+ trait_item.span,
+ &format!(
+ "`call` trait item in `{fn_lang_item_name}` lang item must be a function",
+ ),
+ )
+ .emit();
+ }
+ }
+}
+
+/// Require that the user writes where clauses on GATs for the implicit
+/// outlives bounds involving trait parameters in trait functions and
+/// lifetimes passed as GAT substs. See `self-outlives-lint` test.
+///
+/// We use the following trait as an example throughout this function:
+/// ```rust,ignore (this code fails due to this lint)
+/// trait IntoIter {
+/// type Iter<'a>: Iterator<Item = Self::Item<'a>>;
+/// type Item<'a>;
+/// fn into_iter<'a>(&'a self) -> Self::Iter<'a>;
+/// }
+/// ```
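+///
+/// For this example, the lint is expected to suggest adding `where Self: 'a`
+/// to both `Iter<'a>` and `Item<'a>` (a sketch of the intended outcome).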
+fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRef]) {
+ // Associates every GAT's def_id to a list of possibly missing bounds detected by this lint.
+ let mut required_bounds_by_item = FxHashMap::default();
+
+ // Loop over all GATs together, because if this lint suggests adding a where-clause bound
+// to one GAT, it might then require us to add an additional bound on another GAT.
+// In our `IntoIter` example, we discover a missing `Self: 'a` bound on `Iter<'a>`, which
+// then, on a second iteration of the loop, leads us to add a `Self: 'a` bound to `Item`
+// due to the relationship between those GATs.
+ loop {
+ let mut should_continue = false;
+ for gat_item in associated_items {
+ let gat_def_id = gat_item.id.owner_id;
+ let gat_item = tcx.associated_item(gat_def_id);
+ // If this item is not an assoc ty, or has no substs, then it's not a GAT
+ if gat_item.kind != ty::AssocKind::Type {
+ continue;
+ }
+ let gat_generics = tcx.generics_of(gat_def_id);
+ // FIXME(jackh726): we can also warn in the more general case
+ if gat_generics.params.is_empty() {
+ continue;
+ }
+
+ // Gather the bounds with which all other items inside of this trait constrain the GAT.
+ // This is calculated by taking the intersection of the bounds that each item
+ // constrains the GAT with individually.
+ let mut new_required_bounds: Option<FxHashSet<ty::Predicate<'_>>> = None;
+ for item in associated_items {
+ let item_def_id = item.id.owner_id;
+ // Skip our own GAT, since it does not constrain itself at all.
+ if item_def_id == gat_def_id {
+ continue;
+ }
+
+ let item_hir_id = item.id.hir_id();
+ let param_env = tcx.param_env(item_def_id);
+
+ let item_required_bounds = match item.kind {
+ // In our example, this corresponds to the `into_iter` method
+ hir::AssocItemKind::Fn { .. } => {
+ // For methods, we check the function signature's return type for any GATs
+ // to constrain. In the `into_iter` case, we see that the return type
+ // `Self::Iter<'a>` is a GAT we want to gather any potential missing bounds from.
+ let sig: ty::FnSig<'_> = tcx.liberate_late_bound_regions(
+ item_def_id.to_def_id(),
+ tcx.fn_sig(item_def_id),
+ );
+ gather_gat_bounds(
+ tcx,
+ param_env,
+ item_hir_id,
+ sig.inputs_and_output,
+ // We also assume that all of the function signature's parameter types
+ // are well formed.
+ &sig.inputs().iter().copied().collect(),
+ gat_def_id.def_id,
+ gat_generics,
+ )
+ }
+ // In our example, this corresponds to the `Iter` and `Item` associated types
+ hir::AssocItemKind::Type => {
+ // If our associated item is a GAT with missing bounds, add them to
+ // the param-env here. This allows this GAT to propagate missing bounds
+ // to other GATs.
+ let param_env = augment_param_env(
+ tcx,
+ param_env,
+ required_bounds_by_item.get(&item_def_id),
+ );
+ gather_gat_bounds(
+ tcx,
+ param_env,
+ item_hir_id,
+ tcx.explicit_item_bounds(item_def_id)
+ .iter()
+ .copied()
+ .collect::<Vec<_>>(),
+ &FxHashSet::default(),
+ gat_def_id.def_id,
+ gat_generics,
+ )
+ }
+ hir::AssocItemKind::Const => None,
+ };
+
+ if let Some(item_required_bounds) = item_required_bounds {
+ // Take the intersection of the required bounds for this GAT, and
+ // the item_required_bounds which are the ones implied by just
+ // this item alone.
+ // This is why we use an Option<_>, since we need to distinguish
+ // the empty set of bounds from the _uninitialized_ set of bounds.
+ if let Some(new_required_bounds) = &mut new_required_bounds {
+ new_required_bounds.retain(|b| item_required_bounds.contains(b));
+ } else {
+ new_required_bounds = Some(item_required_bounds);
+ }
+ }
+ }
+
+ if let Some(new_required_bounds) = new_required_bounds {
+ let required_bounds = required_bounds_by_item.entry(gat_def_id).or_default();
+ if new_required_bounds.into_iter().any(|p| required_bounds.insert(p)) {
+ // Iterate until our required_bounds no longer change
+ // Since they changed here, we should continue the loop
+ should_continue = true;
+ }
+ }
+ }
+ // We know that this loop will eventually halt, since we only set `should_continue` if the
+ // `required_bounds` for this item grows. Since we are not creating any new region or type
+ // variables, the set of all region and type bounds that we could ever insert are limited
+ // by the number of unique types and regions we observe in a given item.
+ if !should_continue {
+ break;
+ }
+ }
+
+ for (gat_def_id, required_bounds) in required_bounds_by_item {
+ let gat_item_hir = tcx.hir().expect_trait_item(gat_def_id.def_id);
+ debug!(?required_bounds);
+ let param_env = tcx.param_env(gat_def_id);
+ let gat_hir = gat_item_hir.hir_id();
+
+ let mut unsatisfied_bounds: Vec<_> = required_bounds
+ .into_iter()
+ .filter(|clause| match clause.kind().skip_binder() {
+ ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(a, b)) => {
+ !region_known_to_outlive(tcx, gat_hir, param_env, &FxHashSet::default(), a, b)
+ }
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(a, b)) => {
+ !ty_known_to_outlive(tcx, gat_hir, param_env, &FxHashSet::default(), a, b)
+ }
+ _ => bug!("Unexpected PredicateKind"),
+ })
+ .map(|clause| clause.to_string())
+ .collect();
+
+ // We sort so that order is predictable
+ unsatisfied_bounds.sort();
+
+ if !unsatisfied_bounds.is_empty() {
+ let plural = pluralize!(unsatisfied_bounds.len());
+ let mut err = tcx.sess.struct_span_err(
+ gat_item_hir.span,
+ &format!("missing required bound{} on `{}`", plural, gat_item_hir.ident),
+ );
+
+ let suggestion = format!(
+ "{} {}",
+ gat_item_hir.generics.add_where_or_trailing_comma(),
+ unsatisfied_bounds.join(", "),
+ );
+ err.span_suggestion(
+ gat_item_hir.generics.tail_span_for_predicate_suggestion(),
+ &format!("add the required where clause{plural}"),
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+
+ let bound =
+ if unsatisfied_bounds.len() > 1 { "these bounds are" } else { "this bound is" };
+ err.note(&format!(
+ "{} currently required to ensure that impls have maximum flexibility",
+ bound
+ ));
+ err.note(
+ "we are soliciting feedback, see issue #87479 \
+ <https://github.com/rust-lang/rust/issues/87479> \
+ for more information",
+ );
+
+ err.emit();
+ }
+ }
+}
+
+/// Add a new set of predicates to the caller_bounds of an existing param_env.
+fn augment_param_env<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ new_predicates: Option<&FxHashSet<ty::Predicate<'tcx>>>,
+) -> ty::ParamEnv<'tcx> {
+ let Some(new_predicates) = new_predicates else {
+ return param_env;
+ };
+
+ if new_predicates.is_empty() {
+ return param_env;
+ }
+
+ let bounds =
+ tcx.mk_predicates(param_env.caller_bounds().iter().chain(new_predicates.iter().cloned()));
+ // FIXME(compiler-errors): Perhaps there is a case where we need to normalize this
+ // i.e. traits::normalize_param_env_or_error
+ ty::ParamEnv::new(bounds, param_env.reveal(), param_env.constness())
+}
+
+/// We use the following trait as an example throughout this function.
+/// Specifically, let's assume that `to_check` here is the return type
+/// of `into_iter`, and the GAT we are checking this for is `Iter`.
+/// ```rust,ignore (this code fails due to this lint)
+/// trait IntoIter {
+/// type Iter<'a>: Iterator<Item = Self::Item<'a>>;
+/// type Item<'a>;
+/// fn into_iter<'a>(&'a self) -> Self::Iter<'a>;
+/// }
+/// ```
+fn gather_gat_bounds<'tcx, T: TypeFoldable<'tcx>>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ item_hir: hir::HirId,
+ to_check: T,
+ wf_tys: &FxHashSet<Ty<'tcx>>,
+ gat_def_id: LocalDefId,
+ gat_generics: &'tcx ty::Generics,
+) -> Option<FxHashSet<ty::Predicate<'tcx>>> {
+ // The bounds that we would require from `to_check`
+ let mut bounds = FxHashSet::default();
+
+ let (regions, types) = GATSubstCollector::visit(gat_def_id.to_def_id(), to_check);
+
+ // If both regions and types are empty, then this GAT isn't in the
+ // set of types we are checking, and we shouldn't try to do clause analysis
+ // (particularly, doing so would end up with an empty set of clauses,
+ // since the current method would require none, and we take the
+ // intersection of requirements of all methods)
+ if types.is_empty() && regions.is_empty() {
+ return None;
+ }
+
+ for (region_a, region_a_idx) in &regions {
+ // Ignore `'static` lifetimes for the purpose of this lint: we know it
+ // outlives everything, so it doesn't give meaningful clues.
+ if let ty::ReStatic = **region_a {
+ continue;
+ }
+ // For each region argument (e.g., `'a` in our example), check for a
+ // relationship to the type arguments (e.g., `Self`). If there is an
+ // outlives relationship (`Self: 'a`), then we want to ensure that is
+ // reflected in a where clause on the GAT itself.
+ for (ty, ty_idx) in &types {
+ // In our example, requires that `Self: 'a`
+ if ty_known_to_outlive(tcx, item_hir, param_env, &wf_tys, *ty, *region_a) {
+ debug!(?ty_idx, ?region_a_idx);
+ debug!("required clause: {ty} must outlive {region_a}");
+ // Translate into the generic parameters of the GAT. In
+ // our example, the type was `Self`, which will also be
+ // `Self` in the GAT.
+ let ty_param = gat_generics.param_at(*ty_idx, tcx);
+ let ty_param = tcx
+ .mk_ty(ty::Param(ty::ParamTy { index: ty_param.index, name: ty_param.name }));
+ // Same for the region. In our example, the method's 'a corresponds
+ // to the GAT's own 'a parameter.
+ let region_param = gat_generics.param_at(*region_a_idx, tcx);
+ let region_param =
+ tcx.mk_region(ty::RegionKind::ReEarlyBound(ty::EarlyBoundRegion {
+ def_id: region_param.def_id,
+ index: region_param.index,
+ name: region_param.name,
+ }));
+ // The predicate we expect to see. (In our example,
+ // `Self: 'a`.)
+ let clause =
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_param, region_param));
+ let clause = tcx.mk_predicate(ty::Binder::dummy(clause));
+ bounds.insert(clause);
+ }
+ }
+
+ // For each region argument (e.g., `'a` in our example), also check for a
+ // relationship to the other region arguments. If there is an outlives
+ // relationship, then we want to ensure that is reflected in the where clause
+ // on the GAT itself.
+ for (region_b, region_b_idx) in &regions {
+ // Again, skip `'static` because it outlives everything. Also, we trivially
+ // know that a region outlives itself.
+ if ty::ReStatic == **region_b || region_a == region_b {
+ continue;
+ }
+ if region_known_to_outlive(tcx, item_hir, param_env, &wf_tys, *region_a, *region_b) {
+ debug!(?region_a_idx, ?region_b_idx);
+ debug!("required clause: {region_a} must outlive {region_b}");
+ // Translate into the generic parameters of the GAT.
+ let region_a_param = gat_generics.param_at(*region_a_idx, tcx);
+ let region_a_param =
+ tcx.mk_region(ty::RegionKind::ReEarlyBound(ty::EarlyBoundRegion {
+ def_id: region_a_param.def_id,
+ index: region_a_param.index,
+ name: region_a_param.name,
+ }));
+ // Same for the region.
+ let region_b_param = gat_generics.param_at(*region_b_idx, tcx);
+ let region_b_param =
+ tcx.mk_region(ty::RegionKind::ReEarlyBound(ty::EarlyBoundRegion {
+ def_id: region_b_param.def_id,
+ index: region_b_param.index,
+ name: region_b_param.name,
+ }));
+ // The predicate we expect to see.
+ let clause = ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(
+ region_a_param,
+ region_b_param,
+ ));
+ let clause = tcx.mk_predicate(ty::Binder::dummy(clause));
+ bounds.insert(clause);
+ }
+ }
+ }
+
+ Some(bounds)
+}
+
+/// Given a known `param_env` and a set of well formed types, can we prove that
+/// `ty` outlives `region`?
+fn ty_known_to_outlive<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+ wf_tys: &FxHashSet<Ty<'tcx>>,
+ ty: Ty<'tcx>,
+ region: ty::Region<'tcx>,
+) -> bool {
+ resolve_regions_with_wf_tys(tcx, id, param_env, &wf_tys, |infcx, region_bound_pairs| {
+ let origin = infer::RelateParamBound(DUMMY_SP, ty, None);
+ let outlives = &mut TypeOutlives::new(infcx, tcx, region_bound_pairs, None, param_env);
+ outlives.type_must_outlive(origin, ty, region, ConstraintCategory::BoringNoLocation);
+ })
+}
+
+/// Given a known `param_env` and a set of well formed types, can we prove that
+/// `region_a` outlives `region_b`?
+fn region_known_to_outlive<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+ wf_tys: &FxHashSet<Ty<'tcx>>,
+ region_a: ty::Region<'tcx>,
+ region_b: ty::Region<'tcx>,
+) -> bool {
+ resolve_regions_with_wf_tys(tcx, id, param_env, &wf_tys, |mut infcx, _| {
+ use rustc_infer::infer::outlives::obligations::TypeOutlivesDelegate;
+ let origin = infer::RelateRegionParamBound(DUMMY_SP);
+ // `region_a: region_b` -> `region_b <= region_a`
+ infcx.push_sub_region_constraint(
+ origin,
+ region_b,
+ region_a,
+ ConstraintCategory::BoringNoLocation,
+ );
+ })
+}
+
+/// Given a known `param_env` and a set of well formed types, set up an
+/// `InferCtxt`, call the passed function (e.g., to set up region constraints
+/// to be tested), then resolve regions and return `true` if no errors were found.
+fn resolve_regions_with_wf_tys<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+ wf_tys: &FxHashSet<Ty<'tcx>>,
+ add_constraints: impl for<'a> FnOnce(&'a InferCtxt<'tcx>, &'a RegionBoundPairs<'tcx>),
+) -> bool {
+ // Unfortunately, we have to use a new `InferCtxt` each call, because
+ // region constraints get added and solved there and we need to test each
+ // call individually.
+ let infcx = tcx.infer_ctxt().build();
+ let outlives_environment = OutlivesEnvironment::with_bounds(
+ param_env,
+ Some(&infcx),
+ infcx.implied_bounds_tys(param_env, id, wf_tys.clone()),
+ );
+ let region_bound_pairs = outlives_environment.region_bound_pairs();
+
+ add_constraints(&infcx, region_bound_pairs);
+
+ infcx.process_registered_region_obligations(
+ outlives_environment.region_bound_pairs(),
+ param_env,
+ );
+ let errors = infcx.resolve_regions(&outlives_environment);
+
+ debug!(?errors, "errors");
+
+ // If we were able to prove that the type outlives the region without
+ // an error, it must be because of the implied or explicit bounds...
+ errors.is_empty()
+}
+
+/// TypeVisitor that looks for uses of GATs like
+/// `<P0 as Trait<P1..Pn>>::GAT<Pn..Pm>` and adds the arguments `P0..Pm` into
+/// the two sets, `regions` and `types` (depending on their kind). For each
+/// parameter `Pi`, also track the index `i`.
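+///
+/// For example, when visiting `<Self as IntoIter>::Iter<'a>` (a sketch based
+/// on the `IntoIter` example above), `Self` would be recorded in `types` at
+/// index 0 and `'a` in `regions` at index 1.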
+struct GATSubstCollector<'tcx> {
+ gat: DefId,
+ // Which regions appear and the parameter index each is substituted for
+ regions: FxHashSet<(ty::Region<'tcx>, usize)>,
+ // Which type params appear and the parameter index each is substituted for
+ types: FxHashSet<(Ty<'tcx>, usize)>,
+}
+
+impl<'tcx> GATSubstCollector<'tcx> {
+ fn visit<T: TypeFoldable<'tcx>>(
+ gat: DefId,
+ t: T,
+ ) -> (FxHashSet<(ty::Region<'tcx>, usize)>, FxHashSet<(Ty<'tcx>, usize)>) {
+ let mut visitor =
+ GATSubstCollector { gat, regions: FxHashSet::default(), types: FxHashSet::default() };
+ t.visit_with(&mut visitor);
+ (visitor.regions, visitor.types)
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for GATSubstCollector<'tcx> {
+ type BreakTy = !;
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match t.kind() {
+ ty::Projection(p) if p.item_def_id == self.gat => {
+ for (idx, subst) in p.substs.iter().enumerate() {
+ match subst.unpack() {
+ GenericArgKind::Lifetime(lt) if !lt.is_late_bound() => {
+ self.regions.insert((lt, idx));
+ }
+ GenericArgKind::Type(t) => {
+ self.types.insert((t, idx));
+ }
+ _ => {}
+ }
+ }
+ }
+ _ => {}
+ }
+ t.super_visit_with(self)
+ }
+}
+
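+/// Rough sketch of the intent: returns `true` when `ty` is a bare trait-object
+/// type such as `dyn Trait` whose single, unqualified path segment resolves to
+/// `trait_def_id` itself.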
+fn could_be_self(trait_def_id: LocalDefId, ty: &hir::Ty<'_>) -> bool {
+ match ty.kind {
+ hir::TyKind::TraitObject([trait_ref], ..) => match trait_ref.trait_ref.path.segments {
+ [s] => s.res.opt_def_id() == Some(trait_def_id.to_def_id()),
+ _ => false,
+ },
+ _ => false,
+ }
+}
+
+/// Detect when an object unsafe trait is referring to itself in one of its associated items.
+/// When this is done, suggest using `Self` instead.
+fn check_object_unsafe_self_trait_by_name(tcx: TyCtxt<'_>, item: &hir::TraitItem<'_>) {
+ let (trait_name, trait_def_id) =
+ match tcx.hir().get_by_def_id(tcx.hir().get_parent_item(item.hir_id()).def_id) {
+ hir::Node::Item(item) => match item.kind {
+ hir::ItemKind::Trait(..) => (item.ident, item.owner_id),
+ _ => return,
+ },
+ _ => return,
+ };
+ let mut trait_should_be_self = vec![];
+ match &item.kind {
+ hir::TraitItemKind::Const(ty, _) | hir::TraitItemKind::Type(_, Some(ty))
+ if could_be_self(trait_def_id.def_id, ty) =>
+ {
+ trait_should_be_self.push(ty.span)
+ }
+ hir::TraitItemKind::Fn(sig, _) => {
+ for ty in sig.decl.inputs {
+ if could_be_self(trait_def_id.def_id, ty) {
+ trait_should_be_self.push(ty.span);
+ }
+ }
+ match sig.decl.output {
+ hir::FnRetTy::Return(ty) if could_be_self(trait_def_id.def_id, ty) => {
+ trait_should_be_self.push(ty.span);
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+ if !trait_should_be_self.is_empty() {
+ if tcx.object_safety_violations(trait_def_id).is_empty() {
+ return;
+ }
+ let sugg = trait_should_be_self.iter().map(|span| (*span, "Self".to_string())).collect();
+ tcx.sess
+ .struct_span_err(
+ trait_should_be_self,
+ "associated item referring to unboxed trait object for its own trait",
+ )
+ .span_label(trait_name.span, "in this trait")
+ .multipart_suggestion(
+ "you might have meant to use `Self` to refer to the implementing type",
+ sugg,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+}
+
+fn check_impl_item(tcx: TyCtxt<'_>, impl_item: &hir::ImplItem<'_>) {
+ let (method_sig, span) = match impl_item.kind {
+ hir::ImplItemKind::Fn(ref sig, _) => (Some(sig), impl_item.span),
+ // Constrain binding and overflow error spans to `<Ty>` in `type foo = <Ty>`.
+ hir::ImplItemKind::Type(ty) if ty.span != DUMMY_SP => (None, ty.span),
+ _ => (None, impl_item.span),
+ };
+
+ check_associated_item(tcx, impl_item.owner_id.def_id, span, method_sig);
+}
+
+fn check_param_wf(tcx: TyCtxt<'_>, param: &hir::GenericParam<'_>) {
+ match param.kind {
+ // We currently only check wf of const params here.
+ hir::GenericParamKind::Lifetime { .. } | hir::GenericParamKind::Type { .. } => (),
+
+ // Const parameters are well formed if their type is structural match.
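+ //
+ // A hedged illustration of the branches below:
+ //
+ //     fn f<const N: usize>() {}  // OK: integers are always allowed
+ //     fn g<const F: f32>() {}    // error: floats are forbidden
+ //
+ // and, with `#![feature(adt_const_params)]`, a user-defined type is accepted
+ // only if it is structural-match (derives both `PartialEq` and `Eq`).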
+ hir::GenericParamKind::Const { ty: hir_ty, default: _ } => {
+ let ty = tcx.type_of(tcx.hir().local_def_id(param.hir_id));
+
+ if tcx.features().adt_const_params {
+ if let Some(non_structural_match_ty) =
+ traits::search_for_adt_const_param_violation(param.span, tcx, ty)
+ {
+ // We use the same error code in both branches, because this is really the same
+ // issue: we just special-case the message for type parameters to make it
+ // clearer.
+ match non_structural_match_ty.kind() {
+ ty::Param(_) => {
+ // Const parameters may not have type parameters as their types,
+ // because we cannot be sure that the type parameter derives `PartialEq`
+ // and `Eq` (just implementing them is not enough for `structural_match`).
+ struct_span_err!(
+ tcx.sess,
+ hir_ty.span,
+ E0741,
+ "`{ty}` is not guaranteed to `#[derive(PartialEq, Eq)]`, so may not be \
+ used as the type of a const parameter",
+ )
+ .span_label(
+ hir_ty.span,
+ format!("`{ty}` may not derive both `PartialEq` and `Eq`"),
+ )
+ .note(
+ "it is not currently possible to use a type parameter as the type of a \
+ const parameter",
+ )
+ .emit();
+ }
+ ty::Float(_) => {
+ struct_span_err!(
+ tcx.sess,
+ hir_ty.span,
+ E0741,
+ "`{ty}` is forbidden as the type of a const generic parameter",
+ )
+ .note("floats do not derive `Eq` or `Ord`, which are required for const parameters")
+ .emit();
+ }
+ ty::FnPtr(_) => {
+ struct_span_err!(
+ tcx.sess,
+ hir_ty.span,
+ E0741,
+ "using function pointers as const generic parameters is forbidden",
+ )
+ .emit();
+ }
+ ty::RawPtr(_) => {
+ struct_span_err!(
+ tcx.sess,
+ hir_ty.span,
+ E0741,
+ "using raw pointers as const generic parameters is forbidden",
+ )
+ .emit();
+ }
+ _ => {
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ hir_ty.span,
+ E0741,
+ "`{}` must be annotated with `#[derive(PartialEq, Eq)]` to be used as \
+ the type of a const parameter",
+ non_structural_match_ty,
+ );
+
+ if ty == non_structural_match_ty {
+ diag.span_label(
+ hir_ty.span,
+ format!("`{ty}` doesn't derive both `PartialEq` and `Eq`"),
+ );
+ }
+
+ diag.emit();
+ }
+ }
+ }
+ } else {
+ let err_ty_str;
+ let mut is_ptr = true;
+
+ let err = match ty.kind() {
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Error(_) => None,
+ ty::FnPtr(_) => Some("function pointers"),
+ ty::RawPtr(_) => Some("raw pointers"),
+ _ => {
+ is_ptr = false;
+ err_ty_str = format!("`{ty}`");
+ Some(err_ty_str.as_str())
+ }
+ };
+
+ if let Some(unsupported_type) = err {
+ if is_ptr {
+ tcx.sess.span_err(
+ hir_ty.span,
+ &format!(
+ "using {unsupported_type} as const generic parameters is forbidden",
+ ),
+ );
+ } else {
+ let mut err = tcx.sess.struct_span_err(
+ hir_ty.span,
+ &format!(
+ "{unsupported_type} is forbidden as the type of a const generic parameter",
+ ),
+ );
+ err.note("the only supported types are integers, `bool` and `char`");
+ if tcx.sess.is_nightly_build() {
+ err.help(
+ "more complex types are supported with `#![feature(adt_const_params)]`",
+ );
+ }
+ err.emit();
+ }
+ }
+ }
+ }
+ }
+}
+
+#[instrument(level = "debug", skip(tcx, span, sig_if_method))]
+fn check_associated_item(
+ tcx: TyCtxt<'_>,
+ item_id: LocalDefId,
+ span: Span,
+ sig_if_method: Option<&hir::FnSig<'_>>,
+) {
+ let loc = Some(WellFormedLoc::Ty(item_id));
+ enter_wf_checking_ctxt(tcx, span, item_id, |wfcx| {
+ let item = tcx.associated_item(item_id);
+
+ let self_ty = match item.container {
+ ty::TraitContainer => tcx.types.self_param,
+ ty::ImplContainer => tcx.type_of(item.container_id(tcx)),
+ };
+
+ match item.kind {
+ ty::AssocKind::Const => {
+ let ty = tcx.type_of(item.def_id);
+ let ty = wfcx.normalize(span, Some(WellFormedLoc::Ty(item_id)), ty);
+ wfcx.register_wf_obligation(span, loc, ty.into());
+ }
+ ty::AssocKind::Fn => {
+ let sig = tcx.fn_sig(item.def_id);
+ let hir_sig = sig_if_method.expect("bad signature for method");
+ check_fn_or_method(
+ wfcx,
+ item.ident(tcx).span,
+ sig,
+ hir_sig.decl,
+ item.def_id.expect_local(),
+ );
+ check_method_receiver(wfcx, hir_sig, item, self_ty);
+ }
+ ty::AssocKind::Type => {
+ if let ty::AssocItemContainer::TraitContainer = item.container {
+ check_associated_type_bounds(wfcx, item, span)
+ }
+ if item.defaultness(tcx).has_value() {
+ let ty = tcx.type_of(item.def_id);
+ let ty = wfcx.normalize(span, Some(WellFormedLoc::Ty(item_id)), ty);
+ wfcx.register_wf_obligation(span, loc, ty.into());
+ }
+ }
+ }
+ })
+}
+
+fn item_adt_kind(kind: &ItemKind<'_>) -> Option<AdtKind> {
+ match kind {
+ ItemKind::Struct(..) => Some(AdtKind::Struct),
+ ItemKind::Union(..) => Some(AdtKind::Union),
+ ItemKind::Enum(..) => Some(AdtKind::Enum),
+ _ => None,
+ }
+}
+
+/// In a type definition, we check that the types of the fields are well-formed.
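+///
+/// A hedged illustration of a field type that is not well-formed:
+///
+///     struct Bad<'a> {
+///         r: &'static &'a u32, // requires `'a: 'static`, which does not hold
+///     }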
+fn check_type_defn<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ item: &hir::Item<'tcx>,
+ all_sized: bool,
+ mut lookup_fields: F,
+) where
+ F: FnMut(&WfCheckingCtxt<'_, 'tcx>) -> Vec<AdtVariant<'tcx>>,
+{
+ let _ = tcx.representability(item.owner_id.def_id);
+
+ enter_wf_checking_ctxt(tcx, item.span, item.owner_id.def_id, |wfcx| {
+ let variants = lookup_fields(wfcx);
+ let packed = tcx.adt_def(item.owner_id).repr().packed();
+
+ for variant in &variants {
+ // All field types must be well-formed.
+ for field in &variant.fields {
+ wfcx.register_wf_obligation(
+ field.span,
+ Some(WellFormedLoc::Ty(field.def_id)),
+ field.ty.into(),
+ )
+ }
+
+ // For DSTs, or when drop needs to copy things around, all
+ // intermediate types must be sized.
+ let needs_drop_copy = || {
+ packed && {
+ let ty = variant.fields.last().unwrap().ty;
+ let ty = tcx.erase_regions(ty);
+ if ty.needs_infer() {
+ tcx.sess
+ .delay_span_bug(item.span, &format!("inference variables in {:?}", ty));
+ // Just treat an unresolved type expression as if it needs drop.
+ true
+ } else {
+ ty.needs_drop(tcx, tcx.param_env(item.owner_id))
+ }
+ }
+ };
+ // All fields (except for possibly the last) should be sized.
+ let all_sized = all_sized || variant.fields.is_empty() || needs_drop_copy();
+ let unsized_len = if all_sized { 0 } else { 1 };
+ for (idx, field) in
+ variant.fields[..variant.fields.len() - unsized_len].iter().enumerate()
+ {
+ let last = idx == variant.fields.len() - 1;
+ wfcx.register_bound(
+ traits::ObligationCause::new(
+ field.span,
+ wfcx.body_id,
+ traits::FieldSized {
+ adt_kind: match item_adt_kind(&item.kind) {
+ Some(i) => i,
+ None => bug!(),
+ },
+ span: field.span,
+ last,
+ },
+ ),
+ wfcx.param_env,
+ field.ty,
+ tcx.require_lang_item(LangItem::Sized, None),
+ );
+ }
+
+ // Explicit `enum` discriminant values must const-evaluate successfully.
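+ //
+ // A hedged illustration:
+ //
+ //     enum E { A = 1, B = 10 / 0 }  // the discriminant of `B` fails to evaluate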
+ if let Some(discr_def_id) = variant.explicit_discr {
+ let cause = traits::ObligationCause::new(
+ tcx.def_span(discr_def_id),
+ wfcx.body_id,
+ traits::MiscObligation,
+ );
+ wfcx.register_obligation(traits::Obligation::new(
+ cause,
+ wfcx.param_env,
+ ty::Binder::dummy(ty::PredicateKind::ConstEvaluatable(
+ ty::Const::from_anon_const(tcx, discr_def_id),
+ ))
+ .to_predicate(tcx),
+ ));
+ }
+ }
+
+ check_where_clauses(wfcx, item.span, item.owner_id.def_id);
+ });
+}
+
+#[instrument(skip(tcx, item))]
+fn check_trait(tcx: TyCtxt<'_>, item: &hir::Item<'_>) {
+ debug!(?item.owner_id);
+
+ let def_id = item.owner_id.def_id;
+ let trait_def = tcx.trait_def(def_id);
+ if trait_def.is_marker
+ || matches!(trait_def.specialization_kind, TraitSpecializationKind::Marker)
+ {
+ for associated_def_id in &*tcx.associated_item_def_ids(def_id) {
+ struct_span_err!(
+ tcx.sess,
+ tcx.def_span(*associated_def_id),
+ E0714,
+ "marker traits cannot have associated items",
+ )
+ .emit();
+ }
+ }
+
+ enter_wf_checking_ctxt(tcx, item.span, def_id, |wfcx| {
+ check_where_clauses(wfcx, item.span, def_id)
+ });
+
+ // Only check traits, don't check trait aliases
+ if let hir::ItemKind::Trait(_, _, _, _, items) = item.kind {
+ check_gat_where_clauses(tcx, items);
+ }
+}
+
+/// Checks all associated type defaults of trait `trait_def_id`.
+///
+/// Assuming the defaults are used, check that all predicates (bounds on the
+/// assoc type and where clauses on the trait) hold.
+fn check_associated_type_bounds(wfcx: &WfCheckingCtxt<'_, '_>, item: &ty::AssocItem, span: Span) {
+ let bounds = wfcx.tcx().explicit_item_bounds(item.def_id);
+
+ debug!("check_associated_type_bounds: bounds={:?}", bounds);
+ let wf_obligations = bounds.iter().flat_map(|&(bound, bound_span)| {
+ let normalized_bound = wfcx.normalize(span, None, bound);
+ traits::wf::predicate_obligations(
+ wfcx.infcx,
+ wfcx.param_env,
+ wfcx.body_id,
+ normalized_bound,
+ bound_span,
+ )
+ });
+
+ wfcx.register_obligations(wf_obligations);
+}
+
+fn check_item_fn(
+ tcx: TyCtxt<'_>,
+ def_id: LocalDefId,
+ ident: Ident,
+ span: Span,
+ decl: &hir::FnDecl<'_>,
+) {
+ enter_wf_checking_ctxt(tcx, span, def_id, |wfcx| {
+ let sig = tcx.fn_sig(def_id);
+ check_fn_or_method(wfcx, ident.span, sig, decl, def_id);
+ })
+}
+
+fn check_item_type(tcx: TyCtxt<'_>, item_id: LocalDefId, ty_span: Span, allow_foreign_ty: bool) {
+ debug!("check_item_type: {:?}", item_id);
+
+ enter_wf_checking_ctxt(tcx, ty_span, item_id, |wfcx| {
+ let ty = tcx.type_of(item_id);
+ let item_ty = wfcx.normalize(ty_span, Some(WellFormedLoc::Ty(item_id)), ty);
+
+ let mut forbid_unsized = true;
+ if allow_foreign_ty {
+ let tail = tcx.struct_tail_erasing_lifetimes(item_ty, wfcx.param_env);
+ if let ty::Foreign(_) = tail.kind() {
+ forbid_unsized = false;
+ }
+ }
+
+ wfcx.register_wf_obligation(ty_span, Some(WellFormedLoc::Ty(item_id)), item_ty.into());
+ if forbid_unsized {
+ wfcx.register_bound(
+ traits::ObligationCause::new(ty_span, wfcx.body_id, traits::WellFormed(None)),
+ wfcx.param_env,
+ item_ty,
+ tcx.require_lang_item(LangItem::Sized, None),
+ );
+ }
+
+ // Ensure that the end result is `Sync` in a non-thread local `static`.
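+ //
+ // A hedged illustration:
+ //
+ //     static COUNTER: std::cell::Cell<u32> = std::cell::Cell::new(0);
+ //     // error: `Cell<u32>` cannot be shared between threads safely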
+ let should_check_for_sync = tcx.static_mutability(item_id.to_def_id())
+ == Some(hir::Mutability::Not)
+ && !tcx.is_foreign_item(item_id.to_def_id())
+ && !tcx.is_thread_local_static(item_id.to_def_id());
+
+ if should_check_for_sync {
+ wfcx.register_bound(
+ traits::ObligationCause::new(ty_span, wfcx.body_id, traits::SharedStatic),
+ wfcx.param_env,
+ item_ty,
+ tcx.require_lang_item(LangItem::Sync, Some(ty_span)),
+ );
+ }
+ });
+}
+
+#[instrument(level = "debug", skip(tcx, ast_self_ty, ast_trait_ref))]
+fn check_impl<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ item: &'tcx hir::Item<'tcx>,
+ ast_self_ty: &hir::Ty<'_>,
+ ast_trait_ref: &Option<hir::TraitRef<'_>>,
+ constness: hir::Constness,
+) {
+ enter_wf_checking_ctxt(tcx, item.span, item.owner_id.def_id, |wfcx| {
+ match *ast_trait_ref {
+ Some(ref ast_trait_ref) => {
+ // `#[rustc_reservation_impl]` impls are not real impls and
+ // therefore don't need to be WF (the trait's `Self: Trait` predicate
+ // won't hold).
+ let trait_ref = tcx.impl_trait_ref(item.owner_id).unwrap();
+ let trait_ref = wfcx.normalize(ast_trait_ref.path.span, None, trait_ref);
+ let trait_pred = ty::TraitPredicate {
+ trait_ref,
+ constness: match constness {
+ hir::Constness::Const => ty::BoundConstness::ConstIfConst,
+ hir::Constness::NotConst => ty::BoundConstness::NotConst,
+ },
+ polarity: ty::ImplPolarity::Positive,
+ };
+ let obligations = traits::wf::trait_obligations(
+ wfcx.infcx,
+ wfcx.param_env,
+ wfcx.body_id,
+ &trait_pred,
+ ast_trait_ref.path.span,
+ item,
+ );
+ debug!(?obligations);
+ wfcx.register_obligations(obligations);
+ }
+ None => {
+ let self_ty = tcx.type_of(item.owner_id);
+ let self_ty = wfcx.normalize(
+ item.span,
+ Some(WellFormedLoc::Ty(item.hir_id().expect_owner().def_id)),
+ self_ty,
+ );
+ wfcx.register_wf_obligation(
+ ast_self_ty.span,
+ Some(WellFormedLoc::Ty(item.hir_id().expect_owner().def_id)),
+ self_ty.into(),
+ );
+ }
+ }
+
+ check_where_clauses(wfcx, item.span, item.owner_id.def_id);
+ });
+}
+
+/// Checks where-clauses and inline bounds that are declared on `def_id`.
+#[instrument(level = "debug", skip(wfcx))]
+fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id: LocalDefId) {
+ let infcx = wfcx.infcx;
+ let tcx = wfcx.tcx();
+
+ let predicates = tcx.bound_predicates_of(def_id.to_def_id());
+ let generics = tcx.generics_of(def_id);
+
+ let is_our_default = |def: &ty::GenericParamDef| match def.kind {
+ GenericParamDefKind::Type { has_default, .. }
+ | GenericParamDefKind::Const { has_default } => {
+ has_default && def.index >= generics.parent_count as u32
+ }
+ GenericParamDefKind::Lifetime => unreachable!(),
+ };
+
+ // Check that concrete defaults are well-formed. See test `type-check-defaults.rs`.
+ // For example, this forbids the declaration:
+ //
+ // struct Foo<T = Vec<[u32]>> { .. }
+ //
+ // Here, the default `Vec<[u32]>` is not WF because `[u32]: Sized` does not hold.
+ for param in &generics.params {
+ match param.kind {
+ GenericParamDefKind::Type { .. } => {
+ if is_our_default(param) {
+ let ty = tcx.type_of(param.def_id);
+ // Ignore dependent defaults -- that is, where the default of one type
+ // parameter includes another (e.g., `<T, U = T>`). In those cases, we can't
+ // be sure if it will error or not as the user might always specify the other.
+ if !ty.needs_subst() {
+ wfcx.register_wf_obligation(
+ tcx.def_span(param.def_id),
+ Some(WellFormedLoc::Ty(param.def_id.expect_local())),
+ ty.into(),
+ );
+ }
+ }
+ }
+ GenericParamDefKind::Const { .. } => {
+ if is_our_default(param) {
+ // FIXME(const_generics_defaults): This
+ // is incorrect when dealing with unused substs, for example
+ // for `struct Foo<const N: usize, const M: usize = { 1 - 2 }>`
+ // we should eagerly error.
+ let default_ct = tcx.const_param_default(param.def_id);
+ if !default_ct.needs_subst() {
+ wfcx.register_wf_obligation(
+ tcx.def_span(param.def_id),
+ None,
+ default_ct.into(),
+ );
+ }
+ }
+ }
+ // Doesn't have defaults.
+ GenericParamDefKind::Lifetime => {}
+ }
+ }
+
+ // Check that trait predicates are WF when params are substituted by their defaults.
+ // We don't want to overly constrain the predicates that may be written but we want to
+ // catch cases where a default may never be applied, such as `struct Foo<T: Copy = String>`.
+ // Therefore we check if a predicate which contains a single type param
+ // with a concrete default is WF with that default substituted.
+ // For more examples see tests `defaults-well-formedness.rs` and `type-check-defaults.rs`.
+ //
+ // First we build the defaulted substitution.
+ let substs = InternalSubsts::for_item(tcx, def_id.to_def_id(), |param, _| {
+ match param.kind {
+ GenericParamDefKind::Lifetime => {
+ // All regions are identity.
+ tcx.mk_param_from_def(param)
+ }
+
+ GenericParamDefKind::Type { .. } => {
+ // If the param has a default, ...
+ if is_our_default(param) {
+ let default_ty = tcx.type_of(param.def_id);
+ // ... and it's not a dependent default, ...
+ if !default_ty.needs_subst() {
+ // ... then substitute it with the default.
+ return default_ty.into();
+ }
+ }
+
+ tcx.mk_param_from_def(param)
+ }
+ GenericParamDefKind::Const { .. } => {
+ // If the param has a default, ...
+ if is_our_default(param) {
+ let default_ct = tcx.const_param_default(param.def_id);
+ // ... and it's not a dependent default, ...
+ if !default_ct.needs_subst() {
+ // ... then substitute it with the default.
+ return default_ct.into();
+ }
+ }
+
+ tcx.mk_param_from_def(param)
+ }
+ }
+ });
+
+ // Now we build the substituted predicates.
+ let default_obligations = predicates
+ .0
+ .predicates
+ .iter()
+ .flat_map(|&(pred, sp)| {
+ #[derive(Default)]
+ struct CountParams {
+ params: FxHashSet<u32>,
+ }
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for CountParams {
+ type BreakTy = ();
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::Param(param) = t.kind() {
+ self.params.insert(param.index);
+ }
+ t.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, _: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ ControlFlow::BREAK
+ }
+
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::ConstKind::Param(param) = c.kind() {
+ self.params.insert(param.index);
+ }
+ c.super_visit_with(self)
+ }
+ }
+ let mut param_count = CountParams::default();
+ let has_region = pred.visit_with(&mut param_count).is_break();
+ let substituted_pred = predicates.rebind(pred).subst(tcx, substs);
+ // Don't check non-defaulted params, dependent defaults (including lifetimes)
+ // or preds with multiple params.
+ if substituted_pred.has_non_region_param() || param_count.params.len() > 1 || has_region
+ {
+ None
+ } else if predicates.0.predicates.iter().any(|&(p, _)| p == substituted_pred) {
+ // Avoid duplication of predicates that contain no parameters, for example.
+ None
+ } else {
+ Some((substituted_pred, sp))
+ }
+ })
+ .map(|(pred, sp)| {
+ // Convert each of those into an obligation. So if you have
+ // something like `struct Foo<T: Copy = String>`, we would
+ // take that predicate `T: Copy`, substitute to `String: Copy`
+ // (actually that happens in the previous `flat_map` call),
+ // and then try to prove it (in this case, we'll fail).
+ //
+ // Note the subtle difference from how we handle `predicates`
+ // below: there, we are not trying to prove those predicates
+ // to be *true* but merely *well-formed*.
+ let pred = wfcx.normalize(sp, None, pred);
+ let cause = traits::ObligationCause::new(
+ sp,
+ wfcx.body_id,
+ traits::ItemObligation(def_id.to_def_id()),
+ );
+ traits::Obligation::new(cause, wfcx.param_env, pred)
+ });
+
+ let predicates = predicates.0.instantiate_identity(tcx);
+
+ let predicates = wfcx.normalize(span, None, predicates);
+
+ debug!(?predicates.predicates);
+ assert_eq!(predicates.predicates.len(), predicates.spans.len());
+ let wf_obligations =
+ iter::zip(&predicates.predicates, &predicates.spans).flat_map(|(&p, &sp)| {
+ traits::wf::predicate_obligations(
+ infcx,
+ wfcx.param_env.without_const(),
+ wfcx.body_id,
+ p,
+ sp,
+ )
+ });
+
+ let obligations: Vec<_> = wf_obligations.chain(default_obligations).collect();
+ wfcx.register_obligations(obligations);
+}
+
+#[instrument(level = "debug", skip(wfcx, span, hir_decl))]
+fn check_fn_or_method<'tcx>(
+ wfcx: &WfCheckingCtxt<'_, 'tcx>,
+ span: Span,
+ sig: ty::PolyFnSig<'tcx>,
+ hir_decl: &hir::FnDecl<'_>,
+ def_id: LocalDefId,
+) {
+ let tcx = wfcx.tcx();
+ let sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), sig);
+
+ // Normalize the input and output types one at a time, using a different
+ // `WellFormedLoc` for each. We cannot call `normalize_associated_types`
+ // on the entire `FnSig`, since this would use the same `WellFormedLoc`
+ // for each type, preventing the HIR wf check from generating
+ // a nice error message.
+ let ty::FnSig { mut inputs_and_output, c_variadic, unsafety, abi } = sig;
+ inputs_and_output = tcx.mk_type_list(inputs_and_output.iter().enumerate().map(|(i, ty)| {
+ wfcx.normalize(
+ span,
+ Some(WellFormedLoc::Param {
+ function: def_id,
+ // Note that the `param_idx` of the output type is
+ // one greater than the index of the last input type.
+ param_idx: i.try_into().unwrap(),
+ }),
+ ty,
+ )
+ }));
+ // Manually call `normalize_associated_types_in` on the other types
+ // in `FnSig`. This ensures that if the types of these fields
+ // ever change to include projections, we will start normalizing
+ // them automatically.
+ let sig = ty::FnSig {
+ inputs_and_output,
+ c_variadic: wfcx.normalize(span, None, c_variadic),
+ unsafety: wfcx.normalize(span, None, unsafety),
+ abi: wfcx.normalize(span, None, abi),
+ };
+
+ for (i, (&input_ty, ty)) in iter::zip(sig.inputs(), hir_decl.inputs).enumerate() {
+ wfcx.register_wf_obligation(
+ ty.span,
+ Some(WellFormedLoc::Param { function: def_id, param_idx: i.try_into().unwrap() }),
+ input_ty.into(),
+ );
+ }
+
+ wfcx.register_wf_obligation(
+ hir_decl.output.span(),
+ Some(WellFormedLoc::Param {
+ function: def_id,
+ param_idx: sig.inputs().len().try_into().unwrap(),
+ }),
+ sig.output().into(),
+ );
+
+ check_where_clauses(wfcx, span, def_id);
+
+ check_return_position_impl_trait_in_trait_bounds(
+ tcx,
+ wfcx,
+ def_id,
+ sig.output(),
+ hir_decl.output.span(),
+ );
+}
+
+/// Basically `check_associated_type_bounds`, but separated for now and should be
+/// deduplicated when RPITITs get lowered into real associated items.
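+///
+/// A hedged illustration (the feature is still gated at the time of writing):
+///
+///     #![feature(return_position_impl_trait_in_trait)]
+///     trait Container {
+///         fn items(&self) -> impl Iterator<Item = u32>;
+///     }
+///
+/// The `Iterator<Item = u32>` bound on the hidden return type above is what
+/// gets registered and checked here.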
+fn check_return_position_impl_trait_in_trait_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ wfcx: &WfCheckingCtxt<'_, 'tcx>,
+ fn_def_id: LocalDefId,
+ fn_output: Ty<'tcx>,
+ span: Span,
+) {
+ if let Some(assoc_item) = tcx.opt_associated_item(fn_def_id.to_def_id())
+ && assoc_item.container == ty::AssocItemContainer::TraitContainer
+ {
+ for arg in fn_output.walk() {
+ if let ty::GenericArgKind::Type(ty) = arg.unpack()
+ && let ty::Projection(proj) = ty.kind()
+ && tcx.def_kind(proj.item_def_id) == DefKind::ImplTraitPlaceholder
+ && tcx.impl_trait_in_trait_parent(proj.item_def_id) == fn_def_id.to_def_id()
+ {
+ let bounds = wfcx.tcx().explicit_item_bounds(proj.item_def_id);
+ let wf_obligations = bounds.iter().flat_map(|&(bound, bound_span)| {
+ let normalized_bound = wfcx.normalize(span, None, bound);
+ traits::wf::predicate_obligations(
+ wfcx.infcx,
+ wfcx.param_env,
+ wfcx.body_id,
+ normalized_bound,
+ bound_span,
+ )
+ });
+ wfcx.register_obligations(wf_obligations);
+ }
+ }
+ }
+}
+
+const HELP_FOR_SELF_TYPE: &str = "consider changing to `self`, `&self`, `&mut self`, `self: Box<Self>`, \
+ `self: Rc<Self>`, `self: Arc<Self>`, or `self: Pin<P>` (where P is one \
+ of the previous types except `Self`)";
+
+#[instrument(level = "debug", skip(wfcx))]
+fn check_method_receiver<'tcx>(
+ wfcx: &WfCheckingCtxt<'_, 'tcx>,
+ fn_sig: &hir::FnSig<'_>,
+ method: &ty::AssocItem,
+ self_ty: Ty<'tcx>,
+) {
+ let tcx = wfcx.tcx();
+
+ if !method.fn_has_self_parameter {
+ return;
+ }
+
+ let span = fn_sig.decl.inputs[0].span;
+
+ let sig = tcx.fn_sig(method.def_id);
+ let sig = tcx.liberate_late_bound_regions(method.def_id, sig);
+ let sig = wfcx.normalize(span, None, sig);
+
+ debug!("check_method_receiver: sig={:?}", sig);
+
+ let self_ty = wfcx.normalize(span, None, self_ty);
+
+ let receiver_ty = sig.inputs()[0];
+ let receiver_ty = wfcx.normalize(span, None, receiver_ty);
+
+ if tcx.features().arbitrary_self_types {
+ if !receiver_is_valid(wfcx, span, receiver_ty, self_ty, true) {
+ // Report error; `arbitrary_self_types` was enabled.
+ e0307(tcx, span, receiver_ty);
+ }
+ } else {
+ if !receiver_is_valid(wfcx, span, receiver_ty, self_ty, false) {
+ if receiver_is_valid(wfcx, span, receiver_ty, self_ty, true) {
+ // Report error; would have worked with `arbitrary_self_types`.
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::arbitrary_self_types,
+ span,
+ &format!(
+ "`{receiver_ty}` cannot be used as the type of `self` without \
+ the `arbitrary_self_types` feature",
+ ),
+ )
+ .help(HELP_FOR_SELF_TYPE)
+ .emit();
+ } else {
+ // Report error; would not have worked with `arbitrary_self_types`.
+ e0307(tcx, span, receiver_ty);
+ }
+ }
+ }
+}
+
+fn e0307<'tcx>(tcx: TyCtxt<'tcx>, span: Span, receiver_ty: Ty<'_>) {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ span,
+ E0307,
+ "invalid `self` parameter type: {receiver_ty}"
+ )
+ .note("type of `self` must be `Self` or a type that dereferences to it")
+ .help(HELP_FOR_SELF_TYPE)
+ .emit();
+}
+
+/// Returns whether `receiver_ty` would be considered a valid receiver type for `self_ty`. If
+/// `arbitrary_self_types` is enabled, `receiver_ty` must transitively deref to `self_ty`, possibly
+/// through a `*const/mut T` raw pointer. If the feature is not enabled, the requirements are more
+/// strict: `receiver_ty` must implement `Receiver` and directly implement
+/// `Deref<Target = self_ty>`.
+///
+/// N.B., there are cases where this function returns `true` but causes an error to be emitted,
+/// particularly when `receiver_ty` derefs to a type that is the same as `self_ty` but has the
+/// wrong lifetime. Be careful of this if you are calling this function speculatively.
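+///
+/// A hedged illustration:
+///
+///     trait Foo {
+///         fn get(self: *const Self); // valid only with `arbitrary_self_types`
+///     }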
+fn receiver_is_valid<'tcx>(
+ wfcx: &WfCheckingCtxt<'_, 'tcx>,
+ span: Span,
+ receiver_ty: Ty<'tcx>,
+ self_ty: Ty<'tcx>,
+ arbitrary_self_types_enabled: bool,
+) -> bool {
+ let infcx = wfcx.infcx;
+ let tcx = wfcx.tcx();
+ let cause =
+ ObligationCause::new(span, wfcx.body_id, traits::ObligationCauseCode::MethodReceiver);
+
+ let can_eq_self = |ty| infcx.can_eq(wfcx.param_env, self_ty, ty).is_ok();
+
+ // `self: Self` is always valid.
+ if can_eq_self(receiver_ty) {
+ if let Err(err) = wfcx.equate_types(&cause, wfcx.param_env, self_ty, receiver_ty) {
+ infcx.err_ctxt().report_mismatched_types(&cause, self_ty, receiver_ty, err).emit();
+ }
+ return true;
+ }
+
+ let mut autoderef =
+ Autoderef::new(infcx, wfcx.param_env, wfcx.body_id, span, receiver_ty, span);
+
+ // The `arbitrary_self_types` feature allows raw pointer receivers like `self: *const Self`.
+ if arbitrary_self_types_enabled {
+ autoderef = autoderef.include_raw_pointers();
+ }
+
+ // The first type is `receiver_ty`, which we know is not equal to `self_ty`; skip it.
+ autoderef.next();
+
+ let receiver_trait_def_id = tcx.require_lang_item(LangItem::Receiver, None);
+
+ // Keep dereferencing `receiver_ty` until we get to `self_ty`.
+ loop {
+ if let Some((potential_self_ty, _)) = autoderef.next() {
+ debug!(
+ "receiver_is_valid: potential self type `{:?}` to match `{:?}`",
+ potential_self_ty, self_ty
+ );
+
+ if can_eq_self(potential_self_ty) {
+ wfcx.register_obligations(autoderef.into_obligations());
+
+ if let Err(err) =
+ wfcx.equate_types(&cause, wfcx.param_env, self_ty, potential_self_ty)
+ {
+ infcx
+ .err_ctxt()
+ .report_mismatched_types(&cause, self_ty, potential_self_ty, err)
+ .emit();
+ }
+
+ break;
+ } else {
+ // Without `feature(arbitrary_self_types)`, we require that each step in the
+ // deref chain implement `Receiver`.
+ if !arbitrary_self_types_enabled
+ && !receiver_is_implemented(
+ wfcx,
+ receiver_trait_def_id,
+ cause.clone(),
+ potential_self_ty,
+ )
+ {
+ return false;
+ }
+ }
+ } else {
+ debug!("receiver_is_valid: type `{:?}` does not deref to `{:?}`", receiver_ty, self_ty);
+ // If the receiver already has errors reported due to it, consider it valid to avoid
+ // unnecessary errors (#58712).
+ return receiver_ty.references_error();
+ }
+ }
+
+ // Without `feature(arbitrary_self_types)`, we require that `receiver_ty` implements `Receiver`.
+ if !arbitrary_self_types_enabled
+ && !receiver_is_implemented(wfcx, receiver_trait_def_id, cause.clone(), receiver_ty)
+ {
+ return false;
+ }
+
+ true
+}
+
+fn receiver_is_implemented<'tcx>(
+ wfcx: &WfCheckingCtxt<'_, 'tcx>,
+ receiver_trait_def_id: DefId,
+ cause: ObligationCause<'tcx>,
+ receiver_ty: Ty<'tcx>,
+) -> bool {
+ let tcx = wfcx.tcx();
+ let trait_ref = ty::Binder::dummy(ty::TraitRef {
+ def_id: receiver_trait_def_id,
+ substs: tcx.mk_substs_trait(receiver_ty, &[]),
+ });
+
+ let obligation =
+ traits::Obligation::new(cause, wfcx.param_env, trait_ref.without_const().to_predicate(tcx));
+
+ if wfcx.infcx.predicate_must_hold_modulo_regions(&obligation) {
+ true
+ } else {
+ debug!(
+ "receiver_is_implemented: type `{:?}` does not implement `Receiver` trait",
+ receiver_ty
+ );
+ false
+ }
+}
+
+fn check_variances_for_type_defn<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ item: &hir::Item<'tcx>,
+ hir_generics: &hir::Generics<'_>,
+) {
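+ // A hedged illustration of what this flags:
+ //
+ //     struct Slice<'a, T> { data: *const T }  // error[E0392]: `'a` is never used
+ //
+ // with the usual help to reference the parameter or use `PhantomData`.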
+ let ty = tcx.type_of(item.owner_id);
+ if tcx.has_error_field(ty) {
+ return;
+ }
+
+ let ty_predicates = tcx.predicates_of(item.owner_id);
+ assert_eq!(ty_predicates.parent, None);
+ let variances = tcx.variances_of(item.owner_id);
+
+ let mut constrained_parameters: FxHashSet<_> = variances
+ .iter()
+ .enumerate()
+ .filter(|&(_, &variance)| variance != ty::Bivariant)
+ .map(|(index, _)| Parameter(index as u32))
+ .collect();
+
+ identify_constrained_generic_params(tcx, ty_predicates, None, &mut constrained_parameters);
+
+ // Lazily calculated because it is only needed in case of an error.
+ let explicitly_bounded_params = LazyCell::new(|| {
+ let icx = crate::collect::ItemCtxt::new(tcx, item.owner_id.to_def_id());
+ hir_generics
+ .predicates
+ .iter()
+ .filter_map(|predicate| match predicate {
+ hir::WherePredicate::BoundPredicate(predicate) => {
+ match icx.to_ty(predicate.bounded_ty).kind() {
+ ty::Param(data) => Some(Parameter(data.index)),
+ _ => None,
+ }
+ }
+ _ => None,
+ })
+ .collect::<FxHashSet<_>>()
+ });
+
+ for (index, _) in variances.iter().enumerate() {
+ let parameter = Parameter(index as u32);
+
+ if constrained_parameters.contains(&parameter) {
+ continue;
+ }
+
+ let param = &hir_generics.params[index];
+
+ match param.name {
+ hir::ParamName::Error => {}
+ _ => {
+ let has_explicit_bounds = explicitly_bounded_params.contains(&parameter);
+ report_bivariance(tcx, param, has_explicit_bounds);
+ }
+ }
+ }
+}
+
+fn report_bivariance(
+ tcx: TyCtxt<'_>,
+ param: &rustc_hir::GenericParam<'_>,
+ has_explicit_bounds: bool,
+) -> ErrorGuaranteed {
+ let span = param.span;
+ let param_name = param.name.ident().name;
+ let mut err = error_392(tcx, span, param_name);
+
+ let suggested_marker_id = tcx.lang_items().phantom_data();
+ // Help is available only in the presence of lang items.
+ let msg = if let Some(def_id) = suggested_marker_id {
+ format!(
+ "consider removing `{}`, referring to it in a field, or using a marker such as `{}`",
+ param_name,
+ tcx.def_path_str(def_id),
+ )
+ } else {
+ format!("consider removing `{param_name}` or referring to it in a field")
+ };
+ err.help(&msg);
+
+ if matches!(param.kind, hir::GenericParamKind::Type { .. }) && !has_explicit_bounds {
+ err.help(&format!(
+ "if you intended `{0}` to be a const parameter, use `const {0}: usize` instead",
+ param_name
+ ));
+ }
+ err.emit()
+}
+
+impl<'tcx> WfCheckingCtxt<'_, 'tcx> {
+ /// Feature gates RFC 2056 -- trivial bounds -- by checking for global bounds that
+ /// aren't true.
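+ ///
+ /// A hedged illustration of a bound this flags:
+ ///
+ ///     fn nothing() where String: Copy {} // error without `feature(trivial_bounds)`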
+ #[instrument(level = "debug", skip(self))]
+ fn check_false_global_bounds(&mut self) {
+ let tcx = self.ocx.infcx.tcx;
+ let mut span = self.span;
+ let empty_env = ty::ParamEnv::empty();
+
+ let def_id = tcx.hir().local_def_id(self.body_id);
+ let predicates_with_span = tcx.predicates_of(def_id).predicates.iter().copied();
+ // Check elaborated bounds.
+ let implied_obligations = traits::elaborate_predicates_with_span(tcx, predicates_with_span);
+
+ for obligation in implied_obligations {
+ // We lower empty bounds like `Vec<dyn Copy>:` as
+ // `WellFormed(Vec<dyn Copy>)`, which will later get checked by
+ // regular WF checking
+ if let ty::PredicateKind::WellFormed(..) = obligation.predicate.kind().skip_binder() {
+ continue;
+ }
+ let pred = obligation.predicate;
+ // Match the existing behavior.
+ if pred.is_global() && !pred.has_late_bound_regions() {
+ let pred = self.normalize(span, None, pred);
+ let hir_node = tcx.hir().find(self.body_id);
+
+ // only use the span of the predicate clause (#90869)
+
+ if let Some(hir::Generics { predicates, .. }) =
+ hir_node.and_then(|node| node.generics())
+ {
+ let obligation_span = obligation.cause.span();
+
+ span = predicates
+ .iter()
+ // There seems to be no better way to find out which predicate we are in
+ .find(|pred| pred.span().contains(obligation_span))
+ .map(|pred| pred.span())
+ .unwrap_or(obligation_span);
+ }
+
+ let obligation = traits::Obligation::new(
+ traits::ObligationCause::new(span, self.body_id, traits::TrivialBound),
+ empty_env,
+ pred,
+ );
+ self.ocx.register_obligation(obligation);
+ }
+ }
+ }
+}
+
+fn check_mod_type_wf(tcx: TyCtxt<'_>, module: LocalDefId) {
+ let items = tcx.hir_module_items(module);
+ items.par_items(|item| tcx.ensure().check_well_formed(item.owner_id));
+ items.par_impl_items(|item| tcx.ensure().check_well_formed(item.owner_id));
+ items.par_trait_items(|item| tcx.ensure().check_well_formed(item.owner_id));
+ items.par_foreign_items(|item| tcx.ensure().check_well_formed(item.owner_id));
+}
+
+///////////////////////////////////////////////////////////////////////////
+// ADT
+
+// FIXME(eddyb) replace this with getting fields/discriminants through `ty::AdtDef`.
+struct AdtVariant<'tcx> {
+ /// Types of fields in the variant, that must be well-formed.
+ fields: Vec<AdtField<'tcx>>,
+
+ /// Explicit discriminant of this variant (e.g. `A = 123`),
+ /// which must evaluate to a constant value.
+ explicit_discr: Option<LocalDefId>,
+}
+
+struct AdtField<'tcx> {
+ ty: Ty<'tcx>,
+ def_id: LocalDefId,
+ span: Span,
+}
+
+impl<'a, 'tcx> WfCheckingCtxt<'a, 'tcx> {
+ // FIXME(eddyb) replace this with getting fields through `ty::AdtDef`.
+ fn non_enum_variant(&self, struct_def: &hir::VariantData<'_>) -> AdtVariant<'tcx> {
+ let fields = struct_def
+ .fields()
+ .iter()
+ .map(|field| {
+ let def_id = self.tcx().hir().local_def_id(field.hir_id);
+ let field_ty = self.tcx().type_of(def_id);
+ let field_ty = self.normalize(field.ty.span, None, field_ty);
+ debug!("non_enum_variant: type of field {:?} is {:?}", field, field_ty);
+ AdtField { ty: field_ty, span: field.ty.span, def_id }
+ })
+ .collect();
+ AdtVariant { fields, explicit_discr: None }
+ }
+
+ fn enum_variants(&self, enum_def: &hir::EnumDef<'_>) -> Vec<AdtVariant<'tcx>> {
+ enum_def
+ .variants
+ .iter()
+ .map(|variant| AdtVariant {
+ fields: self.non_enum_variant(&variant.data).fields,
+ explicit_discr: variant
+ .disr_expr
+ .map(|explicit_discr| self.tcx().hir().local_def_id(explicit_discr.hir_id)),
+ })
+ .collect()
+ }
+}
+
+fn error_392(
+ tcx: TyCtxt<'_>,
+ span: Span,
+ param_name: Symbol,
+) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut err = struct_span_err!(tcx.sess, span, E0392, "parameter `{param_name}` is never used");
+ err.span_label(span, "unused parameter");
+ err
+}
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { check_mod_type_wf, check_well_formed, ..*providers };
+}