path: root/compiler/rustc_const_eval/src/transform
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit  698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree    173a775858bd501c378080a10dca74132f05bc50 /compiler/rustc_const_eval/src/transform
parent  Initial commit. (diff)
Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_const_eval/src/transform')
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/check.rs                  1032
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/mod.rs                     132
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/ops.rs                     771
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs   123
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs                 384
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/resolver.rs                384
-rw-r--r--  compiler/rustc_const_eval/src/transform/mod.rs                                     3
-rw-r--r--  compiler/rustc_const_eval/src/transform/promote_consts.rs                       1066
-rw-r--r--  compiler/rustc_const_eval/src/transform/validate.rs                              913
9 files changed, 4808 insertions, 0 deletions
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
new file mode 100644
index 000000000..0adb88a18
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -0,0 +1,1032 @@
+//! The `Visitor` responsible for actually checking a `mir::Body` for invalid operations.
+
+use rustc_errors::{Diagnostic, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_index::bit_set::BitSet;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
+use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
+use rustc_middle::ty::{self, adjustment::PointerCast, Instance, InstanceDef, Ty, TyCtxt};
+use rustc_middle::ty::{Binder, TraitPredicate, TraitRef, TypeVisitable};
+use rustc_mir_dataflow::{self, Analysis};
+use rustc_span::{sym, Span, Symbol};
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
+use rustc_trait_selection::traits::SelectionContext;
+
+use std::mem;
+use std::ops::Deref;
+
+use super::ops::{self, NonConstOp, Status};
+use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop, NeedsNonConstDrop};
+use super::resolver::FlowSensitiveAnalysis;
+use super::{ConstCx, Qualif};
+use crate::const_eval::is_unstable_const_fn;
+use crate::errors::UnstableInStable;
+
+type QualifResults<'mir, 'tcx, Q> =
+ rustc_mir_dataflow::ResultsCursor<'mir, 'tcx, FlowSensitiveAnalysis<'mir, 'mir, 'tcx, Q>>;
+
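+/// Lazily-initialized dataflow cursors for the value qualifs checked here
+/// ([`HasMutInterior`], [`NeedsDrop`], [`NeedsNonConstDrop`]); each analysis is
+/// run only the first time a query actually needs its results.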
+#[derive(Default)]
+pub struct Qualifs<'mir, 'tcx> {
+ has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>,
+ needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>,
+ needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>,
+}
+
+impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
+ /// Returns `true` if `local` is `NeedsDrop` at the given `Location`.
+ ///
+    /// Only updates the cursor if absolutely necessary.
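+    ///
+    /// Illustrative (hypothetical) usage, assuming a `ConstCx` and a `Location` of
+    /// interest are already in hand:
+    ///
+    /// ```ignore (illustrative)
+    /// let mut qualifs = Qualifs::default();
+    /// if qualifs.needs_drop(ccx, local, location) {
+    ///     // `local` may require drop glue here, so a `Drop` terminator for it
+    ///     // would be a live drop in this const context.
+    /// }
+    /// ```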
+ pub fn needs_drop(
+ &mut self,
+ ccx: &'mir ConstCx<'mir, 'tcx>,
+ local: Local,
+ location: Location,
+ ) -> bool {
+ let ty = ccx.body.local_decls[local].ty;
+ // Peeking into opaque types causes cycles if the current function declares said opaque
+ // type. Thus we avoid short circuiting on the type and instead run the more expensive
+ // analysis that looks at the actual usage within this function
+ if !ty.has_opaque_types() && !NeedsDrop::in_any_value_of_ty(ccx, ty) {
+ return false;
+ }
+
+ let needs_drop = self.needs_drop.get_or_insert_with(|| {
+ let ConstCx { tcx, body, .. } = *ccx;
+
+ FlowSensitiveAnalysis::new(NeedsDrop, ccx)
+ .into_engine(tcx, &body)
+ .iterate_to_fixpoint()
+ .into_results_cursor(&body)
+ });
+
+ needs_drop.seek_before_primary_effect(location);
+ needs_drop.get().contains(local)
+ }
+
+ /// Returns `true` if `local` is `NeedsNonConstDrop` at the given `Location`.
+ ///
+    /// Only updates the cursor if absolutely necessary.
+ pub fn needs_non_const_drop(
+ &mut self,
+ ccx: &'mir ConstCx<'mir, 'tcx>,
+ local: Local,
+ location: Location,
+ ) -> bool {
+ let ty = ccx.body.local_decls[local].ty;
+ if !NeedsNonConstDrop::in_any_value_of_ty(ccx, ty) {
+ return false;
+ }
+
+ let needs_non_const_drop = self.needs_non_const_drop.get_or_insert_with(|| {
+ let ConstCx { tcx, body, .. } = *ccx;
+
+ FlowSensitiveAnalysis::new(NeedsNonConstDrop, ccx)
+ .into_engine(tcx, &body)
+ .iterate_to_fixpoint()
+ .into_results_cursor(&body)
+ });
+
+ needs_non_const_drop.seek_before_primary_effect(location);
+ needs_non_const_drop.get().contains(local)
+ }
+
+ /// Returns `true` if `local` is `HasMutInterior` at the given `Location`.
+ ///
+ /// Only updates the cursor if absolutely necessary.
+ pub fn has_mut_interior(
+ &mut self,
+ ccx: &'mir ConstCx<'mir, 'tcx>,
+ local: Local,
+ location: Location,
+ ) -> bool {
+ let ty = ccx.body.local_decls[local].ty;
+ // Peeking into opaque types causes cycles if the current function declares said opaque
+ // type. Thus we avoid short circuiting on the type and instead run the more expensive
+ // analysis that looks at the actual usage within this function
+ if !ty.has_opaque_types() && !HasMutInterior::in_any_value_of_ty(ccx, ty) {
+ return false;
+ }
+
+ let has_mut_interior = self.has_mut_interior.get_or_insert_with(|| {
+ let ConstCx { tcx, body, .. } = *ccx;
+
+ FlowSensitiveAnalysis::new(HasMutInterior, ccx)
+ .into_engine(tcx, &body)
+ .iterate_to_fixpoint()
+ .into_results_cursor(&body)
+ });
+
+ has_mut_interior.seek_before_primary_effect(location);
+ has_mut_interior.get().contains(local)
+ }
+
+ fn in_return_place(
+ &mut self,
+ ccx: &'mir ConstCx<'mir, 'tcx>,
+ tainted_by_errors: Option<ErrorGuaranteed>,
+ ) -> ConstQualifs {
+ // Find the `Return` terminator if one exists.
+ //
+ // If no `Return` terminator exists, this MIR is divergent. Just return the conservative
+ // qualifs for the return type.
+ let return_block = ccx
+ .body
+ .basic_blocks()
+ .iter_enumerated()
+ .find(|(_, block)| matches!(block.terminator().kind, TerminatorKind::Return))
+ .map(|(bb, _)| bb);
+
+ let Some(return_block) = return_block else {
+ return qualifs::in_any_value_of_ty(ccx, ccx.body.return_ty(), tainted_by_errors);
+ };
+
+ let return_loc = ccx.body.terminator_loc(return_block);
+
+ let custom_eq = match ccx.const_kind() {
+ // We don't care whether a `const fn` returns a value that is not structurally
+        // matchable. Function calls are opaque and always use type-based qualification, so
+ // this value should never be used.
+ hir::ConstContext::ConstFn => true,
+
+ // If we know that all values of the return type are structurally matchable, there's no
+ // need to run dataflow.
+ // Opaque types do not participate in const generics or pattern matching, so we can safely count them out.
+ _ if ccx.body.return_ty().has_opaque_types()
+ || !CustomEq::in_any_value_of_ty(ccx, ccx.body.return_ty()) =>
+ {
+ false
+ }
+
+ hir::ConstContext::Const | hir::ConstContext::Static(_) => {
+ let mut cursor = FlowSensitiveAnalysis::new(CustomEq, ccx)
+ .into_engine(ccx.tcx, &ccx.body)
+ .iterate_to_fixpoint()
+ .into_results_cursor(&ccx.body);
+
+ cursor.seek_after_primary_effect(return_loc);
+ cursor.get().contains(RETURN_PLACE)
+ }
+ };
+
+ ConstQualifs {
+ needs_drop: self.needs_drop(ccx, RETURN_PLACE, return_loc),
+ needs_non_const_drop: self.needs_non_const_drop(ccx, RETURN_PLACE, return_loc),
+ has_mut_interior: self.has_mut_interior(ccx, RETURN_PLACE, return_loc),
+ custom_eq,
+ tainted_by_errors,
+ }
+ }
+}
+
+pub struct Checker<'mir, 'tcx> {
+ ccx: &'mir ConstCx<'mir, 'tcx>,
+ qualifs: Qualifs<'mir, 'tcx>,
+
+ /// The span of the current statement.
+ span: Span,
+
+ /// A set that stores for each local whether it has a `StorageDead` for it somewhere.
+ local_has_storage_dead: Option<BitSet<Local>>,
+
+ error_emitted: Option<ErrorGuaranteed>,
+ secondary_errors: Vec<Diagnostic>,
+}
+
+impl<'mir, 'tcx> Deref for Checker<'mir, 'tcx> {
+ type Target = ConstCx<'mir, 'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.ccx
+ }
+}
+
+impl<'mir, 'tcx> Checker<'mir, 'tcx> {
+ pub fn new(ccx: &'mir ConstCx<'mir, 'tcx>) -> Self {
+ Checker {
+ span: ccx.body.span,
+ ccx,
+ qualifs: Default::default(),
+ local_has_storage_dead: None,
+ error_emitted: None,
+ secondary_errors: Vec::new(),
+ }
+ }
+
+ pub fn check_body(&mut self) {
+ let ConstCx { tcx, body, .. } = *self.ccx;
+ let def_id = self.ccx.def_id();
+
+ // `async` functions cannot be `const fn`. This is checked during AST lowering, so there's
+ // no need to emit duplicate errors here.
+ if self.ccx.is_async() || body.generator.is_some() {
+ tcx.sess.delay_span_bug(body.span, "`async` functions cannot be `const fn`");
+ return;
+ }
+
+ // The local type and predicate checks are not free and only relevant for `const fn`s.
+ if self.const_kind() == hir::ConstContext::ConstFn {
+ for (idx, local) in body.local_decls.iter_enumerated() {
+ // Handle the return place below.
+ if idx == RETURN_PLACE || local.internal {
+ continue;
+ }
+
+ self.span = local.source_info.span;
+ self.check_local_or_return_ty(local.ty, idx);
+ }
+
+        // `impl Trait` is gone in MIR, so check the return type of a const fn by its signature
+ // instead of the type of the return place.
+ self.span = body.local_decls[RETURN_PLACE].source_info.span;
+ let return_ty = tcx.fn_sig(def_id).output();
+ self.check_local_or_return_ty(return_ty.skip_binder(), RETURN_PLACE);
+ }
+
+ if !tcx.has_attr(def_id.to_def_id(), sym::rustc_do_not_const_check) {
+ self.visit_body(&body);
+ }
+
+ // If we got through const-checking without emitting any "primary" errors, emit any
+ // "secondary" errors if they occurred.
+ let secondary_errors = mem::take(&mut self.secondary_errors);
+ if self.error_emitted.is_none() {
+ for mut error in secondary_errors {
+ self.tcx.sess.diagnostic().emit_diagnostic(&mut error);
+ }
+ } else {
+ assert!(self.tcx.sess.has_errors().is_some());
+ }
+ }
+
+ fn local_has_storage_dead(&mut self, local: Local) -> bool {
+ let ccx = self.ccx;
+ self.local_has_storage_dead
+ .get_or_insert_with(|| {
+ struct StorageDeads {
+ locals: BitSet<Local>,
+ }
+ impl<'tcx> Visitor<'tcx> for StorageDeads {
+ fn visit_statement(&mut self, stmt: &Statement<'tcx>, _: Location) {
+ if let StatementKind::StorageDead(l) = stmt.kind {
+ self.locals.insert(l);
+ }
+ }
+ }
+ let mut v = StorageDeads { locals: BitSet::new_empty(ccx.body.local_decls.len()) };
+ v.visit_body(ccx.body);
+ v.locals
+ })
+ .contains(local)
+ }
+
+ pub fn qualifs_in_return_place(&mut self) -> ConstQualifs {
+ self.qualifs.in_return_place(self.ccx, self.error_emitted)
+ }
+
+ /// Emits an error if an expression cannot be evaluated in the current context.
+ pub fn check_op(&mut self, op: impl NonConstOp<'tcx>) {
+ self.check_op_spanned(op, self.span);
+ }
+
+ /// Emits an error at the given `span` if an expression cannot be evaluated in the current
+ /// context.
+ pub fn check_op_spanned<O: NonConstOp<'tcx>>(&mut self, op: O, span: Span) {
+ let gate = match op.status_in_item(self.ccx) {
+ Status::Allowed => return,
+
+ Status::Unstable(gate) if self.tcx.features().enabled(gate) => {
+ let unstable_in_stable = self.ccx.is_const_stable_const_fn()
+ && !super::rustc_allow_const_fn_unstable(self.tcx, self.def_id(), gate);
+ if unstable_in_stable {
+ emit_unstable_in_stable_error(self.ccx, span, gate);
+ }
+
+ return;
+ }
+
+ Status::Unstable(gate) => Some(gate),
+ Status::Forbidden => None,
+ };
+
+ if self.tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you {
+ self.tcx.sess.miri_unleashed_feature(span, gate);
+ return;
+ }
+
+ let mut err = op.build_error(self.ccx, span);
+ assert!(err.is_error());
+
+ match op.importance() {
+ ops::DiagnosticImportance::Primary => {
+ let reported = err.emit();
+ self.error_emitted = Some(reported);
+ }
+
+ ops::DiagnosticImportance::Secondary => err.buffer(&mut self.secondary_errors),
+ }
+ }
+
+ fn check_static(&mut self, def_id: DefId, span: Span) {
+ if self.tcx.is_thread_local_static(def_id) {
+            self.tcx.sess.delay_span_bug(span, "tls access is checked in `Rvalue::ThreadLocalRef`");
+ }
+ self.check_op_spanned(ops::StaticAccess, span)
+ }
+
+ fn check_local_or_return_ty(&mut self, ty: Ty<'tcx>, local: Local) {
+ let kind = self.body.local_kind(local);
+
+ for ty in ty.walk() {
+ let ty = match ty.unpack() {
+ GenericArgKind::Type(ty) => ty,
+
+ // No constraints on lifetimes or constants, except potentially
+ // constants' types, but `walk` will get to them as well.
+ GenericArgKind::Lifetime(_) | GenericArgKind::Const(_) => continue,
+ };
+
+ match *ty.kind() {
+ ty::Ref(_, _, hir::Mutability::Mut) => self.check_op(ops::ty::MutRef(kind)),
+ _ => {}
+ }
+ }
+ }
+
+ fn check_mut_borrow(&mut self, local: Local, kind: hir::BorrowKind) {
+ match self.const_kind() {
+ // In a const fn all borrows are transient or point to the places given via
+ // references in the arguments (so we already checked them with
+ // TransientMutBorrow/MutBorrow as appropriate).
+ // The borrow checker guarantees that no new non-transient borrows are created.
+ // NOTE: Once we have heap allocations during CTFE we need to figure out
+            // how to prevent `const fn` from creating long-lived allocations that point
+ // to mutable memory.
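+            // Illustrative (hypothetical): in
+            //     const fn f() -> i32 { let mut x = 0; let r = &mut x; *r = 1; x }
+            // the `&mut x` borrow is transient (it cannot escape the call), so it is
+            // merely gated behind `const_mut_refs` rather than rejected outright.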
+ hir::ConstContext::ConstFn => self.check_op(ops::TransientMutBorrow(kind)),
+ _ => {
+ // Locals with StorageDead do not live beyond the evaluation and can
+ // thus safely be borrowed without being able to be leaked to the final
+ // value of the constant.
+ if self.local_has_storage_dead(local) {
+ self.check_op(ops::TransientMutBorrow(kind));
+ } else {
+ self.check_op(ops::MutBorrow(kind));
+ }
+ }
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
+ fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &BasicBlockData<'tcx>) {
+ trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);
+
+ // We don't const-check basic blocks on the cleanup path since we never unwind during
+ // const-eval: a panic causes an immediate compile error. In other words, cleanup blocks
+ // are unreachable during const-eval.
+ //
+ // We can't be more conservative (e.g., by const-checking cleanup blocks anyways) because
+ // locals that would never be dropped during normal execution are sometimes dropped during
+ // unwinding, which means backwards-incompatible live-drop errors.
+ if block.is_cleanup {
+ return;
+ }
+
+ self.super_basic_block_data(bb, block);
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+ trace!("visit_rvalue: rvalue={:?} location={:?}", rvalue, location);
+
+ // Special-case reborrows to be more like a copy of a reference.
+ match *rvalue {
+ Rvalue::Ref(_, kind, place) => {
+ if let Some(reborrowed_place_ref) = place_as_reborrow(self.tcx, self.body, place) {
+ let ctx = match kind {
+ BorrowKind::Shared => {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow)
+ }
+ BorrowKind::Shallow => {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow)
+ }
+ BorrowKind::Unique => {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::UniqueBorrow)
+ }
+ BorrowKind::Mut { .. } => {
+ PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+ }
+ };
+ self.visit_local(reborrowed_place_ref.local, ctx, location);
+ self.visit_projection(reborrowed_place_ref, ctx, location);
+ return;
+ }
+ }
+ Rvalue::AddressOf(mutbl, place) => {
+ if let Some(reborrowed_place_ref) = place_as_reborrow(self.tcx, self.body, place) {
+ let ctx = match mutbl {
+ Mutability::Not => {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::AddressOf)
+ }
+ Mutability::Mut => PlaceContext::MutatingUse(MutatingUseContext::AddressOf),
+ };
+ self.visit_local(reborrowed_place_ref.local, ctx, location);
+ self.visit_projection(reborrowed_place_ref, ctx, location);
+ return;
+ }
+ }
+ _ => {}
+ }
+
+ self.super_rvalue(rvalue, location);
+
+ match *rvalue {
+ Rvalue::ThreadLocalRef(_) => self.check_op(ops::ThreadLocalAccess),
+
+ Rvalue::Use(_)
+ | Rvalue::CopyForDeref(..)
+ | Rvalue::Repeat(..)
+ | Rvalue::Discriminant(..)
+ | Rvalue::Len(_)
+ | Rvalue::Aggregate(..) => {}
+
+ Rvalue::Ref(_, kind @ BorrowKind::Mut { .. }, ref place)
+ | Rvalue::Ref(_, kind @ BorrowKind::Unique, ref place) => {
+ let ty = place.ty(self.body, self.tcx).ty;
+ let is_allowed = match ty.kind() {
+ // Inside a `static mut`, `&mut [...]` is allowed.
+ ty::Array(..) | ty::Slice(_)
+ if self.const_kind() == hir::ConstContext::Static(hir::Mutability::Mut) =>
+ {
+ true
+ }
+
+ // FIXME(ecstaticmorse): We could allow `&mut []` inside a const context given
+ // that this is merely a ZST and it is already eligible for promotion.
+ // This may require an RFC?
+ /*
+ ty::Array(_, len) if len.try_eval_usize(cx.tcx, cx.param_env) == Some(0)
+ => true,
+ */
+ _ => false,
+ };
+
+ if !is_allowed {
+ if let BorrowKind::Mut { .. } = kind {
+ self.check_mut_borrow(place.local, hir::BorrowKind::Ref)
+ } else {
+ self.check_op(ops::CellBorrow);
+ }
+ }
+ }
+
+ Rvalue::AddressOf(Mutability::Mut, ref place) => {
+ self.check_mut_borrow(place.local, hir::BorrowKind::Raw)
+ }
+
+ Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Shallow, ref place)
+ | Rvalue::AddressOf(Mutability::Not, ref place) => {
+ let borrowed_place_has_mut_interior = qualifs::in_place::<HasMutInterior, _>(
+ &self.ccx,
+ &mut |local| self.qualifs.has_mut_interior(self.ccx, local, location),
+ place.as_ref(),
+ );
+
+ if borrowed_place_has_mut_interior {
+ match self.const_kind() {
+ // In a const fn all borrows are transient or point to the places given via
+ // references in the arguments (so we already checked them with
+ // TransientCellBorrow/CellBorrow as appropriate).
+ // The borrow checker guarantees that no new non-transient borrows are created.
+ // NOTE: Once we have heap allocations during CTFE we need to figure out
+                        // how to prevent `const fn` from creating long-lived allocations that point
+ // to (interior) mutable memory.
+ hir::ConstContext::ConstFn => self.check_op(ops::TransientCellBorrow),
+ _ => {
+ // Locals with StorageDead are definitely not part of the final constant value, and
+ // it is thus inherently safe to permit such locals to have their
+ // address taken as we can't end up with a reference to them in the
+ // final value.
+ // Note: This is only sound if every local that has a `StorageDead` has a
+ // `StorageDead` in every control flow path leading to a `return` terminator.
+ if self.local_has_storage_dead(place.local) {
+ self.check_op(ops::TransientCellBorrow);
+ } else {
+ self.check_op(ops::CellBorrow);
+ }
+ }
+ }
+ }
+ }
+
+ Rvalue::Cast(
+ CastKind::Pointer(
+ PointerCast::MutToConstPointer
+ | PointerCast::ArrayToPointer
+ | PointerCast::UnsafeFnPointer
+ | PointerCast::ClosureFnPointer(_)
+ | PointerCast::ReifyFnPointer,
+ ),
+ _,
+ _,
+ ) => {
+ // These are all okay; they only change the type, not the data.
+ }
+
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), _, _) => {
+ // Unsizing is implemented for CTFE.
+ }
+
+ Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => {
+ self.check_op(ops::RawPtrToIntCast);
+ }
+ Rvalue::Cast(CastKind::PointerFromExposedAddress, _, _) => {
+ // Since no pointer can ever get exposed (rejected above), this is easy to support.
+ }
+
+ Rvalue::Cast(CastKind::Misc, _, _) => {}
+
+ Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf, _) => {}
+ Rvalue::ShallowInitBox(_, _) => {}
+
+ Rvalue::UnaryOp(_, ref operand) => {
+ let ty = operand.ty(self.body, self.tcx);
+ if is_int_bool_or_char(ty) {
+ // Int, bool, and char operations are fine.
+ } else if ty.is_floating_point() {
+ self.check_op(ops::FloatingPointOp);
+ } else {
+ span_bug!(self.span, "non-primitive type in `Rvalue::UnaryOp`: {:?}", ty);
+ }
+ }
+
+ Rvalue::BinaryOp(op, box (ref lhs, ref rhs))
+ | Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
+ let lhs_ty = lhs.ty(self.body, self.tcx);
+ let rhs_ty = rhs.ty(self.body, self.tcx);
+
+ if is_int_bool_or_char(lhs_ty) && is_int_bool_or_char(rhs_ty) {
+ // Int, bool, and char operations are fine.
+ } else if lhs_ty.is_fn_ptr() || lhs_ty.is_unsafe_ptr() {
+ assert_eq!(lhs_ty, rhs_ty);
+ assert!(
+ op == BinOp::Eq
+ || op == BinOp::Ne
+ || op == BinOp::Le
+ || op == BinOp::Lt
+ || op == BinOp::Ge
+ || op == BinOp::Gt
+ || op == BinOp::Offset
+ );
+
+ self.check_op(ops::RawPtrComparison);
+ } else if lhs_ty.is_floating_point() || rhs_ty.is_floating_point() {
+ self.check_op(ops::FloatingPointOp);
+ } else {
+ span_bug!(
+ self.span,
+ "non-primitive type in `Rvalue::BinaryOp`: {:?} ⚬ {:?}",
+ lhs_ty,
+ rhs_ty
+ );
+ }
+ }
+ }
+ }
+
+ fn visit_operand(&mut self, op: &Operand<'tcx>, location: Location) {
+ self.super_operand(op, location);
+ if let Operand::Constant(c) = op {
+ if let Some(def_id) = c.check_static_ptr(self.tcx) {
+ self.check_static(def_id, self.span);
+ }
+ }
+ }
+ fn visit_projection_elem(
+ &mut self,
+ place_local: Local,
+ proj_base: &[PlaceElem<'tcx>],
+ elem: PlaceElem<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ trace!(
+ "visit_projection_elem: place_local={:?} proj_base={:?} elem={:?} \
+ context={:?} location={:?}",
+ place_local,
+ proj_base,
+ elem,
+ context,
+ location,
+ );
+
+ self.super_projection_elem(place_local, proj_base, elem, context, location);
+
+ match elem {
+ ProjectionElem::Deref => {
+ let base_ty = Place::ty_from(place_local, proj_base, self.body, self.tcx).ty;
+ if base_ty.is_unsafe_ptr() {
+ if proj_base.is_empty() {
+ let decl = &self.body.local_decls[place_local];
+ if let Some(box LocalInfo::StaticRef { def_id, .. }) = decl.local_info {
+ let span = decl.source_info.span;
+ self.check_static(def_id, span);
+ return;
+ }
+ }
+
+ // `*const T` is stable, `*mut T` is not
+ if !base_ty.is_mutable_ptr() {
+ return;
+ }
+
+ self.check_op(ops::RawMutPtrDeref);
+ }
+
+ if context.is_mutating_use() {
+ self.check_op(ops::MutDeref);
+ }
+ }
+
+ ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Downcast(..)
+ | ProjectionElem::Subslice { .. }
+ | ProjectionElem::Field(..)
+ | ProjectionElem::Index(_) => {}
+ }
+ }
+
+ fn visit_source_info(&mut self, source_info: &SourceInfo) {
+ trace!("visit_source_info: source_info={:?}", source_info);
+ self.span = source_info.span;
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ trace!("visit_statement: statement={:?} location={:?}", statement, location);
+
+ self.super_statement(statement, location);
+
+ match statement.kind {
+ StatementKind::Assign(..)
+ | StatementKind::SetDiscriminant { .. }
+ | StatementKind::Deinit(..)
+ | StatementKind::FakeRead(..)
+ | StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Retag { .. }
+ | StatementKind::AscribeUserType(..)
+ | StatementKind::Coverage(..)
+ | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Nop => {}
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ self.super_terminator(terminator, location);
+
+ match &terminator.kind {
+ TerminatorKind::Call { func, args, fn_span, from_hir_call, .. } => {
+ let ConstCx { tcx, body, param_env, .. } = *self.ccx;
+ let caller = self.def_id();
+
+ let fn_ty = func.ty(body, tcx);
+
+ let (mut callee, mut substs) = match *fn_ty.kind() {
+ ty::FnDef(def_id, substs) => (def_id, substs),
+
+ ty::FnPtr(_) => {
+ self.check_op(ops::FnCallIndirect);
+ return;
+ }
+ _ => {
+ span_bug!(terminator.source_info.span, "invalid callee of type {:?}", fn_ty)
+ }
+ };
+
+ // Attempting to call a trait method?
+ if let Some(trait_id) = tcx.trait_of_item(callee) {
+ trace!("attempting to call a trait method");
+ if !self.tcx.features().const_trait_impl {
+ self.check_op(ops::FnCallNonConst {
+ caller,
+ callee,
+ substs,
+ span: *fn_span,
+ from_hir_call: *from_hir_call,
+ });
+ return;
+ }
+
+ let trait_ref = TraitRef::from_method(tcx, trait_id, substs);
+ let poly_trait_pred = Binder::dummy(TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::ConstIfConst,
+ polarity: ty::ImplPolarity::Positive,
+ });
+ let obligation =
+ Obligation::new(ObligationCause::dummy(), param_env, poly_trait_pred);
+
+ let implsrc = tcx.infer_ctxt().enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+ selcx.select(&obligation)
+ });
+
+ match implsrc {
+ Ok(Some(ImplSource::Param(_, ty::BoundConstness::ConstIfConst))) => {
+ debug!(
+ "const_trait_impl: provided {:?} via where-clause in {:?}",
+ trait_ref, param_env
+ );
+ return;
+ }
+ Ok(Some(ImplSource::UserDefined(data))) => {
+ let callee_name = tcx.item_name(callee);
+ if let Some(&did) = tcx
+ .associated_item_def_ids(data.impl_def_id)
+ .iter()
+ .find(|did| tcx.item_name(**did) == callee_name)
+ {
+ // using internal substs is ok here, since this is only
+ // used for the `resolve` call below
+ substs = InternalSubsts::identity_for_item(tcx, did);
+ callee = did;
+ }
+
+ if let hir::Constness::NotConst = tcx.constness(data.impl_def_id) {
+ self.check_op(ops::FnCallNonConst {
+ caller,
+ callee,
+ substs,
+ span: *fn_span,
+ from_hir_call: *from_hir_call,
+ });
+ return;
+ }
+ }
+ _ if !tcx.is_const_fn_raw(callee) => {
+ // At this point, it is only legal when the caller is in a trait
+ // marked with #[const_trait], and the callee is in the same trait.
+ let mut nonconst_call_permission = false;
+ if let Some(callee_trait) = tcx.trait_of_item(callee)
+ && tcx.has_attr(callee_trait, sym::const_trait)
+ && Some(callee_trait) == tcx.trait_of_item(caller.to_def_id())
+ // Can only call methods when it's `<Self as TheTrait>::f`.
+ && tcx.types.self_param == substs.type_at(0)
+ {
+ nonconst_call_permission = true;
+ }
+
+ if !nonconst_call_permission {
+ let obligation = Obligation::new(
+ ObligationCause::dummy_with_span(*fn_span),
+ param_env,
+ tcx.mk_predicate(
+ poly_trait_pred.map_bound(ty::PredicateKind::Trait),
+ ),
+ );
+
+                        // Improve diagnostics by showing what failed. Our requirements are stricter
+                        // this time, as we are going to error again anyway.
+ tcx.infer_ctxt().enter(|infcx| {
+ if let Err(e) = implsrc {
+ infcx.report_selection_error(
+ obligation.clone(),
+ &obligation,
+ &e,
+ false,
+ );
+ }
+ });
+
+ self.check_op(ops::FnCallNonConst {
+ caller,
+ callee,
+ substs,
+ span: *fn_span,
+ from_hir_call: *from_hir_call,
+ });
+ return;
+ }
+ }
+ _ => {}
+ }
+
+ // Resolve a trait method call to its concrete implementation, which may be in a
+ // `const` trait impl.
+ let instance = Instance::resolve(tcx, param_env, callee, substs);
+ debug!("Resolving ({:?}) -> {:?}", callee, instance);
+ if let Ok(Some(func)) = instance {
+ if let InstanceDef::Item(def) = func.def {
+ callee = def.did;
+ }
+ }
+ }
+
+ // At this point, we are calling a function, `callee`, whose `DefId` is known...
+
+ // `begin_panic` and `panic_display` are generic functions that accept
+ // types other than str. Check to enforce that only str can be used in
+ // const-eval.
+
+ // const-eval of the `begin_panic` fn assumes the argument is `&str`
+ if Some(callee) == tcx.lang_items().begin_panic_fn() {
+ match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
+ ty::Ref(_, ty, _) if ty.is_str() => return,
+ _ => self.check_op(ops::PanicNonStr),
+ }
+ }
+
+ // const-eval of the `panic_display` fn assumes the argument is `&&str`
+ if Some(callee) == tcx.lang_items().panic_display() {
+ match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
+ ty::Ref(_, ty, _) if matches!(ty.kind(), ty::Ref(_, ty, _) if ty.is_str()) =>
+ {
+ return;
+ }
+ _ => self.check_op(ops::PanicNonStr),
+ }
+ }
+
+ if Some(callee) == tcx.lang_items().exchange_malloc_fn() {
+ self.check_op(ops::HeapAllocation);
+ return;
+ }
+
+ // `async` blocks get lowered to `std::future::from_generator(/* a closure */)`.
+ let is_async_block = Some(callee) == tcx.lang_items().from_generator_fn();
+ if is_async_block {
+ let kind = hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block);
+ self.check_op(ops::Generator(kind));
+ return;
+ }
+
+ let is_intrinsic = tcx.is_intrinsic(callee);
+
+ if !tcx.is_const_fn_raw(callee) {
+ if !tcx.is_const_default_method(callee) {
+                    // To get here, we must already have found a const impl for the
+                    // trait, but the callee can still be non-const if the impl falls
+                    // back to the trait's default method bodies.
+ self.check_op(ops::FnCallNonConst {
+ caller,
+ callee,
+ substs,
+ span: *fn_span,
+ from_hir_call: *from_hir_call,
+ });
+ return;
+ }
+ }
+
+ // If the `const fn` we are trying to call is not const-stable, ensure that we have
+ // the proper feature gate enabled.
+ if let Some(gate) = is_unstable_const_fn(tcx, callee) {
+ trace!(?gate, "calling unstable const fn");
+ if self.span.allows_unstable(gate) {
+ return;
+ }
+
+ // Calling an unstable function *always* requires that the corresponding gate
+ // be enabled, even if the function has `#[rustc_allow_const_fn_unstable(the_gate)]`.
+ if !tcx.features().declared_lib_features.iter().any(|&(sym, _)| sym == gate) {
+ self.check_op(ops::FnCallUnstable(callee, Some(gate)));
+ return;
+ }
+
+ // If this crate is not using stability attributes, or the caller is not claiming to be a
+ // stable `const fn`, that is all that is required.
+ if !self.ccx.is_const_stable_const_fn() {
+ trace!("crate not using stability attributes or caller not stably const");
+ return;
+ }
+
+ // Otherwise, we are something const-stable calling a const-unstable fn.
+
+ if super::rustc_allow_const_fn_unstable(tcx, caller, gate) {
+ trace!("rustc_allow_const_fn_unstable gate active");
+ return;
+ }
+
+ self.check_op(ops::FnCallUnstable(callee, Some(gate)));
+ return;
+ }
+
+            // FIXME(ecstaticmorse): For compatibility, we consider `unstable` callees that
+ // have no `rustc_const_stable` attributes to be const-unstable as well. This
+ // should be fixed later.
+ let callee_is_unstable_unmarked = tcx.lookup_const_stability(callee).is_none()
+ && tcx.lookup_stability(callee).map_or(false, |s| s.is_unstable());
+ if callee_is_unstable_unmarked {
+ trace!("callee_is_unstable_unmarked");
+ // We do not use `const` modifiers for intrinsic "functions", as intrinsics are
+ // `extern` functions, and these have no way to get marked `const`. So instead we
+ // use `rustc_const_(un)stable` attributes to mean that the intrinsic is `const`
+ if self.ccx.is_const_stable_const_fn() || is_intrinsic {
+ self.check_op(ops::FnCallUnstable(callee, None));
+ return;
+ }
+ }
+ trace!("permitting call");
+ }
+
+ // Forbid all `Drop` terminators unless the place being dropped is a local with no
+ // projections that cannot be `NeedsNonConstDrop`.
+ TerminatorKind::Drop { place: dropped_place, .. }
+ | TerminatorKind::DropAndReplace { place: dropped_place, .. } => {
+ // If we are checking live drops after drop-elaboration, don't emit duplicate
+ // errors here.
+ if super::post_drop_elaboration::checking_enabled(self.ccx) {
+ return;
+ }
+
+ let mut err_span = self.span;
+ let ty_of_dropped_place = dropped_place.ty(self.body, self.tcx).ty;
+
+ let ty_needs_non_const_drop =
+ qualifs::NeedsNonConstDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place);
+
+ debug!(?ty_of_dropped_place, ?ty_needs_non_const_drop);
+
+ if !ty_needs_non_const_drop {
+ return;
+ }
+
+ let needs_non_const_drop = if let Some(local) = dropped_place.as_local() {
+ // Use the span where the local was declared as the span of the drop error.
+ err_span = self.body.local_decls[local].source_info.span;
+ self.qualifs.needs_non_const_drop(self.ccx, local, location)
+ } else {
+ true
+ };
+
+ if needs_non_const_drop {
+ self.check_op_spanned(
+ ops::LiveDrop { dropped_at: Some(terminator.source_info.span) },
+ err_span,
+ );
+ }
+ }
+
+ TerminatorKind::InlineAsm { .. } => self.check_op(ops::InlineAsm),
+
+ TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => {
+ self.check_op(ops::Generator(hir::GeneratorKind::Gen))
+ }
+
+ TerminatorKind::Abort => {
+ // Cleanup blocks are skipped for const checking (see `visit_basic_block_data`).
+ span_bug!(self.span, "`Abort` terminator outside of cleanup block")
+ }
+
+ TerminatorKind::Assert { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Return
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Unreachable => {}
+ }
+ }
+}
+
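+/// Returns the base of `place` if it is a reborrow of a reference, i.e. a
+/// projection of the form `&(*x)` where `x` is itself a reference (and not a
+/// raw pointer or a ref-to-static); otherwise returns `None`.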
+fn place_as_reborrow<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ place: Place<'tcx>,
+) -> Option<PlaceRef<'tcx>> {
+ match place.as_ref().last_projection() {
+ Some((place_base, ProjectionElem::Deref)) => {
+ // A borrow of a `static` also looks like `&(*_1)` in the MIR, but `_1` is a `const`
+ // that points to the allocation for the static. Don't treat these as reborrows.
+ if body.local_decls[place_base.local].is_ref_to_static() {
+ None
+ } else {
+            // Ensure the type being dereferenced is a reference and not a raw pointer.
+ // This is sufficient to prevent an access to a `static mut` from being marked as a
+ // reborrow, even if the check above were to disappear.
+ let inner_ty = place_base.ty(body, tcx).ty;
+
+ if let ty::Ref(..) = inner_ty.kind() {
+ return Some(place_base);
+ } else {
+ return None;
+ }
+ }
+ }
+ _ => None,
+ }
+}
+
+fn is_int_bool_or_char(ty: Ty<'_>) -> bool {
+ ty.is_bool() || ty.is_integral() || ty.is_char()
+}
+
+fn emit_unstable_in_stable_error(ccx: &ConstCx<'_, '_>, span: Span, gate: Symbol) {
+ let attr_span = ccx.tcx.def_span(ccx.def_id()).shrink_to_lo();
+
+ ccx.tcx.sess.emit_err(UnstableInStable { gate: gate.to_string(), span, attr_span });
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
new file mode 100644
index 000000000..25b420bed
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
@@ -0,0 +1,132 @@
+//! Check the bodies of `const`s, `static`s and `const fn`s for illegal operations.
+//!
+//! This module will eventually replace the parts of `qualify_consts.rs` that check whether a local
+//! has interior mutability or needs to be dropped, as well as the visitor that emits errors when
+//! it finds operations that are invalid in a certain context.
+
+use rustc_attr as attr;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::mir;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::Symbol;
+
+pub use self::qualifs::Qualif;
+
+pub mod check;
+mod ops;
+pub mod post_drop_elaboration;
+pub mod qualifs;
+mod resolver;
+
+/// Information about the item currently being const-checked, as well as a reference to the global
+/// context.
+pub struct ConstCx<'mir, 'tcx> {
+ pub body: &'mir mir::Body<'tcx>,
+ pub tcx: TyCtxt<'tcx>,
+ pub param_env: ty::ParamEnv<'tcx>,
+ pub const_kind: Option<hir::ConstContext>,
+}
+
+impl<'mir, 'tcx> ConstCx<'mir, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, body: &'mir mir::Body<'tcx>) -> Self {
+ let def_id = body.source.def_id().expect_local();
+ let param_env = tcx.param_env(def_id);
+ Self::new_with_param_env(tcx, body, param_env)
+ }
+
+ pub fn new_with_param_env(
+ tcx: TyCtxt<'tcx>,
+ body: &'mir mir::Body<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Self {
+ let const_kind = tcx.hir().body_const_context(body.source.def_id().expect_local());
+ ConstCx { body, tcx, param_env, const_kind }
+ }
+
+ pub fn def_id(&self) -> LocalDefId {
+ self.body.source.def_id().expect_local()
+ }
+
+ /// Returns the kind of const context this `Item` represents (`const`, `static`, etc.).
+ ///
+ /// Panics if this `Item` is not const.
+ pub fn const_kind(&self) -> hir::ConstContext {
+ self.const_kind.expect("`const_kind` must not be called on a non-const fn")
+ }
+
+ pub fn is_const_stable_const_fn(&self) -> bool {
+ self.const_kind == Some(hir::ConstContext::ConstFn)
+ && self.tcx.features().staged_api
+ && is_const_stable_const_fn(self.tcx, self.def_id().to_def_id())
+ }
+
+ fn is_async(&self) -> bool {
+ self.tcx.asyncness(self.def_id()) == hir::IsAsync::Async
+ }
+}
+
+pub fn rustc_allow_const_fn_unstable(
+ tcx: TyCtxt<'_>,
+ def_id: LocalDefId,
+ feature_gate: Symbol,
+) -> bool {
+ let attrs = tcx.hir().attrs(tcx.hir().local_def_id_to_hir_id(def_id));
+ attr::rustc_allow_const_fn_unstable(&tcx.sess, attrs).any(|name| name == feature_gate)
+}
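+
+// Illustrative (hypothetical) use of the attribute this helper inspects: a
+// const-stable function opting in to one unstable gate for its own body.
+//
+//     #[rustc_allow_const_fn_unstable(const_fn_floating_point_arithmetic)]
+//     pub const fn lerp(a: f32, b: f32, t: f32) -> f32 { a + (b - a) * t }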
+
+// Returns `true` if the given `const fn` is "const-stable".
+//
+// Panics if the given `DefId` does not refer to a `const fn`.
+//
+// Const-stability is only relevant for `const fn` within a `staged_api` crate. Only "const-stable"
+// functions can be called in a const-context by users of the stable compiler. "const-stable"
+// functions are subject to more stringent restrictions than "const-unstable" functions: They
+// cannot use unstable features and can only call other "const-stable" functions.
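+//
+// Illustrative (hypothetical) declarations in a `staged_api` crate:
+//
+//     #[stable(feature = "example", since = "1.0.0")]
+//     #[rustc_const_stable(feature = "const_example", since = "1.60.0")]
+//     pub const fn const_stable() {}    // usable in const contexts on stable
+//
+//     #[stable(feature = "example", since = "1.0.0")]
+//     #[rustc_const_unstable(feature = "const_example_2", issue = "none")]
+//     pub const fn const_unstable() {}  // callers need `#![feature(const_example_2)]`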
+pub fn is_const_stable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ // A default body in a `#[const_trait]` is not const-stable because const
+ // trait fns currently cannot be const-stable. We shouldn't
+ // restrict default bodies to only call const-stable functions.
+ if tcx.is_const_default_method(def_id) {
+ return false;
+ }
+
+ // Const-stability is only relevant for `const fn`.
+ assert!(tcx.is_const_fn_raw(def_id));
+
+    // A function is only const-stable if it has `#[rustc_const_stable]` or if the trait it belongs
+ // to is const-stable.
+ match tcx.lookup_const_stability(def_id) {
+ Some(stab) => stab.is_const_stable(),
+ None if is_parent_const_stable_trait(tcx, def_id) => {
+ // Remove this when `#![feature(const_trait_impl)]` is stabilized,
+ // returning `true` unconditionally.
+ tcx.sess.delay_span_bug(
+ tcx.def_span(def_id),
+ "trait implementations cannot be const stable yet",
+ );
+ true
+ }
+ None => false, // By default, items are not const stable.
+ }
+}
+
+fn is_parent_const_stable_trait(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ let local_def_id = def_id.expect_local();
+ let hir_id = tcx.local_def_id_to_hir_id(local_def_id);
+
+ let Some(parent) = tcx.hir().find_parent_node(hir_id) else { return false };
+ let parent_def = tcx.hir().get(parent);
+
+ if !matches!(
+ parent_def,
+ hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const, .. }),
+ ..
+ })
+ ) {
+ return false;
+ }
+
+ tcx.lookup_const_stability(parent.owner).map_or(false, |stab| stab.is_const_stable())
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
new file mode 100644
index 000000000..338022616
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
@@ -0,0 +1,771 @@
+//! Concrete error types for all operations which may be invalid in a certain const context.
+
+use hir::def_id::LocalDefId;
+use rustc_errors::{
+ error_code, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed,
+};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
+use rustc_middle::mir;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::{
+ suggest_constraining_type_param, Adt, Closure, DefIdTree, FnDef, FnPtr, Param, TraitPredicate,
+ Ty,
+};
+use rustc_middle::ty::{Binder, BoundConstness, ImplPolarity, TraitRef};
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::sym;
+use rustc_span::{BytePos, Pos, Span, Symbol};
+use rustc_trait_selection::traits::SelectionContext;
+
+use super::ConstCx;
+use crate::errors::{
+ MutDerefErr, NonConstOpErr, PanicNonStrErr, RawPtrToIntErr, StaticAccessErr,
+ TransientMutBorrowErr, TransientMutBorrowErrRaw,
+};
+use crate::util::{call_kind, CallDesugaringKind, CallKind};
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum Status {
+ Allowed,
+ Unstable(Symbol),
+ Forbidden,
+}
+
+#[derive(Clone, Copy)]
+pub enum DiagnosticImportance {
+ /// An operation that must be removed for const-checking to pass.
+ Primary,
+
+ /// An operation that causes const-checking to fail, but is usually a side-effect of a `Primary` operation elsewhere.
+ Secondary,
+}
+
+/// An operation that is not *always* allowed in a const context.
+pub trait NonConstOp<'tcx>: std::fmt::Debug {
+ /// Returns an enum indicating whether this operation is allowed within the given item.
+ fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Forbidden
+ }
+
+ fn importance(&self) -> DiagnosticImportance {
+ DiagnosticImportance::Primary
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>;
+}
+
+#[derive(Debug)]
+pub struct FloatingPointOp;
+impl<'tcx> NonConstOp<'tcx> for FloatingPointOp {
+ fn status_in_item(&self, ccx: &ConstCx<'_, 'tcx>) -> Status {
+ if ccx.const_kind() == hir::ConstContext::ConstFn {
+ Status::Unstable(sym::const_fn_floating_point_arithmetic)
+ } else {
+ Status::Allowed
+ }
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ feature_err(
+ &ccx.tcx.sess.parse_sess,
+ sym::const_fn_floating_point_arithmetic,
+ span,
+ &format!("floating point arithmetic is not allowed in {}s", ccx.const_kind()),
+ )
+ }
+}
+
+/// A function call where the callee is a pointer.
+#[derive(Debug)]
+pub struct FnCallIndirect;
+impl<'tcx> NonConstOp<'tcx> for FnCallIndirect {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.struct_span_err(
+ span,
+ &format!("function pointer calls are not allowed in {}s", ccx.const_kind()),
+ )
+ }
+}
+
+/// A function call where the callee is not marked as `const`.
+#[derive(Debug, Clone, Copy)]
+pub struct FnCallNonConst<'tcx> {
+ pub caller: LocalDefId,
+ pub callee: DefId,
+ pub substs: SubstsRef<'tcx>,
+ pub span: Span,
+ pub from_hir_call: bool,
+}
+
+impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ _: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let FnCallNonConst { caller, callee, substs, span, from_hir_call } = *self;
+ let ConstCx { tcx, param_env, .. } = *ccx;
+
+ let diag_trait = |err, self_ty: Ty<'_>, trait_id| {
+ let trait_ref = TraitRef::from_method(tcx, trait_id, substs);
+
+ match self_ty.kind() {
+ Param(param_ty) => {
+ debug!(?param_ty);
+ let caller_hir_id = tcx.hir().local_def_id_to_hir_id(caller);
+ if let Some(generics) = tcx.hir().get(caller_hir_id).generics() {
+ let constraint = with_no_trimmed_paths!(format!(
+ "~const {}",
+ trait_ref.print_only_trait_path()
+ ));
+ suggest_constraining_type_param(
+ tcx,
+ generics,
+ err,
+ &param_ty.name.as_str(),
+ &constraint,
+ None,
+ );
+ }
+ }
+ Adt(..) => {
+ let obligation = Obligation::new(
+ ObligationCause::dummy(),
+ param_env,
+ Binder::dummy(TraitPredicate {
+ trait_ref,
+ constness: BoundConstness::NotConst,
+ polarity: ImplPolarity::Positive,
+ }),
+ );
+
+ let implsrc = tcx.infer_ctxt().enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+ selcx.select(&obligation)
+ });
+
+ if let Ok(Some(ImplSource::UserDefined(data))) = implsrc {
+ let span = tcx.def_span(data.impl_def_id);
+ err.span_note(span, "impl defined here, but it is not `const`");
+ }
+ }
+ _ => {}
+ }
+ };
+
+ let call_kind = call_kind(tcx, ccx.param_env, callee, substs, span, from_hir_call, None);
+
+ debug!(?call_kind);
+
+ let mut err = match call_kind {
+ CallKind::Normal { desugaring: Some((kind, self_ty)), .. } => {
+ macro_rules! error {
+ ($fmt:literal) => {
+ struct_span_err!(tcx.sess, span, E0015, $fmt, self_ty, ccx.const_kind())
+ };
+ }
+
+ let mut err = match kind {
+ CallDesugaringKind::ForLoopIntoIter => {
+ error!("cannot convert `{}` into an iterator in {}s")
+ }
+ CallDesugaringKind::QuestionBranch => {
+ error!("`?` cannot determine the branch of `{}` in {}s")
+ }
+ CallDesugaringKind::QuestionFromResidual => {
+ error!("`?` cannot convert from residual of `{}` in {}s")
+ }
+ CallDesugaringKind::TryBlockFromOutput => {
+ error!("`try` block cannot convert `{}` to the result in {}s")
+ }
+ };
+
+ diag_trait(&mut err, self_ty, kind.trait_def_id(tcx));
+ err
+ }
+ CallKind::FnCall { fn_trait_id, self_ty } => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0015,
+ "cannot call non-const closure in {}s",
+ ccx.const_kind(),
+ );
+
+ match self_ty.kind() {
+ FnDef(def_id, ..) => {
+ let span = tcx.def_span(*def_id);
+ if ccx.tcx.is_const_fn_raw(*def_id) {
+ span_bug!(span, "calling const FnDef errored when it shouldn't");
+ }
+
+ err.span_note(span, "function defined here, but it is not `const`");
+ }
+ FnPtr(..) => {
+ err.note(&format!(
+ "function pointers need an RFC before allowed to be called in {}s",
+ ccx.const_kind()
+ ));
+ }
+ Closure(..) => {
+ err.note(&format!(
+ "closures need an RFC before allowed to be called in {}s",
+ ccx.const_kind()
+ ));
+ }
+ _ => {}
+ }
+
+ diag_trait(&mut err, self_ty, fn_trait_id);
+ err
+ }
+ CallKind::Operator { trait_id, self_ty, .. } => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0015,
+ "cannot call non-const operator in {}s",
+ ccx.const_kind()
+ );
+
+ if Some(trait_id) == ccx.tcx.lang_items().eq_trait() {
+ match (substs[0].unpack(), substs[1].unpack()) {
+ (GenericArgKind::Type(self_ty), GenericArgKind::Type(rhs_ty))
+ if self_ty == rhs_ty
+ && self_ty.is_ref()
+ && self_ty.peel_refs().is_primitive() =>
+ {
+ let mut num_refs = 0;
+ let mut tmp_ty = self_ty;
+ while let rustc_middle::ty::Ref(_, inner_ty, _) = tmp_ty.kind() {
+ num_refs += 1;
+ tmp_ty = *inner_ty;
+ }
+ let deref = "*".repeat(num_refs);
+
+ if let Ok(call_str) = ccx.tcx.sess.source_map().span_to_snippet(span) {
+ if let Some(eq_idx) = call_str.find("==") {
+ if let Some(rhs_idx) =
+ call_str[(eq_idx + 2)..].find(|c: char| !c.is_whitespace())
+ {
+ let rhs_pos =
+ span.lo() + BytePos::from_usize(eq_idx + 2 + rhs_idx);
+ let rhs_span = span.with_lo(rhs_pos).with_hi(rhs_pos);
+ err.multipart_suggestion(
+ "consider dereferencing here",
+ vec![
+ (span.shrink_to_lo(), deref.clone()),
+ (rhs_span, deref),
+ ],
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+
+ diag_trait(&mut err, self_ty, trait_id);
+ err
+ }
+ CallKind::DerefCoercion { deref_target, deref_target_ty, self_ty } => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0015,
+ "cannot perform deref coercion on `{}` in {}s",
+ self_ty,
+ ccx.const_kind()
+ );
+
+ err.note(&format!("attempting to deref into `{}`", deref_target_ty));
+
+ // Check first whether the source is accessible (issue #87060)
+ if tcx.sess.source_map().is_span_accessible(deref_target) {
+ err.span_note(deref_target, "deref defined here");
+ }
+
+ diag_trait(&mut err, self_ty, tcx.lang_items().deref_trait().unwrap());
+ err
+ }
+ _ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentV1Methods) => {
+ struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0015,
+ "cannot call non-const formatting macro in {}s",
+ ccx.const_kind(),
+ )
+ }
+ _ => struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0015,
+ "cannot call non-const fn `{}` in {}s",
+ ccx.tcx.def_path_str_with_substs(callee, substs),
+ ccx.const_kind(),
+ ),
+ };
+
+ err.note(&format!(
+ "calls in {}s are limited to constant functions, \
+ tuple structs and tuple variants",
+ ccx.const_kind(),
+ ));
+
+ err
+ }
+}
+
+/// A call to an `#[unstable]` const fn or `#[rustc_const_unstable]` function.
+///
+/// Contains the name of the feature that would allow the use of this function.
+#[derive(Debug)]
+pub struct FnCallUnstable(pub DefId, pub Option<Symbol>);
+
+impl<'tcx> NonConstOp<'tcx> for FnCallUnstable {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let FnCallUnstable(def_id, feature) = *self;
+
+ let mut err = ccx.tcx.sess.struct_span_err(
+ span,
+ &format!("`{}` is not yet stable as a const fn", ccx.tcx.def_path_str(def_id)),
+ );
+
+ if ccx.is_const_stable_const_fn() {
+ err.help("const-stable functions can only call other const-stable functions");
+ } else if ccx.tcx.sess.is_nightly_build() {
+ if let Some(feature) = feature {
+ err.help(&format!(
+ "add `#![feature({})]` to the crate attributes to enable",
+ feature
+ ));
+ }
+ }
+
+ err
+ }
+}
+
+#[derive(Debug)]
+pub struct Generator(pub hir::GeneratorKind);
+impl<'tcx> NonConstOp<'tcx> for Generator {
+ fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+ if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
+ Status::Unstable(sym::const_async_blocks)
+ } else {
+ Status::Forbidden
+ }
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let msg = format!("{}s are not allowed in {}s", self.0, ccx.const_kind());
+ if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
+ feature_err(&ccx.tcx.sess.parse_sess, sym::const_async_blocks, span, &msg)
+ } else {
+ ccx.tcx.sess.struct_span_err(span, &msg)
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct HeapAllocation;
+impl<'tcx> NonConstOp<'tcx> for HeapAllocation {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0010,
+ "allocations are not allowed in {}s",
+ ccx.const_kind()
+ );
+ err.span_label(span, format!("allocation not allowed in {}s", ccx.const_kind()));
+ if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "The value of statics and constants must be known at compile time, \
+ and they live for the entire lifetime of a program. Creating a boxed \
+ value allocates memory on the heap at runtime, and therefore cannot \
+ be done at compile time.",
+ );
+ }
+ err
+ }
+}
+
+#[derive(Debug)]
+pub struct InlineAsm;
+impl<'tcx> NonConstOp<'tcx> for InlineAsm {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0015,
+ "inline assembly is not allowed in {}s",
+ ccx.const_kind()
+ )
+ }
+}
+
+#[derive(Debug)]
+pub struct LiveDrop {
+ pub dropped_at: Option<Span>,
+}
+impl<'tcx> NonConstOp<'tcx> for LiveDrop {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0493,
+ "destructors cannot be evaluated at compile-time"
+ );
+ err.span_label(span, format!("{}s cannot evaluate destructors", ccx.const_kind()));
+ if let Some(span) = self.dropped_at {
+ err.span_label(span, "value is dropped here");
+ }
+ err
+ }
+}
+
+#[derive(Debug)]
+/// A borrow of a type that contains an `UnsafeCell` somewhere. The borrow never escapes to
+/// the final value of the constant.
+pub struct TransientCellBorrow;
+impl<'tcx> NonConstOp<'tcx> for TransientCellBorrow {
+ fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Unstable(sym::const_refs_to_cell)
+ }
+ fn importance(&self) -> DiagnosticImportance {
+ // The cases that cannot possibly work will already emit a `CellBorrow`, so we should
+ // not additionally emit a feature gate error if activating the feature gate won't work.
+ DiagnosticImportance::Secondary
+ }
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ feature_err(
+ &ccx.tcx.sess.parse_sess,
+ sym::const_refs_to_cell,
+ span,
+ "cannot borrow here, since the borrowed element may contain interior mutability",
+ )
+ }
+}
+
+#[derive(Debug)]
+/// A borrow of a type that contains an `UnsafeCell` somewhere. The borrow might escape to
+/// the final value of the constant, and thus we cannot allow this (for now). We may allow
+/// it in the future for static items.
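+///
+/// Illustrative (hypothetical) code this op rejects:
+///
+/// ```ignore (illustrative)
+/// use std::cell::Cell;
+/// const C: &Cell<i32> = &Cell::new(0); // error[E0492]
+/// ```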
+pub struct CellBorrow;
+impl<'tcx> NonConstOp<'tcx> for CellBorrow {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0492,
+ "{}s cannot refer to interior mutable data",
+ ccx.const_kind(),
+ );
+ err.span_label(
+ span,
+ "this borrow of an interior mutable value may end up in the final value",
+ );
+ if let hir::ConstContext::Static(_) = ccx.const_kind() {
+ err.help(
+ "to fix this, the value can be extracted to a separate \
+ `static` item and then referenced",
+ );
+ }
+ if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "A constant containing interior mutable data behind a reference can allow you
+ to modify that data. This would make multiple uses of a constant to be able to
+ see different values and allow circumventing the `Send` and `Sync` requirements
+ for shared mutable data, which is unsound.",
+ );
+ }
+ err
+ }
+}
+
+#[derive(Debug)]
+/// This op is for `&mut` borrows in the trailing expression of a constant
+/// which uses the "enclosing scopes rule" to leak its locals into anonymous
+/// static or const items.
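+///
+/// Illustrative (hypothetical) code this op rejects:
+///
+/// ```ignore (illustrative)
+/// const C: &'static mut i32 = &mut 4; // error[E0764]
+/// ```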
+pub struct MutBorrow(pub hir::BorrowKind);
+
+impl<'tcx> NonConstOp<'tcx> for MutBorrow {
+ fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Forbidden
+ }
+
+ fn importance(&self) -> DiagnosticImportance {
+ // If there were primary errors (like non-const function calls), do not emit further
+ // errors about mutable references.
+ DiagnosticImportance::Secondary
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let raw = match self.0 {
+ hir::BorrowKind::Raw => "raw ",
+ hir::BorrowKind::Ref => "",
+ };
+
+ let mut err = struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0764,
+ "{}mutable references are not allowed in the final value of {}s",
+ raw,
+ ccx.const_kind(),
+ );
+
+ if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "References in statics and constants may only refer \
+ to immutable values.\n\n\
+ Statics are shared everywhere, and if they refer to \
+ mutable data one might violate memory safety since \
+ holding multiple mutable references to shared data \
+ is not allowed.\n\n\
+ If you really want global mutable state, try using \
+ static mut or a global UnsafeCell.",
+ );
+ }
+ err
+ }
+}
+
+#[derive(Debug)]
+pub struct TransientMutBorrow(pub hir::BorrowKind);
+
+impl<'tcx> NonConstOp<'tcx> for TransientMutBorrow {
+ fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Unstable(sym::const_mut_refs)
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let kind = ccx.const_kind();
+ match self.0 {
+ hir::BorrowKind::Raw => ccx
+ .tcx
+ .sess
+ .create_feature_err(TransientMutBorrowErrRaw { span, kind }, sym::const_mut_refs),
+ hir::BorrowKind::Ref => ccx
+ .tcx
+ .sess
+ .create_feature_err(TransientMutBorrowErr { span, kind }, sym::const_mut_refs),
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct MutDeref;
+impl<'tcx> NonConstOp<'tcx> for MutDeref {
+ fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Unstable(sym::const_mut_refs)
+ }
+
+ fn importance(&self) -> DiagnosticImportance {
+ // Usually a side-effect of a `TransientMutBorrow` somewhere.
+ DiagnosticImportance::Secondary
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx
+ .sess
+ .create_feature_err(MutDerefErr { span, kind: ccx.const_kind() }, sym::const_mut_refs)
+ }
+}
+
+/// A call to a `panic()` lang item where the first argument is _not_ a `&str`.
+#[derive(Debug)]
+pub struct PanicNonStr;
+impl<'tcx> NonConstOp<'tcx> for PanicNonStr {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.create_err(PanicNonStrErr { span })
+ }
+}
+
+/// Comparing raw pointers for equality.
+/// Not currently intended to ever be allowed, even behind a feature gate: operation depends on
+/// allocation base addresses that are not known at compile-time.
+#[derive(Debug)]
+pub struct RawPtrComparison;
+impl<'tcx> NonConstOp<'tcx> for RawPtrComparison {
+ fn build_error(
+ &self,
+ _: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ span_bug!(span, "raw ptr comparison should already be caught in the trait system");
+ }
+}
+
+#[derive(Debug)]
+pub struct RawMutPtrDeref;
+impl<'tcx> NonConstOp<'tcx> for RawMutPtrDeref {
+ fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
+ Status::Unstable(sym::const_mut_refs)
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ feature_err(
+ &ccx.tcx.sess.parse_sess,
+ sym::const_mut_refs,
+ span,
+ &format!("dereferencing raw mutable pointers in {}s is unstable", ccx.const_kind(),),
+ )
+ }
+}
+
+/// Casting raw pointer or function pointer to an integer.
+/// Not currently intended to ever be allowed, even behind a feature gate: the operation
+/// depends on allocation base addresses that are not known at compile-time.
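+///
+/// A sketch (hypothetical snippet): `const A: usize = &1_i32 as *const i32 as usize;`
+/// is rejected, since the base address of the allocation backing `&1_i32` is not
+/// fixed until run time.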
+#[derive(Debug)]
+pub struct RawPtrToIntCast;
+impl<'tcx> NonConstOp<'tcx> for RawPtrToIntCast {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.create_err(RawPtrToIntErr { span })
+ }
+}
+
+/// An access to a (non-thread-local) `static`.
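+///
+/// For example, given `static A: i32 = 1;`, the item `const C: i32 = A;` is rejected
+/// with E0013, while the same read from another `static` initializer is allowed.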
+#[derive(Debug)]
+pub struct StaticAccess;
+impl<'tcx> NonConstOp<'tcx> for StaticAccess {
+ fn status_in_item(&self, ccx: &ConstCx<'_, 'tcx>) -> Status {
+ if let hir::ConstContext::Static(_) = ccx.const_kind() {
+ Status::Allowed
+ } else {
+ Status::Forbidden
+ }
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.create_err(StaticAccessErr {
+ span,
+ kind: ccx.const_kind(),
+ teach: ccx.tcx.sess.teach(&error_code!(E0013)).then_some(()),
+ })
+ }
+}
+
+/// An access to a thread-local `static`.
+#[derive(Debug)]
+pub struct ThreadLocalAccess;
+impl<'tcx> NonConstOp<'tcx> for ThreadLocalAccess {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.create_err(NonConstOpErr { span })
+ }
+}
+
+// Types that cannot appear in the signature or locals of a `const fn`.
+pub mod ty {
+ use super::*;
+
+ #[derive(Debug)]
+ pub struct MutRef(pub mir::LocalKind);
+ impl<'tcx> NonConstOp<'tcx> for MutRef {
+ fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Unstable(sym::const_mut_refs)
+ }
+
+ fn importance(&self) -> DiagnosticImportance {
+ match self.0 {
+ mir::LocalKind::Var | mir::LocalKind::Temp => DiagnosticImportance::Secondary,
+ mir::LocalKind::ReturnPointer | mir::LocalKind::Arg => {
+ DiagnosticImportance::Primary
+ }
+ }
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ feature_err(
+ &ccx.tcx.sess.parse_sess,
+ sym::const_mut_refs,
+ span,
+ &format!("mutable references are not allowed in {}s", ccx.const_kind()),
+ )
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
new file mode 100644
index 000000000..4e210f663
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
@@ -0,0 +1,123 @@
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{self, BasicBlock, Location};
+use rustc_middle::ty::TyCtxt;
+use rustc_span::{symbol::sym, Span};
+
+use super::check::Qualifs;
+use super::ops::{self, NonConstOp};
+use super::qualifs::{NeedsNonConstDrop, Qualif};
+use super::ConstCx;
+
+/// Returns `true` if we should use the more precise live drop checker that runs after drop
+/// elaboration.
+pub fn checking_enabled(ccx: &ConstCx<'_, '_>) -> bool {
+ // Const-stable functions must always use the stable live drop checker.
+ if ccx.is_const_stable_const_fn() {
+ return false;
+ }
+
+ ccx.tcx.features().const_precise_live_drops
+}
+
+/// Look for live drops in a const context.
+///
+/// This is separate from the rest of the const checking logic because it must run after drop
+/// elaboration.
+pub fn check_live_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>) {
+ let def_id = body.source.def_id().expect_local();
+ let const_kind = tcx.hir().body_const_context(def_id);
+ if const_kind.is_none() {
+ return;
+ }
+
+ if tcx.has_attr(def_id.to_def_id(), sym::rustc_do_not_const_check) {
+ return;
+ }
+
+ let ccx = ConstCx { body, tcx, const_kind, param_env: tcx.param_env(def_id) };
+ if !checking_enabled(&ccx) {
+ return;
+ }
+
+ let mut visitor = CheckLiveDrops { ccx: &ccx, qualifs: Qualifs::default() };
+
+ visitor.visit_body(body);
+}
+
+struct CheckLiveDrops<'mir, 'tcx> {
+ ccx: &'mir ConstCx<'mir, 'tcx>,
+ qualifs: Qualifs<'mir, 'tcx>,
+}
+
+// So we can access `body` and `tcx`.
+impl<'mir, 'tcx> std::ops::Deref for CheckLiveDrops<'mir, 'tcx> {
+ type Target = ConstCx<'mir, 'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.ccx
+ }
+}
+
+impl CheckLiveDrops<'_, '_> {
+ fn check_live_drop(&self, span: Span) {
+ ops::LiveDrop { dropped_at: None }.build_error(self.ccx, span).emit();
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
+ fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &mir::BasicBlockData<'tcx>) {
+ trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);
+
+ // Ignore drop terminators in cleanup blocks.
+ if block.is_cleanup {
+ return;
+ }
+
+ self.super_basic_block_data(bb, block);
+ }
+
+ fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+ trace!("visit_terminator: terminator={:?} location={:?}", terminator, location);
+
+ match &terminator.kind {
+ mir::TerminatorKind::Drop { place: dropped_place, .. }
+ | mir::TerminatorKind::DropAndReplace { place: dropped_place, .. } => {
+ let dropped_ty = dropped_place.ty(self.body, self.tcx).ty;
+ if !NeedsNonConstDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
+                    // Instead of throwing a bug, we just return here: the dropped value may
+                    // have a custom `const Drop` impl that is allowed to run at compile time.
+ return;
+ }
+
+ if dropped_place.is_indirect() {
+ self.check_live_drop(terminator.source_info.span);
+ return;
+ }
+
+ // Drop elaboration is not precise enough to accept code like
+ // `src/test/ui/consts/control-flow/drop-pass.rs`; e.g., when an `Option<Vec<T>>` is
+ // initialized with `None` and never changed, it still emits drop glue.
+ // Hence we additionally check the qualifs here to allow more code to pass.
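+                // An illustrative sketch (not the actual test):
+                //
+                //     const fn f() {
+                //         let v: Option<Vec<u8>> = None;
+                //         // `v` is dropped here; drop elaboration keeps a `Drop(v)`
+                //         // terminator, but the qualif analysis can see that `v`
+                //         // never holds a value needing a non-const drop.
+                //     }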
+ if self.qualifs.needs_non_const_drop(self.ccx, dropped_place.local, location) {
+ // Use the span where the dropped local was declared for the error.
+ let span = self.body.local_decls[dropped_place.local].source_info.span;
+ self.check_live_drop(span);
+ }
+ }
+
+ mir::TerminatorKind::Abort
+ | mir::TerminatorKind::Call { .. }
+ | mir::TerminatorKind::Assert { .. }
+ | mir::TerminatorKind::FalseEdge { .. }
+ | mir::TerminatorKind::FalseUnwind { .. }
+ | mir::TerminatorKind::GeneratorDrop
+ | mir::TerminatorKind::Goto { .. }
+ | mir::TerminatorKind::InlineAsm { .. }
+ | mir::TerminatorKind::Resume
+ | mir::TerminatorKind::Return
+ | mir::TerminatorKind::SwitchInt { .. }
+ | mir::TerminatorKind::Unreachable
+ | mir::TerminatorKind::Yield { .. } => {}
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
new file mode 100644
index 000000000..c8a63c9c3
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -0,0 +1,384 @@
+//! Structural const qualification.
+//!
+//! See the `Qualif` trait for more info.
+
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::LangItem;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::TraitEngine;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, subst::SubstsRef, AdtDef, Ty};
+use rustc_span::DUMMY_SP;
+use rustc_trait_selection::traits::{
+ self, ImplSource, Obligation, ObligationCause, SelectionContext, TraitEngineExt,
+};
+
+use super::ConstCx;
+
+pub fn in_any_value_of_ty<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ ty: Ty<'tcx>,
+ tainted_by_errors: Option<ErrorGuaranteed>,
+) -> ConstQualifs {
+ ConstQualifs {
+ has_mut_interior: HasMutInterior::in_any_value_of_ty(cx, ty),
+ needs_drop: NeedsDrop::in_any_value_of_ty(cx, ty),
+ needs_non_const_drop: NeedsNonConstDrop::in_any_value_of_ty(cx, ty),
+ custom_eq: CustomEq::in_any_value_of_ty(cx, ty),
+ tainted_by_errors,
+ }
+}
+
+/// A "qualif"(-ication) is a way to look for something "bad" in the MIR that would disqualify some
+/// code for promotion or prevent it from evaluating at compile time.
+///
+/// Normally, we would determine what qualifications apply to each type and error when an illegal
+/// operation is performed on such a type. However, this was found to be too imprecise, especially
+/// in the presence of `enum`s. If only a single variant of an enum has a certain qualification, we
+/// needn't reject code unless it actually constructs and operates on the qualified variant.
+///
+/// To accomplish this, const-checking and promotion use a value-based analysis (as opposed to a
+/// type-based one). Qualifications propagate structurally across variables: If a local (or a
+/// projection of a local) is assigned a qualified value, that local itself becomes qualified.
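+///
+/// For example, `Option<Cell<u32>>` as a *type* can contain interior mutability, but a
+/// local that is only ever assigned `None` never becomes `HasMutInterior`; the value-based
+/// analysis accepts such code where a purely type-based check would reject it.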
+pub trait Qualif {
+ /// The name of the file used to debug the dataflow analysis that computes this qualif.
+ const ANALYSIS_NAME: &'static str;
+
+ /// Whether this `Qualif` is cleared when a local is moved from.
+ const IS_CLEARED_ON_MOVE: bool = false;
+
+ /// Whether this `Qualif` might be evaluated after the promotion and can encounter a promoted.
+ const ALLOW_PROMOTED: bool = false;
+
+ /// Extracts the field of `ConstQualifs` that corresponds to this `Qualif`.
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool;
+
+ /// Returns `true` if *any* value of the given type could possibly have this `Qualif`.
+ ///
+ /// This function determines `Qualif`s when we cannot do a value-based analysis. Since qualif
+ /// propagation is context-insensitive, this includes function arguments and values returned
+ /// from a call to another function.
+ ///
+ /// It also determines the `Qualif`s for primitive types.
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool;
+
+ /// Returns `true` if this `Qualif` is inherent to the given struct or enum.
+ ///
+ /// By default, `Qualif`s propagate into ADTs in a structural way: An ADT only becomes
+ /// qualified if part of it is assigned a value with that `Qualif`. However, some ADTs *always*
+ /// have a certain `Qualif`, regardless of whether their fields have it. For example, a type
+ /// with a custom `Drop` impl is inherently `NeedsDrop`.
+ ///
+ /// Returning `true` for `in_adt_inherently` but `false` for `in_any_value_of_ty` is unsound.
+ fn in_adt_inherently<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> bool;
+}
+
+/// Constant containing interior mutability (`UnsafeCell<T>`).
+/// This must be ruled out to make sure that evaluating the constant at compile-time
+/// and at *any point* during the run-time would produce the same result. In particular,
+/// promotion of temporaries must not change program behavior; if the promoted could be
+/// written to, that would be a problem.
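+///
+/// E.g., promotion must not turn `&Cell::new(0)` into a `'static` reference: the cell
+/// could then be written to at run time, so the promoted would no longer have a single,
+/// fixed value.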
+pub struct HasMutInterior;
+
+impl Qualif for HasMutInterior {
+ const ANALYSIS_NAME: &'static str = "flow_has_mut_interior";
+
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+ qualifs.has_mut_interior
+ }
+
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env)
+ }
+
+ fn in_adt_inherently<'tcx>(
+ _cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ _: SubstsRef<'tcx>,
+ ) -> bool {
+ // Exactly one type, `UnsafeCell`, has the `HasMutInterior` qualif inherently.
+ // It arises structurally for all other types.
+ adt.is_unsafe_cell()
+ }
+}
+
+/// Constant containing an ADT that implements `Drop`.
+/// This must be ruled out because implicit promotion would remove side-effects
+/// that occur as part of dropping that value. N.B., the implicit promotion has
+/// to reject const Drop implementations because even if side-effects are ruled
+/// out through other means, the execution of the drop could diverge.
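+///
+/// E.g., `&Vec::new()` is never implicitly promoted to a `'static` reference, since that
+/// would silently skip the `Vec` drop at the end of the enclosing scope.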
+pub struct NeedsDrop;
+
+impl Qualif for NeedsDrop {
+ const ANALYSIS_NAME: &'static str = "flow_needs_drop";
+ const IS_CLEARED_ON_MOVE: bool = true;
+
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+ qualifs.needs_drop
+ }
+
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ ty.needs_drop(cx.tcx, cx.param_env)
+ }
+
+ fn in_adt_inherently<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ _: SubstsRef<'tcx>,
+ ) -> bool {
+ adt.has_dtor(cx.tcx)
+ }
+}
+
+/// Constant containing an ADT that implements non-const `Drop`.
+/// This must be ruled out because we cannot run `Drop` at compile time.
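+///
+/// Under `#![feature(const_trait_impl)]`, a type may provide an `impl const Drop`; such a
+/// type is `NeedsDrop` but not `NeedsNonConstDrop` (a sketch of the distinction, assuming
+/// that unstable feature).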
+pub struct NeedsNonConstDrop;
+
+impl Qualif for NeedsNonConstDrop {
+ const ANALYSIS_NAME: &'static str = "flow_needs_nonconst_drop";
+ const IS_CLEARED_ON_MOVE: bool = true;
+ const ALLOW_PROMOTED: bool = true;
+
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+ qualifs.needs_non_const_drop
+ }
+
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ // Avoid selecting for simple cases, such as builtin types.
+ if ty::util::is_trivially_const_drop(ty) {
+ return false;
+ }
+
+ let destruct = cx.tcx.require_lang_item(LangItem::Destruct, None);
+
+ let obligation = Obligation::new(
+ ObligationCause::dummy(),
+ cx.param_env,
+ ty::Binder::dummy(ty::TraitPredicate {
+ trait_ref: ty::TraitRef {
+ def_id: destruct,
+ substs: cx.tcx.mk_substs_trait(ty, &[]),
+ },
+ constness: ty::BoundConstness::ConstIfConst,
+ polarity: ty::ImplPolarity::Positive,
+ }),
+ );
+
+ cx.tcx.infer_ctxt().enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+ let Some(impl_src) = selcx.select(&obligation).ok().flatten() else {
+ // If we couldn't select a const destruct candidate, then it's bad
+ return true;
+ };
+
+ if !matches!(
+ impl_src,
+ ImplSource::ConstDestruct(_)
+ | ImplSource::Param(_, ty::BoundConstness::ConstIfConst)
+ ) {
+ // If our const destruct candidate is not ConstDestruct or implied by the param env,
+ // then it's bad
+ return true;
+ }
+
+ if impl_src.borrow_nested_obligations().is_empty() {
+ return false;
+ }
+
+ // If we successfully found one, then select all of the predicates
+ // implied by our const drop impl.
+ let mut fcx = <dyn TraitEngine<'tcx>>::new(cx.tcx);
+ for nested in impl_src.nested_obligations() {
+ fcx.register_predicate_obligation(&infcx, nested);
+ }
+
+ // If we had any errors, then it's bad
+ !fcx.select_all_or_error(&infcx).is_empty()
+ })
+ }
+
+ fn in_adt_inherently<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ _: SubstsRef<'tcx>,
+ ) -> bool {
+ adt.has_non_const_dtor(cx.tcx)
+ }
+}
+
+/// A constant that cannot be used as part of a pattern in a `match` expression.
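+///
+/// A sketch: given `struct S(i32);` without `#[derive(PartialEq, Eq)]`, a constant
+/// `const C: S = S(0);` cannot appear as a `match` pattern, because matching against a
+/// constant requires structural equality.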
+pub struct CustomEq;
+
+impl Qualif for CustomEq {
+ const ANALYSIS_NAME: &'static str = "flow_custom_eq";
+
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+ qualifs.custom_eq
+ }
+
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ // If *any* component of a composite data type does not implement `Structural{Partial,}Eq`,
+ // we know that at least some values of that type are not structural-match. I say "some"
+ // because that component may be part of an enum variant (e.g.,
+ // `Option::<NonStructuralMatchTy>::Some`), in which case some values of this type may be
+ // structural-match (`Option::None`).
+ traits::search_for_structural_match_violation(cx.body.span, cx.tcx, ty).is_some()
+ }
+
+ fn in_adt_inherently<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> bool {
+ let ty = cx.tcx.mk_ty(ty::Adt(adt, substs));
+ !ty.is_structural_eq_shallow(cx.tcx)
+ }
+}
+
+// FIXME: Use `mir::visit::Visitor` for the `in_*` functions if/when it supports early return.
+
+/// Returns `true` if this `Rvalue` contains qualif `Q`.
+pub fn in_rvalue<'tcx, Q, F>(
+ cx: &ConstCx<'_, 'tcx>,
+ in_local: &mut F,
+ rvalue: &Rvalue<'tcx>,
+) -> bool
+where
+ Q: Qualif,
+ F: FnMut(Local) -> bool,
+{
+ match rvalue {
+ Rvalue::ThreadLocalRef(_) | Rvalue::NullaryOp(..) => {
+ Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx))
+ }
+
+ Rvalue::Discriminant(place) | Rvalue::Len(place) => {
+ in_place::<Q, _>(cx, in_local, place.as_ref())
+ }
+
+ Rvalue::CopyForDeref(place) => in_place::<Q, _>(cx, in_local, place.as_ref()),
+
+ Rvalue::Use(operand)
+ | Rvalue::Repeat(operand, _)
+ | Rvalue::UnaryOp(_, operand)
+ | Rvalue::Cast(_, operand, _)
+ | Rvalue::ShallowInitBox(operand, _) => in_operand::<Q, _>(cx, in_local, operand),
+
+ Rvalue::BinaryOp(_, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(_, box (lhs, rhs)) => {
+ in_operand::<Q, _>(cx, in_local, lhs) || in_operand::<Q, _>(cx, in_local, rhs)
+ }
+
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+ // Special-case reborrows to be more like a copy of the reference.
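+            // (E.g., the qualifs of `&*r` with `r: &T` are those of `r` itself, not those
+            // of a fresh borrow of the pointee.)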
+ if let Some((place_base, ProjectionElem::Deref)) = place.as_ref().last_projection() {
+ let base_ty = place_base.ty(cx.body, cx.tcx).ty;
+ if let ty::Ref(..) = base_ty.kind() {
+ return in_place::<Q, _>(cx, in_local, place_base);
+ }
+ }
+
+ in_place::<Q, _>(cx, in_local, place.as_ref())
+ }
+
+ Rvalue::Aggregate(kind, operands) => {
+ // Return early if we know that the struct or enum being constructed is always
+ // qualified.
+ if let AggregateKind::Adt(adt_did, _, substs, ..) = **kind {
+ let def = cx.tcx.adt_def(adt_did);
+ if Q::in_adt_inherently(cx, def, substs) {
+ return true;
+ }
+ if def.is_union() && Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)) {
+ return true;
+ }
+ }
+
+ // Otherwise, proceed structurally...
+ operands.iter().any(|o| in_operand::<Q, _>(cx, in_local, o))
+ }
+ }
+}
+
+/// Returns `true` if this `Place` contains qualif `Q`.
+pub fn in_place<'tcx, Q, F>(cx: &ConstCx<'_, 'tcx>, in_local: &mut F, place: PlaceRef<'tcx>) -> bool
+where
+ Q: Qualif,
+ F: FnMut(Local) -> bool,
+{
+ let mut place = place;
+ while let Some((place_base, elem)) = place.last_projection() {
+ match elem {
+ ProjectionElem::Index(index) if in_local(index) => return true,
+
+ ProjectionElem::Deref
+ | ProjectionElem::Field(_, _)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. }
+ | ProjectionElem::Downcast(_, _)
+ | ProjectionElem::Index(_) => {}
+ }
+
+ let base_ty = place_base.ty(cx.body, cx.tcx);
+ let proj_ty = base_ty.projection_ty(cx.tcx, elem).ty;
+ if !Q::in_any_value_of_ty(cx, proj_ty) {
+ return false;
+ }
+
+ place = place_base;
+ }
+
+ assert!(place.projection.is_empty());
+ in_local(place.local)
+}
+
+/// Returns `true` if this `Operand` contains qualif `Q`.
+pub fn in_operand<'tcx, Q, F>(
+ cx: &ConstCx<'_, 'tcx>,
+ in_local: &mut F,
+ operand: &Operand<'tcx>,
+) -> bool
+where
+ Q: Qualif,
+ F: FnMut(Local) -> bool,
+{
+ let constant = match operand {
+ Operand::Copy(place) | Operand::Move(place) => {
+ return in_place::<Q, _>(cx, in_local, place.as_ref());
+ }
+
+ Operand::Constant(c) => c,
+ };
+
+ // Check the qualifs of the value of `const` items.
+ if let Some(ct) = constant.literal.const_for_ty() {
+ if let ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs: _, promoted }) = ct.kind()
+ {
+            // Use qualifs of the type for the promoted. Promoteds in the MIR body should be
+            // possible only for `NeedsNonConstDrop` with precise drop checking; this is the
+            // only const check performed after promotion. Verify that with an assertion.
+ assert!(promoted.is_none() || Q::ALLOW_PROMOTED);
+ // Don't peek inside trait associated constants.
+ if promoted.is_none() && cx.tcx.trait_of_item(def.did).is_none() {
+ let qualifs = if let Some((did, param_did)) = def.as_const_arg() {
+ cx.tcx.at(constant.span).mir_const_qualif_const_arg((did, param_did))
+ } else {
+ cx.tcx.at(constant.span).mir_const_qualif(def.did)
+ };
+
+ if !Q::in_qualifs(&qualifs) {
+ return false;
+ }
+
+ // Just in case the type is more specific than
+ // the definition, e.g., impl associated const
+ // with type parameters, take it into account.
+ }
+ }
+ }
+ // Otherwise use the qualifs of the type.
+ Q::in_any_value_of_ty(cx, constant.literal.ty())
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
new file mode 100644
index 000000000..60c1e4950
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
@@ -0,0 +1,384 @@
+//! Propagate `Qualif`s between locals and query the results.
+//!
+//! This contains the dataflow analysis used to track `Qualif`s on complex control-flow graphs.
+
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{self, BasicBlock, Local, Location, Statement, StatementKind};
+use rustc_mir_dataflow::fmt::DebugWithContext;
+use rustc_mir_dataflow::JoinSemiLattice;
+use rustc_mir_dataflow::{Analysis, AnalysisDomain, CallReturnPlaces};
+use rustc_span::DUMMY_SP;
+
+use std::fmt;
+use std::marker::PhantomData;
+
+use super::{qualifs, ConstCx, Qualif};
+
+/// A `Visitor` that propagates qualifs between locals. This defines the transfer function of
+/// `FlowSensitiveAnalysis`.
+///
+/// To account for indirect assignments, data flow conservatively assumes that a local becomes
+/// qualified immediately after it is borrowed or its address escapes. The borrow must allow for
+/// mutation, which includes shared borrows of places with interior mutability. The type of the
+/// borrowed place must contain the qualif.
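+///
+/// For example, after `let r = &mut x;`, or once the address of `x` escapes via
+/// `ptr::addr_of_mut!(x)`, `x` is conservatively treated as qualified: an indirect write
+/// could later store a qualified value into it.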
+struct TransferFunction<'a, 'mir, 'tcx, Q> {
+ ccx: &'a ConstCx<'mir, 'tcx>,
+ state: &'a mut State,
+ _qualif: PhantomData<Q>,
+}
+
+impl<'a, 'mir, 'tcx, Q> TransferFunction<'a, 'mir, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ fn new(ccx: &'a ConstCx<'mir, 'tcx>, state: &'a mut State) -> Self {
+ TransferFunction { ccx, state, _qualif: PhantomData }
+ }
+
+ fn initialize_state(&mut self) {
+ self.state.qualif.clear();
+ self.state.borrow.clear();
+
+ for arg in self.ccx.body.args_iter() {
+ let arg_ty = self.ccx.body.local_decls[arg].ty;
+ if Q::in_any_value_of_ty(self.ccx, arg_ty) {
+ self.state.qualif.insert(arg);
+ }
+ }
+ }
+
+ fn assign_qualif_direct(&mut self, place: &mir::Place<'tcx>, mut value: bool) {
+ debug_assert!(!place.is_indirect());
+
+ if !value {
+ for (base, _elem) in place.iter_projections() {
+ let base_ty = base.ty(self.ccx.body, self.ccx.tcx);
+ if base_ty.ty.is_union() && Q::in_any_value_of_ty(self.ccx, base_ty.ty) {
+ value = true;
+ break;
+ }
+ }
+ }
+
+ match (value, place.as_ref()) {
+ (true, mir::PlaceRef { local, .. }) => {
+ self.state.qualif.insert(local);
+ }
+
+            // For now, we do not clear the qualif if a local is overwritten in full by
+            // an unqualified rvalue (e.g. `y = 5`). This is to be consistent with
+            // aggregates, where all fields are overwritten by individual assignments and
+            // would therefore not benefit from such clearing.
+ (false, mir::PlaceRef { local: _, projection: &[] }) => {
+ // self.state.qualif.remove(*local);
+ }
+
+ _ => {}
+ }
+ }
+
+ fn apply_call_return_effect(
+ &mut self,
+ _block: BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ return_places.for_each(|place| {
+ // We cannot reason about another function's internals, so use conservative type-based
+ // qualification for the result of a function call.
+ let return_ty = place.ty(self.ccx.body, self.ccx.tcx).ty;
+ let qualif = Q::in_any_value_of_ty(self.ccx, return_ty);
+
+ if !place.is_indirect() {
+ self.assign_qualif_direct(&place, qualif);
+ }
+ });
+ }
+
+ fn address_of_allows_mutation(&self, _mt: mir::Mutability, _place: mir::Place<'tcx>) -> bool {
+ // Exact set of permissions granted by AddressOf is undecided. Conservatively assume that
+ // it might allow mutation until resolution of #56604.
+ true
+ }
+
+ fn ref_allows_mutation(&self, kind: mir::BorrowKind, place: mir::Place<'tcx>) -> bool {
+ match kind {
+ mir::BorrowKind::Mut { .. } => true,
+ mir::BorrowKind::Shared | mir::BorrowKind::Shallow | mir::BorrowKind::Unique => {
+ self.shared_borrow_allows_mutation(place)
+ }
+ }
+ }
+
+    /// `&` only allows mutation if the borrowed place is `!Freeze`.
+ ///
+ /// This assumes that it is UB to take the address of a struct field whose type is
+ /// `Freeze`, then use pointer arithmetic to derive a pointer to a *different* field of
+ /// that same struct whose type is `!Freeze`. If we decide that this is not UB, we will
+ /// have to check the type of the borrowed **local** instead of the borrowed **place**
+ /// below. See [rust-lang/unsafe-code-guidelines#134].
+ ///
+ /// [rust-lang/unsafe-code-guidelines#134]: https://github.com/rust-lang/unsafe-code-guidelines/issues/134
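+    ///
+    /// E.g., with `x: Cell<i32>`, the shared borrow `&x` still allows mutation through
+    /// `Cell::set`, so the borrowed local must be treated as potentially qualified.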
+ fn shared_borrow_allows_mutation(&self, place: mir::Place<'tcx>) -> bool {
+ !place
+ .ty(self.ccx.body, self.ccx.tcx)
+ .ty
+ .is_freeze(self.ccx.tcx.at(DUMMY_SP), self.ccx.param_env)
+ }
+}
+
+impl<'tcx, Q> Visitor<'tcx> for TransferFunction<'_, '_, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) {
+ self.super_operand(operand, location);
+
+ if !Q::IS_CLEARED_ON_MOVE {
+ return;
+ }
+
+ // If a local with no projections is moved from (e.g. `x` in `y = x`), record that
+ // it no longer needs to be dropped.
+ if let mir::Operand::Move(place) = operand {
+ if let Some(local) = place.as_local() {
+                // For backward compatibility with the `MaybeMutBorrowedLocals` used in an
+                // earlier implementation, we retain the qualif if a local had been borrowed
+                // before. This might not be strictly necessary, since the local is no
+                // longer initialized.
+ if !self.state.borrow.contains(local) {
+ self.state.qualif.remove(local);
+ }
+ }
+ }
+ }
+
+ fn visit_assign(
+ &mut self,
+ place: &mir::Place<'tcx>,
+ rvalue: &mir::Rvalue<'tcx>,
+ location: Location,
+ ) {
+ let qualif =
+ qualifs::in_rvalue::<Q, _>(self.ccx, &mut |l| self.state.qualif.contains(l), rvalue);
+ if !place.is_indirect() {
+ self.assign_qualif_direct(place, qualif);
+ }
+
+ // We need to assign qualifs to the left-hand side before visiting `rvalue` since
+ // qualifs can be cleared on move.
+ self.super_assign(place, rvalue, location);
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
+ self.super_rvalue(rvalue, location);
+
+ match rvalue {
+ mir::Rvalue::AddressOf(mt, borrowed_place) => {
+ if !borrowed_place.is_indirect()
+ && self.address_of_allows_mutation(*mt, *borrowed_place)
+ {
+ let place_ty = borrowed_place.ty(self.ccx.body, self.ccx.tcx).ty;
+ if Q::in_any_value_of_ty(self.ccx, place_ty) {
+ self.state.qualif.insert(borrowed_place.local);
+ self.state.borrow.insert(borrowed_place.local);
+ }
+ }
+ }
+
+ mir::Rvalue::Ref(_, kind, borrowed_place) => {
+ if !borrowed_place.is_indirect() && self.ref_allows_mutation(*kind, *borrowed_place)
+ {
+ let place_ty = borrowed_place.ty(self.ccx.body, self.ccx.tcx).ty;
+ if Q::in_any_value_of_ty(self.ccx, place_ty) {
+ self.state.qualif.insert(borrowed_place.local);
+ self.state.borrow.insert(borrowed_place.local);
+ }
+ }
+ }
+
+ mir::Rvalue::Cast(..)
+ | mir::Rvalue::ShallowInitBox(..)
+ | mir::Rvalue::Use(..)
+ | mir::Rvalue::CopyForDeref(..)
+ | mir::Rvalue::ThreadLocalRef(..)
+ | mir::Rvalue::Repeat(..)
+ | mir::Rvalue::Len(..)
+ | mir::Rvalue::BinaryOp(..)
+ | mir::Rvalue::CheckedBinaryOp(..)
+ | mir::Rvalue::NullaryOp(..)
+ | mir::Rvalue::UnaryOp(..)
+ | mir::Rvalue::Discriminant(..)
+ | mir::Rvalue::Aggregate(..) => {}
+ }
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ match statement.kind {
+ StatementKind::StorageDead(local) => {
+ self.state.qualif.remove(local);
+ self.state.borrow.remove(local);
+ }
+ _ => self.super_statement(statement, location),
+ }
+ }
+
+ fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+ // The effect of assignment to the return place in `TerminatorKind::Call` is not applied
+ // here; that occurs in `apply_call_return_effect`.
+
+ if let mir::TerminatorKind::DropAndReplace { value, place, .. } = &terminator.kind {
+ let qualif = qualifs::in_operand::<Q, _>(
+ self.ccx,
+ &mut |l| self.state.qualif.contains(l),
+ value,
+ );
+
+ if !place.is_indirect() {
+ self.assign_qualif_direct(place, qualif);
+ }
+ }
+
+ // We ignore borrow on drop because custom drop impls are not allowed in consts.
+ // FIXME: Reconsider if accounting for borrows in drops is necessary for const drop.
+
+ // We need to assign qualifs to the dropped location before visiting the operand that
+ // replaces it since qualifs can be cleared on move.
+ self.super_terminator(terminator, location);
+ }
+}
+
+/// The dataflow analysis used to propagate qualifs on arbitrary CFGs.
+pub(super) struct FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q> {
+ ccx: &'a ConstCx<'mir, 'tcx>,
+ _qualif: PhantomData<Q>,
+}
+
+impl<'a, 'mir, 'tcx, Q> FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ pub(super) fn new(_: Q, ccx: &'a ConstCx<'mir, 'tcx>) -> Self {
+ FlowSensitiveAnalysis { ccx, _qualif: PhantomData }
+ }
+
+ fn transfer_function(&self, state: &'a mut State) -> TransferFunction<'a, 'mir, 'tcx, Q> {
+ TransferFunction::<Q>::new(self.ccx, state)
+ }
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub(super) struct State {
+ /// Describes whether a local contains qualif.
+ pub qualif: BitSet<Local>,
+    /// Describes whether a local's address escaped and it might become qualified as a result
+    /// of an indirect mutation.
+ pub borrow: BitSet<Local>,
+}
+
+impl Clone for State {
+ fn clone(&self) -> Self {
+ State { qualif: self.qualif.clone(), borrow: self.borrow.clone() }
+ }
+
+    // The dataflow engine uses `clone_from` for domain values when possible.
+    // Providing an implementation avoids some intermediate memory allocations.
+ fn clone_from(&mut self, other: &Self) {
+ self.qualif.clone_from(&other.qualif);
+ self.borrow.clone_from(&other.borrow);
+ }
+}
+
+impl State {
+ #[inline]
+ pub(super) fn contains(&self, local: Local) -> bool {
+ self.qualif.contains(local)
+ }
+}
+
+impl<C> DebugWithContext<C> for State {
+ fn fmt_with(&self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("qualif: ")?;
+ self.qualif.fmt_with(ctxt, f)?;
+ f.write_str(" borrow: ")?;
+ self.borrow.fmt_with(ctxt, f)?;
+ Ok(())
+ }
+
+ fn fmt_diff_with(&self, old: &Self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self == old {
+ return Ok(());
+ }
+
+ if self.qualif != old.qualif {
+ f.write_str("qualif: ")?;
+ self.qualif.fmt_diff_with(&old.qualif, ctxt, f)?;
+ f.write_str("\n")?;
+ }
+
+ if self.borrow != old.borrow {
+ f.write_str("borrow: ")?;
+            self.borrow.fmt_diff_with(&old.borrow, ctxt, f)?;
+ f.write_str("\n")?;
+ }
+
+ Ok(())
+ }
+}
+
+impl JoinSemiLattice for State {
+ fn join(&mut self, other: &Self) -> bool {
+ self.qualif.join(&other.qualif) || self.borrow.join(&other.borrow)
+ }
+}
+
+impl<'tcx, Q> AnalysisDomain<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ type Domain = State;
+
+ const NAME: &'static str = Q::ANALYSIS_NAME;
+
+ fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ State {
+ qualif: BitSet::new_empty(body.local_decls.len()),
+ borrow: BitSet::new_empty(body.local_decls.len()),
+ }
+ }
+
+ fn initialize_start_block(&self, _body: &mir::Body<'tcx>, state: &mut Self::Domain) {
+ self.transfer_function(state).initialize_state();
+ }
+}
+
+impl<'tcx, Q> Analysis<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ fn apply_statement_effect(
+ &self,
+ state: &mut Self::Domain,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ self.transfer_function(state).visit_statement(statement, location);
+ }
+
+ fn apply_terminator_effect(
+ &self,
+ state: &mut Self::Domain,
+ terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ self.transfer_function(state).visit_terminator(terminator, location);
+ }
+
+ fn apply_call_return_effect(
+ &self,
+ state: &mut Self::Domain,
+ block: BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ self.transfer_function(state).apply_call_return_effect(block, return_places)
+ }
+}
diff --git a/compiler/rustc_const_eval/src/transform/mod.rs b/compiler/rustc_const_eval/src/transform/mod.rs
new file mode 100644
index 000000000..a2928bdf5
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/mod.rs
@@ -0,0 +1,3 @@
+pub mod check_consts;
+pub mod promote_consts;
+pub mod validate;
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
new file mode 100644
index 000000000..ed4d8c95d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -0,0 +1,1066 @@
+//! A pass that promotes borrows of constant rvalues.
+//!
+//! The rvalues considered constant are trees of temps,
+//! each with exactly one initialization, and holding
+//! a constant value with no interior mutability.
+//! They are placed into a new MIR constant body in
+//! `promoted` and the borrow rvalue is replaced with
+//! a `Literal::Promoted` using the index into `promoted`
+//! of that constant MIR.
+//!
+//! This pass assumes that every use is dominated by an
+//! initialization; if that does not hold, the pass can
+//! silence errors when move analysis runs after promotion
+//! on broken MIR.
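+//!
+//! For example, in `let r: &'static i32 = &(2 + 3);` the temporary holding `2 + 3` is
+//! promotable: it is lifted into its own promoted MIR body, and the borrow is rewritten
+//! to refer to that promoted constant.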
+
+use rustc_hir as hir;
+use rustc_middle::mir::traversal::ReversePostorderIter;
+use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::{self, List, TyCtxt, TypeVisitable};
+use rustc_span::Span;
+
+use rustc_index::vec::{Idx, IndexVec};
+
+use std::cell::Cell;
+use std::{cmp, iter, mem};
+
+use crate::transform::check_consts::{qualifs, ConstCx};
+
+/// A `MirPass` for promotion.
+///
+/// Promotion is the extraction of promotable temps into separate MIR bodies so they can have
+/// `'static` lifetime.
+///
+/// After this pass is run, `promoted_fragments` will hold the MIR body corresponding to each
+/// newly created `Constant`.
+#[derive(Default)]
+pub struct PromoteTemps<'tcx> {
+ pub promoted_fragments: Cell<IndexVec<Promoted, Body<'tcx>>>,
+}
+
+impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> {
+ fn phase_change(&self) -> Option<MirPhase> {
+ Some(MirPhase::ConstsPromoted)
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ // There's not really any point in promoting errorful MIR.
+ //
+ // This does not include MIR that failed const-checking, which we still try to promote.
+ if body.return_ty().references_error() {
+ tcx.sess.delay_span_bug(body.span, "PromoteTemps: MIR had errors");
+ return;
+ }
+
+ if body.source.promoted.is_some() {
+ return;
+ }
+
+ let mut rpo = traversal::reverse_postorder(body);
+ let ccx = ConstCx::new(tcx, body);
+ let (mut temps, all_candidates) = collect_temps_and_candidates(&ccx, &mut rpo);
+
+ let promotable_candidates = validate_candidates(&ccx, &mut temps, &all_candidates);
+
+ let promoted = promote_candidates(body, tcx, temps, promotable_candidates);
+ self.promoted_fragments.set(promoted);
+ }
+}
+
+/// State of a temporary during collection and promotion.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum TempState {
+ /// No references to this temp.
+ Undefined,
+ /// One direct assignment and any number of direct uses.
+ /// A borrow of this temp is promotable if the assigned
+ /// value is qualified as constant.
+ Defined { location: Location, uses: usize, valid: Result<(), ()> },
+ /// Any other combination of assignments/uses.
+ Unpromotable,
+ /// This temp was part of an rvalue which got extracted
+ /// during promotion and needs cleanup.
+ PromotedOut,
+}
+
+impl TempState {
+ pub fn is_promotable(&self) -> bool {
+ debug!("is_promotable: self={:?}", self);
+ matches!(self, TempState::Defined { .. })
+ }
+}
+
+/// A "root candidate" for promotion, which will become the
+/// returned value in a promoted MIR, unless it's a subset
+/// of a larger candidate.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct Candidate {
+ location: Location,
+}
+
+struct Collector<'a, 'tcx> {
+ ccx: &'a ConstCx<'a, 'tcx>,
+ temps: IndexVec<Local, TempState>,
+ candidates: Vec<Candidate>,
+}
+
+impl<'tcx> Visitor<'tcx> for Collector<'_, 'tcx> {
+ fn visit_local(&mut self, index: Local, context: PlaceContext, location: Location) {
+ debug!("visit_local: index={:?} context={:?} location={:?}", index, context, location);
+ // We're only interested in temporaries and the return place
+ match self.ccx.body.local_kind(index) {
+ LocalKind::Temp | LocalKind::ReturnPointer => {}
+ LocalKind::Arg | LocalKind::Var => return,
+ }
+
+        // Ignore drops: if the temp gets promoted,
+        // then it's a constant, and the drop is a no-op.
+ // Non-uses are also irrelevant.
+ if context.is_drop() || !context.is_use() {
+ debug!(
+ "visit_local: context.is_drop={:?} context.is_use={:?}",
+ context.is_drop(),
+ context.is_use(),
+ );
+ return;
+ }
+
+ let temp = &mut self.temps[index];
+ debug!("visit_local: temp={:?}", temp);
+ if *temp == TempState::Undefined {
+ match context {
+ PlaceContext::MutatingUse(MutatingUseContext::Store)
+ | PlaceContext::MutatingUse(MutatingUseContext::Call) => {
+ *temp = TempState::Defined { location, uses: 0, valid: Err(()) };
+ return;
+ }
+ _ => { /* mark as unpromotable below */ }
+ }
+ } else if let TempState::Defined { ref mut uses, .. } = *temp {
+ // We always allow borrows, even mutable ones, as we need
+ // to promote mutable borrows of some ZSTs e.g., `&mut []`.
+ let allowed_use = match context {
+ PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+ | PlaceContext::NonMutatingUse(_) => true,
+ PlaceContext::MutatingUse(_) | PlaceContext::NonUse(_) => false,
+ };
+ debug!("visit_local: allowed_use={:?}", allowed_use);
+ if allowed_use {
+ *uses += 1;
+ return;
+ }
+ /* mark as unpromotable below */
+ }
+ *temp = TempState::Unpromotable;
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+ self.super_rvalue(rvalue, location);
+
+ match *rvalue {
+ Rvalue::Ref(..) => {
+ self.candidates.push(Candidate { location });
+ }
+ _ => {}
+ }
+ }
+}
+
+pub fn collect_temps_and_candidates<'tcx>(
+ ccx: &ConstCx<'_, 'tcx>,
+ rpo: &mut ReversePostorderIter<'_, 'tcx>,
+) -> (IndexVec<Local, TempState>, Vec<Candidate>) {
+ let mut collector = Collector {
+ temps: IndexVec::from_elem(TempState::Undefined, &ccx.body.local_decls),
+ candidates: vec![],
+ ccx,
+ };
+ for (bb, data) in rpo {
+ collector.visit_basic_block_data(bb, data);
+ }
+ (collector.temps, collector.candidates)
+}
+
+/// Checks whether locals that appear in a promotion context (`Candidate`) are actually promotable.
+///
+/// This wraps a `ConstCx`, and has access to all fields of that `ConstCx` via `Deref` coercion.
+struct Validator<'a, 'tcx> {
+ ccx: &'a ConstCx<'a, 'tcx>,
+ temps: &'a mut IndexVec<Local, TempState>,
+}
+
+impl<'a, 'tcx> std::ops::Deref for Validator<'a, 'tcx> {
+ type Target = ConstCx<'a, 'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.ccx
+ }
+}
+
+struct Unpromotable;
+
+impl<'tcx> Validator<'_, 'tcx> {
+ fn validate_candidate(&mut self, candidate: Candidate) -> Result<(), Unpromotable> {
+ let loc = candidate.location;
+ let statement = &self.body[loc.block].statements[loc.statement_index];
+ match &statement.kind {
+ StatementKind::Assign(box (_, Rvalue::Ref(_, kind, place))) => {
+ // We can only promote interior borrows of promotable temps (non-temps
+ // don't get promoted anyway).
+ self.validate_local(place.local)?;
+
+ // The reference operation itself must be promotable.
+ // (Needs to come after `validate_local` to avoid ICEs.)
+ self.validate_ref(*kind, place)?;
+
+ // We do not check all the projections (they do not get promoted anyway),
+ // but we do stay away from promoting anything involving a dereference.
+ if place.projection.contains(&ProjectionElem::Deref) {
+ return Err(Unpromotable);
+ }
+
+ // We cannot promote things that need dropping, since the promoted value
+ // would not get dropped.
+ if self.qualif_local::<qualifs::NeedsDrop>(place.local) {
+ return Err(Unpromotable);
+ }
+
+ Ok(())
+ }
+ _ => bug!(),
+ }
+ }
+
+ // FIXME(eddyb) maybe cache this?
+ fn qualif_local<Q: qualifs::Qualif>(&mut self, local: Local) -> bool {
+ if let TempState::Defined { location: loc, .. } = self.temps[local] {
+ let num_stmts = self.body[loc.block].statements.len();
+
+ if loc.statement_index < num_stmts {
+ let statement = &self.body[loc.block].statements[loc.statement_index];
+ match &statement.kind {
+ StatementKind::Assign(box (_, rhs)) => qualifs::in_rvalue::<Q, _>(
+ &self.ccx,
+ &mut |l| self.qualif_local::<Q>(l),
+ rhs,
+ ),
+ _ => {
+ span_bug!(
+ statement.source_info.span,
+ "{:?} is not an assignment",
+ statement
+ );
+ }
+ }
+ } else {
+ let terminator = self.body[loc.block].terminator();
+ match &terminator.kind {
+ TerminatorKind::Call { .. } => {
+ let return_ty = self.body.local_decls[local].ty;
+ Q::in_any_value_of_ty(&self.ccx, return_ty)
+ }
+ kind => {
+ span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+ }
+ }
+ }
+ } else {
+ let span = self.body.local_decls[local].source_info.span;
+ span_bug!(span, "{:?} not promotable, qualif_local shouldn't have been called", local);
+ }
+ }
+
+ fn validate_local(&mut self, local: Local) -> Result<(), Unpromotable> {
+ if let TempState::Defined { location: loc, uses, valid } = self.temps[local] {
+ valid.or_else(|_| {
+ let ok = {
+ let block = &self.body[loc.block];
+ let num_stmts = block.statements.len();
+
+ if loc.statement_index < num_stmts {
+ let statement = &block.statements[loc.statement_index];
+ match &statement.kind {
+ StatementKind::Assign(box (_, rhs)) => self.validate_rvalue(rhs),
+ _ => {
+ span_bug!(
+ statement.source_info.span,
+ "{:?} is not an assignment",
+ statement
+ );
+ }
+ }
+ } else {
+ let terminator = block.terminator();
+ match &terminator.kind {
+ TerminatorKind::Call { func, args, .. } => {
+ self.validate_call(func, args)
+ }
+ TerminatorKind::Yield { .. } => Err(Unpromotable),
+ kind => {
+ span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+ }
+ }
+ }
+ };
+ self.temps[local] = match ok {
+ Ok(()) => TempState::Defined { location: loc, uses, valid: Ok(()) },
+ Err(_) => TempState::Unpromotable,
+ };
+ ok
+ })
+ } else {
+ Err(Unpromotable)
+ }
+ }
+
+ fn validate_place(&mut self, place: PlaceRef<'tcx>) -> Result<(), Unpromotable> {
+ match place.last_projection() {
+ None => self.validate_local(place.local),
+ Some((place_base, elem)) => {
+ // Validate topmost projection, then recurse.
+ match elem {
+ ProjectionElem::Deref => {
+ let mut promotable = false;
+ // We need to make sure this is a `Deref` of a local with no further projections.
+ // Discussion can be found at
+ // https://github.com/rust-lang/rust/pull/74945#discussion_r463063247
+ if let Some(local) = place_base.as_local() {
+                            // This is special treatment for cases like `*&STATIC`, where
+                            // `STATIC` is a global static variable.
+                            // This pattern is generated only when a global static variable
+                            // is accessed directly, and such an access can safely be
+                            // qualified for promotion.
+ if let TempState::Defined { location, .. } = self.temps[local] {
+ let def_stmt = self.body[location.block]
+ .statements
+ .get(location.statement_index);
+ if let Some(Statement {
+ kind:
+ StatementKind::Assign(box (
+ _,
+ Rvalue::Use(Operand::Constant(c)),
+ )),
+ ..
+ }) = def_stmt
+ {
+ if let Some(did) = c.check_static_ptr(self.tcx) {
+ // Evaluating a promoted may not read statics except if it got
+ // promoted from a static (this is a CTFE check). So we
+ // can only promote static accesses inside statics.
+ if let Some(hir::ConstContext::Static(..)) = self.const_kind
+ {
+ if !self.tcx.is_thread_local_static(did) {
+ promotable = true;
+ }
+ }
+ }
+ }
+ }
+ }
+ if !promotable {
+ return Err(Unpromotable);
+ }
+ }
+ ProjectionElem::Downcast(..) => {
+ return Err(Unpromotable);
+ }
+
+ ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. } => {}
+
+ ProjectionElem::Index(local) => {
+ let mut promotable = false;
+ // Only accept if we can predict the index and are indexing an array.
+ let val =
+ if let TempState::Defined { location: loc, .. } = self.temps[local] {
+ let block = &self.body[loc.block];
+ if loc.statement_index < block.statements.len() {
+ let statement = &block.statements[loc.statement_index];
+ match &statement.kind {
+ StatementKind::Assign(box (
+ _,
+ Rvalue::Use(Operand::Constant(c)),
+ )) => c.literal.try_eval_usize(self.tcx, self.param_env),
+ _ => None,
+ }
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+ if let Some(idx) = val {
+ // Determine the type of the thing we are indexing.
+ let ty = place_base.ty(self.body, self.tcx).ty;
+ match ty.kind() {
+ ty::Array(_, len) => {
+ // It's an array; determine its length.
+ if let Some(len) = len.try_eval_usize(self.tcx, self.param_env)
+ {
+ // If the index is in-bounds, go ahead.
+ if idx < len {
+ promotable = true;
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ if !promotable {
+ return Err(Unpromotable);
+ }
+
+ self.validate_local(local)?;
+ }
+
+ ProjectionElem::Field(..) => {
+ let base_ty = place_base.ty(self.body, self.tcx).ty;
+ if base_ty.is_union() {
+ // No promotion of union field accesses.
+ return Err(Unpromotable);
+ }
+ }
+ }
+
+ self.validate_place(place_base)
+ }
+ }
+ }
+
+ fn validate_operand(&mut self, operand: &Operand<'tcx>) -> Result<(), Unpromotable> {
+ match operand {
+ Operand::Copy(place) | Operand::Move(place) => self.validate_place(place.as_ref()),
+
+ // The qualifs for a constant (e.g. `HasMutInterior`) are checked in
+ // `validate_rvalue` upon access.
+ Operand::Constant(c) => {
+ if let Some(def_id) = c.check_static_ptr(self.tcx) {
+ // Only allow statics (not consts) to refer to other statics.
+ // FIXME(eddyb) does this matter at all for promotion?
+ // FIXME(RalfJung) it makes little sense to not promote this in `fn`/`const fn`,
+ // and in `const` this cannot occur anyway. The only concern is that we might
+ // promote even `let x = &STATIC` which would be useless, but this applies to
+ // promotion inside statics as well.
+ let is_static = matches!(self.const_kind, Some(hir::ConstContext::Static(_)));
+ if !is_static {
+ return Err(Unpromotable);
+ }
+
+ let is_thread_local = self.tcx.is_thread_local_static(def_id);
+ if is_thread_local {
+ return Err(Unpromotable);
+ }
+ }
+
+ Ok(())
+ }
+ }
+ }
+
+ fn validate_ref(&mut self, kind: BorrowKind, place: &Place<'tcx>) -> Result<(), Unpromotable> {
+ match kind {
+ // Reject these borrow types just to be safe.
+            // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a use case.
+ BorrowKind::Shallow | BorrowKind::Unique => return Err(Unpromotable),
+
+ BorrowKind::Shared => {
+ let has_mut_interior = self.qualif_local::<qualifs::HasMutInterior>(place.local);
+ if has_mut_interior {
+ return Err(Unpromotable);
+ }
+ }
+
+ BorrowKind::Mut { .. } => {
+ let ty = place.ty(self.body, self.tcx).ty;
+
+ // In theory, any zero-sized value could be borrowed
+ // mutably without consequences. However, only &mut []
+ // is allowed right now.
+ if let ty::Array(_, len) = ty.kind() {
+ match len.try_eval_usize(self.tcx, self.param_env) {
+ Some(0) => {}
+ _ => return Err(Unpromotable),
+ }
+ } else {
+ return Err(Unpromotable);
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ fn validate_rvalue(&mut self, rvalue: &Rvalue<'tcx>) -> Result<(), Unpromotable> {
+ match rvalue {
+ Rvalue::Use(operand) | Rvalue::Repeat(operand, _) => {
+ self.validate_operand(operand)?;
+ }
+ Rvalue::CopyForDeref(place) => {
+ let op = &Operand::Copy(*place);
+ self.validate_operand(op)?
+ }
+
+ Rvalue::Discriminant(place) | Rvalue::Len(place) => {
+ self.validate_place(place.as_ref())?
+ }
+
+ Rvalue::ThreadLocalRef(_) => return Err(Unpromotable),
+
+ // ptr-to-int casts are not possible in consts and thus not promotable
+ Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => return Err(Unpromotable),
+
+ // all other casts including int-to-ptr casts are fine, they just use the integer value
+ // at pointer type.
+ Rvalue::Cast(_, operand, _) => {
+ self.validate_operand(operand)?;
+ }
+
+ Rvalue::NullaryOp(op, _) => match op {
+ NullOp::SizeOf => {}
+ NullOp::AlignOf => {}
+ },
+
+ Rvalue::ShallowInitBox(_, _) => return Err(Unpromotable),
+
+ Rvalue::UnaryOp(op, operand) => {
+ match op {
+ // These operations can never fail.
+ UnOp::Neg | UnOp::Not => {}
+ }
+
+ self.validate_operand(operand)?;
+ }
+
+ Rvalue::BinaryOp(op, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(op, box (lhs, rhs)) => {
+ let op = *op;
+ let lhs_ty = lhs.ty(self.body, self.tcx);
+
+ if let ty::RawPtr(_) | ty::FnPtr(..) = lhs_ty.kind() {
+ // Raw and fn pointer operations are not allowed inside consts and thus not promotable.
+ assert!(matches!(
+ op,
+ BinOp::Eq
+ | BinOp::Ne
+ | BinOp::Le
+ | BinOp::Lt
+ | BinOp::Ge
+ | BinOp::Gt
+ | BinOp::Offset
+ ));
+ return Err(Unpromotable);
+ }
+
+ match op {
+ BinOp::Div | BinOp::Rem => {
+ if lhs_ty.is_integral() {
+ // Integer division: the RHS must be a non-zero const.
+ let const_val = match rhs {
+ Operand::Constant(c) => {
+ c.literal.try_eval_bits(self.tcx, self.param_env, lhs_ty)
+ }
+ _ => None,
+ };
+ match const_val {
+ Some(x) if x != 0 => {} // okay
+ _ => return Err(Unpromotable), // value not known or 0 -- not okay
+ }
+ }
+ }
+ // The remaining operations can never fail.
+ BinOp::Eq
+ | BinOp::Ne
+ | BinOp::Le
+ | BinOp::Lt
+ | BinOp::Ge
+ | BinOp::Gt
+ | BinOp::Offset
+ | BinOp::Add
+ | BinOp::Sub
+ | BinOp::Mul
+ | BinOp::BitXor
+ | BinOp::BitAnd
+ | BinOp::BitOr
+ | BinOp::Shl
+ | BinOp::Shr => {}
+ }
+
+ self.validate_operand(lhs)?;
+ self.validate_operand(rhs)?;
+ }
+
+ Rvalue::AddressOf(_, place) => {
+ // We accept `&raw *`, i.e., raw reborrows -- creating a raw pointer is
+ // no problem, only using it is.
+ if let Some((place_base, ProjectionElem::Deref)) = place.as_ref().last_projection()
+ {
+ let base_ty = place_base.ty(self.body, self.tcx).ty;
+ if let ty::Ref(..) = base_ty.kind() {
+ return self.validate_place(place_base);
+ }
+ }
+ return Err(Unpromotable);
+ }
+
+ Rvalue::Ref(_, kind, place) => {
+ // Special-case reborrows to be more like a copy of the reference.
+ let mut place_simplified = place.as_ref();
+ if let Some((place_base, ProjectionElem::Deref)) =
+ place_simplified.last_projection()
+ {
+ let base_ty = place_base.ty(self.body, self.tcx).ty;
+ if let ty::Ref(..) = base_ty.kind() {
+ place_simplified = place_base;
+ }
+ }
+
+ self.validate_place(place_simplified)?;
+
+ // Check that the reference is fine (using the original place!).
+ // (Needs to come after `validate_place` to avoid ICEs.)
+ self.validate_ref(*kind, place)?;
+ }
+
+ Rvalue::Aggregate(_, operands) => {
+ for o in operands {
+ self.validate_operand(o)?;
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ fn validate_call(
+ &mut self,
+ callee: &Operand<'tcx>,
+ args: &[Operand<'tcx>],
+ ) -> Result<(), Unpromotable> {
+ let fn_ty = callee.ty(self.body, self.tcx);
+
+ // Inside const/static items, we promote all (eligible) function calls.
+ // Everywhere else, we require `#[rustc_promotable]` on the callee.
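+        // E.g. (hypothetical callee): in a runtime `fn`, `&foo()` is promoted only if
+        // `foo` is a `const fn` annotated with `#[rustc_promotable]`.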
+ let promote_all_const_fn = matches!(
+ self.const_kind,
+ Some(hir::ConstContext::Static(_) | hir::ConstContext::Const)
+ );
+ if !promote_all_const_fn {
+ if let ty::FnDef(def_id, _) = *fn_ty.kind() {
+ // Never promote runtime `const fn` calls of
+ // functions without `#[rustc_promotable]`.
+ if !self.tcx.is_promotable_const_fn(def_id) {
+ return Err(Unpromotable);
+ }
+ }
+ }
+
+ let is_const_fn = match *fn_ty.kind() {
+ ty::FnDef(def_id, _) => self.tcx.is_const_fn_raw(def_id),
+ _ => false,
+ };
+ if !is_const_fn {
+ return Err(Unpromotable);
+ }
+
+ self.validate_operand(callee)?;
+ for arg in args {
+ self.validate_operand(arg)?;
+ }
+
+ Ok(())
+ }
+}
+
+// FIXME(eddyb) remove the differences for promotability in `static`, `const`, `const fn`.
+pub fn validate_candidates(
+ ccx: &ConstCx<'_, '_>,
+ temps: &mut IndexVec<Local, TempState>,
+ candidates: &[Candidate],
+) -> Vec<Candidate> {
+ let mut validator = Validator { ccx, temps };
+
+ candidates
+ .iter()
+ .copied()
+ .filter(|&candidate| validator.validate_candidate(candidate).is_ok())
+ .collect()
+}
+
+struct Promoter<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ source: &'a mut Body<'tcx>,
+ promoted: Body<'tcx>,
+ temps: &'a mut IndexVec<Local, TempState>,
+ extra_statements: &'a mut Vec<(Location, Statement<'tcx>)>,
+
+ /// If true, all nested temps are also kept in the
+ /// source MIR, not moved to the promoted MIR.
+ keep_original: bool,
+}
+
+impl<'a, 'tcx> Promoter<'a, 'tcx> {
+ fn new_block(&mut self) -> BasicBlock {
+ let span = self.promoted.span;
+ self.promoted.basic_blocks_mut().push(BasicBlockData {
+ statements: vec![],
+ terminator: Some(Terminator {
+ source_info: SourceInfo::outermost(span),
+ kind: TerminatorKind::Return,
+ }),
+ is_cleanup: false,
+ })
+ }
+
+ fn assign(&mut self, dest: Local, rvalue: Rvalue<'tcx>, span: Span) {
+ let last = self.promoted.basic_blocks().last().unwrap();
+ let data = &mut self.promoted[last];
+ data.statements.push(Statement {
+ source_info: SourceInfo::outermost(span),
+ kind: StatementKind::Assign(Box::new((Place::from(dest), rvalue))),
+ });
+ }
+
+ fn is_temp_kind(&self, local: Local) -> bool {
+ self.source.local_kind(local) == LocalKind::Temp
+ }
+
+ /// Copies the initialization of this temp to the
+ /// promoted MIR, recursing through temps.
+ fn promote_temp(&mut self, temp: Local) -> Local {
+ let old_keep_original = self.keep_original;
+ let loc = match self.temps[temp] {
+ TempState::Defined { location, uses, .. } if uses > 0 => {
+ if uses > 1 {
+ self.keep_original = true;
+ }
+ location
+ }
+ state => {
+ span_bug!(self.promoted.span, "{:?} not promotable: {:?}", temp, state);
+ }
+ };
+ if !self.keep_original {
+ self.temps[temp] = TempState::PromotedOut;
+ }
+
+ let num_stmts = self.source[loc.block].statements.len();
+ let new_temp = self.promoted.local_decls.push(LocalDecl::new(
+ self.source.local_decls[temp].ty,
+ self.source.local_decls[temp].source_info.span,
+ ));
+
+ debug!("promote({:?} @ {:?}/{:?}, {:?})", temp, loc, num_stmts, self.keep_original);
+
+ // First, take the Rvalue or Call out of the source MIR,
+ // or duplicate it, depending on keep_original.
+ if loc.statement_index < num_stmts {
+ let (mut rvalue, source_info) = {
+ let statement = &mut self.source[loc.block].statements[loc.statement_index];
+ let StatementKind::Assign(box (_, ref mut rhs)) = statement.kind else {
+ span_bug!(
+ statement.source_info.span,
+ "{:?} is not an assignment",
+ statement
+ );
+ };
+
+ (
+ if self.keep_original {
+ rhs.clone()
+ } else {
+ let unit = Rvalue::Use(Operand::Constant(Box::new(Constant {
+ span: statement.source_info.span,
+ user_ty: None,
+ literal: ConstantKind::zero_sized(self.tcx.types.unit),
+ })));
+ mem::replace(rhs, unit)
+ },
+ statement.source_info,
+ )
+ };
+
+ self.visit_rvalue(&mut rvalue, loc);
+ self.assign(new_temp, rvalue, source_info.span);
+ } else {
+ let terminator = if self.keep_original {
+ self.source[loc.block].terminator().clone()
+ } else {
+ let terminator = self.source[loc.block].terminator_mut();
+ let target = match terminator.kind {
+ TerminatorKind::Call { target: Some(target), .. } => target,
+ ref kind => {
+ span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+ }
+ };
+ Terminator {
+ source_info: terminator.source_info,
+ kind: mem::replace(&mut terminator.kind, TerminatorKind::Goto { target }),
+ }
+ };
+
+ match terminator.kind {
+ TerminatorKind::Call { mut func, mut args, from_hir_call, fn_span, .. } => {
+ self.visit_operand(&mut func, loc);
+ for arg in &mut args {
+ self.visit_operand(arg, loc);
+ }
+
+ let last = self.promoted.basic_blocks().last().unwrap();
+ let new_target = self.new_block();
+
+ *self.promoted[last].terminator_mut() = Terminator {
+ kind: TerminatorKind::Call {
+ func,
+ args,
+ cleanup: None,
+ destination: Place::from(new_temp),
+ target: Some(new_target),
+ from_hir_call,
+ fn_span,
+ },
+ source_info: SourceInfo::outermost(terminator.source_info.span),
+ ..terminator
+ };
+ }
+ ref kind => {
+ span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+ }
+ };
+ };
+
+ self.keep_original = old_keep_original;
+ new_temp
+ }
+
+ fn promote_candidate(mut self, candidate: Candidate, next_promoted_id: usize) -> Body<'tcx> {
+ let def = self.source.source.with_opt_param();
+ let mut rvalue = {
+ let promoted = &mut self.promoted;
+ let promoted_id = Promoted::new(next_promoted_id);
+ let tcx = self.tcx;
+ let mut promoted_operand = |ty, span| {
+ promoted.span = span;
+ promoted.local_decls[RETURN_PLACE] = LocalDecl::new(ty, span);
+ let _const = tcx.mk_const(ty::ConstS {
+ ty,
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def,
+ substs: InternalSubsts::for_item(tcx, def.did, |param, _| {
+ if let ty::GenericParamDefKind::Lifetime = param.kind {
+ tcx.lifetimes.re_erased.into()
+ } else {
+ tcx.mk_param_from_def(param)
+ }
+ }),
+ promoted: Some(promoted_id),
+ }),
+ });
+
+ Operand::Constant(Box::new(Constant {
+ span,
+ user_ty: None,
+ literal: ConstantKind::from_const(_const, tcx),
+ }))
+ };
+ let blocks = self.source.basic_blocks.as_mut();
+ let local_decls = &mut self.source.local_decls;
+ let loc = candidate.location;
+ let statement = &mut blocks[loc.block].statements[loc.statement_index];
+ match statement.kind {
+ StatementKind::Assign(box (
+ _,
+ Rvalue::Ref(ref mut region, borrow_kind, ref mut place),
+ )) => {
+ // Use the underlying local for this (necessarily interior) borrow.
+ let ty = local_decls[place.local].ty;
+ let span = statement.source_info.span;
+
+ let ref_ty = tcx.mk_ref(
+ tcx.lifetimes.re_erased,
+ ty::TypeAndMut { ty, mutbl: borrow_kind.to_mutbl_lossy() },
+ );
+
+ *region = tcx.lifetimes.re_erased;
+
+ let mut projection = vec![PlaceElem::Deref];
+ projection.extend(place.projection);
+ place.projection = tcx.intern_place_elems(&projection);
+
+ // Create a temp to hold the promoted reference.
+ // This is because `*r` requires `r` to be a local,
+ // otherwise we would use the `promoted` directly.
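+                    // Sketch with hypothetical locals: `_1 = &(_2.f)` becomes
+                    // `_1 = &((*_3).f)` in the source body, where `_3` is the
+                    // new temp initialized from the promoted constant below.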
+ let mut promoted_ref = LocalDecl::new(ref_ty, span);
+ promoted_ref.source_info = statement.source_info;
+ let promoted_ref = local_decls.push(promoted_ref);
+ assert_eq!(self.temps.push(TempState::Unpromotable), promoted_ref);
+
+ let promoted_ref_statement = Statement {
+ source_info: statement.source_info,
+ kind: StatementKind::Assign(Box::new((
+ Place::from(promoted_ref),
+ Rvalue::Use(promoted_operand(ref_ty, span)),
+ ))),
+ };
+ self.extra_statements.push((loc, promoted_ref_statement));
+
+ Rvalue::Ref(
+ tcx.lifetimes.re_erased,
+ borrow_kind,
+ Place {
+ local: mem::replace(&mut place.local, promoted_ref),
+ projection: List::empty(),
+ },
+ )
+ }
+ _ => bug!(),
+ }
+ };
+
+ assert_eq!(self.new_block(), START_BLOCK);
+ self.visit_rvalue(
+ &mut rvalue,
+ Location { block: BasicBlock::new(0), statement_index: usize::MAX },
+ );
+
+ let span = self.promoted.span;
+ self.assign(RETURN_PLACE, rvalue, span);
+ self.promoted
+ }
+}
+
+/// Replaces all temporaries with their promoted counterparts.
+impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
+ if self.is_temp_kind(*local) {
+ *local = self.promote_temp(*local);
+ }
+ }
+}
+
+pub fn promote_candidates<'tcx>(
+ body: &mut Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ mut temps: IndexVec<Local, TempState>,
+ candidates: Vec<Candidate>,
+) -> IndexVec<Promoted, Body<'tcx>> {
+ // Visit candidates in reverse, in case they're nested.
+ debug!("promote_candidates({:?})", candidates);
+
+ let mut promotions = IndexVec::new();
+
+ let mut extra_statements = vec![];
+ for candidate in candidates.into_iter().rev() {
+ let Location { block, statement_index } = candidate.location;
+ if let StatementKind::Assign(box (place, _)) = &body[block].statements[statement_index].kind
+ {
+ if let Some(local) = place.as_local() {
+ if temps[local] == TempState::PromotedOut {
+ // Already promoted.
+ continue;
+ }
+ }
+ }
+
+        // Declare the return-place local so that `mir::Body::new` doesn't complain.
+ let initial_locals = iter::once(LocalDecl::new(tcx.types.never, body.span)).collect();
+
+ let mut scope = body.source_scopes[body.source_info(candidate.location).scope].clone();
+ scope.parent_scope = None;
+
+ let promoted = Body::new(
+ body.source, // `promoted` gets filled in below
+ IndexVec::new(),
+ IndexVec::from_elem_n(scope, 1),
+ initial_locals,
+ IndexVec::new(),
+ 0,
+ vec![],
+ body.span,
+ body.generator_kind(),
+ body.tainted_by_errors,
+ );
+
+ let promoter = Promoter {
+ promoted,
+ tcx,
+ source: body,
+ temps: &mut temps,
+ extra_statements: &mut extra_statements,
+ keep_original: false,
+ };
+
+ let mut promoted = promoter.promote_candidate(candidate, promotions.len());
+ promoted.source.promoted = Some(promotions.next_index());
+ promotions.push(promoted);
+ }
+
+ // Insert each of `extra_statements` before its indicated location, which
+ // has to be done in reverse location order, to not invalidate the rest.
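+    // E.g. with pending inserts at statement indices 1 and 3 of the same block,
+    // inserting at index 1 first would shift the second target to index 4;
+    // processing in reverse keeps every remaining index valid.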
+ extra_statements.sort_by_key(|&(loc, _)| cmp::Reverse(loc));
+ for (loc, statement) in extra_statements {
+ body[loc.block].statements.insert(loc.statement_index, statement);
+ }
+
+    // Eliminate assignments to, and drops of, promoted temps.
+ let promoted = |index: Local| temps[index] == TempState::PromotedOut;
+ for block in body.basic_blocks_mut() {
+ block.statements.retain(|statement| match &statement.kind {
+ StatementKind::Assign(box (place, _)) => {
+ if let Some(index) = place.as_local() {
+ !promoted(index)
+ } else {
+ true
+ }
+ }
+ StatementKind::StorageLive(index) | StatementKind::StorageDead(index) => {
+ !promoted(*index)
+ }
+ _ => true,
+ });
+ let terminator = block.terminator_mut();
+ if let TerminatorKind::Drop { place, target, .. } = &terminator.kind {
+ if let Some(index) = place.as_local() {
+ if promoted(index) {
+ terminator.kind = TerminatorKind::Goto { target: *target };
+ }
+ }
+ }
+ }
+
+ promotions
+}
+
+/// This function returns `true` if the function being called in the array
+/// repeat expression is a `const` function.
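+/// E.g. `[foo(); N]` is such an expression when `foo` is a `const fn`
+/// (`foo` being a hypothetical name used for illustration).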
+pub fn is_const_fn_in_array_repeat_expression<'tcx>(
+ ccx: &ConstCx<'_, 'tcx>,
+ place: &Place<'tcx>,
+ body: &Body<'tcx>,
+) -> bool {
+ match place.as_local() {
+ // rule out cases such as: `let my_var = some_fn(); [my_var; N]`
+ Some(local) if body.local_decls[local].is_user_variable() => return false,
+ None => return false,
+ _ => {}
+ }
+
+ for block in body.basic_blocks() {
+ if let Some(Terminator { kind: TerminatorKind::Call { func, destination, .. }, .. }) =
+ &block.terminator
+ {
+ if let Operand::Constant(box Constant { literal, .. }) = func {
+ if let ty::FnDef(def_id, _) = *literal.ty().kind() {
+ if destination == place {
+ if ccx.tcx.is_const_fn(def_id) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ false
+}
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
new file mode 100644
index 000000000..15e820f2d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -0,0 +1,913 @@
+//! Validates the MIR to ensure that invariants are upheld.
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_index::bit_set::BitSet;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::mir::interpret::Scalar;
+use rustc_middle::mir::visit::NonUseContext::VarDebugInfo;
+use rustc_middle::mir::visit::{PlaceContext, Visitor};
+use rustc_middle::mir::{
+ traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, CastKind, Local, Location,
+ MirPass, MirPhase, Operand, Place, PlaceElem, PlaceRef, ProjectionElem, Rvalue, SourceScope,
+ Statement, StatementKind, Terminator, TerminatorKind, UnOp, START_BLOCK,
+};
+use rustc_middle::ty::fold::BottomUpFolder;
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeFoldable, TypeVisitable};
+use rustc_mir_dataflow::impls::MaybeStorageLive;
+use rustc_mir_dataflow::storage::always_storage_live_locals;
+use rustc_mir_dataflow::{Analysis, ResultsCursor};
+use rustc_target::abi::{Size, VariantIdx};
+
+#[derive(Copy, Clone, Debug)]
+enum EdgeKind {
+ Unwind,
+ Normal,
+}
+
+pub struct Validator {
+ /// Describes at which point in the pipeline this validation is happening.
+ pub when: String,
+ /// The phase for which we are upholding the dialect. If the given phase forbids a specific
+ /// element, this validator will now emit errors if that specific element is encountered.
+ /// Note that phases that change the dialect cause all *following* phases to check the
+ /// invariants of the new dialect. A phase that changes dialects never checks the new invariants
+ /// itself.
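+    ///
+    /// E.g. a validator running at `MirPhase::DropsLowered` or later rejects
+    /// `DropAndReplace` terminators (see `visit_terminator` below).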
+ pub mir_phase: MirPhase,
+}
+
+impl<'tcx> MirPass<'tcx> for Validator {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        // FIXME(JakobDegen): These bodies are never instantiated in codegen anyway, so it's
+        // not terribly important that they pass the validator. However, I think other passes
+        // might still see them, in which case they might be surprised. It would probably be
+        // better if we didn't put this through the MIR pipeline at all.
+ if matches!(body.source.instance, InstanceDef::Intrinsic(..) | InstanceDef::Virtual(..)) {
+ return;
+ }
+ let def_id = body.source.def_id();
+ let param_env = tcx.param_env(def_id);
+ let mir_phase = self.mir_phase;
+
+ let always_live_locals = always_storage_live_locals(body);
+ let storage_liveness = MaybeStorageLive::new(always_live_locals)
+ .into_engine(tcx, body)
+ .iterate_to_fixpoint()
+ .into_results_cursor(body);
+
+ TypeChecker {
+ when: &self.when,
+ body,
+ tcx,
+ param_env,
+ mir_phase,
+ reachable_blocks: traversal::reachable_as_bitset(body),
+ storage_liveness,
+ place_cache: Vec::new(),
+ value_cache: Vec::new(),
+ }
+ .visit_body(body);
+ }
+}
+
+/// Returns whether the two types are equal up to lifetimes.
+/// All lifetimes, including higher-ranked ones, get ignored for this comparison.
+/// (This is unlike the `erasing_regions` methods, which keep higher-ranked lifetimes for soundness reasons.)
+///
+/// The point of this function is to approximate "equal up to subtyping". However,
+/// the approximation is incorrect as variance is ignored.
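+///
+/// For instance, `&'a u32` and `&'b u32` compare equal here, and so do
+/// `for<'a> fn(&'a u32)` and `fn(&'static u32)` (illustrative examples).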
+pub fn equal_up_to_regions<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ src: Ty<'tcx>,
+ dest: Ty<'tcx>,
+) -> bool {
+ // Fast path.
+ if src == dest {
+ return true;
+ }
+
+ // Normalize lifetimes away on both sides, then compare.
+ let normalize = |ty: Ty<'tcx>| {
+ tcx.normalize_erasing_regions(
+ param_env,
+ ty.fold_with(&mut BottomUpFolder {
+ tcx,
+ // FIXME: We erase all late-bound lifetimes, but this is not fully correct.
+ // If you have a type like `<for<'a> fn(&'a u32) as SomeTrait>::Assoc`,
+ // this is not necessarily equivalent to `<fn(&'static u32) as SomeTrait>::Assoc`,
+            // since one may have an `impl SomeTrait for fn(&u32)` and
+ // `impl SomeTrait for fn(&'static u32)` at the same time which
+ // specify distinct values for Assoc. (See also #56105)
+ lt_op: |_| tcx.lifetimes.re_erased,
+ // Leave consts and types unchanged.
+ ct_op: |ct| ct,
+ ty_op: |ty| ty,
+ }),
+ )
+ };
+ tcx.infer_ctxt().enter(|infcx| infcx.can_eq(param_env, normalize(src), normalize(dest)).is_ok())
+}
+
+struct TypeChecker<'a, 'tcx> {
+ when: &'a str,
+ body: &'a Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ mir_phase: MirPhase,
+ reachable_blocks: BitSet<BasicBlock>,
+ storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive>,
+ place_cache: Vec<PlaceRef<'tcx>>,
+ value_cache: Vec<u128>,
+}
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+ fn fail(&self, location: Location, msg: impl AsRef<str>) {
+ let span = self.body.source_info(location).span;
+ // We use `delay_span_bug` as we might see broken MIR when other errors have already
+ // occurred.
+ self.tcx.sess.diagnostic().delay_span_bug(
+ span,
+ &format!(
+ "broken MIR in {:?} ({}) at {:?}:\n{}",
+ self.body.source.instance,
+ self.when,
+ location,
+ msg.as_ref()
+ ),
+ );
+ }
+
+ fn check_edge(&self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
+ if bb == START_BLOCK {
+ self.fail(location, "start block must not have predecessors")
+ }
+ if let Some(bb) = self.body.basic_blocks().get(bb) {
+ let src = self.body.basic_blocks().get(location.block).unwrap();
+ match (src.is_cleanup, bb.is_cleanup, edge_kind) {
+ // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
+ (false, false, EdgeKind::Normal)
+ // Non-cleanup blocks can jump to cleanup blocks along unwind edges
+ | (false, true, EdgeKind::Unwind)
+ // Cleanup blocks can jump to cleanup blocks along non-unwind edges
+ | (true, true, EdgeKind::Normal) => {}
+ // All other jumps are invalid
+ _ => {
+ self.fail(
+ location,
+ format!(
+ "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
+ edge_kind,
+ bb,
+ src.is_cleanup,
+ bb.is_cleanup,
+ )
+ )
+ }
+ }
+ } else {
+ self.fail(location, format!("encountered jump to invalid basic block {:?}", bb))
+ }
+ }
+
+ /// Check if src can be assigned into dest.
+ /// This is not precise, it will accept some incorrect assignments.
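+    /// (E.g. it accepts assignments between types that differ only in their
+    /// lifetimes, even where variance would make the assignment unsound.)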
+ fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
+ // Fast path before we normalize.
+ if src == dest {
+ // Equal types, all is good.
+ return true;
+ }
+ // Normalization reveals opaque types, but we may be validating MIR while computing
+ // said opaque types, causing cycles.
+ if (src, dest).has_opaque_types() {
+ return true;
+ }
+ // Normalize projections and things like that.
+ let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
+ let src = self.tcx.normalize_erasing_regions(param_env, src);
+ let dest = self.tcx.normalize_erasing_regions(param_env, dest);
+
+ // Type-changing assignments can happen when subtyping is used. While
+ // all normal lifetimes are erased, higher-ranked types with their
+ // late-bound lifetimes are still around and can lead to type
+ // differences. So we compare ignoring lifetimes.
+ equal_up_to_regions(self.tcx, param_env, src, dest)
+ }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
+ fn visit_local(&mut self, local: Local, context: PlaceContext, location: Location) {
+ if self.body.local_decls.get(local).is_none() {
+ self.fail(
+ location,
+ format!("local {:?} has no corresponding declaration in `body.local_decls`", local),
+ );
+ }
+
+ if self.reachable_blocks.contains(location.block) && context.is_use() {
+            // We check that the local is live whenever it is used. Technically, violating this
+            // restriction is only UB and does not by itself make the MIR ill-formed. This means
+            // that an optimization which turns MIR that already has UB into MIR that fails this
+            // check is not necessarily wrong. However, we have no such optimizations at the
+            // moment, and so we include this check anyway to help us catch bugs. If you happen
+            // to write an optimization that might cause this to incorrectly fire, feel free to
+            // remove this check.
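+            // E.g. a use of `_1` after `StorageDead(_1)` (with no intervening
+            // `StorageLive(_1)`) is reported here.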
+ self.storage_liveness.seek_after_primary_effect(location);
+ let locals_with_storage = self.storage_liveness.get();
+ if !locals_with_storage.contains(local) {
+ self.fail(location, format!("use of local {:?}, which has no storage here", local));
+ }
+ }
+ }
+
+ fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
+ // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
+ if self.tcx.sess.opts.unstable_opts.validate_mir && self.mir_phase < MirPhase::DropsLowered
+ {
+ // `Operand::Copy` is only supposed to be used with `Copy` types.
+ if let Operand::Copy(place) = operand {
+ let ty = place.ty(&self.body.local_decls, self.tcx).ty;
+ let span = self.body.source_info(location).span;
+
+ if !ty.is_copy_modulo_regions(self.tcx.at(span), self.param_env) {
+ self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
+ }
+ }
+ }
+
+ self.super_operand(operand, location);
+ }
+
+ fn visit_projection_elem(
+ &mut self,
+ local: Local,
+ proj_base: &[PlaceElem<'tcx>],
+ elem: PlaceElem<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ match elem {
+ ProjectionElem::Index(index) => {
+ let index_ty = self.body.local_decls[index].ty;
+ if index_ty != self.tcx.types.usize {
+ self.fail(location, format!("bad index ({:?} != usize)", index_ty))
+ }
+ }
+ ProjectionElem::Deref if self.mir_phase >= MirPhase::GeneratorsLowered => {
+ let base_ty = Place::ty_from(local, proj_base, &self.body.local_decls, self.tcx).ty;
+
+ if base_ty.is_box() {
+ self.fail(
+ location,
+ format!("{:?} dereferenced after ElaborateBoxDerefs", base_ty),
+ )
+ }
+ }
+ ProjectionElem::Field(f, ty) => {
+ let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
+ let parent_ty = parent.ty(&self.body.local_decls, self.tcx);
+ let fail_out_of_bounds = |this: &Self, location| {
+ this.fail(location, format!("Out of bounds field {:?} for {:?}", f, parent_ty));
+ };
+ let check_equal = |this: &Self, location, f_ty| {
+ if !this.mir_assign_valid_types(ty, f_ty) {
+ this.fail(
+ location,
+ format!(
+ "Field projection `{:?}.{:?}` specified type `{:?}`, but actual type is {:?}",
+ parent, f, ty, f_ty
+ )
+ )
+ }
+ };
+
+ let kind = match parent_ty.ty.kind() {
+ &ty::Opaque(def_id, substs) => {
+ self.tcx.bound_type_of(def_id).subst(self.tcx, substs).kind()
+ }
+ kind => kind,
+ };
+
+ match kind {
+ ty::Tuple(fields) => {
+ let Some(f_ty) = fields.get(f.as_usize()) else {
+ fail_out_of_bounds(self, location);
+ return;
+ };
+ check_equal(self, location, *f_ty);
+ }
+ ty::Adt(adt_def, substs) => {
+ let var = parent_ty.variant_index.unwrap_or(VariantIdx::from_u32(0));
+ let Some(field) = adt_def.variant(var).fields.get(f.as_usize()) else {
+ fail_out_of_bounds(self, location);
+ return;
+ };
+ check_equal(self, location, field.ty(self.tcx, substs));
+ }
+ ty::Closure(_, substs) => {
+ let substs = substs.as_closure();
+ let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
+ fail_out_of_bounds(self, location);
+ return;
+ };
+ check_equal(self, location, f_ty);
+ }
+ &ty::Generator(def_id, substs, _) => {
+ let f_ty = if let Some(var) = parent_ty.variant_index {
+ let gen_body = if def_id == self.body.source.def_id() {
+ self.body
+ } else {
+ self.tcx.optimized_mir(def_id)
+ };
+
+ let Some(layout) = gen_body.generator_layout() else {
+ self.fail(location, format!("No generator layout for {:?}", parent_ty));
+ return;
+ };
+
+ let Some(&local) = layout.variant_fields[var].get(f) else {
+ fail_out_of_bounds(self, location);
+ return;
+ };
+
+ let Some(&f_ty) = layout.field_tys.get(local) else {
+ self.fail(location, format!("Out of bounds local {:?} for {:?}", local, parent_ty));
+ return;
+ };
+
+ f_ty
+ } else {
+ let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
+ fail_out_of_bounds(self, location);
+ return;
+ };
+
+ f_ty
+ };
+
+ check_equal(self, location, f_ty);
+ }
+ _ => {
+ self.fail(location, format!("{:?} does not have fields", parent_ty.ty));
+ }
+ }
+ }
+ _ => {}
+ }
+ self.super_projection_elem(local, proj_base, elem, context, location);
+ }
+
+ fn visit_place(&mut self, place: &Place<'tcx>, cntxt: PlaceContext, location: Location) {
+ // Set off any `bug!`s in the type computation code
+ let _ = place.ty(&self.body.local_decls, self.tcx);
+
+ if self.mir_phase >= MirPhase::Derefered
+ && place.projection.len() > 1
+ && cntxt != PlaceContext::NonUse(VarDebugInfo)
+ && place.projection[1..].contains(&ProjectionElem::Deref)
+ {
+ self.fail(location, format!("{:?}, has deref at the wrong place", place));
+ }
+
+ self.super_place(place, cntxt, location);
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+ macro_rules! check_kinds {
+ ($t:expr, $text:literal, $($patterns:tt)*) => {
+ if !matches!(($t).kind(), $($patterns)*) {
+ self.fail(location, format!($text, $t));
+ }
+ };
+ }
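+        // E.g. `check_kinds!(ty, "Cannot negate type {:?}", ty::Int(..))`
+        // raises a validation failure unless `ty.kind()` matches the pattern.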
+ match rvalue {
+ Rvalue::Use(_) | Rvalue::CopyForDeref(_) => {}
+ Rvalue::Aggregate(agg_kind, _) => {
+ let disallowed = match **agg_kind {
+ AggregateKind::Array(..) => false,
+ AggregateKind::Generator(..) => self.mir_phase >= MirPhase::GeneratorsLowered,
+ _ => self.mir_phase >= MirPhase::Deaggregated,
+ };
+ if disallowed {
+ self.fail(
+ location,
+ format!("{:?} have been lowered to field assignments", rvalue),
+ )
+ }
+ }
+ Rvalue::Ref(_, BorrowKind::Shallow, _) => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase",
+ );
+ }
+ }
+ Rvalue::Ref(..) => {}
+ Rvalue::Len(p) => {
+ let pty = p.ty(&self.body.local_decls, self.tcx).ty;
+ check_kinds!(
+ pty,
+ "Cannot compute length of non-array type {:?}",
+ ty::Array(..) | ty::Slice(..)
+ );
+ }
+ Rvalue::BinaryOp(op, vals) => {
+ use BinOp::*;
+ let a = vals.0.ty(&self.body.local_decls, self.tcx);
+ let b = vals.1.ty(&self.body.local_decls, self.tcx);
+ match op {
+ Offset => {
+ check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..));
+ if b != self.tcx.types.isize && b != self.tcx.types.usize {
+ self.fail(location, format!("Cannot offset by non-isize type {:?}", b));
+ }
+ }
+ Eq | Lt | Le | Ne | Ge | Gt => {
+ for x in [a, b] {
+ check_kinds!(
+ x,
+ "Cannot compare type {:?}",
+ ty::Bool
+ | ty::Char
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(..)
+ | ty::RawPtr(..)
+ | ty::FnPtr(..)
+ )
+ }
+ // The function pointer types can have lifetimes
+ if !self.mir_assign_valid_types(a, b) {
+ self.fail(
+ location,
+ format!("Cannot compare unequal types {:?} and {:?}", a, b),
+ );
+ }
+ }
+ Shl | Shr => {
+ for x in [a, b] {
+ check_kinds!(
+ x,
+ "Cannot shift non-integer type {:?}",
+ ty::Uint(..) | ty::Int(..)
+ )
+ }
+ }
+ BitAnd | BitOr | BitXor => {
+ for x in [a, b] {
+ check_kinds!(
+ x,
+ "Cannot perform bitwise op on type {:?}",
+ ty::Uint(..) | ty::Int(..) | ty::Bool
+ )
+ }
+ if a != b {
+ self.fail(
+ location,
+ format!(
+ "Cannot perform bitwise op on unequal types {:?} and {:?}",
+ a, b
+ ),
+ );
+ }
+ }
+ Add | Sub | Mul | Div | Rem => {
+ for x in [a, b] {
+ check_kinds!(
+ x,
+ "Cannot perform arithmetic on type {:?}",
+ ty::Uint(..) | ty::Int(..) | ty::Float(..)
+ )
+ }
+ if a != b {
+ self.fail(
+ location,
+ format!(
+ "Cannot perform arithmetic on unequal types {:?} and {:?}",
+ a, b
+ ),
+ );
+ }
+ }
+ }
+ }
+ Rvalue::CheckedBinaryOp(op, vals) => {
+ use BinOp::*;
+ let a = vals.0.ty(&self.body.local_decls, self.tcx);
+ let b = vals.1.ty(&self.body.local_decls, self.tcx);
+ match op {
+ Add | Sub | Mul => {
+ for x in [a, b] {
+ check_kinds!(
+ x,
+ "Cannot perform checked arithmetic on type {:?}",
+ ty::Uint(..) | ty::Int(..)
+ )
+ }
+ if a != b {
+ self.fail(
+ location,
+ format!(
+ "Cannot perform checked arithmetic on unequal types {:?} and {:?}",
+ a, b
+ ),
+ );
+ }
+ }
+ Shl | Shr => {
+ for x in [a, b] {
+ check_kinds!(
+ x,
+ "Cannot perform checked shift on non-integer type {:?}",
+ ty::Uint(..) | ty::Int(..)
+ )
+ }
+ }
+ _ => self.fail(location, format!("There is no checked version of {:?}", op)),
+ }
+ }
+ Rvalue::UnaryOp(op, operand) => {
+ let a = operand.ty(&self.body.local_decls, self.tcx);
+ match op {
+ UnOp::Neg => {
+ check_kinds!(a, "Cannot negate type {:?}", ty::Int(..) | ty::Float(..))
+ }
+ UnOp::Not => {
+ check_kinds!(
+ a,
+ "Cannot binary not type {:?}",
+ ty::Int(..) | ty::Uint(..) | ty::Bool
+ );
+ }
+ }
+ }
+ Rvalue::ShallowInitBox(operand, _) => {
+ let a = operand.ty(&self.body.local_decls, self.tcx);
+ check_kinds!(a, "Cannot shallow init type {:?}", ty::RawPtr(..));
+ }
+ Rvalue::Cast(kind, operand, target_type) => {
+ match kind {
+ CastKind::Misc => {
+ let op_ty = operand.ty(self.body, self.tcx);
+ if op_ty.is_enum() {
+ self.fail(
+ location,
+ format!(
+ "enum -> int casts should go through `Rvalue::Discriminant`: {operand:?}:{op_ty} as {target_type}",
+ ),
+ );
+ }
+ }
+ // Nothing to check here
+ CastKind::PointerFromExposedAddress
+ | CastKind::PointerExposeAddress
+ | CastKind::Pointer(_) => {}
+ }
+ }
+ Rvalue::Repeat(_, _)
+ | Rvalue::ThreadLocalRef(_)
+ | Rvalue::AddressOf(_, _)
+ | Rvalue::NullaryOp(_, _)
+ | Rvalue::Discriminant(_) => {}
+ }
+ self.super_rvalue(rvalue, location);
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ match &statement.kind {
+ StatementKind::Assign(box (dest, rvalue)) => {
+ // LHS and RHS of the assignment must have the same type.
+ let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
+ let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
+ if !self.mir_assign_valid_types(right_ty, left_ty) {
+ self.fail(
+ location,
+ format!(
+ "encountered `{:?}` with incompatible types:\n\
+ left-hand side has type: {}\n\
+ right-hand side has type: {}",
+ statement.kind, left_ty, right_ty,
+ ),
+ );
+ }
+ if let Rvalue::CopyForDeref(place) = rvalue {
+                    if place.ty(&self.body.local_decls, self.tcx).ty.builtin_deref(true).is_none()
+                    {
+ self.fail(
+ location,
+ "`CopyForDeref` should only be used for dereferenceable types",
+ )
+ }
+ }
+ // FIXME(JakobDegen): Check this for all rvalues, not just this one.
+ if let Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) = rvalue {
+ // The sides of an assignment must not alias. Currently this just checks whether
+ // the places are identical.
+ if dest == src {
+ self.fail(
+ location,
+ "encountered `Assign` statement with overlapping memory",
+ );
+ }
+ }
+ }
+ StatementKind::AscribeUserType(..) => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`AscribeUserType` should have been removed after drop lowering phase",
+ );
+ }
+ }
+ StatementKind::FakeRead(..) => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`FakeRead` should have been removed after drop lowering phase",
+ );
+ }
+ }
+ StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+ ref src,
+ ref dst,
+ ref count,
+ }) => {
+ let src_ty = src.ty(&self.body.local_decls, self.tcx);
+ let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
+ src_deref.ty
+ } else {
+ self.fail(
+ location,
+ format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty),
+ );
+ return;
+ };
+ let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
+ let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
+ dst_deref.ty
+ } else {
+ self.fail(
+ location,
+ format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty),
+ );
+ return;
+ };
+                // Since `CopyNonOverlapping` is parametrized by a single type, we only need to
+                // check that the source and destination pointee types are equal, rather than
+                // keeping an extra type parameter.
+ if !self.mir_assign_valid_types(op_src_ty, op_dst_ty) {
+ self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty));
+ }
+
+ let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
+ if op_cnt_ty != self.tcx.types.usize {
+ self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
+ }
+ }
+ StatementKind::SetDiscriminant { place, .. } => {
+ if self.mir_phase < MirPhase::Deaggregated {
+ self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
+ }
+ let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind();
+ if !matches!(pty, ty::Adt(..) | ty::Generator(..) | ty::Opaque(..)) {
+ self.fail(
+ location,
+ format!(
+ "`SetDiscriminant` is only allowed on ADTs and generators, not {:?}",
+ pty
+ ),
+ );
+ }
+ }
+ StatementKind::Deinit(..) => {
+ if self.mir_phase < MirPhase::Deaggregated {
+ self.fail(location, "`Deinit`is not allowed until deaggregation");
+ }
+ }
+ StatementKind::Retag(_, _) => {
+ // FIXME(JakobDegen) The validator should check that `self.mir_phase <
+ // DropsLowered`. However, this causes ICEs with generation of drop shims, which
+ // seem to fail to set their `MirPhase` correctly.
+ }
+ StatementKind::StorageLive(..)
+ | StatementKind::StorageDead(..)
+ | StatementKind::Coverage(_)
+ | StatementKind::Nop => {}
+ }
+
+ self.super_statement(statement, location);
+ }
+
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ match &terminator.kind {
+ TerminatorKind::Goto { target } => {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ }
+ TerminatorKind::SwitchInt { targets, switch_ty, discr } => {
+ let ty = discr.ty(&self.body.local_decls, self.tcx);
+ if ty != *switch_ty {
+ self.fail(
+ location,
+ format!(
+ "encountered `SwitchInt` terminator with type mismatch: {:?} != {:?}",
+ ty, switch_ty,
+ ),
+ );
+ }
+
+ let target_width = self.tcx.sess.target.pointer_width;
+
+ let size = Size::from_bits(match switch_ty.kind() {
+ ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
+ ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
+ ty::Char => 32,
+ ty::Bool => 1,
+ other => bug!("unhandled type: {:?}", other),
+ });
+
+ for (value, target) in targets.iter() {
+ if Scalar::<()>::try_from_uint(value, size).is_none() {
+ self.fail(
+ location,
+ format!("the value {:#x} is not a proper {:?}", value, switch_ty),
+ )
+ }
+
+ self.check_edge(location, target, EdgeKind::Normal);
+ }
+ self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
+
+ self.value_cache.clear();
+ self.value_cache.extend(targets.iter().map(|(value, _)| value));
+ let all_len = self.value_cache.len();
+ self.value_cache.sort_unstable();
+ self.value_cache.dedup();
+ let has_duplicates = all_len != self.value_cache.len();
+ if has_duplicates {
+ self.fail(
+ location,
+ format!(
+ "duplicated values in `SwitchInt` terminator: {:?}",
+ terminator.kind,
+ ),
+ );
+ }
+ }
+ TerminatorKind::Drop { target, unwind, .. } => {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ if let Some(unwind) = unwind {
+ self.check_edge(location, *unwind, EdgeKind::Unwind);
+ }
+ }
+ TerminatorKind::DropAndReplace { target, unwind, .. } => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`DropAndReplace` should have been removed during drop elaboration",
+ );
+ }
+ self.check_edge(location, *target, EdgeKind::Normal);
+ if let Some(unwind) = unwind {
+ self.check_edge(location, *unwind, EdgeKind::Unwind);
+ }
+ }
+ TerminatorKind::Call { func, args, destination, target, cleanup, .. } => {
+ let func_ty = func.ty(&self.body.local_decls, self.tcx);
+ match func_ty.kind() {
+ ty::FnPtr(..) | ty::FnDef(..) => {}
+ _ => self.fail(
+ location,
+ format!("encountered non-callable type {} in `Call` terminator", func_ty),
+ ),
+ }
+ if let Some(target) = target {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ }
+ if let Some(cleanup) = cleanup {
+ self.check_edge(location, *cleanup, EdgeKind::Unwind);
+ }
+
+                // The call destination place and any `Operand::Move` place used as an argument
+                // might be passed by reference to the callee. Consequently, they must not
+                // overlap. Currently this simply checks for duplicate places.
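+                // E.g. (sketch) `_1 = f(move _1)` would put `_1` in the cache
+                // twice and be rejected below.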
+ self.place_cache.clear();
+ self.place_cache.push(destination.as_ref());
+ for arg in args {
+ if let Operand::Move(place) = arg {
+ self.place_cache.push(place.as_ref());
+ }
+ }
+ let all_len = self.place_cache.len();
+ let mut dedup = FxHashSet::default();
+ self.place_cache.retain(|p| dedup.insert(*p));
+ let has_duplicates = all_len != self.place_cache.len();
+ if has_duplicates {
+ self.fail(
+ location,
+ format!(
+ "encountered overlapping memory in `Call` terminator: {:?}",
+ terminator.kind,
+ ),
+ );
+ }
+ }
+ TerminatorKind::Assert { cond, target, cleanup, .. } => {
+ let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
+ if cond_ty != self.tcx.types.bool {
+ self.fail(
+ location,
+ format!(
+ "encountered non-boolean condition of type {} in `Assert` terminator",
+ cond_ty
+ ),
+ );
+ }
+ self.check_edge(location, *target, EdgeKind::Normal);
+ if let Some(cleanup) = cleanup {
+ self.check_edge(location, *cleanup, EdgeKind::Unwind);
+ }
+ }
+ TerminatorKind::Yield { resume, drop, .. } => {
+ if self.body.generator.is_none() {
+ self.fail(location, "`Yield` cannot appear outside generator bodies");
+ }
+ if self.mir_phase >= MirPhase::GeneratorsLowered {
+ self.fail(location, "`Yield` should have been replaced by generator lowering");
+ }
+ self.check_edge(location, *resume, EdgeKind::Normal);
+ if let Some(drop) = drop {
+ self.check_edge(location, *drop, EdgeKind::Normal);
+ }
+ }
+ TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`FalseEdge` should have been removed after drop elaboration",
+ );
+ }
+ self.check_edge(location, *real_target, EdgeKind::Normal);
+ self.check_edge(location, *imaginary_target, EdgeKind::Normal);
+ }
+ TerminatorKind::FalseUnwind { real_target, unwind } => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`FalseUnwind` should have been removed after drop elaboration",
+ );
+ }
+ self.check_edge(location, *real_target, EdgeKind::Normal);
+ if let Some(unwind) = unwind {
+ self.check_edge(location, *unwind, EdgeKind::Unwind);
+ }
+ }
+ TerminatorKind::InlineAsm { destination, cleanup, .. } => {
+ if let Some(destination) = destination {
+ self.check_edge(location, *destination, EdgeKind::Normal);
+ }
+ if let Some(cleanup) = cleanup {
+ self.check_edge(location, *cleanup, EdgeKind::Unwind);
+ }
+ }
+ TerminatorKind::GeneratorDrop => {
+ if self.body.generator.is_none() {
+ self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
+ }
+ if self.mir_phase >= MirPhase::GeneratorsLowered {
+ self.fail(
+ location,
+ "`GeneratorDrop` should have been replaced by generator lowering",
+ );
+ }
+ }
+ TerminatorKind::Resume | TerminatorKind::Abort => {
+ let bb = location.block;
+ if !self.body.basic_blocks()[bb].is_cleanup {
+ self.fail(location, "Cannot `Resume` or `Abort` from non-cleanup basic block")
+ }
+ }
+ TerminatorKind::Return => {
+ let bb = location.block;
+ if self.body.basic_blocks()[bb].is_cleanup {
+ self.fail(location, "Cannot `Return` from cleanup basic block")
+ }
+ }
+ TerminatorKind::Unreachable => {}
+ }
+
+ self.super_terminator(terminator, location);
+ }
+
+ fn visit_source_scope(&mut self, scope: SourceScope) {
+ if self.body.source_scopes.get(scope).is_none() {
+ self.tcx.sess.diagnostic().delay_span_bug(
+ self.body.span,
+ &format!(
+ "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
+ self.body.source.instance, self.when, scope,
+ ),
+ );
+ }
+ }
+}