path: root/compiler/rustc_const_eval/src/transform
Diffstat (limited to 'compiler/rustc_const_eval/src/transform')
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/check.rs    | 14
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/mod.rs      |  4
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/ops.rs      | 73
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs  |  4
-rw-r--r--  compiler/rustc_const_eval/src/transform/promote_consts.rs        | 42
-rw-r--r--  compiler/rustc_const_eval/src/transform/validate.rs               | 65
6 files changed, 106 insertions, 96 deletions
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
index 79f1737e3..aa24d9053 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -10,7 +10,7 @@ use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceC
use rustc_middle::mir::*;
use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, InstanceDef, Ty, TyCtxt};
-use rustc_middle::ty::{Binder, TraitRef, TypeVisitable};
+use rustc_middle::ty::{Binder, TraitRef, TypeVisitableExt};
use rustc_mir_dataflow::{self, Analysis};
use rustc_span::{sym, Span, Symbol};
use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
@@ -332,7 +332,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
fn check_static(&mut self, def_id: DefId, span: Span) {
if self.tcx.is_thread_local_static(def_id) {
- self.tcx.sess.delay_span_bug(span, "tls access is checked in `Rvalue::ThreadLocalRef");
+ self.tcx.sess.delay_span_bug(span, "tls access is checked in `Rvalue::ThreadLocalRef`");
}
self.check_op_spanned(ops::StaticAccess, span)
}
@@ -453,7 +453,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
Rvalue::Aggregate(kind, ..) => {
if let AggregateKind::Generator(def_id, ..) = kind.as_ref()
- && let Some(generator_kind @ hir::GeneratorKind::Async(..)) = self.tcx.generator_kind(def_id.to_def_id())
+ && let Some(generator_kind @ hir::GeneratorKind::Async(..)) = self.tcx.generator_kind(def_id)
{
self.check_op(ops::Generator(generator_kind));
}
@@ -473,7 +473,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
// that this is merely a ZST and it is already eligible for promotion.
// This may require an RFC?
/*
- ty::Array(_, len) if len.try_eval_usize(cx.tcx, cx.param_env) == Some(0)
+ ty::Array(_, len) if len.try_eval_target_usize(cx.tcx, cx.param_env) == Some(0)
=> true,
*/
_ => false,
@@ -693,6 +693,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
| StatementKind::Intrinsic(..)
+ | StatementKind::ConstEvalCounter
| StatementKind::Nop => {}
}
}
@@ -754,12 +755,9 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
let ocx = ObligationCtxt::new(&infcx);
let predicates = tcx.predicates_of(callee).instantiate(tcx, substs);
- let hir_id = tcx
- .hir()
- .local_def_id_to_hir_id(self.body.source.def_id().expect_local());
let cause = ObligationCause::new(
terminator.source_info.span,
- hir_id,
+ self.body.source.def_id().expect_local(),
ObligationCauseCode::ItemObligation(callee),
);
let normalized_predicates = ocx.normalize(&cause, param_env, predicates);
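Note on the check.rs hunks: the `AggregateKind::Generator` change only alters how the `DefId` is passed to `generator_kind`; the check it feeds, `ops::Generator`, is what rejects generators (including async blocks) in const contexts. A rough user-level illustration of the code that check reports, not part of the patch (the async-block case is additionally gated behind the unstable `const_async_blocks` feature):

```rust
// Illustrative only: an async block is a generator, so the const-checker's
// `ops::Generator` path rejects it inside a const item on stable Rust.
const _C: () = {
    let _fut = async {}; // error: async blocks are not allowed in constants
};

fn main() {}
```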
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
index 54868e418..0e4501922 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
@@ -68,11 +68,11 @@ impl<'mir, 'tcx> ConstCx<'mir, 'tcx> {
pub fn fn_sig(&self) -> PolyFnSig<'tcx> {
let did = self.def_id().to_def_id();
if self.tcx.is_closure(did) {
- let ty = self.tcx.type_of(did);
+ let ty = self.tcx.type_of(did).subst_identity();
let ty::Closure(_, substs) = ty.kind() else { bug!("type_of closure not ty::Closure") };
substs.as_closure().sig()
} else {
- self.tcx.fn_sig(did)
+ self.tcx.fn_sig(did).subst_identity()
}
}
}
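Both arms of `fn_sig` now go through `subst_identity()`: on this nightly the `type_of` and `fn_sig` queries return an `EarlyBinder`, so callers must either substitute generic arguments or explicitly keep the identity ones. A minimal stand-alone analogue of that pattern, with stand-in types rather than the `rustc_middle` definitions:

```rust
// Toy analogue of the `EarlyBinder` wrapper the queries now return. Callers
// unwrap it explicitly, either with real substitutions or, as in `fn_sig`
// above, with the identity substitution.
struct EarlyBinder<T>(T);

impl<T> EarlyBinder<T> {
    // Keep the item's own generic parameters unchanged.
    fn subst_identity(self) -> T {
        self.0
    }
}

fn main() {
    let sig = EarlyBinder("fn(&str) -> usize");
    println!("{}", sig.subst_identity());
}
```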
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
index 0cb5d2ff8..3e416b89c 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
@@ -22,13 +22,7 @@ use rustc_span::{BytePos, Pos, Span, Symbol};
use rustc_trait_selection::traits::SelectionContext;
use super::ConstCx;
-use crate::errors::{
- InteriorMutabilityBorrow, InteriorMutableDataRefer, MutDerefErr, NonConstFmtMacroCall,
- NonConstFnCall, NonConstOpErr, PanicNonStrErr, RawPtrToIntErr, StaticAccessErr,
- TransientMutBorrowErr, TransientMutBorrowErrRaw, UnallowedFnPointerCall,
- UnallowedHeapAllocations, UnallowedInlineAsm, UnallowedMutableRefs, UnallowedMutableRefsRaw,
- UnallowedOpInConstContext, UnstableConstFn,
-};
+use crate::errors;
use crate::util::{call_kind, CallDesugaringKind, CallKind};
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
@@ -99,7 +93,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallIndirect {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(UnallowedFnPointerCall { span, kind: ccx.const_kind() })
+ ccx.tcx.sess.create_err(errors::UnallowedFnPointerCall { span, kind: ccx.const_kind() })
}
}
@@ -142,6 +136,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
&param_ty.name.as_str(),
&constraint,
None,
+ None,
);
}
}
@@ -303,10 +298,11 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
diag_trait(&mut err, self_ty, tcx.require_lang_item(LangItem::Deref, Some(span)));
err
}
- _ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentV1Methods) => {
- ccx.tcx.sess.create_err(NonConstFmtMacroCall { span, kind: ccx.const_kind() })
- }
- _ => ccx.tcx.sess.create_err(NonConstFnCall {
+ _ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentV1Methods) => ccx
+ .tcx
+ .sess
+ .create_err(errors::NonConstFmtMacroCall { span, kind: ccx.const_kind() }),
+ _ => ccx.tcx.sess.create_err(errors::NonConstFnCall {
span,
def_path_str: ccx.tcx.def_path_str_with_substs(callee, substs),
kind: ccx.const_kind(),
@@ -351,7 +347,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallUnstable {
let mut err = ccx
.tcx
.sess
- .create_err(UnstableConstFn { span, def_path: ccx.tcx.def_path_str(def_id) });
+ .create_err(errors::UnstableConstFn { span, def_path: ccx.tcx.def_path_str(def_id) });
if ccx.is_const_stable_const_fn() {
err.help("const-stable functions can only call other const-stable functions");
@@ -387,11 +383,11 @@ impl<'tcx> NonConstOp<'tcx> for Generator {
let msg = format!("{}s are not allowed in {}s", self.0.descr(), ccx.const_kind());
if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
ccx.tcx.sess.create_feature_err(
- UnallowedOpInConstContext { span, msg },
+ errors::UnallowedOpInConstContext { span, msg },
sym::const_async_blocks,
)
} else {
- ccx.tcx.sess.create_err(UnallowedOpInConstContext { span, msg })
+ ccx.tcx.sess.create_err(errors::UnallowedOpInConstContext { span, msg })
}
}
}
@@ -404,7 +400,7 @@ impl<'tcx> NonConstOp<'tcx> for HeapAllocation {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(UnallowedHeapAllocations {
+ ccx.tcx.sess.create_err(errors::UnallowedHeapAllocations {
span,
kind: ccx.const_kind(),
teach: ccx.tcx.sess.teach(&error_code!(E0010)).then_some(()),
@@ -420,7 +416,7 @@ impl<'tcx> NonConstOp<'tcx> for InlineAsm {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(UnallowedInlineAsm { span, kind: ccx.const_kind() })
+ ccx.tcx.sess.create_err(errors::UnallowedInlineAsm { span, kind: ccx.const_kind() })
}
}
@@ -471,7 +467,9 @@ impl<'tcx> NonConstOp<'tcx> for TransientCellBorrow {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_feature_err(InteriorMutabilityBorrow { span }, sym::const_refs_to_cell)
+ ccx.tcx
+ .sess
+ .create_feature_err(errors::InteriorMutabilityBorrow { span }, sym::const_refs_to_cell)
}
}
@@ -488,14 +486,14 @@ impl<'tcx> NonConstOp<'tcx> for CellBorrow {
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
// FIXME: Maybe a more elegant solution to this if else case
if let hir::ConstContext::Static(_) = ccx.const_kind() {
- ccx.tcx.sess.create_err(InteriorMutableDataRefer {
+ ccx.tcx.sess.create_err(errors::InteriorMutableDataRefer {
span,
opt_help: Some(()),
kind: ccx.const_kind(),
teach: ccx.tcx.sess.teach(&error_code!(E0492)).then_some(()),
})
} else {
- ccx.tcx.sess.create_err(InteriorMutableDataRefer {
+ ccx.tcx.sess.create_err(errors::InteriorMutableDataRefer {
span,
opt_help: None,
kind: ccx.const_kind(),
@@ -528,12 +526,12 @@ impl<'tcx> NonConstOp<'tcx> for MutBorrow {
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
match self.0 {
- hir::BorrowKind::Raw => ccx.tcx.sess.create_err(UnallowedMutableRefsRaw {
+ hir::BorrowKind::Raw => ccx.tcx.sess.create_err(errors::UnallowedMutableRefsRaw {
span,
kind: ccx.const_kind(),
teach: ccx.tcx.sess.teach(&error_code!(E0764)).then_some(()),
}),
- hir::BorrowKind::Ref => ccx.tcx.sess.create_err(UnallowedMutableRefs {
+ hir::BorrowKind::Ref => ccx.tcx.sess.create_err(errors::UnallowedMutableRefs {
span,
kind: ccx.const_kind(),
teach: ccx.tcx.sess.teach(&error_code!(E0764)).then_some(()),
@@ -557,14 +555,14 @@ impl<'tcx> NonConstOp<'tcx> for TransientMutBorrow {
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
let kind = ccx.const_kind();
match self.0 {
- hir::BorrowKind::Raw => ccx
- .tcx
- .sess
- .create_feature_err(TransientMutBorrowErrRaw { span, kind }, sym::const_mut_refs),
- hir::BorrowKind::Ref => ccx
- .tcx
- .sess
- .create_feature_err(TransientMutBorrowErr { span, kind }, sym::const_mut_refs),
+ hir::BorrowKind::Raw => ccx.tcx.sess.create_feature_err(
+ errors::TransientMutBorrowErrRaw { span, kind },
+ sym::const_mut_refs,
+ ),
+ hir::BorrowKind::Ref => ccx.tcx.sess.create_feature_err(
+ errors::TransientMutBorrowErr { span, kind },
+ sym::const_mut_refs,
+ ),
}
}
}
@@ -586,9 +584,10 @@ impl<'tcx> NonConstOp<'tcx> for MutDeref {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx
- .sess
- .create_feature_err(MutDerefErr { span, kind: ccx.const_kind() }, sym::const_mut_refs)
+ ccx.tcx.sess.create_feature_err(
+ errors::MutDerefErr { span, kind: ccx.const_kind() },
+ sym::const_mut_refs,
+ )
}
}
@@ -601,7 +600,7 @@ impl<'tcx> NonConstOp<'tcx> for PanicNonStr {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(PanicNonStrErr { span })
+ ccx.tcx.sess.create_err(errors::PanicNonStrErr { span })
}
}
@@ -652,7 +651,7 @@ impl<'tcx> NonConstOp<'tcx> for RawPtrToIntCast {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(RawPtrToIntErr { span })
+ ccx.tcx.sess.create_err(errors::RawPtrToIntErr { span })
}
}
@@ -673,7 +672,7 @@ impl<'tcx> NonConstOp<'tcx> for StaticAccess {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(StaticAccessErr {
+ ccx.tcx.sess.create_err(errors::StaticAccessErr {
span,
kind: ccx.const_kind(),
teach: ccx.tcx.sess.teach(&error_code!(E0013)).then_some(()),
@@ -690,7 +689,7 @@ impl<'tcx> NonConstOp<'tcx> for ThreadLocalAccess {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(NonConstOpErr { span })
+ ccx.tcx.sess.create_err(errors::NonConstOpErr { span })
}
}
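The ops.rs changes are mechanical: the long list of error-struct imports collapses into `use crate::errors;`, and every construction site is qualified as `errors::...`, so the import list no longer has to grow with each new diagnostic. A small sketch of the convention, with an illustrative module rather than the real `crate::errors` contents:

```rust
// Bring the diagnostics module into scope once and qualify each struct where
// it is constructed.
mod errors {
    pub struct StaticAccessErr {
        pub kind: &'static str,
    }
}

fn report() -> errors::StaticAccessErr {
    errors::StaticAccessErr { kind: "const" }
}

fn main() {
    let err = report();
    println!("static access in a {} context", err.kind);
}
```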
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
index 8ca3fdf40..bb4b7ad50 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -217,10 +217,10 @@ impl Qualif for CustomEq {
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
- adt: AdtDef<'tcx>,
+ def: AdtDef<'tcx>,
substs: SubstsRef<'tcx>,
) -> bool {
- let ty = cx.tcx.mk_ty(ty::Adt(adt, substs));
+ let ty = cx.tcx.mk_adt(def, substs);
!ty.is_structural_eq_shallow(cx.tcx)
}
}
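`CustomEq::in_adt_inherently` still answers the same question, whether the ADT's equality is structural, and only the way the `Ty` is built changes (`mk_adt` instead of `mk_ty(ty::Adt(..))`). A user-level picture of the property `is_structural_eq_shallow` checks, assuming current structural-match rules (my example, not from the patch):

```rust
// A constant is only usable as a pattern if its type's equality is the
// derived, structural one; a hand-written `PartialEq` impl would instead
// trip the `CustomEq` qualif.
#[derive(PartialEq, Eq)]
struct Tag(u32);

const FIRST: Tag = Tag(1);

fn main() {
    match Tag(1) {
        FIRST => println!("structural equality, allowed in a pattern"),
        _ => println!("something else"),
    }
}
```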
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
index fae6117f8..3f3b66b06 100644
--- a/compiler/rustc_const_eval/src/transform/promote_consts.rs
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -18,7 +18,7 @@ use rustc_middle::mir::traversal::ReversePostorderIter;
use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::subst::InternalSubsts;
-use rustc_middle::ty::{self, List, TyCtxt, TypeVisitable};
+use rustc_middle::ty::{self, List, TyCtxt, TypeVisitableExt};
use rustc_span::Span;
use rustc_index::vec::{Idx, IndexVec};
@@ -364,31 +364,33 @@ impl<'tcx> Validator<'_, 'tcx> {
ProjectionElem::Index(local) => {
let mut promotable = false;
// Only accept if we can predict the index and are indexing an array.
- let val =
- if let TempState::Defined { location: loc, .. } = self.temps[local] {
- let block = &self.body[loc.block];
- if loc.statement_index < block.statements.len() {
- let statement = &block.statements[loc.statement_index];
- match &statement.kind {
- StatementKind::Assign(box (
- _,
- Rvalue::Use(Operand::Constant(c)),
- )) => c.literal.try_eval_usize(self.tcx, self.param_env),
- _ => None,
- }
- } else {
- None
+ let val = if let TempState::Defined { location: loc, .. } =
+ self.temps[local]
+ {
+ let block = &self.body[loc.block];
+ if loc.statement_index < block.statements.len() {
+ let statement = &block.statements[loc.statement_index];
+ match &statement.kind {
+ StatementKind::Assign(box (
+ _,
+ Rvalue::Use(Operand::Constant(c)),
+ )) => c.literal.try_eval_target_usize(self.tcx, self.param_env),
+ _ => None,
}
} else {
None
- };
+ }
+ } else {
+ None
+ };
if let Some(idx) = val {
// Determine the type of the thing we are indexing.
let ty = place_base.ty(self.body, self.tcx).ty;
match ty.kind() {
ty::Array(_, len) => {
// It's an array; determine its length.
- if let Some(len) = len.try_eval_usize(self.tcx, self.param_env)
+ if let Some(len) =
+ len.try_eval_target_usize(self.tcx, self.param_env)
{
// If the index is in-bounds, go ahead.
if idx < len {
@@ -470,7 +472,7 @@ impl<'tcx> Validator<'_, 'tcx> {
// mutably without consequences. However, only &mut []
// is allowed right now.
if let ty::Array(_, len) = ty.kind() {
- match len.try_eval_usize(self.tcx, self.param_env) {
+ match len.try_eval_target_usize(self.tcx, self.param_env) {
Some(0) => {}
_ => return Err(Unpromotable),
}
@@ -864,7 +866,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
let mut projection = vec![PlaceElem::Deref];
projection.extend(place.projection);
- place.projection = tcx.intern_place_elems(&projection);
+ place.projection = tcx.mk_place_elems(&projection);
// Create a temp to hold the promoted reference.
// This is because `*r` requires `r` to be a local,
@@ -896,7 +898,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
assert_eq!(self.new_block(), START_BLOCK);
self.visit_rvalue(
&mut rvalue,
- Location { block: BasicBlock::new(0), statement_index: usize::MAX },
+ Location { block: START_BLOCK, statement_index: usize::MAX },
);
let span = self.promoted.span;
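The promote_consts.rs hunks are mostly the `try_eval_usize` to `try_eval_target_usize` rename (array lengths are target-dependent `usize`s) plus reindentation and the `intern_place_elems`/`mk_place_elems` rename; the promotion logic itself is unchanged. Two user-level cases the touched code accepts, assuming current promotion rules (my examples, not from the patch):

```rust
fn main() {
    // The zero-length special case mentioned in the second hunk: `&mut []`
    // is the only mutable borrow promotion accepts.
    let empty: &'static mut [i32] = &mut [];
    assert!(empty.is_empty());

    // The `ProjectionElem::Index` arm predicts a constant index and the array
    // length, and promotes only when the index is in bounds.
    let elem: &'static i32 = &[10, 20, 30][1];
    assert_eq!(*elem, 20);
}
```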
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index dd168a9ac..fb37eb79a 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -8,12 +8,12 @@ use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::visit::NonUseContext::VarDebugInfo;
use rustc_middle::mir::visit::{PlaceContext, Visitor};
use rustc_middle::mir::{
- traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, CastKind, CopyNonOverlapping,
- Local, Location, MirPass, MirPhase, NonDivergingIntrinsic, Operand, Place, PlaceElem, PlaceRef,
- ProjectionElem, RetagKind, RuntimePhase, Rvalue, SourceScope, Statement, StatementKind,
- Terminator, TerminatorKind, UnOp, START_BLOCK,
+ traversal, BasicBlock, BinOp, Body, BorrowKind, CastKind, CopyNonOverlapping, Local, Location,
+ MirPass, MirPhase, NonDivergingIntrinsic, Operand, Place, PlaceElem, PlaceRef, ProjectionElem,
+ RetagKind, RuntimePhase, Rvalue, SourceScope, Statement, StatementKind, Terminator,
+ TerminatorKind, UnOp, START_BLOCK,
};
-use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitable};
+use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
use rustc_mir_dataflow::impls::MaybeStorageLive;
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_mir_dataflow::{Analysis, ResultsCursor};
@@ -230,8 +230,12 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
// Equal types, all is good.
return true;
}
- // Normalization reveals opaque types, but we may be validating MIR while computing
- // said opaque types, causing cycles.
+
+ // We sometimes have to use `defining_opaque_types` for subtyping
+ // to succeed here and figuring out how exactly that should work
+ // is annoying. It is harmless enough to just not validate anything
+ // in that case. We still check this after analysis as all opaque
+ // types have been revealed at this point.
if (src, dest).has_opaque_types() {
return true;
}
@@ -311,7 +315,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
ProjectionElem::Field(f, ty) => {
- let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
+ let parent = Place { local, projection: self.tcx.mk_place_elems(proj_base) };
let parent_ty = parent.ty(&self.body.local_decls, self.tcx);
let fail_out_of_bounds = |this: &Self, location| {
this.fail(location, format!("Out of bounds field {:?} for {:?}", f, parent_ty));
@@ -330,7 +334,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
let kind = match parent_ty.ty.kind() {
&ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
- self.tcx.bound_type_of(def_id).subst(self.tcx, substs).kind()
+ self.tcx.type_of(def_id).subst(self.tcx, substs).kind()
}
kind => kind,
};
@@ -377,12 +381,12 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
return;
};
- let Some(&f_ty) = layout.field_tys.get(local) else {
+ let Some(f_ty) = layout.field_tys.get(local) else {
self.fail(location, format!("Out of bounds local {:?} for {:?}", local, parent_ty));
return;
};
- f_ty
+ f_ty.ty
} else {
let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
fail_out_of_bounds(self, location);
@@ -428,19 +432,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
};
}
match rvalue {
- Rvalue::Use(_) | Rvalue::CopyForDeref(_) => {}
- Rvalue::Aggregate(agg_kind, _) => {
- let disallowed = match **agg_kind {
- AggregateKind::Array(..) => false,
- _ => self.mir_phase >= MirPhase::Runtime(RuntimePhase::PostCleanup),
- };
- if disallowed {
- self.fail(
- location,
- format!("{:?} have been lowered to field assignments", rvalue),
- )
- }
- }
+ Rvalue::Use(_) | Rvalue::CopyForDeref(_) | Rvalue::Aggregate(..) => {}
Rvalue::Ref(_, BorrowKind::Shallow, _) => {
if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(
@@ -759,13 +751,32 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
// FIXME(JakobDegen) The validator should check that `self.mir_phase <
// DropsLowered`. However, this causes ICEs with generation of drop shims, which
// seem to fail to set their `MirPhase` correctly.
- if *kind == RetagKind::Raw || *kind == RetagKind::TwoPhase {
+ if matches!(kind, RetagKind::Raw | RetagKind::TwoPhase) {
self.fail(location, format!("explicit `{:?}` is forbidden", kind));
}
}
- StatementKind::StorageLive(..)
- | StatementKind::StorageDead(..)
+ StatementKind::StorageLive(local) => {
+ // We check that the local is not live when entering a `StorageLive` for it.
+ // Technically, violating this restriction is only UB and not actually indicative
+ // of not well-formed MIR. This means that an optimization which turns MIR that
+ // already has UB into MIR that fails this check is not necessarily wrong. However,
+ // we have no such optimizations at the moment, and so we include this check anyway
+ // to help us catch bugs. If you happen to write an optimization that might cause
+ // this to incorrectly fire, feel free to remove this check.
+ if self.reachable_blocks.contains(location.block) {
+ self.storage_liveness.seek_before_primary_effect(location);
+ let locals_with_storage = self.storage_liveness.get();
+ if locals_with_storage.contains(*local) {
+ self.fail(
+ location,
+ format!("StorageLive({local:?}) which already has storage here"),
+ );
+ }
+ }
+ }
+ StatementKind::StorageDead(_)
| StatementKind::Coverage(_)
+ | StatementKind::ConstEvalCounter
| StatementKind::Nop => {}
}