path: root/compiler/rustc_const_eval/src
Diffstat (limited to 'compiler/rustc_const_eval/src')
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/error.rs |   9
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/eval_queries.rs |  18
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/fn_queries.rs |  15
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/machine.rs |  71
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/mod.rs |  10
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/valtrees.rs | 202
-rw-r--r--  compiler/rustc_const_eval/src/errors.rs | 178
-rw-r--r--  compiler/rustc_const_eval/src/interpret/cast.rs |  14
-rw-r--r--  compiler/rustc_const_eval/src/interpret/discriminant.rs | 110
-rw-r--r--  compiler/rustc_const_eval/src/interpret/eval_context.rs |  42
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs | 135
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs |  88
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs |   8
-rw-r--r--  compiler/rustc_const_eval/src/interpret/machine.rs |  30
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs |  35
-rw-r--r--  compiler/rustc_const_eval/src/interpret/mod.rs |   8
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operand.rs | 276
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operator.rs |   9
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs | 566
-rw-r--r--  compiler/rustc_const_eval/src/interpret/projection.rs | 417
-rw-r--r--  compiler/rustc_const_eval/src/interpret/step.rs |  32
-rw-r--r--  compiler/rustc_const_eval/src/interpret/terminator.rs | 252
-rw-r--r--  compiler/rustc_const_eval/src/interpret/util.rs |   8
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs | 234
-rw-r--r--  compiler/rustc_const_eval/src/interpret/visitor.rs | 692
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/check.rs |  71
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/mod.rs |  19
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/ops.rs |  29
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs |   6
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs |  30
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/resolver.rs |  13
-rw-r--r--  compiler/rustc_const_eval/src/transform/promote_consts.rs |  16
-rw-r--r--  compiler/rustc_const_eval/src/transform/validate.rs | 627
-rw-r--r--  compiler/rustc_const_eval/src/util/compare_types.rs |  14
-rw-r--r--  compiler/rustc_const_eval/src/util/type_name.rs |  13
35 files changed, 2214 insertions, 2083 deletions
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
index 7890d878d..d39a7e8a1 100644
--- a/compiler/rustc_const_eval/src/const_eval/error.rs
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -138,7 +138,10 @@ where
err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
ErrorHandled::TooGeneric
}
- err_inval!(AlreadyReported(error_reported)) => ErrorHandled::Reported(error_reported),
+ err_inval!(AlreadyReported(guar)) => ErrorHandled::Reported(guar),
+ err_inval!(Layout(LayoutError::ReferencesError(guar))) => {
+ ErrorHandled::Reported(guar.into())
+ }
err_inval!(Layout(layout_error @ LayoutError::SizeOverflow(_))) => {
// We must *always* hard error on these, even if the caller wants just a lint.
// The `message` makes little sense here, this is a more serious error than the
@@ -150,8 +153,8 @@ where
tcx.sess.create_err(Spanned { span, node: layout_error.into_diagnostic() });
err.code(rustc_errors::error_code!(E0080));
let Some((mut err, handler)) = err.into_diagnostic() else {
- panic!("did not emit diag");
- };
+ panic!("did not emit diag");
+ };
for frame in frames {
err.eager_subdiagnostic(handler, frame);
}
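
To make the new mapping concrete, here is a minimal standalone sketch (stand-in types, not the real rustc_errors/rustc_middle definitions) of how layout errors now fold into `ErrorHandled`: a layout that merely references an already-reported error propagates the existing `ErrorGuaranteed` instead of producing a fresh diagnostic.

```rust
// Stand-in types: not the real rustc definitions.
#[derive(Debug, Clone, Copy)]
struct ErrorGuaranteed;

#[derive(Debug)]
enum LayoutError {
    Unknown,
    SizeOverflow,
    ReferencesError(ErrorGuaranteed),
}

#[derive(Debug)]
enum ErrorHandled {
    Reported(ErrorGuaranteed),
    TooGeneric,
}

fn classify(err: LayoutError) -> ErrorHandled {
    match err {
        // An unknown layout is assumed to stem from unresolved generics.
        LayoutError::Unknown => ErrorHandled::TooGeneric,
        // The type already triggered a diagnostic elsewhere: propagate the
        // existing guarantee instead of emitting a second error.
        LayoutError::ReferencesError(guar) => ErrorHandled::Reported(guar),
        // In the real code this always becomes a hard error with a fresh
        // diagnostic; the diagnostic machinery is elided here.
        LayoutError::SizeOverflow => ErrorHandled::Reported(ErrorGuaranteed),
    }
}

fn main() {
    println!("{:?}", classify(LayoutError::ReferencesError(ErrorGuaranteed)));
}
```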
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 417ab78fd..4c7e91944 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -45,20 +45,20 @@ fn eval_body_using_ecx<'mir, 'tcx>(
"Unexpected DefKind: {:?}",
ecx.tcx.def_kind(cid.instance.def_id())
);
- let layout = ecx.layout_of(body.bound_return_ty().subst(tcx, cid.instance.substs))?;
+ let layout = ecx.layout_of(body.bound_return_ty().instantiate(tcx, cid.instance.args))?;
assert!(layout.is_sized());
let ret = ecx.allocate(layout, MemoryKind::Stack)?;
trace!(
"eval_body_using_ecx: pushing stack frame for global: {}{}",
with_no_trimmed_paths!(ecx.tcx.def_path_str(cid.instance.def_id())),
- cid.promoted.map_or_else(String::new, |p| format!("::promoted[{:?}]", p))
+ cid.promoted.map_or_else(String::new, |p| format!("::promoted[{p:?}]"))
);
ecx.push_stack_frame(
cid.instance,
body,
- &ret.into(),
+ &ret.clone().into(),
StackPopCleanup::Root { cleanup: false },
)?;
@@ -228,7 +228,6 @@ pub fn eval_to_const_value_raw_provider<'tcx>(
tcx: TyCtxt<'tcx>,
key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
) -> ::rustc_middle::mir::interpret::EvalToConstValueResult<'tcx> {
- assert!(key.param_env.is_const());
// see comment in eval_to_allocation_raw_provider for what we're doing here
if key.param_env.reveal() == Reveal::All {
let mut key = key;
@@ -245,10 +244,10 @@ pub fn eval_to_const_value_raw_provider<'tcx>(
// Catch such calls and evaluate them instead of trying to load a constant's MIR.
if let ty::InstanceDef::Intrinsic(def_id) = key.value.instance.def {
let ty = key.value.instance.ty(tcx, key.param_env);
- let ty::FnDef(_, substs) = ty.kind() else {
+ let ty::FnDef(_, args) = ty.kind() else {
bug!("intrinsic with type {:?}", ty);
};
- return eval_nullary_intrinsic(tcx, key.param_env, def_id, substs).map_err(|error| {
+ return eval_nullary_intrinsic(tcx, key.param_env, def_id, args).map_err(|error| {
let span = tcx.def_span(def_id);
super::report(
@@ -269,7 +268,6 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
tcx: TyCtxt<'tcx>,
key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> {
- assert!(key.param_env.is_const());
// Because the constant is computed twice (once per value of `Reveal`), we are at risk of
// reporting the same error twice here. To resolve this, we check whether we can evaluate the
// constant in the more restrictive `Reveal::UserFacing`, which most likely already was
@@ -328,10 +326,10 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
("static", String::new())
} else {
// If the current item has generics, we'd like to enrich the message with the
- // instance and its substs: to show the actual compile-time values, in addition to
+ // instance and its args: to show the actual compile-time values, in addition to
// the expression, leading to the const eval error.
let instance = &key.value.instance;
- if !instance.substs.is_empty() {
+ if !instance.args.is_empty() {
let instance = with_no_trimmed_paths!(instance.to_string());
("const_with_path", instance)
} else {
@@ -356,7 +354,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
// Since evaluation had no errors, validate the resulting constant.
// This is a separate `try` block to provide more targeted error reporting.
let validation: Result<_, InterpErrorInfo<'_>> = try {
- let mut ref_tracking = RefTracking::new(mplace);
+ let mut ref_tracking = RefTracking::new(mplace.clone());
let mut inner = false;
while let Some((mplace, path)) = ref_tracking.todo.pop() {
let mode = match tcx.static_mutability(cid.instance.def_id()) {
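
The recurring change in this file (and throughout the commit) is the terminology rename: `SubstsRef` becomes `GenericArgsRef`, and `subst` becomes `instantiate`. A toy stand-in illustrating the new spelling (not the real `ty::EarlyBinder`, which actually folds the arguments into the bound value):

```rust
// Toy stand-in for `ty::EarlyBinder`; the real `instantiate` substitutes the
// generic arguments into the bound value, which is elided here.
struct EarlyBinder<T>(T);

impl<T> EarlyBinder<T> {
    // Previously named `subst`, taking "substs"; now `instantiate`,
    // taking "args" (generic arguments).
    fn instantiate(self, _args: &[&str]) -> T {
        self.0
    }
}

fn main() {
    let bound_return_ty = EarlyBinder("fn() -> T");
    let ty = bound_return_ty.instantiate(&["i32"]);
    println!("{ty}");
}
```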
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
index fa8253d5e..4ee4ebbb9 100644
--- a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -28,16 +28,19 @@ pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
&& tcx.constness(parent_id) == hir::Constness::Const
}
-/// Checks whether an item is considered to be `const`. If it is a constructor, it is const. If
-/// it is a trait impl/function, return if it has a `const` modifier. If it is an intrinsic,
-/// report whether said intrinsic has a `rustc_const_{un,}stable` attribute. Otherwise, return
-/// `Constness::NotConst`.
+/// Checks whether an item is considered to be `const`. If it is a constructor, anonymous const,
+/// const block, const item or associated const, it is const. If it is a trait impl/function,
+/// return whether it has a `const` modifier. If it is an intrinsic, report whether said intrinsic
+/// has a `rustc_const_{un,}stable` attribute. Otherwise, return `Constness::NotConst`.
fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness {
let node = tcx.hir().get_by_def_id(def_id);
match node {
- hir::Node::Ctor(_) => hir::Constness::Const,
- hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(impl_), .. }) => impl_.constness,
+ hir::Node::Ctor(_)
+ | hir::Node::AnonConst(_)
+ | hir::Node::ConstBlock(_)
+ | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. }) => hir::Constness::Const,
+ hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(_), .. }) => tcx
+ .generics_of(def_id)
+ .host_effect_index
+ .map_or(hir::Constness::NotConst, |_| hir::Constness::Const),
hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) => {
// Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
// foreign items cannot be evaluated at compile-time.
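
A hypothetical standalone model of the widened classification above: constructors, anonymous consts, const blocks, and associated consts are now unconditionally `Const`, while impls consult the presence of a host effect parameter (field names here are illustrative stand-ins for the HIR shapes).

```rust
#[derive(Debug, PartialEq)]
enum Constness {
    Const,
    NotConst,
}

// Hypothetical flattened stand-ins for the relevant `hir::Node` shapes.
enum Node {
    Ctor,
    AnonConst,
    ConstBlock,
    AssocConst,
    Impl { has_host_effect_param: bool },
    Other,
}

fn constness(node: Node) -> Constness {
    match node {
        // These item kinds are now unconditionally const.
        Node::Ctor | Node::AnonConst | Node::ConstBlock | Node::AssocConst => Constness::Const,
        // Impls are const exactly when they carry a host effect parameter.
        Node::Impl { has_host_effect_param: true } => Constness::Const,
        Node::Impl { .. } | Node::Other => Constness::NotConst,
    }
}

fn main() {
    assert_eq!(constness(Node::ConstBlock), Constness::Const);
    assert_eq!(
        constness(Node::Impl { has_host_effect_param: false }),
        Constness::NotConst
    );
}
```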
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index f9f645af4..b740b79d1 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -22,7 +22,7 @@ use rustc_target::spec::abi::Abi as CallAbi;
use crate::errors::{LongRunning, LongRunningWarn};
use crate::interpret::{
- self, compile_time_machine, AllocId, ConstAllocation, FnVal, Frame, ImmTy, InterpCx,
+ self, compile_time_machine, AllocId, ConstAllocation, FnArg, FnVal, Frame, ImmTy, InterpCx,
InterpResult, OpTy, PlaceTy, Pointer, Scalar,
};
use crate::{errors, fluent_generated as fluent};
@@ -201,7 +201,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
fn hook_special_const_fn(
&mut self,
instance: ty::Instance<'tcx>,
- args: &[OpTy<'tcx>],
+ args: &[FnArg<'tcx>],
dest: &PlaceTy<'tcx>,
ret: Option<mir::BasicBlock>,
) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
@@ -210,12 +210,13 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
if Some(def_id) == self.tcx.lang_items().panic_display()
|| Some(def_id) == self.tcx.lang_items().begin_panic_fn()
{
+ let args = self.copy_fn_args(args)?;
// &str or &&str
assert!(args.len() == 1);
- let mut msg_place = self.deref_operand(&args[0])?;
+ let mut msg_place = self.deref_pointer(&args[0])?;
while msg_place.layout.ty.is_ref() {
- msg_place = self.deref_operand(&msg_place.into())?;
+ msg_place = self.deref_pointer(&msg_place)?;
}
let msg = Symbol::intern(self.read_str(&msg_place)?);
@@ -229,15 +230,16 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
*self.tcx,
ty::ParamEnv::reveal_all(),
const_def_id,
- instance.substs,
+ instance.args,
)
.unwrap()
.unwrap();
return Ok(Some(new_instance));
} else if Some(def_id) == self.tcx.lang_items().align_offset_fn() {
+ let args = self.copy_fn_args(args)?;
// For align_offset, we replace the function call if the pointer has no address.
- match self.align_offset(instance, args, dest, ret)? {
+ match self.align_offset(instance, &args, dest, ret)? {
ControlFlow::Continue(()) => return Ok(Some(instance)),
ControlFlow::Break(()) => return Ok(None),
}
@@ -293,7 +295,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
self.eval_fn_call(
FnVal::Instance(instance),
(CallAbi::Rust, fn_abi),
- &[addr, align],
+ &[FnArg::Copy(addr), FnArg::Copy(align)],
/* with_caller_location = */ false,
dest,
ret,
@@ -425,52 +427,41 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
fn find_mir_or_eval_fn(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
- instance: ty::Instance<'tcx>,
+ orig_instance: ty::Instance<'tcx>,
_abi: CallAbi,
- args: &[OpTy<'tcx>],
+ args: &[FnArg<'tcx>],
dest: &PlaceTy<'tcx>,
ret: Option<mir::BasicBlock>,
_unwind: mir::UnwindAction, // unwinding is not supported in consts
) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>> {
- debug!("find_mir_or_eval_fn: {:?}", instance);
+ debug!("find_mir_or_eval_fn: {:?}", orig_instance);
+
+ // Replace some functions.
+ let Some(instance) = ecx.hook_special_const_fn(orig_instance, args, dest, ret)? else {
+ // Call has already been handled.
+ return Ok(None);
+ };
// Only check non-glue functions
if let ty::InstanceDef::Item(def) = instance.def {
// Execution might have wandered off into other crates, so we cannot do a stability-
- // sensitive check here. But we can at least rule out functions that are not const
- // at all.
- if !ecx.tcx.is_const_fn_raw(def) {
- // allow calling functions inside a trait marked with #[const_trait].
- if !ecx.tcx.is_const_default_method(def) {
- // We certainly do *not* want to actually call the fn
- // though, so be sure we return here.
- throw_unsup_format!("calling non-const function `{}`", instance)
- }
- }
-
- let Some(new_instance) = ecx.hook_special_const_fn(instance, args, dest, ret)? else {
- return Ok(None);
- };
-
- if new_instance != instance {
- // We call another const fn instead.
- // However, we return the *original* instance to make backtraces work out
- // (and we hope this does not confuse the FnAbi checks too much).
- return Ok(Self::find_mir_or_eval_fn(
- ecx,
- new_instance,
- _abi,
- args,
- dest,
- ret,
- _unwind,
- )?
- .map(|(body, _instance)| (body, instance)));
+ // sensitive check here. But we can at least rule out functions that are not const at
+ // all. That said, we have to allow calling functions inside a trait marked with
+ // #[const_trait]. These *are* const-checked!
+ // FIXME: why does `is_const_fn_raw` not classify them as const?
+ if (!ecx.tcx.is_const_fn_raw(def) && !ecx.tcx.is_const_default_method(def))
+ || ecx.tcx.has_attr(def, sym::rustc_do_not_const_check)
+ {
+ // We certainly do *not* want to actually call the fn
+ // though, so be sure we return here.
+ throw_unsup_format!("calling non-const function `{}`", instance)
}
}
// This is a const fn. Call it.
- Ok(Some((ecx.load_mir(instance.def, None)?, instance)))
+ // In case of replacement, we return the *original* instance to make backtraces work out
+ // (and we hope this does not confuse the FnAbi checks too much).
+ Ok(Some((ecx.load_mir(instance.def, None)?, orig_instance)))
}
fn call_intrinsic(
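
The restructuring of `find_mir_or_eval_fn` is easier to see in isolation. A simplified model of the new control flow (stub types and helpers, not the interpreter's real signatures): the special-case hook now runs before the const-fn check, and the original instance is what gets reported.

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Instance(&'static str);
struct Body;
#[derive(Debug)]
enum Error {
    NonConstCall,
}

fn hook_special_const_fn(i: Instance) -> Result<Option<Instance>, Error> {
    // Pretend `begin_panic` is fully handled by the hook.
    Ok(if i.0 == "begin_panic" { None } else { Some(i) })
}
fn is_const_fn(_: Instance) -> bool {
    true
}
fn is_const_default_method(_: Instance) -> bool {
    false
}
fn load_mir(_: Instance) -> Result<Body, Error> {
    Ok(Body)
}

fn find_mir_or_eval_fn(orig: Instance) -> Result<Option<(Body, Instance)>, Error> {
    // 1. Let the machine replace or fully handle the call first.
    let Some(instance) = hook_special_const_fn(orig)? else {
        return Ok(None); // call already handled by the hook
    };
    // 2. Reject non-const functions (const default methods stay allowed).
    if !is_const_fn(instance) && !is_const_default_method(instance) {
        return Err(Error::NonConstCall);
    }
    // 3. Load MIR for the (possibly replaced) instance, but return the
    //    *original* instance so backtraces point at the user's call.
    Ok(Some((load_mir(instance)?, orig)))
}

fn main() {
    assert!(find_mir_or_eval_fn(Instance("begin_panic")).unwrap().is_none());
    let (_body, reported) = find_mir_or_eval_fn(Instance("foo")).unwrap().unwrap();
    assert_eq!(reported, Instance("foo"));
}
```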
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index a3064b53d..854104622 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -85,7 +85,7 @@ pub(crate) fn eval_to_valtree<'tcx>(
}
#[instrument(skip(tcx), level = "debug")]
-pub(crate) fn try_destructure_mir_constant_for_diagnostics<'tcx>(
+pub fn try_destructure_mir_constant_for_diagnostics<'tcx>(
tcx: TyCtxt<'tcx>,
val: ConstValue<'tcx>,
ty: Ty<'tcx>,
@@ -101,17 +101,17 @@ pub(crate) fn try_destructure_mir_constant_for_diagnostics<'tcx>(
return None;
}
ty::Adt(def, _) => {
- let variant = ecx.read_discriminant(&op).ok()?.1;
- let down = ecx.operand_downcast(&op, variant).ok()?;
+ let variant = ecx.read_discriminant(&op).ok()?;
+ let down = ecx.project_downcast(&op, variant).ok()?;
(def.variants()[variant].fields.len(), Some(variant), down)
}
- ty::Tuple(substs) => (substs.len(), None, op),
+ ty::Tuple(args) => (args.len(), None, op),
_ => bug!("cannot destructure mir constant {:?}", val),
};
let fields_iter = (0..field_count)
.map(|i| {
- let field_op = ecx.operand_field(&down, i).ok()?;
+ let field_op = ecx.project_field(&down, i).ok()?;
let val = op_to_const(&ecx, &field_op);
Some((val, field_op.layout.ty))
})
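
`try_destructure_mir_constant_for_diagnostics` now goes through the generic projection API (`project_downcast`/`project_field`) instead of operand-specific helpers. A toy model of why that unification works: one trait implemented by both operand and place types (simplified; not rustc's actual `Projectable` trait).

```rust
// Toy model: one projection entry point shared by operands and places,
// instead of separate `operand_field` / `mplace_field` helpers.
trait Projectable {
    fn project_field(&self, i: usize) -> Self;
}

#[derive(Debug, Clone)]
struct OpTy(Vec<i32>);
#[derive(Debug, Clone)]
struct MPlaceTy(Vec<i32>);

impl Projectable for OpTy {
    fn project_field(&self, i: usize) -> Self {
        OpTy(vec![self.0[i]])
    }
}
impl Projectable for MPlaceTy {
    fn project_field(&self, i: usize) -> Self {
        MPlaceTy(vec![self.0[i]])
    }
}

// Generic code can now project without caring which kind of value it has.
fn first_field<P: Projectable>(v: &P) -> P {
    v.project_field(0)
}

fn main() {
    println!("{:?}", first_field(&OpTy(vec![1, 2])));
    println!("{:?}", first_field(&MPlaceTy(vec![3, 4])));
}
```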
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index e574df276..b15a65d67 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -2,14 +2,15 @@ use super::eval_queries::{mk_eval_cx, op_to_const};
use super::machine::CompileTimeEvalContext;
use super::{ValTreeCreationError, ValTreeCreationResult, VALTREE_MAX_NODES};
use crate::const_eval::CanAccessStatics;
+use crate::interpret::MPlaceTy;
use crate::interpret::{
intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta,
- MemoryKind, PlaceTy, Scalar,
+ MemoryKind, Place, Projectable, Scalar,
};
-use crate::interpret::{MPlaceTy, Value};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
use rustc_span::source_map::DUMMY_SP;
-use rustc_target::abi::{Align, FieldIdx, VariantIdx, FIRST_VARIANT};
+use rustc_target::abi::VariantIdx;
#[instrument(skip(ecx), level = "debug")]
fn branches<'tcx>(
@@ -20,15 +21,15 @@ fn branches<'tcx>(
num_nodes: &mut usize,
) -> ValTreeCreationResult<'tcx> {
let place = match variant {
- Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(),
- None => *place,
+ Some(variant) => ecx.project_downcast(place, variant).unwrap(),
+ None => place.clone(),
};
let variant = variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32()))));
debug!(?place, ?variant);
let mut fields = Vec::with_capacity(n);
for i in 0..n {
- let field = ecx.mplace_field(&place, i).unwrap();
+ let field = ecx.project_field(&place, i).unwrap();
let valtree = const_to_valtree_inner(ecx, &field, num_nodes)?;
fields.push(Some(valtree));
}
@@ -55,13 +56,11 @@ fn slice_branches<'tcx>(
place: &MPlaceTy<'tcx>,
num_nodes: &mut usize,
) -> ValTreeCreationResult<'tcx> {
- let n = place
- .len(&ecx.tcx.tcx)
- .unwrap_or_else(|_| panic!("expected to use len of place {:?}", place));
+ let n = place.len(ecx).unwrap_or_else(|_| panic!("expected to use len of place {place:?}"));
let mut elems = Vec::with_capacity(n as usize);
for i in 0..n {
- let place_elem = ecx.mplace_index(place, i).unwrap();
+ let place_elem = ecx.project_index(place, i).unwrap();
let valtree = const_to_valtree_inner(ecx, &place_elem, num_nodes)?;
elems.push(valtree);
}
@@ -88,7 +87,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
Ok(ty::ValTree::zst())
}
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
- let Ok(val) = ecx.read_immediate(&place.into()) else {
+ let Ok(val) = ecx.read_immediate(place) else {
return Err(ValTreeCreationError::Other);
};
let val = val.to_scalar();
@@ -104,7 +103,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
ty::FnPtr(_) | ty::RawPtr(_) => Err(ValTreeCreationError::NonSupportedType),
ty::Ref(_, _, _) => {
- let Ok(derefd_place)= ecx.deref_operand(&place.into()) else {
+ let Ok(derefd_place)= ecx.deref_pointer(place) else {
return Err(ValTreeCreationError::Other);
};
debug!(?derefd_place);
@@ -132,7 +131,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
bug!("uninhabited types should have errored and never gotten converted to valtree")
}
- let Ok((_, variant)) = ecx.read_discriminant(&place.into()) else {
+ let Ok(variant) = ecx.read_discriminant(place) else {
return Err(ValTreeCreationError::Other);
};
branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant), num_nodes)
@@ -156,52 +155,37 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
}
}
-#[instrument(skip(ecx), level = "debug")]
-fn create_mplace_from_layout<'tcx>(
- ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
- ty: Ty<'tcx>,
-) -> MPlaceTy<'tcx> {
- let tcx = ecx.tcx;
- let param_env = ecx.param_env;
- let layout = tcx.layout_of(param_env.and(ty)).unwrap();
- debug!(?layout);
-
- ecx.allocate(layout, MemoryKind::Stack).unwrap()
-}
-
-// Walks custom DSTs and gets the type of the unsized field and the number of elements
-// in the unsized field.
-fn get_info_on_unsized_field<'tcx>(
- ty: Ty<'tcx>,
+/// Valtrees don't store the `MemPlaceMeta` that all dynamically sized values have in the interpreter.
+/// This function reconstructs it.
+fn reconstruct_place_meta<'tcx>(
+ layout: TyAndLayout<'tcx>,
valtree: ty::ValTree<'tcx>,
tcx: TyCtxt<'tcx>,
-) -> (Ty<'tcx>, usize) {
+) -> MemPlaceMeta {
+ if layout.is_sized() {
+ return MemPlaceMeta::None;
+ }
+
let mut last_valtree = valtree;
+ // Traverse the type, and update `last_valtree` as we go.
let tail = tcx.struct_tail_with_normalize(
- ty,
+ layout.ty,
|ty| ty,
|| {
let branches = last_valtree.unwrap_branch();
- last_valtree = branches[branches.len() - 1];
+ last_valtree = *branches.last().unwrap();
debug!(?branches, ?last_valtree);
},
);
- let unsized_inner_ty = match tail.kind() {
- ty::Slice(t) => *t,
- ty::Str => tail,
- _ => bug!("expected Slice or Str"),
- };
-
- // Have to adjust type for ty::Str
- let unsized_inner_ty = match unsized_inner_ty.kind() {
- ty::Str => tcx.types.u8,
- _ => unsized_inner_ty,
+ // Sanity-check that we got a tail we support.
+ match tail.kind() {
+ ty::Slice(..) | ty::Str => {}
+ _ => bug!("unsized tail of a valtree must be Slice or Str"),
};
- // Get the number of elements in the unsized field
+ // Get the number of elements in the unsized field.
let num_elems = last_valtree.unwrap_branch().len();
-
- (unsized_inner_ty, num_elems)
+ MemPlaceMeta::Meta(Scalar::from_target_usize(num_elems as u64, &tcx))
}
#[instrument(skip(ecx), level = "debug", ret)]
@@ -210,41 +194,9 @@ fn create_pointee_place<'tcx>(
ty: Ty<'tcx>,
valtree: ty::ValTree<'tcx>,
) -> MPlaceTy<'tcx> {
- let tcx = ecx.tcx.tcx;
-
- if !ty.is_sized(*ecx.tcx, ty::ParamEnv::empty()) {
- // We need to create `Allocation`s for custom DSTs
-
- let (unsized_inner_ty, num_elems) = get_info_on_unsized_field(ty, valtree, tcx);
- let unsized_inner_ty = match unsized_inner_ty.kind() {
- ty::Str => tcx.types.u8,
- _ => unsized_inner_ty,
- };
- let unsized_inner_ty_size =
- tcx.layout_of(ty::ParamEnv::empty().and(unsized_inner_ty)).unwrap().layout.size();
- debug!(?unsized_inner_ty, ?unsized_inner_ty_size, ?num_elems);
-
- // for custom DSTs only the last field/element is unsized, but we need to also allocate
- // space for the other fields/elements
- let layout = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap();
- let size_of_sized_part = layout.layout.size();
-
- // Get the size of the memory behind the DST
- let dst_size = unsized_inner_ty_size.checked_mul(num_elems as u64, &tcx).unwrap();
-
- let size = size_of_sized_part.checked_add(dst_size, &tcx).unwrap();
- let align = Align::from_bytes(size.bytes().next_power_of_two()).unwrap();
- let ptr = ecx.allocate_ptr(size, align, MemoryKind::Stack).unwrap();
- debug!(?ptr);
-
- MPlaceTy::from_aligned_ptr_with_meta(
- ptr.into(),
- layout,
- MemPlaceMeta::Meta(Scalar::from_target_usize(num_elems as u64, &tcx)),
- )
- } else {
- create_mplace_from_layout(ecx, ty)
- }
+ let layout = ecx.layout_of(ty).unwrap();
+ let meta = reconstruct_place_meta(layout, valtree, ecx.tcx.tcx);
+ ecx.allocate_dyn(layout, MemoryKind::Stack, meta).unwrap()
}
/// Converts a `ValTree` to a `ConstValue`, which is needed after mir
@@ -282,17 +234,20 @@ pub fn valtree_to_const_value<'tcx>(
),
},
ty::Ref(_, _, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
- let mut place = match ty.kind() {
+ let place = match ty.kind() {
ty::Ref(_, inner_ty, _) => {
- // Need to create a place for the pointee to fill for Refs
+ // Need to create a place for the pointee (the reference itself will be an immediate)
create_pointee_place(&mut ecx, *inner_ty, valtree)
}
- _ => create_mplace_from_layout(&mut ecx, ty),
+ _ => {
+ // Need to create a place for this valtree.
+ create_pointee_place(&mut ecx, ty, valtree)
+ }
};
debug!(?place);
- valtree_into_mplace(&mut ecx, &mut place, valtree);
- dump_place(&ecx, place.into());
+ valtree_into_mplace(&mut ecx, &place, valtree);
+ dump_place(&ecx, &place);
intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();
match ty.kind() {
@@ -331,7 +286,7 @@ pub fn valtree_to_const_value<'tcx>(
#[instrument(skip(ecx), level = "debug")]
fn valtree_into_mplace<'tcx>(
ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
- place: &mut MPlaceTy<'tcx>,
+ place: &MPlaceTy<'tcx>,
valtree: ty::ValTree<'tcx>,
) {
// This will match on valtree and write the value(s) corresponding to the ValTree
@@ -347,14 +302,14 @@ fn valtree_into_mplace<'tcx>(
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
let scalar_int = valtree.unwrap_leaf();
debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place);
- ecx.write_immediate(Immediate::Scalar(scalar_int.into()), &place.into()).unwrap();
+ ecx.write_immediate(Immediate::Scalar(scalar_int.into()), place).unwrap();
}
ty::Ref(_, inner_ty, _) => {
- let mut pointee_place = create_pointee_place(ecx, *inner_ty, valtree);
+ let pointee_place = create_pointee_place(ecx, *inner_ty, valtree);
debug!(?pointee_place);
- valtree_into_mplace(ecx, &mut pointee_place, valtree);
- dump_place(ecx, pointee_place.into());
+ valtree_into_mplace(ecx, &pointee_place, valtree);
+ dump_place(ecx, &pointee_place);
intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place).unwrap();
let imm = match inner_ty.kind() {
@@ -371,7 +326,7 @@ fn valtree_into_mplace<'tcx>(
};
debug!(?imm);
- ecx.write_immediate(imm, &place.into()).unwrap();
+ ecx.write_immediate(imm, place).unwrap();
}
ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str | ty::Slice(_) => {
let branches = valtree.unwrap_branch();
@@ -386,12 +341,12 @@ fn valtree_into_mplace<'tcx>(
debug!(?variant);
(
- place.project_downcast(ecx, variant_idx).unwrap(),
+ ecx.project_downcast(place, variant_idx).unwrap(),
&branches[1..],
Some(variant_idx),
)
}
- _ => (*place, branches, None),
+ _ => (place.clone(), branches, None),
};
debug!(?place_adjusted, ?branches);
@@ -400,70 +355,33 @@ fn valtree_into_mplace<'tcx>(
for (i, inner_valtree) in branches.iter().enumerate() {
debug!(?i, ?inner_valtree);
- let mut place_inner = match ty.kind() {
- ty::Str | ty::Slice(_) => ecx.mplace_index(&place, i as u64).unwrap(),
- _ if !ty.is_sized(*ecx.tcx, ty::ParamEnv::empty())
- && i == branches.len() - 1 =>
- {
- // Note: For custom DSTs we need to manually process the last unsized field.
- // We created a `Pointer` for the `Allocation` of the complete sized version of
- // the Adt in `create_pointee_place` and now we fill that `Allocation` with the
- // values in the ValTree. For the unsized field we have to additionally add the meta
- // data.
-
- let (unsized_inner_ty, num_elems) =
- get_info_on_unsized_field(ty, valtree, tcx);
- debug!(?unsized_inner_ty);
-
- let inner_ty = match ty.kind() {
- ty::Adt(def, substs) => {
- let i = FieldIdx::from_usize(i);
- def.variant(FIRST_VARIANT).fields[i].ty(tcx, substs)
- }
- ty::Tuple(inner_tys) => inner_tys[i],
- _ => bug!("unexpected unsized type {:?}", ty),
- };
-
- let inner_layout =
- tcx.layout_of(ty::ParamEnv::empty().and(inner_ty)).unwrap();
- debug!(?inner_layout);
-
- let offset = place_adjusted.layout.fields.offset(i);
- place
- .offset_with_meta(
- offset,
- MemPlaceMeta::Meta(Scalar::from_target_usize(
- num_elems as u64,
- &tcx,
- )),
- inner_layout,
- &tcx,
- )
- .unwrap()
+ let place_inner = match ty.kind() {
+ ty::Str | ty::Slice(_) | ty::Array(..) => {
+ ecx.project_index(place, i as u64).unwrap()
}
- _ => ecx.mplace_field(&place_adjusted, i).unwrap(),
+ _ => ecx.project_field(&place_adjusted, i).unwrap(),
};
debug!(?place_inner);
- valtree_into_mplace(ecx, &mut place_inner, *inner_valtree);
- dump_place(&ecx, place_inner.into());
+ valtree_into_mplace(ecx, &place_inner, *inner_valtree);
+ dump_place(&ecx, &place_inner);
}
debug!("dump of place_adjusted:");
- dump_place(ecx, place_adjusted.into());
+ dump_place(ecx, &place_adjusted);
if let Some(variant_idx) = variant_idx {
// don't forget filling the place with the discriminant of the enum
- ecx.write_discriminant(variant_idx, &place.into()).unwrap();
+ ecx.write_discriminant(variant_idx, place).unwrap();
}
debug!("dump of place after writing discriminant:");
- dump_place(ecx, place.into());
+ dump_place(ecx, place);
}
_ => bug!("shouldn't have created a ValTree for {:?}", ty),
}
}
-fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: PlaceTy<'tcx>) {
- trace!("{:?}", ecx.dump_place(*place));
+fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: &MPlaceTy<'tcx>) {
+ trace!("{:?}", ecx.dump_place(Place::Ptr(**place)));
}
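
The net effect of this file's rewrite: instead of hand-computing sizes and allocating DSTs manually, the code derives the missing wide-pointer metadata from the valtree itself and lets `allocate_dyn` do the rest. A standalone sketch of that idea with toy types (the real code walks the *type's* tail via `struct_tail_with_normalize` rather than guessing from the tree shape):

```rust
#[derive(Debug)]
enum ValTree {
    Leaf(u128),
    Branch(Vec<ValTree>),
}

#[derive(Debug, PartialEq)]
enum MemPlaceMeta {
    None,      // sized value: no metadata needed
    Meta(u64), // unsized slice/str tail: element count
}

fn reconstruct_meta(is_sized: bool, valtree: &ValTree) -> MemPlaceMeta {
    if is_sized {
        return MemPlaceMeta::None;
    }
    // Descend along the last branch: that is where the unsized tail lives.
    let mut last = valtree;
    while let ValTree::Branch(b) = last {
        if b.iter().all(|v| matches!(v, ValTree::Leaf(_))) {
            // Reached the tail: its length is just the number of branches.
            return MemPlaceMeta::Meta(b.len() as u64);
        }
        last = b.last().expect("unsized tail implies a non-empty branch");
    }
    unreachable!("unsized tail of a valtree must be a slice or str")
}

fn main() {
    // A `(u8, [u16])`-like custom DST with a 3-element tail.
    let vt = ValTree::Branch(vec![
        ValTree::Leaf(1),
        ValTree::Branch(vec![ValTree::Leaf(2), ValTree::Leaf(3), ValTree::Leaf(4)]),
    ]);
    assert_eq!(reconstruct_meta(false, &vt), MemPlaceMeta::Meta(3));
}
```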
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index ca38cce71..4362cae7e 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -492,7 +492,7 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
InvalidMeta(InvalidMetaKind::SliceTooBig) => const_eval_invalid_meta_slice,
InvalidMeta(InvalidMetaKind::TooBig) => const_eval_invalid_meta,
UnterminatedCString(_) => const_eval_unterminated_c_string,
- PointerUseAfterFree(_) => const_eval_pointer_use_after_free,
+ PointerUseAfterFree(_, _) => const_eval_pointer_use_after_free,
PointerOutOfBounds { ptr_size: Size::ZERO, .. } => const_eval_zst_pointer_out_of_bounds,
PointerOutOfBounds { .. } => const_eval_pointer_out_of_bounds,
DanglingIntPointer(0, _) => const_eval_dangling_null_pointer,
@@ -511,8 +511,9 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
InvalidUninitBytes(Some(_)) => const_eval_invalid_uninit_bytes,
DeadLocal => const_eval_dead_local,
ScalarSizeMismatch(_) => const_eval_scalar_size_mismatch,
- UninhabitedEnumVariantWritten => const_eval_uninhabited_enum_variant_written,
- Validation(e) => e.diagnostic_message(),
+ UninhabitedEnumVariantWritten(_) => const_eval_uninhabited_enum_variant_written,
+ UninhabitedEnumVariantRead(_) => const_eval_uninhabited_enum_variant_read,
+ ValidationError(e) => e.diagnostic_message(),
Custom(x) => (x.msg)(),
}
}
@@ -535,7 +536,8 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
| InvalidMeta(InvalidMetaKind::TooBig)
| InvalidUninitBytes(None)
| DeadLocal
- | UninhabitedEnumVariantWritten => {}
+ | UninhabitedEnumVariantWritten(_)
+ | UninhabitedEnumVariantRead(_) => {}
BoundsCheckFailed { len, index } => {
builder.set_arg("len", len);
builder.set_arg("index", index);
@@ -543,8 +545,10 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
UnterminatedCString(ptr) | InvalidFunctionPointer(ptr) | InvalidVTablePointer(ptr) => {
builder.set_arg("pointer", ptr);
}
- PointerUseAfterFree(allocation) => {
- builder.set_arg("allocation", allocation);
+ PointerUseAfterFree(alloc_id, msg) => {
+ builder
+ .set_arg("alloc_id", alloc_id)
+ .set_arg("bad_pointer_message", bad_pointer_message(msg, handler));
}
PointerOutOfBounds { alloc_id, alloc_size, ptr_offset, ptr_size, msg } => {
builder
@@ -583,13 +587,13 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
InvalidUninitBytes(Some((alloc, info))) => {
builder.set_arg("alloc", alloc);
builder.set_arg("access", info.access);
- builder.set_arg("uninit", info.uninit);
+ builder.set_arg("uninit", info.bad);
}
ScalarSizeMismatch(info) => {
builder.set_arg("target_size", info.target_size);
builder.set_arg("data_size", info.data_size);
}
- Validation(e) => e.add_args(handler, builder),
+ ValidationError(e) => e.add_args(handler, builder),
Custom(custom) => {
(custom.add_args)(&mut |name, value| {
builder.set_arg(name, value);
@@ -604,73 +608,72 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
use crate::fluent_generated::*;
use rustc_middle::mir::interpret::ValidationErrorKind::*;
match self.kind {
- PtrToUninhabited { ptr_kind: PointerKind::Box, .. } => const_eval_box_to_uninhabited,
- PtrToUninhabited { ptr_kind: PointerKind::Ref, .. } => const_eval_ref_to_uninhabited,
-
- PtrToStatic { ptr_kind: PointerKind::Box } => const_eval_box_to_static,
- PtrToStatic { ptr_kind: PointerKind::Ref } => const_eval_ref_to_static,
-
- PtrToMut { ptr_kind: PointerKind::Box } => const_eval_box_to_mut,
- PtrToMut { ptr_kind: PointerKind::Ref } => const_eval_ref_to_mut,
-
- ExpectedNonPtr { .. } => const_eval_expected_non_ptr,
- MutableRefInConst => const_eval_mutable_ref_in_const,
- NullFnPtr => const_eval_null_fn_ptr,
- NeverVal => const_eval_never_val,
- NullablePtrOutOfRange { .. } => const_eval_nullable_ptr_out_of_range,
- PtrOutOfRange { .. } => const_eval_ptr_out_of_range,
- OutOfRange { .. } => const_eval_out_of_range,
- UnsafeCell => const_eval_unsafe_cell,
- UninhabitedVal { .. } => const_eval_uninhabited_val,
- InvalidEnumTag { .. } => const_eval_invalid_enum_tag,
- UninitEnumTag => const_eval_uninit_enum_tag,
- UninitStr => const_eval_uninit_str,
- Uninit { expected: ExpectedKind::Bool } => const_eval_uninit_bool,
- Uninit { expected: ExpectedKind::Reference } => const_eval_uninit_ref,
- Uninit { expected: ExpectedKind::Box } => const_eval_uninit_box,
- Uninit { expected: ExpectedKind::RawPtr } => const_eval_uninit_raw_ptr,
- Uninit { expected: ExpectedKind::InitScalar } => const_eval_uninit_init_scalar,
- Uninit { expected: ExpectedKind::Char } => const_eval_uninit_char,
- Uninit { expected: ExpectedKind::Float } => const_eval_uninit_float,
- Uninit { expected: ExpectedKind::Int } => const_eval_uninit_int,
- Uninit { expected: ExpectedKind::FnPtr } => const_eval_uninit_fn_ptr,
- UninitVal => const_eval_uninit,
- InvalidVTablePtr { .. } => const_eval_invalid_vtable_ptr,
+ PtrToUninhabited { ptr_kind: PointerKind::Box, .. } => {
+ const_eval_validation_box_to_uninhabited
+ }
+ PtrToUninhabited { ptr_kind: PointerKind::Ref, .. } => {
+ const_eval_validation_ref_to_uninhabited
+ }
+
+ PtrToStatic { ptr_kind: PointerKind::Box } => const_eval_validation_box_to_static,
+ PtrToStatic { ptr_kind: PointerKind::Ref } => const_eval_validation_ref_to_static,
+
+ PtrToMut { ptr_kind: PointerKind::Box } => const_eval_validation_box_to_mut,
+ PtrToMut { ptr_kind: PointerKind::Ref } => const_eval_validation_ref_to_mut,
+
+ PointerAsInt { .. } => const_eval_validation_pointer_as_int,
+ PartialPointer => const_eval_validation_partial_pointer,
+ MutableRefInConst => const_eval_validation_mutable_ref_in_const,
+ NullFnPtr => const_eval_validation_null_fn_ptr,
+ NeverVal => const_eval_validation_never_val,
+ NullablePtrOutOfRange { .. } => const_eval_validation_nullable_ptr_out_of_range,
+ PtrOutOfRange { .. } => const_eval_validation_ptr_out_of_range,
+ OutOfRange { .. } => const_eval_validation_out_of_range,
+ UnsafeCell => const_eval_validation_unsafe_cell,
+ UninhabitedVal { .. } => const_eval_validation_uninhabited_val,
+ InvalidEnumTag { .. } => const_eval_validation_invalid_enum_tag,
+ UninhabitedEnumVariant => const_eval_validation_uninhabited_enum_variant,
+ Uninit { .. } => const_eval_validation_uninit,
+ InvalidVTablePtr { .. } => const_eval_validation_invalid_vtable_ptr,
InvalidMetaSliceTooLarge { ptr_kind: PointerKind::Box } => {
- const_eval_invalid_box_slice_meta
+ const_eval_validation_invalid_box_slice_meta
}
InvalidMetaSliceTooLarge { ptr_kind: PointerKind::Ref } => {
- const_eval_invalid_ref_slice_meta
+ const_eval_validation_invalid_ref_slice_meta
}
- InvalidMetaTooLarge { ptr_kind: PointerKind::Box } => const_eval_invalid_box_meta,
- InvalidMetaTooLarge { ptr_kind: PointerKind::Ref } => const_eval_invalid_ref_meta,
- UnalignedPtr { ptr_kind: PointerKind::Ref, .. } => const_eval_unaligned_ref,
- UnalignedPtr { ptr_kind: PointerKind::Box, .. } => const_eval_unaligned_box,
+ InvalidMetaTooLarge { ptr_kind: PointerKind::Box } => {
+ const_eval_validation_invalid_box_meta
+ }
+ InvalidMetaTooLarge { ptr_kind: PointerKind::Ref } => {
+ const_eval_validation_invalid_ref_meta
+ }
+ UnalignedPtr { ptr_kind: PointerKind::Ref, .. } => const_eval_validation_unaligned_ref,
+ UnalignedPtr { ptr_kind: PointerKind::Box, .. } => const_eval_validation_unaligned_box,
- NullPtr { ptr_kind: PointerKind::Box } => const_eval_null_box,
- NullPtr { ptr_kind: PointerKind::Ref } => const_eval_null_ref,
+ NullPtr { ptr_kind: PointerKind::Box } => const_eval_validation_null_box,
+ NullPtr { ptr_kind: PointerKind::Ref } => const_eval_validation_null_ref,
DanglingPtrNoProvenance { ptr_kind: PointerKind::Box, .. } => {
- const_eval_dangling_box_no_provenance
+ const_eval_validation_dangling_box_no_provenance
}
DanglingPtrNoProvenance { ptr_kind: PointerKind::Ref, .. } => {
- const_eval_dangling_ref_no_provenance
+ const_eval_validation_dangling_ref_no_provenance
}
DanglingPtrOutOfBounds { ptr_kind: PointerKind::Box } => {
- const_eval_dangling_box_out_of_bounds
+ const_eval_validation_dangling_box_out_of_bounds
}
DanglingPtrOutOfBounds { ptr_kind: PointerKind::Ref } => {
- const_eval_dangling_ref_out_of_bounds
+ const_eval_validation_dangling_ref_out_of_bounds
}
DanglingPtrUseAfterFree { ptr_kind: PointerKind::Box } => {
- const_eval_dangling_box_use_after_free
+ const_eval_validation_dangling_box_use_after_free
}
DanglingPtrUseAfterFree { ptr_kind: PointerKind::Ref } => {
- const_eval_dangling_ref_use_after_free
+ const_eval_validation_dangling_ref_use_after_free
}
InvalidBool { .. } => const_eval_validation_invalid_bool,
InvalidChar { .. } => const_eval_validation_invalid_char,
- InvalidFnPtr { .. } => const_eval_invalid_fn_ptr,
+ InvalidFnPtr { .. } => const_eval_validation_invalid_fn_ptr,
}
}
@@ -678,13 +681,21 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
use crate::fluent_generated as fluent;
use rustc_middle::mir::interpret::ValidationErrorKind::*;
+ if let PointerAsInt { .. } | PartialPointer = self.kind {
+ err.help(fluent::const_eval_ptr_as_bytes_1);
+ err.help(fluent::const_eval_ptr_as_bytes_2);
+ }
+
let message = if let Some(path) = self.path {
handler.eagerly_translate_to_string(
- fluent::const_eval_invalid_value_with_path,
+ fluent::const_eval_validation_front_matter_invalid_value_with_path,
[("path".into(), DiagnosticArgValue::Str(path.into()))].iter().map(|(a, b)| (a, b)),
)
} else {
- handler.eagerly_translate_to_string(fluent::const_eval_invalid_value, [].into_iter())
+ handler.eagerly_translate_to_string(
+ fluent::const_eval_validation_front_matter_invalid_value,
+ [].into_iter(),
+ )
};
err.set_arg("front_matter", message);
@@ -724,8 +735,24 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
PtrToUninhabited { ty, .. } | UninhabitedVal { ty } => {
err.set_arg("ty", ty);
}
- ExpectedNonPtr { value }
- | InvalidEnumTag { value }
+ PointerAsInt { expected } | Uninit { expected } => {
+ let msg = match expected {
+ ExpectedKind::Reference => fluent::const_eval_validation_expected_ref,
+ ExpectedKind::Box => fluent::const_eval_validation_expected_box,
+ ExpectedKind::RawPtr => fluent::const_eval_validation_expected_raw_ptr,
+ ExpectedKind::InitScalar => fluent::const_eval_validation_expected_init_scalar,
+ ExpectedKind::Bool => fluent::const_eval_validation_expected_bool,
+ ExpectedKind::Char => fluent::const_eval_validation_expected_char,
+ ExpectedKind::Float => fluent::const_eval_validation_expected_float,
+ ExpectedKind::Int => fluent::const_eval_validation_expected_int,
+ ExpectedKind::FnPtr => fluent::const_eval_validation_expected_fn_ptr,
+ ExpectedKind::EnumTag => fluent::const_eval_validation_expected_enum_tag,
+ ExpectedKind::Str => fluent::const_eval_validation_expected_str,
+ };
+ let msg = handler.eagerly_translate_to_string(msg, [].into_iter());
+ err.set_arg("expected", msg);
+ }
+ InvalidEnumTag { value }
| InvalidVTablePtr { value }
| InvalidBool { value }
| InvalidChar { value }
@@ -753,14 +780,12 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
| NullFnPtr
| NeverVal
| UnsafeCell
- | UninitEnumTag
- | UninitStr
- | Uninit { .. }
- | UninitVal
| InvalidMetaSliceTooLarge { .. }
| InvalidMetaTooLarge { .. }
| DanglingPtrUseAfterFree { .. }
- | DanglingPtrOutOfBounds { .. } => {}
+ | DanglingPtrOutOfBounds { .. }
+ | UninhabitedEnumVariant
+ | PartialPointer => {}
}
}
}
@@ -770,9 +795,9 @@ impl ReportErrorExt for UnsupportedOpInfo {
use crate::fluent_generated::*;
match self {
UnsupportedOpInfo::Unsupported(s) => s.clone().into(),
- UnsupportedOpInfo::PartialPointerOverwrite(_) => const_eval_partial_pointer_overwrite,
- UnsupportedOpInfo::PartialPointerCopy(_) => const_eval_partial_pointer_copy,
- UnsupportedOpInfo::ReadPointerAsBytes => const_eval_read_pointer_as_bytes,
+ UnsupportedOpInfo::OverwritePartialPointer(_) => const_eval_partial_pointer_overwrite,
+ UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_copy,
+ UnsupportedOpInfo::ReadPointerAsInt(_) => const_eval_read_pointer_as_int,
UnsupportedOpInfo::ThreadLocalStatic(_) => const_eval_thread_local_static,
UnsupportedOpInfo::ReadExternStatic(_) => const_eval_read_extern_static,
}
@@ -781,13 +806,16 @@ impl ReportErrorExt for UnsupportedOpInfo {
use crate::fluent_generated::*;
use UnsupportedOpInfo::*;
- if let ReadPointerAsBytes | PartialPointerOverwrite(_) | PartialPointerCopy(_) = self {
+ if let ReadPointerAsInt(_) | OverwritePartialPointer(_) | ReadPartialPointer(_) = self {
builder.help(const_eval_ptr_as_bytes_1);
builder.help(const_eval_ptr_as_bytes_2);
}
match self {
- Unsupported(_) | ReadPointerAsBytes => {}
- PartialPointerOverwrite(ptr) | PartialPointerCopy(ptr) => {
+ // `ReadPointerAsInt(Some(info))` is never printed anyway, it only serves as an error to
+ // be further processed by validity checking which then turns it into something nice to
+ // print. So it's not worth the effort of having diagnostics that can print the `info`.
+ Unsupported(_) | ReadPointerAsInt(_) => {}
+ OverwritePartialPointer(ptr) | ReadPartialPointer(ptr) => {
builder.set_arg("ptr", ptr);
}
ThreadLocalStatic(did) | ReadExternStatic(did) => {
@@ -834,8 +862,9 @@ impl<'tcx> ReportErrorExt for InvalidProgramInfo<'tcx> {
InvalidProgramInfo::FnAbiAdjustForForeignAbi(_) => {
rustc_middle::error::middle_adjust_for_foreign_abi_error
}
- InvalidProgramInfo::SizeOfUnsizedType(_) => const_eval_size_of_unsized,
- InvalidProgramInfo::UninitUnsizedLocal => const_eval_uninit_unsized_local,
+ InvalidProgramInfo::ConstPropNonsense => {
+ panic!("We had const-prop nonsense, this should never be printed")
+ }
}
}
fn add_args<G: EmissionGuarantee>(
@@ -846,7 +875,7 @@ impl<'tcx> ReportErrorExt for InvalidProgramInfo<'tcx> {
match self {
InvalidProgramInfo::TooGeneric
| InvalidProgramInfo::AlreadyReported(_)
- | InvalidProgramInfo::UninitUnsizedLocal => {}
+ | InvalidProgramInfo::ConstPropNonsense => {}
InvalidProgramInfo::Layout(e) => {
let diag: DiagnosticBuilder<'_, ()> = e.into_diagnostic().into_diagnostic(handler);
for (name, val) in diag.args() {
@@ -860,9 +889,6 @@ impl<'tcx> ReportErrorExt for InvalidProgramInfo<'tcx> {
builder.set_arg("arch", arch);
builder.set_arg("abi", abi.name());
}
- InvalidProgramInfo::SizeOfUnsizedType(ty) => {
- builder.set_arg("ty", ty);
- }
}
}
}
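
Most of this hunk is mechanical renaming of fluent slugs under a `const_eval_validation_` prefix; the one structural change is that `PointerAsInt` and `Uninit` now share a single message parameterized by `ExpectedKind`. A toy version of that shared lookup (plain strings standing in for the fluent messages the real code translates):

```rust
// Stand-in for the shared `expected` argument: one lookup used by both
// the `PointerAsInt` and `Uninit` error paths.
#[derive(Debug, Clone, Copy)]
enum ExpectedKind {
    Bool,
    Char,
    Int,
    Float,
    RawPtr,
}

fn expected_message(kind: ExpectedKind) -> &'static str {
    match kind {
        ExpectedKind::Bool => "a boolean",
        ExpectedKind::Char => "a valid unicode scalar value",
        ExpectedKind::Int => "an integer",
        ExpectedKind::Float => "a floating point number",
        ExpectedKind::RawPtr => "a raw pointer",
    }
}

fn main() {
    println!("expected {}", expected_message(ExpectedKind::Char));
}
```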
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 83a072d6f..98e853dc4 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -56,7 +56,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
- let src = self.read_immediate(&src)?;
+ let src = self.read_immediate(src)?;
let res = self.ptr_to_ptr(&src, cast_ty)?;
self.write_immediate(res, dest)?;
}
@@ -75,12 +75,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// The src operand does not matter, just its type
match *src.layout.ty.kind() {
- ty::FnDef(def_id, substs) => {
+ ty::FnDef(def_id, args) => {
let instance = ty::Instance::resolve_for_fn_ptr(
*self.tcx,
self.param_env,
def_id,
- substs,
+ args,
)
.ok_or_else(|| err_inval!(TooGeneric))?;
@@ -108,11 +108,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// The src operand does not matter, just its type
match *src.layout.ty.kind() {
- ty::Closure(def_id, substs) => {
+ ty::Closure(def_id, args) => {
let instance = ty::Instance::resolve_closure(
*self.tcx,
def_id,
- substs,
+ args,
ty::ClosureKind::FnOnce,
)
.ok_or_else(|| err_inval!(TooGeneric))?;
@@ -420,8 +420,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if cast_ty_field.is_zst() {
continue;
}
- let src_field = self.operand_field(src, i)?;
- let dst_field = self.place_field(dest, i)?;
+ let src_field = self.project_field(src, i)?;
+ let dst_field = self.project_field(dest, i)?;
if src_field.layout.ty == cast_ty_field.ty {
self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
} else {
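
For context on that last hunk: unsizing a struct copies every field verbatim except the single field whose type changes, which is unsized recursively. A toy model of the loop (not the interpreter's types; ZST skipping and recursion are elided):

```rust
#[derive(Debug, Clone, PartialEq)]
enum Field {
    Plain(i32),       // type unchanged between src and dest
    Sized(Vec<i32>),  // the field whose type differs before the cast
    Unsized(Vec<i32>),// ... and after the cast
}

fn unsize_struct(src: &[Field]) -> Vec<Field> {
    src.iter()
        .map(|f| match f {
            // Type unchanged: plain copy.
            Field::Plain(x) => Field::Plain(*x),
            // The one field whose type differs: perform the unsizing cast.
            Field::Sized(v) | Field::Unsized(v) => Field::Unsized(v.clone()),
        })
        .collect()
}

fn main() {
    let out = unsize_struct(&[Field::Plain(7), Field::Sized(vec![1, 2, 3])]);
    assert_eq!(out[1], Field::Unsized(vec![1, 2, 3]));
}
```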
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index 015a9beab..6c35fb01a 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -1,11 +1,11 @@
//! Functions for reading and writing discriminants of multi-variant layouts (enums and generators).
-use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_middle::{mir, ty};
use rustc_target::abi::{self, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
-use super::{ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Scalar};
+use super::{ImmTy, InterpCx, InterpResult, Machine, Readable, Scalar, Writeable};
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Writes the discriminant of the given variant.
@@ -13,7 +13,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn write_discriminant(
&mut self,
variant_index: VariantIdx,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
// Layout computation excludes uninhabited variants from consideration
// therefore there's no way to represent those variants in the given layout.
@@ -21,11 +21,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// discriminant, so we cannot do anything here.
// When evaluating we will always error before even getting here, but ConstProp 'executes'
// dead code, so we cannot ICE here.
- if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
- throw_ub!(UninhabitedEnumVariantWritten)
+ if dest.layout().for_variant(self, variant_index).abi.is_uninhabited() {
+ throw_ub!(UninhabitedEnumVariantWritten(variant_index))
}
- match dest.layout.variants {
+ match dest.layout().variants {
abi::Variants::Single { index } => {
assert_eq!(index, variant_index);
}
@@ -38,8 +38,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// No need to validate that the discriminant here because the
// `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
- let discr_val =
- dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
+ let discr_val = dest
+ .layout()
+ .ty
+ .discriminant_for_variant(*self.tcx, variant_index)
+ .unwrap()
+ .val;
// raw discriminants for enums are isize or bigger during
// their computation, but the in-memory tag is the smallest possible
@@ -47,7 +51,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let size = tag_layout.size(self);
let tag_val = size.truncate(discr_val);
- let tag_dest = self.place_field(dest, tag_field)?;
+ let tag_dest = self.project_field(dest, tag_field)?;
self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
}
abi::Variants::Multiple {
@@ -78,7 +82,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&niche_start_val,
)?;
// Write result.
- let niche_dest = self.place_field(dest, tag_field)?;
+ let niche_dest = self.project_field(dest, tag_field)?;
self.write_immediate(*tag_val, &niche_dest)?;
}
}
@@ -92,11 +96,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
#[instrument(skip(self), level = "trace")]
pub fn read_discriminant(
&self,
- op: &OpTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
- trace!("read_discriminant_value {:#?}", op.layout);
+ op: &impl Readable<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, VariantIdx> {
+ let ty = op.layout().ty;
+ trace!("read_discriminant_value {:#?}", op.layout());
// Get type and layout of the discriminant.
- let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
+ let discr_layout = self.layout_of(ty.discriminant_ty(*self.tcx))?;
trace!("discriminant type: {:?}", discr_layout.ty);
// We use "discriminant" to refer to the value associated with a particular enum variant.
@@ -104,21 +109,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// declared list of variants -- they can differ with explicitly assigned discriminants.
// We use "tag" to refer to how the discriminant is encoded in memory, which can be either
// straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
- let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
+ let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout().variants {
Variants::Single { index } => {
- let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
- Some(discr) => {
- // This type actually has discriminants.
- assert_eq!(discr.ty, discr_layout.ty);
- Scalar::from_uint(discr.val, discr_layout.size)
+ // Do some extra checks on enums.
+ if ty.is_enum() {
+ // Hilariously, `Single` is used even for 0-variant enums.
+ // (See https://github.com/rust-lang/rust/issues/89765).
+ if matches!(ty.kind(), ty::Adt(def, ..) if def.variants().is_empty()) {
+ throw_ub!(UninhabitedEnumVariantRead(index))
}
- None => {
- // On a type without actual discriminants, variant is 0.
- assert_eq!(index.as_u32(), 0);
- Scalar::from_uint(index.as_u32(), discr_layout.size)
+ // For consistency with `write_discriminant`, and to make sure that
+ // `project_downcast` cannot fail due to strange layouts, we declare immediate UB
+ // for uninhabited variants.
+ if op.layout().for_variant(self, index).abi.is_uninhabited() {
+ throw_ub!(UninhabitedEnumVariantRead(index))
}
- };
- return Ok((discr, index));
+ }
+ return Ok(index);
}
Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
(tag, tag_encoding, tag_field)
@@ -138,13 +145,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
// Read tag and sanity-check `tag_layout`.
- let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
+ let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
assert_eq!(tag_layout.size, tag_val.layout.size);
assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
trace!("tag value: {}", tag_val);
// Figure out which discriminant and variant this corresponds to.
- Ok(match *tag_encoding {
+ let index = match *tag_encoding {
TagEncoding::Direct => {
let scalar = tag_val.to_scalar();
// Generate a specific error if `tag_val` is not an integer.
@@ -160,21 +167,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
let discr_bits = discr_val.assert_bits(discr_layout.size);
// Convert discriminant to variant index, and catch invalid discriminants.
- let index = match *op.layout.ty.kind() {
+ let index = match *ty.kind() {
ty::Adt(adt, _) => {
adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
}
- ty::Generator(def_id, substs, _) => {
- let substs = substs.as_generator();
- substs
- .discriminants(def_id, *self.tcx)
- .find(|(_, var)| var.val == discr_bits)
+ ty::Generator(def_id, args, _) => {
+ let args = args.as_generator();
+ args.discriminants(def_id, *self.tcx).find(|(_, var)| var.val == discr_bits)
}
_ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
}
.ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
// Return the cast value, and the index.
- (discr_val, index.0)
+ index.0
}
TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
let tag_val = tag_val.to_scalar();
@@ -216,12 +221,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.checked_add(variant_index_relative)
.expect("overflow computing absolute variant idx"),
);
- let variants = op
- .layout
- .ty
- .ty_adt_def()
- .expect("tagged layout for non adt")
- .variants();
+ let variants =
+ ty.ty_adt_def().expect("tagged layout for non adt").variants();
assert!(variant_index < variants.next_index());
variant_index
} else {
@@ -232,7 +233,32 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Compute the size of the scalar we need to return.
// No need to cast, because the variant index directly serves as discriminant and is
// encoded in the tag.
- (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
+ variant
+ }
+ };
+ // For consistency with `write_discriminant`, and to make sure that `project_downcast`
+ // cannot fail due to strange layouts, we declare immediate UB for uninhabited variants.
+ if op.layout().for_variant(self, index).abi.is_uninhabited() {
+ throw_ub!(UninhabitedEnumVariantRead(index))
+ }
+ Ok(index)
+ }
+
+ pub fn discriminant_for_variant(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ let discr_layout = self.layout_of(layout.ty.discriminant_ty(*self.tcx))?;
+ Ok(match layout.ty.discriminant_for_variant(*self.tcx, variant) {
+ Some(discr) => {
+ // This type actually has discriminants.
+ assert_eq!(discr.ty, discr_layout.ty);
+ Scalar::from_uint(discr.val, discr_layout.size)
+ }
+ None => {
+ // On a type without actual discriminants, variant is 0.
+ assert_eq!(variant.as_u32(), 0);
+ Scalar::from_uint(variant.as_u32(), discr_layout.size)
}
})
}
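
The signature change is the headline here: `read_discriminant` used to return `(Scalar, VariantIdx)`; it now returns only the index, and the new `discriminant_for_variant` recomputes the value on demand. A minimal standalone model of that split (toy types, not the interpreter's):

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
struct VariantIdx(u32);

struct Enum {
    discriminants: Vec<u128>,
}

impl Enum {
    // Formerly returned the (value, index) pair; now just the index.
    fn read_discriminant(&self, tag: u128) -> Option<VariantIdx> {
        self.discriminants
            .iter()
            .position(|&d| d == tag)
            .map(|i| VariantIdx(i as u32))
    }

    // The discriminant *value* is recomputed on demand from the index.
    fn discriminant_for_variant(&self, v: VariantIdx) -> u128 {
        self.discriminants[v.0 as usize]
    }
}

fn main() {
    let e = Enum { discriminants: vec![10, 20, 30] };
    let idx = e.read_discriminant(20).unwrap();
    assert_eq!(idx, VariantIdx(1));
    assert_eq!(e.discriminant_for_variant(idx), 20);
}
```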
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 36606ff69..3ac6f07e8 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -13,7 +13,7 @@ use rustc_middle::ty::layout::{
self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
TyAndLayout,
};
-use rustc_middle::ty::{self, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable};
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_session::Limit;
use rustc_span::Span;
@@ -91,7 +91,7 @@ pub struct Frame<'mir, 'tcx, Prov: Provenance = AllocId, Extra = ()> {
/// The MIR for the function called on this frame.
pub body: &'mir mir::Body<'tcx>,
- /// The def_id and substs of the current function.
+ /// The def_id and args of the current function.
pub instance: ty::Instance<'tcx>,
/// Extra data for the machine.
@@ -529,16 +529,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.map_err(|_| err_inval!(TooGeneric))
}
- /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
+ /// The `args` are assumed to already be in our interpreter "universe" (param_env).
pub(super) fn resolve(
&self,
def: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> InterpResult<'tcx, ty::Instance<'tcx>> {
- trace!("resolve: {:?}, {:#?}", def, substs);
+ trace!("resolve: {:?}, {:#?}", def, args);
trace!("param_env: {:#?}", self.param_env);
- trace!("substs: {:#?}", substs);
- match ty::Instance::resolve(*self.tcx, self.param_env, def, substs) {
+ trace!("args: {:#?}", args);
+ match ty::Instance::resolve(*self.tcx, self.param_env, def, args) {
Ok(Some(instance)) => Ok(instance),
Ok(None) => throw_inval!(TooGeneric),
@@ -604,7 +604,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// the last field). Can't have foreign types here, how would we
// adjust alignment and size for them?
let field = layout.field(self, layout.fields.count() - 1);
- let Some((unsized_size, mut unsized_align)) = self.size_and_align_of(metadata, &field)? else {
+ let Some((unsized_size, mut unsized_align)) =
+ self.size_and_align_of(metadata, &field)?
+ else {
// A field with an extern type. We don't know the actual dynamic size
// or the alignment.
return Ok(None);
@@ -682,11 +684,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return_to_block: StackPopCleanup,
) -> InterpResult<'tcx> {
trace!("body: {:#?}", body);
- // Clobber previous return place contents, nobody is supposed to be able to see them any more
- // This also checks dereferenceable, but not align. We rely on all constructed places being
- // sufficiently aligned (in particular we rely on `deref_operand` checking alignment).
- self.write_uninit(return_place)?;
- // first push a stack frame so we have access to the local substs
+ // First push a stack frame so we have access to the local args
let pre_frame = Frame {
body,
loc: Right(body.span), // Span used for errors caused during preamble.
@@ -805,6 +803,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
throw_ub_custom!(fluent::const_eval_unwind_past_top);
}
+ M::before_stack_pop(self, self.frame())?;
+
// Copy return value. Must of course happen *before* we deallocate the locals.
let copy_ret_result = if !unwinding {
let op = self
@@ -958,7 +958,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} else {
self.param_env
};
- let param_env = param_env.with_const();
let val = self.ctfe_query(span, |tcx| tcx.eval_to_allocation_raw(param_env.and(gid)))?;
self.raw_const_to_mplace(val)
}
@@ -1014,9 +1013,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
{
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.place {
- Place::Local { frame, local } => {
+ Place::Local { frame, local, offset } => {
let mut allocs = Vec::new();
- write!(fmt, "{:?}", local)?;
+ write!(fmt, "{local:?}")?;
+ if let Some(offset) = offset {
+ write!(fmt, "+{:#x}", offset.bytes())?;
+ }
if frame != self.ecx.frame_idx() {
write!(fmt, " ({} frames up)", self.ecx.frame_idx() - frame)?;
}
@@ -1032,7 +1034,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
fmt,
" by {} ref {:?}:",
match mplace.meta {
- MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
+ MemPlaceMeta::Meta(meta) => format!(" meta({meta:?})"),
MemPlaceMeta::None => String::new(),
},
mplace.ptr,
@@ -1040,13 +1042,13 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
}
LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
- write!(fmt, " {:?}", val)?;
+ write!(fmt, " {val:?}")?;
if let Scalar::Ptr(ptr, _size) = val {
allocs.push(ptr.provenance.get_alloc_id());
}
}
LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
- write!(fmt, " ({:?}, {:?})", val1, val2)?;
+ write!(fmt, " ({val1:?}, {val2:?})")?;
if let Scalar::Ptr(ptr, _size) = val1 {
allocs.push(ptr.provenance.get_alloc_id());
}
@@ -1062,7 +1064,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
Some(alloc_id) => {
write!(fmt, "by ref {:?}: {:?}", mplace.ptr, self.ecx.dump_alloc(alloc_id))
}
- ptr => write!(fmt, " integral by ref: {:?}", ptr),
+ ptr => write!(fmt, " integral by ref: {ptr:?}"),
},
}
}
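Several hunks in this file (and throughout the diff) only modernize format strings to the captured-identifier form, including the new `+{:#x}` offset rendering for `Place::Local`. A minimal standalone illustration that the two spellings are equivalent:

fn main() {
    let local = 3usize;
    let offset = 0x10u64;
    // Old style: positional arguments after the format string.
    let a = format!("{:?}+{:#x}", local, offset);
    // New style: identifiers captured directly inside the braces.
    let b = format!("{local:?}+{offset:#x}");
    assert_eq!(a, b);
    assert_eq!(b, "3+0x10");
}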
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 7b11ad330..910c3ca5d 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -30,7 +30,7 @@ use super::{
use crate::const_eval;
use crate::errors::{DanglingPtrInFinal, UnsupportedUntypedPointer};
-pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
+pub trait CompileTimeMachine<'mir, 'tcx: 'mir, T> = Machine<
'mir,
'tcx,
MemoryKind = T,
@@ -164,82 +164,13 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
&self.ecx
}
- fn visit_aggregate(
- &mut self,
- mplace: &MPlaceTy<'tcx>,
- fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
- ) -> InterpResult<'tcx> {
- // We want to walk the aggregate to look for references to intern. While doing that we
- // also need to take special care of interior mutability.
- //
- // As an optimization, however, if the allocation does not contain any references: we don't
- // need to do the walk. It can be costly for big arrays for example (e.g. issue #93215).
- let is_walk_needed = |mplace: &MPlaceTy<'tcx>| -> InterpResult<'tcx, bool> {
- // ZSTs cannot contain pointers, we can avoid the interning walk.
- if mplace.layout.is_zst() {
- return Ok(false);
- }
-
- // Now, check whether this allocation could contain references.
- //
- // Note, this check may sometimes not be cheap, so we only do it when the walk we'd like
- // to avoid could be expensive: on the potentially larger types, arrays and slices,
- // rather than on all aggregates unconditionally.
- if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
- let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
- // We do the walk if we can't determine the size of the mplace: we may be
- // dealing with extern types here in the future.
- return Ok(true);
- };
-
- // If there is no provenance in this allocation, it does not contain references
- // that point to another allocation, and we can avoid the interning walk.
- if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
- if !alloc.has_provenance() {
- return Ok(false);
- }
- } else {
- // We're encountering a ZST here, and can avoid the walk as well.
- return Ok(false);
- }
- }
-
- // In the general case, we do the walk.
- Ok(true)
- };
-
- // If this allocation contains no references to intern, we avoid the potentially costly
- // walk.
- //
- // We can do this before the checks for interior mutability below, because only references
- // are relevant in that situation, and we're checking if there are any here.
- if !is_walk_needed(mplace)? {
- return Ok(());
- }
-
- if let Some(def) = mplace.layout.ty.ty_adt_def() {
- if def.is_unsafe_cell() {
- // We are crossing over an `UnsafeCell`, we can mutate again. This means that
- // References we encounter inside here are interned as pointing to mutable
- // allocations.
- // Remember the `old` value to handle nested `UnsafeCell`.
- let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
- let walked = self.walk_aggregate(mplace, fields);
- self.inside_unsafe_cell = old;
- return walked;
- }
- }
-
- self.walk_aggregate(mplace, fields)
- }
-
fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
// Handle Reference types, as these are the only types with provenance supported by const eval.
// Raw pointers (and boxes) are handled by the `leftover_allocations` logic.
let tcx = self.ecx.tcx;
let ty = mplace.layout.ty;
if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() {
- let value = self.ecx.read_immediate(&mplace.into())?;
+ let value = self.ecx.read_immediate(mplace)?;
let mplace = self.ecx.ref_to_mplace(&value)?;
assert_eq!(mplace.layout.ty, referenced_ty);
// Handle trait object vtables.
@@ -315,7 +246,63 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
}
Ok(())
} else {
- // Not a reference -- proceed recursively.
+ // Not a reference. Check if we want to recurse.
+ let is_walk_needed = |mplace: &MPlaceTy<'tcx>| -> InterpResult<'tcx, bool> {
+ // ZSTs cannot contain pointers, we can avoid the interning walk.
+ if mplace.layout.is_zst() {
+ return Ok(false);
+ }
+
+ // Now, check whether this allocation could contain references.
+ //
+ // Note that this check may sometimes not be cheap, so we only do it when the walk we'd like
+ // to avoid could be expensive: on the potentially larger types, arrays and slices,
+ // rather than on all aggregates unconditionally.
+ if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
+ let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
+ // We do the walk if we can't determine the size of the mplace: we may be
+ // dealing with extern types here in the future.
+ return Ok(true);
+ };
+
+ // If there is no provenance in this allocation, it does not contain references
+ // that point to another allocation, and we can avoid the interning walk.
+ if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
+ if !alloc.has_provenance() {
+ return Ok(false);
+ }
+ } else {
+ // We're encountering a ZST here, and can avoid the walk as well.
+ return Ok(false);
+ }
+ }
+
+ // In the general case, we do the walk.
+ Ok(true)
+ };
+
+ // If this allocation contains no references to intern, we avoid the potentially costly
+ // walk.
+ //
+ // We can do this before the checks for interior mutability below, because only references
+ // are relevant in that situation, and we're checking if there are any here.
+ if !is_walk_needed(mplace)? {
+ return Ok(());
+ }
+
+ if let Some(def) = mplace.layout.ty.ty_adt_def() {
+ if def.is_unsafe_cell() {
+ // We are crossing over an `UnsafeCell`, so we can mutate again. This means that
+ // references we encounter inside here are interned as pointing to mutable
+ // allocations.
+ // Remember the `old` value to handle nested `UnsafeCell`.
+ let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
+ let walked = self.walk_value(mplace);
+ self.inside_unsafe_cell = old;
+ return walked;
+ }
+ }
+
self.walk_value(mplace)
}
}
@@ -371,7 +358,7 @@ pub fn intern_const_alloc_recursive<
Some(ret.layout.ty),
);
- ref_tracking.track((*ret, base_intern_mode), || ());
+ ref_tracking.track((ret.clone(), base_intern_mode), || ());
while let Some(((mplace, mode), _)) = ref_tracking.todo.pop() {
let res = InternVisitor {
@@ -477,7 +464,7 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
) -> InterpResult<'tcx, ()>,
) -> InterpResult<'tcx, ConstAllocation<'tcx>> {
let dest = self.allocate(layout, MemoryKind::Stack)?;
- f(self, &dest.into())?;
+ f(self, &dest.clone().into())?;
let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
alloc.mutability = Mutability::Not;
Ok(self.tcx.mk_const_alloc(alloc))
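The `is_walk_needed` fast path that moved into `visit_value` boils down to two cheap checks: ZSTs never contain pointers, and an array or slice whose backing allocation carries no provenance cannot contain references either. A toy model of that decision (hand-rolled `Alloc`, not an interpreter type):

struct Alloc {
    bytes: Vec<u8>,
    has_provenance: bool,
}

fn needs_walk(a: &Alloc) -> bool {
    // ZSTs cannot contain pointers.
    if a.bytes.is_empty() {
        return false;
    }
    // No provenance means no references into other allocations.
    a.has_provenance
}

fn main() {
    assert!(!needs_walk(&Alloc { bytes: vec![], has_provenance: false }));
    assert!(!needs_walk(&Alloc { bytes: vec![0; 1024], has_provenance: false }));
    assert!(needs_walk(&Alloc { bytes: vec![0xff; 8], has_provenance: true }));
}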
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index ed64a7655..f22cd919c 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -1,4 +1,4 @@
-//! Intrinsics and other functions that the miri engine executes without
+//! Intrinsics and other functions that the interpreter executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and miri.
@@ -12,7 +12,7 @@ use rustc_middle::mir::{
};
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, Align, Primitive, Size};
@@ -56,9 +56,9 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
- let tp_ty = substs.type_at(0);
+ let tp_ty = args.type_at(0);
let name = tcx.item_name(def_id);
Ok(match name {
sym::type_name => {
@@ -123,7 +123,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
dest: &PlaceTy<'tcx, M::Provenance>,
ret: Option<mir::BasicBlock>,
) -> InterpResult<'tcx, bool> {
- let substs = instance.substs;
+ let instance_args = instance.args;
let intrinsic_name = self.tcx.item_name(instance.def_id());
// First handle intrinsics without return place.
@@ -144,7 +144,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
sym::min_align_of_val | sym::size_of_val => {
- // Avoid `deref_operand` -- this is not a deref, the ptr does not have to be
+ // Avoid `deref_pointer` -- this is not a deref, the ptr does not have to be
// dereferenceable!
let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
let (size, align) = self
@@ -187,7 +187,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
| sym::ctlz_nonzero
| sym::bswap
| sym::bitreverse => {
- let ty = substs.type_at(0);
+ let ty = instance_args.type_at(0);
let layout_of = self.layout_of(ty)?;
let val = self.read_scalar(&args[0])?;
let bits = val.to_bits(layout_of.size)?;
@@ -225,9 +225,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.write_scalar(val, dest)?;
}
sym::discriminant_value => {
- let place = self.deref_operand(&args[0])?;
- let discr_val = self.read_discriminant(&place.into())?.0;
- self.write_scalar(discr_val, dest)?;
+ let place = self.deref_pointer(&args[0])?;
+ let variant = self.read_discriminant(&place)?;
+ let discr = self.discriminant_for_variant(place.layout, variant)?;
+ self.write_scalar(discr, dest)?;
}
sym::exact_div => {
let l = self.read_immediate(&args[0])?;
@@ -237,7 +238,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::rotate_left | sym::rotate_right => {
// rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
// rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
- let layout = self.layout_of(substs.type_at(0))?;
+ let layout = self.layout_of(instance_args.type_at(0))?;
let val = self.read_scalar(&args[0])?;
let val_bits = val.to_bits(layout.size)?;
let raw_shift = self.read_scalar(&args[1])?;
@@ -260,10 +261,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::write_bytes => {
self.write_bytes_intrinsic(&args[0], &args[1], &args[2])?;
}
+ sym::compare_bytes => {
+ let result = self.compare_bytes_intrinsic(&args[0], &args[1], &args[2])?;
+ self.write_scalar(result, dest)?;
+ }
sym::arith_offset => {
let ptr = self.read_pointer(&args[0])?;
let offset_count = self.read_target_isize(&args[1])?;
- let pointee_ty = substs.type_at(0);
+ let pointee_ty = instance_args.type_at(0);
let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
let offset_bytes = offset_count.wrapping_mul(pointee_size);
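The rotate formulas in the comment above can be checked against the standard library. One subtlety the modular notation hides: the shift amount must be reduced mod the bit width first, otherwise `BW - S` underflows for `S > BW`. A quick standalone check for `u32`:

fn main() {
    const BW: u32 = u32::BITS; // 32
    let rotl = |x: u32, s: u32| {
        let s = s % BW; // reduce first so that BW - s cannot underflow
        (x << s) | (x >> ((BW - s) % BW))
    };
    for (x, s) in [(0x8000_0001u32, 1), (0xdead_beef, 13), (42, 0), (42, 32)] {
        assert_eq!(rotl(x, s), x.rotate_left(s));
    }
}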
@@ -368,7 +373,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max());
isize_layout
};
- let pointee_layout = self.layout_of(substs.type_at(0))?;
+ let pointee_layout = self.layout_of(instance_args.type_at(0))?;
// If ret_layout is unsigned, we checked that so is the distance, so we are good.
let val = ImmTy::from_int(dist, ret_layout);
let size = ImmTy::from_int(pointee_layout.size.bytes(), ret_layout);
@@ -378,7 +383,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::assert_inhabited
| sym::assert_zero_valid
| sym::assert_mem_uninitialized_valid => {
- let ty = instance.substs.type_at(0);
+ let ty = instance.args.type_at(0);
let requirement = ValidityRequirement::from_intrinsic(intrinsic_name).unwrap();
let should_panic = !self
@@ -393,17 +398,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// For *all* intrinsics we first check `is_uninhabited` to give a more specific
// error message.
_ if layout.abi.is_uninhabited() => format!(
- "aborted execution: attempted to instantiate uninhabited type `{}`",
- ty
+ "aborted execution: attempted to instantiate uninhabited type `{ty}`"
),
ValidityRequirement::Inhabited => bug!("handled earlier"),
ValidityRequirement::Zero => format!(
- "aborted execution: attempted to zero-initialize type `{}`, which is invalid",
- ty
+ "aborted execution: attempted to zero-initialize type `{ty}`, which is invalid"
),
ValidityRequirement::UninitMitigated0x01Fill => format!(
- "aborted execution: attempted to leave type `{}` uninitialized, which is invalid",
- ty
+ "aborted execution: attempted to leave type `{ty}` uninitialized, which is invalid"
),
ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
};
@@ -419,19 +421,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_eq!(input_len, dest_len, "Return vector length must match input length");
assert!(
index < dest_len,
- "Index `{}` must be in bounds of vector with length {}",
- index,
- dest_len
+ "Index `{index}` must be in bounds of vector with length {dest_len}"
);
for i in 0..dest_len {
- let place = self.mplace_index(&dest, i)?;
+ let place = self.project_index(&dest, i)?;
let value = if i == index {
elem.clone()
} else {
- self.mplace_index(&input, i)?.into()
+ self.project_index(&input, i)?.into()
};
- self.copy_op(&value, &place.into(), /*allow_transmute*/ false)?;
+ self.copy_op(&value, &place, /*allow_transmute*/ false)?;
}
}
sym::simd_extract => {
@@ -439,12 +439,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (input, input_len) = self.operand_to_simd(&args[0])?;
assert!(
index < input_len,
- "index `{}` must be in bounds of vector with length {}",
- index,
- input_len
+ "index `{index}` must be in bounds of vector with length {input_len}"
);
self.copy_op(
- &self.mplace_index(&input, index)?.into(),
+ &self.project_index(&input, index)?,
dest,
/*allow_transmute*/ false,
)?;
@@ -609,7 +607,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
- let count = self.read_target_usize(&count)?;
+ let count = self.read_target_usize(count)?;
let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
let (size, align) = (layout.size, layout.align.abi);
// `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
@@ -621,8 +619,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)
})?;
- let src = self.read_pointer(&src)?;
- let dst = self.read_pointer(&dst)?;
+ let src = self.read_pointer(src)?;
+ let dst = self.read_pointer(dst)?;
self.mem_copy(src, align, dst, align, size, nonoverlapping)
}
@@ -635,9 +633,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> InterpResult<'tcx> {
let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap().ty)?;
- let dst = self.read_pointer(&dst)?;
- let byte = self.read_scalar(&byte)?.to_u8()?;
- let count = self.read_target_usize(&count)?;
+ let dst = self.read_pointer(dst)?;
+ let byte = self.read_scalar(byte)?.to_u8()?;
+ let count = self.read_target_usize(count)?;
// `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
// but no actual allocation can be big enough for the difference to be noticeable.
@@ -649,6 +647,24 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.write_bytes_ptr(dst, bytes)
}
+ pub(crate) fn compare_bytes_intrinsic(
+ &mut self,
+ left: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ right: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ byte_count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ let left = self.read_pointer(left)?;
+ let right = self.read_pointer(right)?;
+ let n = Size::from_bytes(self.read_target_usize(byte_count)?);
+
+ let left_bytes = self.read_bytes_ptr_strip_provenance(left, n)?;
+ let right_bytes = self.read_bytes_ptr_strip_provenance(right, n)?;
+
+ // `Ordering`'s discriminants are -1/0/+1, so casting does the right thing.
+ let result = Ord::cmp(left_bytes, right_bytes) as i32;
+ Ok(Scalar::from_i32(result))
+ }
+
pub(crate) fn raw_eq_intrinsic(
&mut self,
lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
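A standalone sketch of what the new `compare_bytes_intrinsic` computes once the two ranges have been read (and truncated to `byte_count`): a lexicographic comparison with a memcmp-style sign. The cast works because `Ordering` is a fieldless `#[repr(i8)]` enum with values -1/0/+1 (toy function, not the interpreter API):

fn compare_bytes(left: &[u8], right: &[u8]) -> i32 {
    // `Ordering`'s discriminants are -1/0/+1, so the cast yields the
    // memcmp-style sign directly.
    Ord::cmp(left, right) as i32
}

fn main() {
    assert!(compare_bytes(b"abc", b"abd") < 0);
    assert_eq!(compare_bytes(b"abc", b"abc"), 0);
    assert!(compare_bytes(b"abd", b"abc") > 0);
}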
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
index df5b58100..948bec746 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
@@ -96,16 +96,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let loc_ty = self
.tcx
.type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None))
- .subst(*self.tcx, self.tcx.mk_substs(&[self.tcx.lifetimes.re_erased.into()]));
+ .instantiate(*self.tcx, self.tcx.mk_args(&[self.tcx.lifetimes.re_erased.into()]));
let loc_layout = self.layout_of(loc_ty).unwrap();
let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
// Initialize fields.
- self.write_immediate(file.to_ref(self), &self.mplace_field(&location, 0).unwrap().into())
+ self.write_immediate(file.to_ref(self), &self.project_field(&location, 0).unwrap())
.expect("writing to memory we just allocated cannot fail");
- self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into())
+ self.write_scalar(line, &self.project_field(&location, 1).unwrap())
.expect("writing to memory we just allocated cannot fail");
- self.write_scalar(col, &self.mplace_field(&location, 2).unwrap().into())
+ self.write_scalar(col, &self.project_field(&location, 2).unwrap())
.expect("writing to memory we just allocated cannot fail");
location
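The three fields initialized above are exactly what `core::panic::Location` exposes; the interned allocation backs the value that `Location::caller()` returns at runtime. A standalone illustration:

#[track_caller]
fn where_am_i() -> &'static std::panic::Location<'static> {
    std::panic::Location::caller()
}

fn main() {
    let loc = where_am_i();
    // The same (file, line, column) triple written by the code above.
    println!("{}:{}:{}", loc.file(), loc.line(), loc.column());
}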
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index b448e3a24..e101785b6 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -17,7 +17,7 @@ use rustc_target::spec::abi::Abi as CallAbi;
use crate::const_eval::CheckAlignment;
use super::{
- AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, Frame, ImmTy, InterpCx,
+ AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, FnArg, Frame, ImmTy, InterpCx,
InterpResult, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar,
};
@@ -84,7 +84,7 @@ pub trait AllocMap<K: Hash + Eq, V> {
/// Methods of this trait signifies a point where CTFE evaluation would fail
/// and some use case dependent behaviour can instead be applied.
-pub trait Machine<'mir, 'tcx>: Sized {
+pub trait Machine<'mir, 'tcx: 'mir>: Sized {
/// Additional memory kinds a machine wishes to distinguish from the builtin ones
type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;
@@ -182,7 +182,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
abi: CallAbi,
- args: &[OpTy<'tcx, Self::Provenance>],
+ args: &[FnArg<'tcx, Self::Provenance>],
destination: &PlaceTy<'tcx, Self::Provenance>,
target: Option<mir::BasicBlock>,
unwind: mir::UnwindAction,
@@ -194,7 +194,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
ecx: &mut InterpCx<'mir, 'tcx, Self>,
fn_val: Self::ExtraFnVal,
abi: CallAbi,
- args: &[OpTy<'tcx, Self::Provenance>],
+ args: &[FnArg<'tcx, Self::Provenance>],
destination: &PlaceTy<'tcx, Self::Provenance>,
target: Option<mir::BasicBlock>,
unwind: mir::UnwindAction,
@@ -418,6 +418,18 @@ pub trait Machine<'mir, 'tcx>: Sized {
Ok(())
}
+ /// Called on places used for in-place function argument and return value handling.
+ ///
+ /// These places need to be protected to make sure the program cannot tell whether the
+ /// argument/return value was actually copied or passed in-place.
+ fn protect_in_place_function_argument(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ place: &PlaceTy<'tcx, Self::Provenance>,
+ ) -> InterpResult<'tcx> {
+ // Without an aliasing model, all we can do is put `Uninit` into the place.
+ ecx.write_uninit(place)
+ }
+
/// Called immediately before a new stack frame gets pushed.
fn init_frame_extra(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
@@ -439,6 +451,14 @@ pub trait Machine<'mir, 'tcx>: Sized {
Ok(())
}
+ /// Called just before the return value is copied to the caller-provided return place.
+ fn before_stack_pop(
+ _ecx: &InterpCx<'mir, 'tcx, Self>,
+ _frame: &Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
/// Called immediately after a stack frame got popped, but before jumping back to the caller.
/// The `locals` have already been destroyed!
fn after_stack_pop(
@@ -484,7 +504,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
_ecx: &mut InterpCx<$mir, $tcx, Self>,
fn_val: !,
_abi: CallAbi,
- _args: &[OpTy<$tcx>],
+ _args: &[FnArg<$tcx>],
_destination: &PlaceTy<$tcx, Self::Provenance>,
_target: Option<mir::BasicBlock>,
_unwind: mir::UnwindAction,
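The two new hooks follow the trait's usual pattern: default method bodies give the plain CTFE behavior, and a machine such as Miri overrides them to hook in its aliasing model. A toy sketch of that pattern (hand-rolled trait, not the real `Machine`):

trait ToyMachine {
    // Mirrors `before_stack_pop`: by default there is nothing to do.
    fn before_stack_pop(&mut self) {}

    // Mirrors `protect_in_place_function_argument`: without an aliasing
    // model, the best available protection is to deinitialize the place.
    fn protect_in_place_argument(&mut self, place: &mut Option<u64>) {
        *place = None; // stand-in for writing `Uninit`
    }
}

struct Ctfe;
impl ToyMachine for Ctfe {} // CTFE keeps the defaults

fn main() {
    let mut ctfe = Ctfe;
    ctfe.before_stack_pop(); // default: no-op
    let mut arg = Some(42u64);
    ctfe.protect_in_place_argument(&mut arg);
    assert_eq!(arg, None); // the old value is no longer observable
}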
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 1125d8d1f..11bffedf5 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -53,7 +53,7 @@ impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
match self {
MemoryKind::Stack => write!(f, "stack variable"),
MemoryKind::CallerLocation => write!(f, "caller location"),
- MemoryKind::Machine(m) => write!(f, "{}", m),
+ MemoryKind::Machine(m) => write!(f, "{m}"),
}
}
}
@@ -91,7 +91,7 @@ impl<'tcx, Other> FnVal<'tcx, Other> {
// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
- /// Allocations local to this instance of the miri engine. The kind
+ /// Allocations local to this instance of the interpreter. The kind
/// helps ensure that the same mechanism is used for allocation and
/// deallocation. When an allocation is not found here, it is a
/// global and looked up in the `tcx` for read access. Some machines may
@@ -317,7 +317,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
kind = "static_mem"
)
}
- None => err_ub!(PointerUseAfterFree(alloc_id)),
+ None => err_ub!(PointerUseAfterFree(alloc_id, CheckInAllocMsg::MemoryAccessTest)),
}
.into());
};
@@ -380,7 +380,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
M::enforce_alignment(self),
CheckInAllocMsg::MemoryAccessTest,
|alloc_id, offset, prov| {
- let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
+ let (size, align) = self
+ .get_live_alloc_size_and_align(alloc_id, CheckInAllocMsg::MemoryAccessTest)?;
Ok((size, align, (alloc_id, offset, prov)))
},
)
@@ -404,7 +405,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
CheckAlignment::Error,
msg,
|alloc_id, _, _| {
- let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
+ let (size, align) = self.get_live_alloc_size_and_align(alloc_id, msg)?;
Ok((size, align, ()))
},
)?;
@@ -414,7 +415,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
/// to the allocation it points to. Supports both shared and mutable references, as the actual
/// checking is offloaded to a helper closure. `align` defines whether and which alignment check
- /// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
+ /// is done.
+ ///
+ /// If this returns `None`, the size is 0; however, it can return `Some` even for size 0.
fn check_and_deref_ptr<T>(
&self,
ptr: Pointer<Option<M::Provenance>>,
@@ -515,7 +518,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
- None => throw_ub!(PointerUseAfterFree(id)),
+ None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccessTest)),
Some(GlobalAlloc::Static(def_id)) => {
assert!(self.tcx.is_static(def_id));
assert!(!self.tcx.is_thread_local_static(def_id));
@@ -761,11 +764,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- /// Obtain the size and alignment of a live allocation.
- pub fn get_live_alloc_size_and_align(&self, id: AllocId) -> InterpResult<'tcx, (Size, Align)> {
+ /// Obtain the size and alignment of a *live* allocation.
+ fn get_live_alloc_size_and_align(
+ &self,
+ id: AllocId,
+ msg: CheckInAllocMsg,
+ ) -> InterpResult<'tcx, (Size, Align)> {
let (size, align, kind) = self.get_alloc_info(id);
if matches!(kind, AllocKind::Dead) {
- throw_ub!(PointerUseAfterFree(id))
+ throw_ub!(PointerUseAfterFree(id, msg))
}
Ok((size, align))
}
@@ -907,7 +914,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
match self.ecx.memory.alloc_map.get(id) {
Some((kind, alloc)) => {
// normal alloc
- write!(fmt, " ({}, ", kind)?;
+ write!(fmt, " ({kind}, ")?;
write_allocation_track_relocs(
&mut *fmt,
*self.ecx.tcx,
@@ -1060,11 +1067,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let size = Size::from_bytes(len);
let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size, Align::ONE)? else {
// zero-sized access
- assert_matches!(
- src.next(),
- None,
- "iterator said it was empty but returned an element"
- );
+ assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
return Ok(());
};
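Threading a `CheckInAllocMsg` through these helpers lets the use-after-free error say which kind of check tripped. A toy model of the liveness lookup itself: a dead allocation is still known by ID (so the message can name it) but no longer has data (hypothetical types, not the interpreter's):

use std::collections::HashMap;

enum AllocKind {
    Live(usize /* size */),
    Dead,
}

fn size_of_live(allocs: &HashMap<u32, AllocKind>, id: u32) -> Result<usize, String> {
    match allocs.get(&id) {
        Some(AllocKind::Live(size)) => Ok(*size),
        Some(AllocKind::Dead) => Err(format!("alloc{id} used after free (memory access test)")),
        None => Err(format!("alloc{id} is not known at all")),
    }
}

fn main() {
    let mut allocs = HashMap::new();
    allocs.insert(1, AllocKind::Live(16));
    allocs.insert(2, AllocKind::Dead);
    assert_eq!(size_of_live(&allocs, 1), Ok(16));
    assert!(size_of_live(&allocs, 2).is_err());
    assert!(size_of_live(&allocs, 3).is_err());
}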
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index 898d62361..b0b553c45 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -24,10 +24,12 @@ pub use self::eval_context::{Frame, FrameInfo, InterpCx, LocalState, LocalValue,
pub use self::intern::{intern_const_alloc_recursive, InternKind};
pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
-pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
-pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
+pub use self::operand::{ImmTy, Immediate, OpTy, Operand, Readable};
+pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy, Writeable};
+pub use self::projection::Projectable;
+pub use self::terminator::FnArg;
pub use self::validity::{CtfeValidationMode, RefTracking};
-pub use self::visitor::{MutValueVisitor, Value, ValueVisitor};
+pub use self::visitor::ValueVisitor;
pub(crate) use self::intrinsics::eval_nullary_intrinsic;
use eval_context::{from_known_layout, mir_assign_valid_types};
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 5f89d652f..6e57a56b4 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -1,6 +1,8 @@
//! Functions concerning immediate values and operands, and reading from operands.
//! All high-level functions to read from memory work on operands as sources.
+use std::assert_matches::assert_matches;
+
use either::{Either, Left, Right};
use rustc_hir::def::Namespace;
@@ -13,8 +15,8 @@ use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
use super::{
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
- InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Place, PlaceTy, Pointer,
- Provenance, Scalar,
+ InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer,
+ Projectable, Provenance, Scalar,
};
/// An `Immediate` represents a single immediate self-contained Rust value.
@@ -31,7 +33,7 @@ pub enum Immediate<Prov: Provenance = AllocId> {
/// A pair of two scalar value (must have `ScalarPair` ABI where both fields are
/// `Scalar::Initialized`).
ScalarPair(Scalar<Prov>, Scalar<Prov>),
- /// A value of fully uninitialized memory. Can have and size and layout.
+ /// A value of fully uninitialized memory. Can have arbitrary size and layout.
Uninit,
}
@@ -178,20 +180,6 @@ impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
}
}
-impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
- #[inline(always)]
- fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
- OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
- }
-}
-
-impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
- #[inline(always)]
- fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
- OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
- }
-}
-
impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
#[inline(always)]
fn from(val: ImmTy<'tcx, Prov>) -> Self {
@@ -240,43 +228,126 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
let int = self.to_scalar().assert_int();
ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
}
+
+ /// Compute the "sub-immediate" that is located within the `base` at the given offset with the
+ /// given layout.
+ // Not called `offset` to avoid confusion with the trait method.
+ fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
+ // This makes several assumptions about what layouts we will encounter; we match what
+ // codegen does as well as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
+ let inner_val: Immediate<_> = match (**self, self.layout.abi) {
+ // if the entire value is uninit, then so is the field (can happen in ConstProp)
+ (Immediate::Uninit, _) => Immediate::Uninit,
+ // the field contains no information, can be left uninit
+ _ if layout.is_zst() => Immediate::Uninit,
+ // some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
+ // to detect those here and also give them no data
+ _ if matches!(layout.abi, Abi::Aggregate { .. })
+ && matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
+ {
+ Immediate::Uninit
+ }
+ // the field covers the entire type
+ _ if layout.size == self.layout.size => {
+ assert_eq!(offset.bytes(), 0);
+ assert!(
+ match (self.layout.abi, layout.abi) {
+ (Abi::Scalar(..), Abi::Scalar(..)) => true,
+ (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
+ _ => false,
+ },
+ "cannot project into {} immediate with equally-sized field {}\nouter ABI: {:#?}\nfield ABI: {:#?}",
+ self.layout.ty,
+ layout.ty,
+ self.layout.abi,
+ layout.abi,
+ );
+ **self
+ }
+ // extract fields from types with `ScalarPair` ABI
+ (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+ assert!(matches!(layout.abi, Abi::Scalar(..)));
+ Immediate::from(if offset.bytes() == 0 {
+ debug_assert_eq!(layout.size, a.size(cx));
+ a_val
+ } else {
+ debug_assert_eq!(offset, a.size(cx).align_to(b.align(cx).abi));
+ debug_assert_eq!(layout.size, b.size(cx));
+ b_val
+ })
+ }
+ // everything else is a bug
+ _ => bug!("invalid field access on immediate {}, layout {:#?}", self, self.layout),
+ };
+
+ ImmTy::from_immediate(inner_val, layout)
+ }
+}
+
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+ assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never reach this point
+ Ok(MemPlaceMeta::None)
+ }
+
+ fn offset_with_meta(
+ &self,
+ offset: Size,
+ meta: MemPlaceMeta<Prov>,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self> {
+ assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway
+ Ok(self.offset_(offset, layout, cx))
+ }
+
+ fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.clone().into())
+ }
}
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
- pub fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
- if self.layout.is_unsized() {
- if matches!(self.op, Operand::Immediate(Immediate::Uninit)) {
- // Uninit unsized places shouldn't occur. In the interpreter we have them
- // temporarily for unsized arguments before their value is put in; in ConstProp they
- // remain uninit and this code can actually be reached.
- throw_inval!(UninitUnsizedLocal);
+ // Provided as an inherent method since it doesn't need the `ecx` of `Projectable::meta`.
+ pub fn meta(&self) -> InterpResult<'tcx, MemPlaceMeta<Prov>> {
+ Ok(if self.layout.is_unsized() {
+ if matches!(self.op, Operand::Immediate(_)) {
+ // Unsized immediate OpTy cannot occur. We create a MemPlace for all unsized locals during argument passing.
+ // However, ConstProp doesn't do that, so we can run into this nonsense situation.
+ throw_inval!(ConstPropNonsense);
}
// There are no unsized immediates.
- self.assert_mem_place().len(cx)
+ self.assert_mem_place().meta
} else {
- match self.layout.fields {
- abi::FieldsShape::Array { count, .. } => Ok(count),
- _ => bug!("len not supported on sized type {:?}", self.layout.ty),
- }
- }
+ MemPlaceMeta::None
+ })
}
+}
- /// Replace the layout of this operand. There's basically no sanity check that this makes sense,
- /// you better know what you are doing! If this is an immediate, applying the wrong layout can
- /// not just lead to invalid data, it can actually *shift the data around* since the offsets of
- /// a ScalarPair are entirely determined by the layout, not the data.
- pub fn transmute(&self, layout: TyAndLayout<'tcx>) -> Self {
- assert_eq!(
- self.layout.size, layout.size,
- "transmuting with a size change, that doesn't seem right"
- );
- OpTy { layout, ..*self }
+impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
}
- /// Offset the operand in memory (if possible) and change its metadata.
- ///
- /// This can go wrong very easily if you give the wrong layout for the new place!
- pub(super) fn offset_with_meta(
+ fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+ self.meta()
+ }
+
+ fn offset_with_meta(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
@@ -286,28 +357,43 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
match self.as_mplace_or_imm() {
Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
Right(imm) => {
- assert!(
- matches!(*imm, Immediate::Uninit),
- "Scalar/ScalarPair cannot be offset into"
- );
assert!(!meta.has_meta()); // no place to store metadata here
// Every part of an uninit is uninit.
- Ok(ImmTy::uninit(layout).into())
+ Ok(imm.offset(offset, layout, cx)?.into())
}
}
}
- /// Offset the operand in memory (if possible).
- ///
- /// This can go wrong very easily if you give the wrong layout for the new place!
- pub fn offset(
+ fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
- offset: Size,
- layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
- ) -> InterpResult<'tcx, Self> {
- assert!(layout.is_sized());
- self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.clone())
+ }
+}
+
+pub trait Readable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
+ fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>>;
+}
+
+impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for OpTy<'tcx, Prov> {
+ #[inline(always)]
+ fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+ self.as_mplace_or_imm()
+ }
+}
+
+impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+ Left(self.clone())
+ }
+}
+
+impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for ImmTy<'tcx, Prov> {
+ #[inline(always)]
+ fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+ Right(self.clone())
}
}
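The `ScalarPair` field extraction in `ImmTy::offset_` above locates the second component at `a.size(cx).align_to(b.align(cx).abi)`, the usual pair layout rule: the second field starts at the first field's size rounded up to the second field's alignment. A standalone check with concrete numbers (the `#[repr(C)]` struct is just an analogous illustration, not how the compiler lays out `ScalarPair`s in general):

fn align_to(size: usize, align: usize) -> usize {
    size.div_ceil(align) * align
}

fn main() {
    // For a pair like (u8, u32): a.size = 1, b.align = 4, so b sits at offset 4.
    assert_eq!(align_to(1, 4), 4);

    // The analogous #[repr(C)] struct agrees.
    #[allow(dead_code)]
    #[repr(C)]
    struct Pair {
        a: u8,
        b: u32,
    }
    assert_eq!(std::mem::offset_of!(Pair, b), 4);
    assert_eq!(std::mem::size_of::<Pair>(), 8);
}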
@@ -383,14 +469,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// ConstProp needs it, though.
pub fn read_immediate_raw(
&self,
- src: &OpTy<'tcx, M::Provenance>,
+ src: &impl Readable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Either<MPlaceTy<'tcx, M::Provenance>, ImmTy<'tcx, M::Provenance>>> {
Ok(match src.as_mplace_or_imm() {
Left(ref mplace) => {
if let Some(val) = self.read_immediate_from_mplace_raw(mplace)? {
Right(val)
} else {
- Left(*mplace)
+ Left(mplace.clone())
}
}
Right(val) => Right(val),
@@ -403,14 +489,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
#[inline(always)]
pub fn read_immediate(
&self,
- op: &OpTy<'tcx, M::Provenance>,
+ op: &impl Readable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
if !matches!(
- op.layout.abi,
+ op.layout().abi,
Abi::Scalar(abi::Scalar::Initialized { .. })
| Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
) {
- span_bug!(self.cur_span(), "primitive read not possible for type: {:?}", op.layout.ty);
+ span_bug!(
+ self.cur_span(),
+ "primitive read not possible for type: {:?}",
+ op.layout().ty
+ );
}
let imm = self.read_immediate_raw(op)?.right().unwrap();
if matches!(*imm, Immediate::Uninit) {
@@ -422,7 +512,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Read a scalar from a place
pub fn read_scalar(
&self,
- op: &OpTy<'tcx, M::Provenance>,
+ op: &impl Readable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
Ok(self.read_immediate(op)?.to_scalar())
}
@@ -433,16 +523,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Read a pointer from a place.
pub fn read_pointer(
&self,
- op: &OpTy<'tcx, M::Provenance>,
+ op: &impl Readable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
self.read_scalar(op)?.to_pointer(self)
}
/// Read a pointer-sized unsigned integer from a place.
- pub fn read_target_usize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u64> {
+ pub fn read_target_usize(
+ &self,
+ op: &impl Readable<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, u64> {
self.read_scalar(op)?.to_target_usize(self)
}
/// Read a pointer-sized signed integer from a place.
- pub fn read_target_isize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, i64> {
+ pub fn read_target_isize(
+ &self,
+ op: &impl Readable<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, i64> {
self.read_scalar(op)?.to_target_isize(self)
}
@@ -497,18 +593,28 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Every place can be read from, so we can turn them into an operand.
/// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
/// will never actually read from memory.
- #[inline(always)]
pub fn place_to_op(
&self,
place: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- let op = match **place {
- Place::Ptr(mplace) => Operand::Indirect(mplace),
- Place::Local { frame, local } => {
- *self.local_to_op(&self.stack()[frame], local, None)?
+ match place.as_mplace_or_local() {
+ Left(mplace) => Ok(mplace.into()),
+ Right((frame, local, offset)) => {
+ let base = self.local_to_op(&self.stack()[frame], local, None)?;
+ let mut field = if let Some(offset) = offset {
+ // This got offset. We can be sure that the field is sized.
+ base.offset(offset, place.layout, self)?
+ } else {
+ assert_eq!(place.layout, base.layout);
+ // Unsized cases are possible here since an unsized local will be a
+ // `Place::Local` until the first projection calls `place_to_op` to extract the
+ // underlying mplace.
+ base
+ };
+ field.align = Some(place.align);
+ Ok(field)
}
- };
- Ok(OpTy { op, layout: place.layout, align: Some(place.align) })
+ }
}
/// Evaluate a place with the goal of reading from it. This lets us sometimes
@@ -525,7 +631,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let mut op = self.local_to_op(self.frame(), mir_place.local, layout)?;
// Using `try_fold` turned out to be bad for performance, hence the loop.
for elem in mir_place.projection.iter() {
- op = self.operand_projection(&op, elem)?
+ op = self.project(&op, elem)?
}
trace!("eval_place_to_op: got {:?}", *op);
@@ -575,14 +681,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(op)
}
- /// Evaluate a bunch of operands at once
- pub(super) fn eval_operands(
- &self,
- ops: &[mir::Operand<'tcx>],
- ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::Provenance>>> {
- ops.iter().map(|op| self.eval_operand(op, None)).collect()
- }
-
fn eval_ty_constant(
&self,
val: ty::Const<'tcx>,
@@ -598,12 +696,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
throw_inval!(AlreadyReported(reported.into()))
}
ty::ConstKind::Unevaluated(uv) => {
- let instance = self.resolve(uv.def, uv.substs)?;
+ let instance = self.resolve(uv.def, uv.args)?;
let cid = GlobalId { instance, promoted: None };
- self.ctfe_query(span, |tcx| {
- tcx.eval_to_valtree(self.param_env.with_const().and(cid))
- })?
- .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
+ self.ctfe_query(span, |tcx| tcx.eval_to_valtree(self.param_env.and(cid)))?
+ .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
}
ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {val:?}")
@@ -627,7 +723,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
mir::ConstantKind::Val(val, ty) => self.const_val_to_op(val, ty, layout),
mir::ConstantKind::Unevaluated(uv, _) => {
- let instance = self.resolve(uv.def, uv.substs)?;
+ let instance = self.resolve(uv.def, uv.args)?;
Ok(self.eval_global(GlobalId { instance, promoted: uv.promoted }, span)?.into())
}
}
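With `Readable` in place, the read entry points above (`read_immediate`, `read_scalar`, `read_pointer`, ...) accept an `OpTy`, an `MPlaceTy`, or an `ImmTy` directly, instead of forcing callers through `.into()` conversions first. A toy version of that unification (hand-rolled trait and types, not the interpreter's):

trait ToyReadable {
    fn scalar(&self) -> u64;
}

struct Imm(u64);
struct Mem {
    bytes: Vec<u8>,
}

impl ToyReadable for Imm {
    fn scalar(&self) -> u64 {
        self.0
    }
}

impl ToyReadable for Mem {
    fn scalar(&self) -> u64 {
        // A memory-backed value is decoded from its bytes.
        u64::from_le_bytes(self.bytes[..8].try_into().unwrap())
    }
}

// One generic entry point instead of one per representation.
fn read_scalar(op: &impl ToyReadable) -> u64 {
    op.scalar()
}

fn main() {
    assert_eq!(read_scalar(&Imm(7)), 7);
    assert_eq!(read_scalar(&Mem { bytes: 42u64.to_le_bytes().to_vec() }), 42);
}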
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index e04764636..eb0645780 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -24,8 +24,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
debug_assert_eq!(
Ty::new_tup(self.tcx.tcx, &[ty, self.tcx.types.bool]),
dest.layout.ty,
- "type mismatch for result of {:?}",
- op,
+ "type mismatch for result of {op:?}",
);
// Write the result to `dest`.
if let Abi::ScalarPair(..) = dest.layout.abi {
@@ -38,9 +37,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to
// do a component-wise write here. This code path is slower than the above because
// `place_field` will have to `force_allocate` locals here.
- let val_field = self.place_field(&dest, 0)?;
+ let val_field = self.project_field(dest, 0)?;
self.write_scalar(val, &val_field)?;
- let overflowed_field = self.place_field(&dest, 1)?;
+ let overflowed_field = self.project_field(dest, 1)?;
self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
}
Ok(())
@@ -56,7 +55,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
- assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
+ assert_eq!(ty, dest.layout.ty, "type mismatch for result of {op:?}");
self.write_scalar(val, dest)
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index ca1106384..daadb7589 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -2,11 +2,14 @@
//! into a place.
//! All high-level functions to write to memory work on places as destinations.
+use std::assert_matches::assert_matches;
+
use either::{Either, Left, Right};
use rustc_ast::Mutability;
use rustc_index::IndexSlice;
use rustc_middle::mir;
+use rustc_middle::mir::interpret::PointerArithmetic;
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty;
@@ -15,7 +18,7 @@ use rustc_target::abi::{self, Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_V
use super::{
alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
- Pointer, Provenance, Scalar,
+ Pointer, Projectable, Provenance, Readable, Scalar,
};
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -44,6 +47,27 @@ impl<Prov: Provenance> MemPlaceMeta<Prov> {
Self::None => false,
}
}
+
+ pub(crate) fn len<'tcx>(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, u64> {
+ if layout.is_unsized() {
+ // We need to consult the `meta` metadata.
+ match layout.ty.kind() {
+ ty::Slice(..) | ty::Str => self.unwrap_meta().to_target_usize(cx),
+ _ => bug!("len not supported on unsized type {:?}", layout.ty),
+ }
+ } else {
+ // Go through the layout. There are lots of types that support a length,
+ // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
+ match layout.fields {
+ abi::FieldsShape::Array { count, .. } => Ok(count),
+ _ => bug!("len not supported on sized type {:?}", layout.ty),
+ }
+ }
+ }
}
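The new `MemPlaceMeta::len` unifies the two places a length can come from: pointer metadata for unsized types (slices, `str`), and the layout's `FieldsShape::Array` count for sized types such as arrays and SIMD vectors. A standalone illustration of the two sources with plain Rust types:

fn main() {
    let arr = [0u8; 4]; // sized: the length is part of the type, i.e. the layout
    let slice: &[u8] = &arr; // unsized: the length travels in the wide pointer
    assert_eq!(arr.len(), 4);
    assert_eq!(slice.len(), 4);

    // A wide pointer is (data, metadata), i.e. two usizes for a slice.
    assert_eq!(
        std::mem::size_of::<&[u8]>(),
        2 * std::mem::size_of::<usize>()
    );
}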
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -57,7 +81,7 @@ pub struct MemPlace<Prov: Provenance = AllocId> {
}
/// A MemPlace with its layout. Constructing it is only possible in this module.
-#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
+#[derive(Clone, Hash, Eq, PartialEq, Debug)]
pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
mplace: MemPlace<Prov>,
pub layout: TyAndLayout<'tcx>,
@@ -68,14 +92,26 @@ pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
pub align: Align,
}
+impl<'tcx, Prov: Provenance> std::ops::Deref for MPlaceTy<'tcx, Prov> {
+ type Target = MemPlace<Prov>;
+ #[inline(always)]
+ fn deref(&self) -> &MemPlace<Prov> {
+ &self.mplace
+ }
+}
+
#[derive(Copy, Clone, Debug)]
pub enum Place<Prov: Provenance = AllocId> {
/// A place referring to a value allocated in the `Memory` system.
Ptr(MemPlace<Prov>),
- /// To support alloc-free locals, we are able to write directly to a local.
+ /// To support alloc-free locals, we are able to write directly to a local. The offset indicates
+ /// where in the local this place is located; if it is `None`, no projection has been applied.
+ /// Such projections are meaningful even if the offset is 0, since they can change layouts.
/// (Without that optimization, we'd just always be a `MemPlace`.)
- Local { frame: usize, local: mir::Local },
+ /// Note that this only stores the frame index, not the thread this frame belongs to -- that is
+ /// implicit. This means a `Place` must never be moved across interpreter thread boundaries!
+ Local { frame: usize, local: mir::Local, offset: Option<Size> },
}
#[derive(Clone, Debug)]
@@ -97,14 +133,6 @@ impl<'tcx, Prov: Provenance> std::ops::Deref for PlaceTy<'tcx, Prov> {
}
}
-impl<'tcx, Prov: Provenance> std::ops::Deref for MPlaceTy<'tcx, Prov> {
- type Target = MemPlace<Prov>;
- #[inline(always)]
- fn deref(&self) -> &MemPlace<Prov> {
- &self.mplace
- }
-}
-
impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
#[inline(always)]
fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
@@ -112,33 +140,23 @@ impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov>
}
}
-impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
- #[inline(always)]
- fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
- PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
- }
-}
-
-impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
- #[inline(always)]
- fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
- PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
- }
-}
-
impl<Prov: Provenance> MemPlace<Prov> {
#[inline(always)]
pub fn from_ptr(ptr: Pointer<Option<Prov>>) -> Self {
MemPlace { ptr, meta: MemPlaceMeta::None }
}
+ #[inline(always)]
+ pub fn from_ptr_with_meta(ptr: Pointer<Option<Prov>>, meta: MemPlaceMeta<Prov>) -> Self {
+ MemPlace { ptr, meta }
+ }
+
/// Adjust the provenance of the main pointer (metadata is unaffected).
pub fn map_provenance(self, f: impl FnOnce(Option<Prov>) -> Option<Prov>) -> Self {
MemPlace { ptr: self.ptr.map_provenance(f), ..self }
}
/// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
- /// This is the inverse of `ref_to_mplace`.
#[inline(always)]
pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
match self.meta {
@@ -150,7 +168,8 @@ impl<Prov: Provenance> MemPlace<Prov> {
}
#[inline]
- pub(super) fn offset_with_meta<'tcx>(
+ // Not called `offset_with_meta` to avoid confusion with the trait method.
+ fn offset_with_meta_<'tcx>(
self,
offset: Size,
meta: MemPlaceMeta<Prov>,
@@ -164,19 +183,6 @@ impl<Prov: Provenance> MemPlace<Prov> {
}
}
-impl<Prov: Provenance> Place<Prov> {
- /// Asserts that this points to some local variable.
- /// Returns the frame idx and the variable idx.
- #[inline]
- #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
- pub fn assert_local(&self) -> (usize, mir::Local) {
- match self {
- Place::Local { frame, local } => (*frame, *local),
- _ => bug!("assert_local: expected Place::Local, got {:?}", self),
- }
- }
-}
-
impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
/// Produces a MemPlace that works for ZST but nothing else.
/// Conceptually this is a new allocation, but it doesn't actually create an allocation so you
@@ -189,11 +195,39 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align }
}
- /// Offset the place in memory and change its metadata.
- ///
- /// This can go wrong very easily if you give the wrong layout for the new place!
#[inline]
- pub(crate) fn offset_with_meta(
+ pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
+ MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
+ }
+
+ #[inline]
+ pub fn from_aligned_ptr_with_meta(
+ ptr: Pointer<Option<Prov>>,
+ layout: TyAndLayout<'tcx>,
+ meta: MemPlaceMeta<Prov>,
+ ) -> Self {
+ MPlaceTy {
+ mplace: MemPlace::from_ptr_with_meta(ptr, meta),
+ layout,
+ align: layout.align.abi,
+ }
+ }
+}
+
+impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+ Ok(self.meta)
+ }
+
+ fn offset_with_meta(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
@@ -201,58 +235,65 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
Ok(MPlaceTy {
- mplace: self.mplace.offset_with_meta(offset, meta, cx)?,
+ mplace: self.mplace.offset_with_meta_(offset, meta, cx)?,
align: self.align.restrict_for_offset(offset),
layout,
})
}
- /// Offset the place in memory.
- ///
- /// This can go wrong very easily if you give the wrong layout for the new place!
- pub fn offset(
+ fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
- offset: Size,
- layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
- ) -> InterpResult<'tcx, Self> {
- assert!(layout.is_sized());
- self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.clone().into())
}
+}
- #[inline]
- pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
- MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
+impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
}
- #[inline]
- pub fn from_aligned_ptr_with_meta(
- ptr: Pointer<Option<Prov>>,
- layout: TyAndLayout<'tcx>,
- meta: MemPlaceMeta<Prov>,
- ) -> Self {
- let mut mplace = MemPlace::from_ptr(ptr);
- mplace.meta = meta;
-
- MPlaceTy { mplace, layout, align: layout.align.abi }
+ fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+ ecx.place_meta(self)
}
- #[inline]
- pub(crate) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
- if self.layout.is_unsized() {
- // We need to consult `meta` metadata
- match self.layout.ty.kind() {
- ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_target_usize(cx),
- _ => bug!("len not supported on unsized type {:?}", self.layout.ty),
- }
- } else {
- // Go through the layout. There are lots of types that support a length,
- // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
- match self.layout.fields {
- abi::FieldsShape::Array { count, .. } => Ok(count),
- _ => bug!("len not supported on sized type {:?}", self.layout.ty),
+ fn offset_with_meta(
+ &self,
+ offset: Size,
+ meta: MemPlaceMeta<Prov>,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self> {
+ Ok(match self.as_mplace_or_local() {
+ Left(mplace) => mplace.offset_with_meta(offset, meta, layout, cx)?.into(),
+ Right((frame, local, old_offset)) => {
+ assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway...
+ let new_offset = cx
+ .data_layout()
+ .offset(old_offset.unwrap_or(Size::ZERO).bytes(), offset.bytes())?;
+ PlaceTy {
+ place: Place::Local {
+ frame,
+ local,
+ offset: Some(Size::from_bytes(new_offset)),
+ },
+ align: self.align.restrict_for_offset(offset),
+ layout,
+ }
}
- }
+ })
+ }
+
+ fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ ecx.place_to_op(self)
}
}
@@ -280,13 +321,15 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
}
}
-impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance + 'static> PlaceTy<'tcx, Prov> {
/// A place is either an mplace or some local.
#[inline]
- pub fn as_mplace_or_local(&self) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local)> {
+ pub fn as_mplace_or_local(
+ &self,
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>)> {
match **self {
Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout, align: self.align }),
- Place::Local { frame, local } => Right((frame, local)),
+ Place::Local { frame, local, offset } => Right((frame, local, offset)),
}
}
@@ -302,18 +345,80 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
}
}
+pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
+ fn as_mplace_or_local(
+ &self,
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>;
+
+ fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>>;
+}
+
+impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn as_mplace_or_local(
+ &self,
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>
+ {
+ self.as_mplace_or_local()
+ .map_right(|(frame, local, offset)| (frame, local, offset, self.align, self.layout))
+ }
+
+ #[inline(always)]
+ fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
+ ecx.force_allocation(self)
+ }
+}
+
+impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn as_mplace_or_local(
+ &self,
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>
+ {
+ Left(self.clone())
+ }
+
+ #[inline(always)]
+ fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ _ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
+ Ok(self.clone())
+ }
+}
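A sketch of what the `Writeable` abstraction buys (an editor's illustration, not part of the diff): interpreter helpers can now accept either destination kind through one signature. The helper name is made up, and it assumes `dest` has a 4-byte scalar layout so validation passes, plus the usual `interpret` module imports.

fn write_zero_u32<'mir, 'tcx, M: Machine<'mir, 'tcx>>(
    ecx: &mut InterpCx<'mir, 'tcx, M>,
    dest: &impl Writeable<'tcx, M::Provenance>, // a `PlaceTy` or an `MPlaceTy`
) -> InterpResult<'tcx> {
    // An `MPlaceTy` destination never hits `force_allocation`; a local-backed
    // `PlaceTy` may be updated in place without ever touching memory.
    ecx.write_scalar(Scalar::from_u32(0), dest)
}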
+
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
where
Prov: Provenance + 'static,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
+ /// Get the metadata of the given place.
+ pub(super) fn place_meta(
+ &self,
+ place: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+ if place.layout.is_unsized() {
+ // For `Place::Local`, the metadata is stored with the local, not the place. So we have
+ // to look that up first.
+ self.place_to_op(place)?.meta()
+ } else {
+ Ok(MemPlaceMeta::None)
+ }
+ }
+
/// Take a value, which represents a (thin or wide) reference, and make it a place.
- /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
+ /// Alignment is just based on the type. This is the inverse of `mplace_to_ref()`.
///
/// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
/// want to ever use the place for memory access!
- /// Generally prefer `deref_operand`.
+ /// Generally prefer `deref_pointer`.
pub fn ref_to_mplace(
&self,
val: &ImmTy<'tcx, M::Provenance>,
@@ -327,17 +432,29 @@ where
Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
};
- let mplace = MemPlace { ptr: ptr.to_pointer(self)?, meta };
- // When deref'ing a pointer, the *static* alignment given by the type is what matters.
- let align = layout.align.abi;
- Ok(MPlaceTy { mplace, layout, align })
+ // `ref_to_mplace` is called on raw pointers even if they don't actually get dereferenced,
+ // so we can't call `size_and_align_of` here: it asserts more validity than we want.
+ Ok(MPlaceTy::from_aligned_ptr_with_meta(ptr.to_pointer(self)?, layout, meta))
+ }
+
+ /// Turn an mplace into a (thin or wide) mutable raw pointer, pointing to the same space.
+ /// `align` information is lost!
+ /// This is the inverse of `ref_to_mplace`.
+ pub fn mplace_to_ref(
+ &self,
+ mplace: &MPlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+ let imm = mplace.to_ref(self);
+ let layout = self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, mplace.layout.ty))?;
+ Ok(ImmTy::from_immediate(imm, layout))
}
/// Take an operand, representing a pointer, and dereference it to a place.
+ /// Corresponds to the `*` operator in Rust.
#[instrument(skip(self), level = "debug")]
- pub fn deref_operand(
+ pub fn deref_pointer(
&self,
- src: &OpTy<'tcx, M::Provenance>,
+ src: &impl Readable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let val = self.read_immediate(src)?;
trace!("deref to {} on {:?}", val.layout.ty, *val);
@@ -347,41 +464,44 @@ where
}
let mplace = self.ref_to_mplace(&val)?;
- self.check_mplace(mplace)?;
+ self.check_mplace(&mplace)?;
Ok(mplace)
}
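For illustration (not in the diff): the renamed `deref_pointer` composes with `Projectable::to_op`. A hypothetical helper, assuming the `Readable` impl for `OpTy` that this series adds in operand.rs:

fn read_through_ptr<'mir, 'tcx, M: Machine<'mir, 'tcx>>(
    ecx: &InterpCx<'mir, 'tcx, M>,
    ptr_op: &OpTy<'tcx, M::Provenance>, // holds e.g. a `&i32`
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
    let place = ecx.deref_pointer(ptr_op)?; // checked: aligned and dereferenceable
    ecx.read_scalar(&place.to_op(ecx)?)     // `MPlaceTy` -> `OpTy` is lossless
}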
#[inline]
pub(super) fn get_place_alloc(
&self,
- place: &MPlaceTy<'tcx, M::Provenance>,
+ mplace: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
{
- assert!(place.layout.is_sized());
- assert!(!place.meta.has_meta());
- let size = place.layout.size;
- self.get_ptr_alloc(place.ptr, size, place.align)
+ let (size, _align) = self
+ .size_and_align_of_mplace(&mplace)?
+ .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
+ // Due to packed places, only `mplace.align` matters.
+ self.get_ptr_alloc(mplace.ptr, size, mplace.align)
}
#[inline]
pub(super) fn get_place_alloc_mut(
&mut self,
- place: &MPlaceTy<'tcx, M::Provenance>,
+ mplace: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
{
- assert!(place.layout.is_sized());
- assert!(!place.meta.has_meta());
- let size = place.layout.size;
- self.get_ptr_alloc_mut(place.ptr, size, place.align)
+ let (size, _align) = self
+ .size_and_align_of_mplace(&mplace)?
+ .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
+ // Due to packed places, only `mplace.align` matters.
+ self.get_ptr_alloc_mut(mplace.ptr, size, mplace.align)
}
/// Check if this mplace is dereferenceable and sufficiently aligned.
- pub fn check_mplace(&self, mplace: MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
- let (size, align) = self
+ pub fn check_mplace(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+ let (size, _align) = self
.size_and_align_of_mplace(&mplace)?
.unwrap_or((mplace.layout.size, mplace.layout.align.abi));
- assert!(mplace.align <= align, "dynamic alignment less strict than static one?");
- let align = if M::enforce_alignment(self).should_check() { align } else { Align::ONE };
+ // Due to packed places, only `mplace.align` matters.
+ let align =
+ if M::enforce_alignment(self).should_check() { mplace.align } else { Align::ONE };
self.check_ptr_access_align(mplace.ptr, size, align, CheckInAllocMsg::DerefTest)?;
Ok(())
}
@@ -418,7 +538,7 @@ where
local: mir::Local,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
let layout = self.layout_of_local(&self.stack()[frame], local, None)?;
- let place = Place::Local { frame, local };
+ let place = Place::Local { frame, local, offset: None };
Ok(PlaceTy { place, layout, align: layout.align.abi })
}
@@ -426,13 +546,13 @@ where
/// place; for reading, a more efficient alternative is `eval_place_to_op`.
#[instrument(skip(self), level = "debug")]
pub fn eval_place(
- &mut self,
+ &self,
mir_place: mir::Place<'tcx>,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
let mut place = self.local_to_place(self.frame_idx(), mir_place.local)?;
// Using `try_fold` turned out to be bad for performance, hence the loop.
for elem in mir_place.projection.iter() {
- place = self.place_projection(&place, elem)?
+ place = self.project(&place, elem)?
}
trace!("{:?}", self.dump_place(place.place));
@@ -459,13 +579,13 @@ where
pub fn write_immediate(
&mut self,
src: Immediate<M::Provenance>,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
self.write_immediate_no_validate(src, dest)?;
- if M::enforce_validity(self, dest.layout) {
+ if M::enforce_validity(self, dest.layout()) {
// Data got changed, better make sure it matches the type!
- self.validate_operand(&self.place_to_op(dest)?)?;
+ self.validate_operand(&dest.to_op(self)?)?;
}
Ok(())
@@ -476,7 +596,7 @@ where
pub fn write_scalar(
&mut self,
val: impl Into<Scalar<M::Provenance>>,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
self.write_immediate(Immediate::Scalar(val.into()), dest)
}
@@ -486,7 +606,7 @@ where
pub fn write_pointer(
&mut self,
ptr: impl Into<Pointer<Option<M::Provenance>>>,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest)
}
@@ -497,32 +617,63 @@ where
fn write_immediate_no_validate(
&mut self,
src: Immediate<M::Provenance>,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
- assert!(dest.layout.is_sized(), "Cannot write unsized data");
- trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+ assert!(dest.layout().is_sized(), "Cannot write unsized immediate data");
// See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
// but not factored as a separate function.
- let mplace = match dest.place {
- Place::Local { frame, local } => {
- match M::access_local_mut(self, frame, local)? {
- Operand::Immediate(local) => {
- // Local can be updated in-place.
- *local = src;
- return Ok(());
- }
- Operand::Indirect(mplace) => {
- // The local is in memory, go on below.
- *mplace
+ let mplace = match dest.as_mplace_or_local() {
+ Right((frame, local, offset, align, layout)) => {
+ if offset.is_some() {
+ // This has been projected to a part of this local. We could have complicated
+ // logic to still keep this local as an `Operand`... but it's much easier to
+ // just fall back to the indirect path.
+ dest.force_mplace(self)?
+ } else {
+ match M::access_local_mut(self, frame, local)? {
+ Operand::Immediate(local_val) => {
+ // Local can be updated in-place.
+ *local_val = src;
+ // Double-check that the value we are storing and the local match each other.
+ // (*After* doing the update for borrow checker reasons.)
+ if cfg!(debug_assertions) {
+ let local_layout =
+ self.layout_of_local(&self.stack()[frame], local, None)?;
+ match (src, local_layout.abi) {
+ (Immediate::Scalar(scalar), Abi::Scalar(s)) => {
+ assert_eq!(scalar.size(), s.size(self))
+ }
+ (
+ Immediate::ScalarPair(a_val, b_val),
+ Abi::ScalarPair(a, b),
+ ) => {
+ assert_eq!(a_val.size(), a.size(self));
+ assert_eq!(b_val.size(), b.size(self));
+ }
+ (Immediate::Uninit, _) => {}
+ (src, abi) => {
+ bug!(
+ "value {src:?} cannot be written into local with type {} (ABI {abi:?})",
+ local_layout.ty
+ )
+ }
+ };
+ }
+ return Ok(());
+ }
+ Operand::Indirect(mplace) => {
+ // The local is in memory, go on below.
+ MPlaceTy { mplace: *mplace, align, layout }
+ }
}
}
}
- Place::Ptr(mplace) => mplace, // already referring to memory
+ Left(mplace) => mplace, // already referring to memory
};
// This is already in memory, write there.
- self.write_immediate_to_mplace_no_validate(src, dest.layout, dest.align, mplace)
+ self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.align, mplace.mplace)
}
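A sketch of the new fallback for offset-projected locals (editor's illustration; the helper and the assumption that field 0 is a `u8` scalar are made up):

fn write_one_to_first_field<'mir, 'tcx, M: Machine<'mir, 'tcx>>(
    ecx: &mut InterpCx<'mir, 'tcx, M>,
    local_place: &PlaceTy<'tcx, M::Provenance>, // backed by `Place::Local`
) -> InterpResult<'tcx> {
    // Projecting keeps the `Place::Local` representation, recording `offset: Some(..)`.
    let field = ecx.project_field(local_place, 0)?;
    // This write sees the offset and takes the `force_mplace` path: the whole local
    // moves into memory instead of tracking partially-written immediates.
    ecx.write_scalar(Scalar::from_u8(1), &field)
}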
/// Write an immediate to memory.
@@ -541,14 +692,17 @@ where
// wrong type.
let tcx = *self.tcx;
- let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout, align })? else {
+ let Some(mut alloc) =
+ self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout, align })?
+ else {
// zero-sized access
return Ok(());
};
match value {
Immediate::Scalar(scalar) => {
- let Abi::Scalar(s) = layout.abi else { span_bug!(
+ let Abi::Scalar(s) = layout.abi else {
+ span_bug!(
self.cur_span(),
"write_immediate_to_mplace: invalid Scalar layout: {layout:#?}",
)
@@ -561,7 +715,8 @@ where
// We checked `ptr_align` above, so all fields will have the alignment they need.
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
- let Abi::ScalarPair(a, b) = layout.abi else { span_bug!(
+ let Abi::ScalarPair(a, b) = layout.abi else {
+ span_bug!(
self.cur_span(),
"write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
layout
@@ -582,18 +737,29 @@ where
}
}
- pub fn write_uninit(&mut self, dest: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+ pub fn write_uninit(
+ &mut self,
+ dest: &impl Writeable<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
let mplace = match dest.as_mplace_or_local() {
Left(mplace) => mplace,
- Right((frame, local)) => {
- match M::access_local_mut(self, frame, local)? {
- Operand::Immediate(local) => {
- *local = Immediate::Uninit;
- return Ok(());
- }
- Operand::Indirect(mplace) => {
- // The local is in memory, go on below.
- MPlaceTy { mplace: *mplace, layout: dest.layout, align: dest.align }
+ Right((frame, local, offset, align, layout)) => {
+ if offset.is_some() {
+ // This has been projected to a part of this local. We could have complicated
+ // logic to still keep this local as an `Operand`... but it's much easier to
+ // just fall back to the indirect path.
+ // FIXME: share the logic with `write_immediate_no_validate`.
+ dest.force_mplace(self)?
+ } else {
+ match M::access_local_mut(self, frame, local)? {
+ Operand::Immediate(local) => {
+ *local = Immediate::Uninit;
+ return Ok(());
+ }
+ Operand::Indirect(mplace) => {
+ // The local is in memory, go on below.
+ MPlaceTy { mplace: *mplace, layout, align }
+ }
}
}
}
@@ -612,15 +778,15 @@ where
#[instrument(skip(self), level = "debug")]
pub fn copy_op(
&mut self,
- src: &OpTy<'tcx, M::Provenance>,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ src: &impl Readable<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
allow_transmute: bool,
) -> InterpResult<'tcx> {
self.copy_op_no_validate(src, dest, allow_transmute)?;
- if M::enforce_validity(self, dest.layout) {
+ if M::enforce_validity(self, dest.layout()) {
// Data got changed, better make sure it matches the type!
- self.validate_operand(&self.place_to_op(dest)?)?;
+ self.validate_operand(&dest.to_op(self)?)?;
}
Ok(())
@@ -633,20 +799,20 @@ where
#[instrument(skip(self), level = "debug")]
fn copy_op_no_validate(
&mut self,
- src: &OpTy<'tcx, M::Provenance>,
- dest: &PlaceTy<'tcx, M::Provenance>,
+ src: &impl Readable<'tcx, M::Provenance>,
+ dest: &impl Writeable<'tcx, M::Provenance>,
allow_transmute: bool,
) -> InterpResult<'tcx> {
// We do NOT compare the types for equality, because well-typed code can
// actually "transmute" `&mut T` to `&T` in an assignment without a cast.
let layout_compat =
- mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout);
+ mir_assign_valid_types(*self.tcx, self.param_env, src.layout(), dest.layout());
if !allow_transmute && !layout_compat {
span_bug!(
self.cur_span(),
"type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
- src.layout.ty,
- dest.layout.ty,
+ src.layout().ty,
+ dest.layout().ty,
);
}
@@ -659,13 +825,13 @@ where
// actually sized, due to a trivially false where-clause
// predicate like `where Self: Sized` with `Self = dyn Trait`.
// See #102553 for an example of such a predicate.
- if src.layout.is_unsized() {
- throw_inval!(SizeOfUnsizedType(src.layout.ty));
+ if src.layout().is_unsized() {
+ throw_inval!(ConstPropNonsense);
}
- if dest.layout.is_unsized() {
- throw_inval!(SizeOfUnsizedType(dest.layout.ty));
+ if dest.layout().is_unsized() {
+ throw_inval!(ConstPropNonsense);
}
- assert_eq!(src.layout.size, dest.layout.size);
+ assert_eq!(src.layout().size, dest.layout().size);
// Yay, we got a value that we can write directly.
return if layout_compat {
self.write_immediate_no_validate(*src_val, dest)
@@ -674,10 +840,10 @@ where
// loaded using the offsets defined by `src.layout`. When we put this back into
// the destination, we have to use the same offsets! So (a) we make sure we
// write back to memory, and (b) we use `dest` *with the source layout*.
- let dest_mem = self.force_allocation(dest)?;
+ let dest_mem = dest.force_mplace(self)?;
self.write_immediate_to_mplace_no_validate(
*src_val,
- src.layout,
+ src.layout(),
dest_mem.align,
*dest_mem,
)
@@ -686,9 +852,9 @@ where
Left(mplace) => mplace,
};
// Slow path, this does not fit into an immediate. Just memcpy.
- trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+ trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout().ty);
- let dest = self.force_allocation(&dest)?;
+ let dest = dest.force_mplace(self)?;
let Some((dest_size, _)) = self.size_and_align_of_mplace(&dest)? else {
span_bug!(self.cur_span(), "copy_op needs (dynamically) sized values")
};
@@ -720,8 +886,8 @@ where
place: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let mplace = match place.place {
- Place::Local { frame, local } => {
- match M::access_local_mut(self, frame, local)? {
+ Place::Local { frame, local, offset } => {
+ let whole_local = match M::access_local_mut(self, frame, local)? {
&mut Operand::Immediate(local_val) => {
// We need to make an allocation.
@@ -734,10 +900,11 @@ where
throw_unsup_format!("unsized locals are not supported");
}
let mplace = *self.allocate(local_layout, MemoryKind::Stack)?;
+ // Preserve old value. (As an optimization, we can skip this if it was uninit.)
if !matches!(local_val, Immediate::Uninit) {
- // Preserve old value. (As an optimization, we can skip this if it was uninit.)
- // We don't have to validate as we can assume the local
- // was already valid for its type.
+ // We don't have to validate as we can assume the local was already
+ // valid for its type. We must not use any part of `place` here, that
+ // could be a projection to a part of the local!
self.write_immediate_to_mplace_no_validate(
local_val,
local_layout,
@@ -745,29 +912,48 @@ where
mplace,
)?;
}
- // Now we can call `access_mut` again, asserting it goes well,
- // and actually overwrite things.
+ // Now we can call `access_mut` again, asserting it goes well, and actually
+ // overwrite things. This points to the entire allocation, not just the part
+ // the place refers to, i.e. we do this before we apply `offset`.
*M::access_local_mut(self, frame, local).unwrap() =
Operand::Indirect(mplace);
mplace
}
&mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
+ };
+ if let Some(offset) = offset {
+ whole_local.offset_with_meta_(offset, MemPlaceMeta::None, self)?
+ } else {
+ // Preserve wide place metadata, do not call `offset`.
+ whole_local
}
}
Place::Ptr(mplace) => mplace,
};
- // Return with the original layout, so that the caller can go on
+ // Return with the original layout and align, so that the caller can go on
Ok(MPlaceTy { mplace, layout: place.layout, align: place.align })
}
+ pub fn allocate_dyn(
+ &mut self,
+ layout: TyAndLayout<'tcx>,
+ kind: MemoryKind<M::MemoryKind>,
+ meta: MemPlaceMeta<M::Provenance>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ let Some((size, align)) = self.size_and_align_of(&meta, &layout)? else {
+ span_bug!(self.cur_span(), "cannot allocate space for `extern` type, size is not known")
+ };
+ let ptr = self.allocate_ptr(size, align, kind)?;
+ Ok(MPlaceTy::from_aligned_ptr_with_meta(ptr.into(), layout, meta))
+ }
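An editor's sketch of `allocate_dyn` in use (the helper and the `[u8]` example are illustrative, not from the patch):

fn alloc_byte_slice<'mir, 'tcx, M: Machine<'mir, 'tcx>>(
    ecx: &mut InterpCx<'mir, 'tcx, M>,
    len: u64,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
    let slice_ty = Ty::new_slice(*ecx.tcx, ecx.tcx.types.u8); // unsized `[u8]`
    let layout = ecx.layout_of(slice_ty)?;
    let meta = MemPlaceMeta::Meta(Scalar::from_target_usize(len, ecx));
    // `size_and_align_of` resolves the dynamic size from `meta` before allocating.
    ecx.allocate_dyn(layout, MemoryKind::Stack, meta)
}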
+
pub fn allocate(
&mut self,
layout: TyAndLayout<'tcx>,
kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
assert!(layout.is_sized());
- let ptr = self.allocate_ptr(layout.size, layout.align.abi, kind)?;
- Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
+ self.allocate_dyn(layout, kind, MemPlaceMeta::None)
}
/// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation.
@@ -798,10 +984,10 @@ where
operands: &IndexSlice<FieldIdx, mir::Operand<'tcx>>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
- self.write_uninit(&dest)?;
+ self.write_uninit(dest)?;
let (variant_index, variant_dest, active_field_index) = match *kind {
mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
- let variant_dest = self.place_downcast(&dest, variant_index)?;
+ let variant_dest = self.project_downcast(dest, variant_index)?;
(variant_index, variant_dest, active_field_index)
}
_ => (FIRST_VARIANT, dest.clone(), None),
@@ -811,11 +997,11 @@ where
}
for (field_index, operand) in operands.iter_enumerated() {
let field_index = active_field_index.unwrap_or(field_index);
- let field_dest = self.place_field(&variant_dest, field_index.as_usize())?;
+ let field_dest = self.project_field(&variant_dest, field_index.as_usize())?;
let op = self.eval_operand(operand, Some(field_dest.layout))?;
self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
}
- self.write_discriminant(variant_index, &dest)
+ self.write_discriminant(variant_index, dest)
}
pub fn raw_const_to_mplace(
@@ -851,22 +1037,24 @@ where
Ok((mplace, vtable))
}
- /// Turn an operand with a `dyn* Trait` type into an operand with the actual dynamic type.
- /// Aso returns the vtable.
- pub(super) fn unpack_dyn_star(
+ /// Turn a value of `dyn* Trait` type into a value of the actual dynamic type.
+ /// Also returns the vtable.
+ pub(super) fn unpack_dyn_star<P: Projectable<'tcx, M::Provenance>>(
&self,
- op: &OpTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, (OpTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> {
+ val: &P,
+ ) -> InterpResult<'tcx, (P, Pointer<Option<M::Provenance>>)> {
assert!(
- matches!(op.layout.ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
+ matches!(val.layout().ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
"`unpack_dyn_star` only makes sense on `dyn*` types"
);
- let data = self.operand_field(&op, 0)?;
- let vtable = self.operand_field(&op, 1)?;
- let vtable = self.read_pointer(&vtable)?;
+ let data = self.project_field(val, 0)?;
+ let vtable = self.project_field(val, 1)?;
+ let vtable = self.read_pointer(&vtable.to_op(self)?)?;
let (ty, _) = self.get_ptr_vtable(vtable)?;
let layout = self.layout_of(ty)?;
- let data = data.transmute(layout);
+ // `data` is already the right value, just with the wrong type. So we transmute it
+ // by projecting at offset 0.
+ let data = data.transmute(layout, self)?;
Ok((data, vtable))
}
}
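An editor's note, not part of the diff: making `unpack_dyn_star` generic over `Projectable` lets one helper serve both reads (`OpTy`) and writes (`MPlaceTy`). A hypothetical use from within the `interpret` module (the function is `pub(super)`):

fn dyn_star_payload<'mir, 'tcx, M: Machine<'mir, 'tcx>>(
    ecx: &InterpCx<'mir, 'tcx, M>,
    val: &MPlaceTy<'tcx, M::Provenance>, // must be of some `dyn* Trait` type
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
    // Field 0 is the pointer-sized data, field 1 the vtable; `data` comes back
    // re-typed to the concrete type recorded in the vtable.
    let (data, _vtable) = ecx.unpack_dyn_star(val)?;
    Ok(data)
}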
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index d7d31fe18..882097ad2 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -7,18 +7,70 @@
//! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually
//! implement the logic on OpTy, and MPlaceTy calls that.
-use either::{Left, Right};
-
use rustc_middle::mir;
use rustc_middle::ty;
-use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty;
-use rustc_target::abi::{self, Abi, VariantIdx};
+use rustc_middle::ty::TyCtxt;
+use rustc_target::abi::HasDataLayout;
+use rustc_target::abi::Size;
+use rustc_target::abi::{self, VariantIdx};
+
+use super::{InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Provenance, Scalar};
-use super::{
- ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, PlaceTy,
- Provenance, Scalar,
-};
+/// A thing that we can project into, and that has a layout.
+pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
+ /// Get the layout.
+ fn layout(&self) -> TyAndLayout<'tcx>;
+
+ /// Get the metadata of a wide value.
+ fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>>;
+
+ fn len<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, u64> {
+ self.meta(ecx)?.len(self.layout(), ecx)
+ }
+
+ /// Offset the value by the given amount, replacing the layout and metadata.
+ fn offset_with_meta(
+ &self,
+ offset: Size,
+ meta: MemPlaceMeta<Prov>,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self>;
+
+ fn offset(
+ &self,
+ offset: Size,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self> {
+ assert!(layout.is_sized());
+ self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+ }
+
+ fn transmute(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self> {
+ assert_eq!(self.layout().size, layout.size);
+ self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, cx)
+ }
+
+ /// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for
+ /// reading from this thing.
+ fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
+}
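An editor's sketch of how the provided methods compose (not from the patch): `offset` is `offset_with_meta` without metadata, and since all array elements share one layout, indexing reduces to a multiply-and-offset. Assumes the `Projectable` impl for `OpTy` from the operand.rs part of this diff:

fn array_elem<'mir, 'tcx, M: Machine<'mir, 'tcx>>(
    ecx: &InterpCx<'mir, 'tcx, M>,
    base: &OpTy<'tcx, M::Provenance>, // assumed to have array type, e.g. `[u32; 4]`
    index: u64,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
    let elem_layout = base.layout().field(ecx, 0); // every element has this layout
    // `Size` multiplication; the real `project_index` additionally bounds-checks.
    base.offset(elem_layout.size * index, elem_layout, ecx)
}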
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
@@ -26,167 +78,83 @@ where
Prov: Provenance + 'static,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
- //# Field access
-
/// Offset a pointer to project to a field of a struct/union. This is always possible
/// without allocating, so it can take `&self`. Also returns the field's layout.
- /// This supports both struct and array fields.
+ /// This supports both struct and array fields, but not slices!
///
/// This also works for arrays, but then the `usize` index type is limiting.
/// For indexing into arrays, use `project_index`.
- pub fn mplace_field(
+ pub fn project_field<P: Projectable<'tcx, M::Provenance>>(
&self,
- base: &MPlaceTy<'tcx, M::Provenance>,
+ base: &P,
field: usize,
- ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
- let offset = base.layout.fields.offset(field);
- let field_layout = base.layout.field(self, field);
+ ) -> InterpResult<'tcx, P> {
+ // Slices nominally have length 0, so `fields.offset` would panic on them.
+ debug_assert!(
+ !matches!(base.layout().ty.kind(), ty::Slice(..)),
+ "`field` projection called on a slice -- call `index` projection instead"
+ );
+ let offset = base.layout().fields.offset(field);
+ let field_layout = base.layout().field(self, field);
// Offset may need adjustment for unsized fields.
let (meta, offset) = if field_layout.is_unsized() {
+ if base.layout().is_sized() {
+ // An unsized field of a sized type? Sure...
+ // But const-prop actually feeds us such nonsense MIR! (see test `const_prop/issue-86351.rs`)
+ throw_inval!(ConstPropNonsense);
+ }
+ let base_meta = base.meta(self)?;
// Re-use parent metadata to determine dynamic field layout.
// With custom DSTS, this *will* execute user-defined code, but the same
// happens at run-time so that's okay.
- match self.size_and_align_of(&base.meta, &field_layout)? {
- Some((_, align)) => (base.meta, offset.align_to(align)),
+ match self.size_and_align_of(&base_meta, &field_layout)? {
+ Some((_, align)) => (base_meta, offset.align_to(align)),
None => {
// For unsized types with an extern type tail we perform no adjustments.
// NOTE: keep this in sync with `PlaceRef::project_field` in the codegen backend.
- assert!(matches!(base.meta, MemPlaceMeta::None));
- (base.meta, offset)
+ assert!(matches!(base_meta, MemPlaceMeta::None));
+ (base_meta, offset)
}
}
} else {
- // base.meta could be present; we might be accessing a sized field of an unsized
+ // base_meta could be present; we might be accessing a sized field of an unsized
// struct.
(MemPlaceMeta::None, offset)
};
- // We do not look at `base.layout.align` nor `field_layout.align`, unlike
- // codegen -- mostly to see if we can get away with that
base.offset_with_meta(offset, meta, field_layout, self)
}
- /// Gets the place of a field inside the place, and also the field's type.
- /// Just a convenience function, but used quite a bit.
- /// This is the only projection that might have a side-effect: We cannot project
- /// into the field of a local `ScalarPair`, we have to first allocate it.
- pub fn place_field(
- &mut self,
- base: &PlaceTy<'tcx, M::Provenance>,
- field: usize,
- ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- // FIXME: We could try to be smarter and avoid allocation for fields that span the
- // entire place.
- let base = self.force_allocation(base)?;
- Ok(self.mplace_field(&base, field)?.into())
- }
-
- pub fn operand_field(
+ /// Downcasting to an enum variant.
+ pub fn project_downcast<P: Projectable<'tcx, M::Provenance>>(
&self,
- base: &OpTy<'tcx, M::Provenance>,
- field: usize,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- let base = match base.as_mplace_or_imm() {
- Left(ref mplace) => {
- // We can reuse the mplace field computation logic for indirect operands.
- let field = self.mplace_field(mplace, field)?;
- return Ok(field.into());
- }
- Right(value) => value,
- };
-
- let field_layout = base.layout.field(self, field);
- let offset = base.layout.fields.offset(field);
- // This makes several assumptions about what layouts we will encounter; we match what
- // codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
- let field_val: Immediate<_> = match (*base, base.layout.abi) {
- // if the entire value is uninit, then so is the field (can happen in ConstProp)
- (Immediate::Uninit, _) => Immediate::Uninit,
- // the field contains no information, can be left uninit
- _ if field_layout.is_zst() => Immediate::Uninit,
- // the field covers the entire type
- _ if field_layout.size == base.layout.size => {
- assert!(match (base.layout.abi, field_layout.abi) {
- (Abi::Scalar(..), Abi::Scalar(..)) => true,
- (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
- _ => false,
- });
- assert!(offset.bytes() == 0);
- *base
- }
- // extract fields from types with `ScalarPair` ABI
- (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
- assert!(matches!(field_layout.abi, Abi::Scalar(..)));
- Immediate::from(if offset.bytes() == 0 {
- debug_assert_eq!(field_layout.size, a.size(self));
- a_val
- } else {
- debug_assert_eq!(offset, a.size(self).align_to(b.align(self).abi));
- debug_assert_eq!(field_layout.size, b.size(self));
- b_val
- })
- }
- // everything else is a bug
- _ => span_bug!(
- self.cur_span(),
- "invalid field access on immediate {}, layout {:#?}",
- base,
- base.layout
- ),
- };
-
- Ok(ImmTy::from_immediate(field_val, field_layout).into())
- }
-
- //# Downcasting
-
- pub fn mplace_downcast(
- &self,
- base: &MPlaceTy<'tcx, M::Provenance>,
+ base: &P,
variant: VariantIdx,
- ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ ) -> InterpResult<'tcx, P> {
+ assert!(!base.meta(self)?.has_meta());
// Downcasts only change the layout.
// (In particular, no check about whether this is even the active variant -- that's by design,
// see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
- assert!(!base.meta.has_meta());
- let mut base = *base;
- base.layout = base.layout.for_variant(self, variant);
- Ok(base)
- }
-
- pub fn place_downcast(
- &self,
- base: &PlaceTy<'tcx, M::Provenance>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- // Downcast just changes the layout
- let mut base = base.clone();
- base.layout = base.layout.for_variant(self, variant);
- Ok(base)
- }
-
- pub fn operand_downcast(
- &self,
- base: &OpTy<'tcx, M::Provenance>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- // Downcast just changes the layout
- let mut base = base.clone();
- base.layout = base.layout.for_variant(self, variant);
- Ok(base)
+ // So we just "offset" by 0.
+ let layout = base.layout().for_variant(self, variant);
+ if layout.abi.is_uninhabited() {
+ // `read_discriminant` should have excluded uninhabited variants... but ConstProp calls
+ // us on dead code.
+ throw_inval!(ConstPropNonsense)
+ }
+ // This cannot be `transmute` as variants *can* have a smaller size than the entire enum.
+ base.offset(Size::ZERO, layout, self)
}
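Editor's illustration (not in the diff): downcasting is layout-only, so nothing stops a caller from projecting into a non-active variant; correctness relies on callers consulting `read_discriminant` first.

fn field_of_variant<'mir, 'tcx, M: Machine<'mir, 'tcx>>(
    ecx: &InterpCx<'mir, 'tcx, M>,
    base: &OpTy<'tcx, M::Provenance>, // an enum operand
    variant: VariantIdx,
    field: usize,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
    // No check that `variant` is the active one -- by design, see above.
    let variant_val = ecx.project_downcast(base, variant)?;
    ecx.project_field(&variant_val, field)
}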
- //# Slice indexing
-
- #[inline(always)]
- pub fn operand_index(
+ /// Compute the offset and field layout for accessing the given index.
+ pub fn project_index<P: Projectable<'tcx, M::Provenance>>(
&self,
- base: &OpTy<'tcx, M::Provenance>,
+ base: &P,
index: u64,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ ) -> InterpResult<'tcx, P> {
// Not using the layout method because we want to compute on u64
- match base.layout.fields {
+ let (offset, field_layout) = match base.layout().fields {
abi::FieldsShape::Array { stride, count: _ } => {
// `count` is nonsense for slices, use the dynamic length instead.
let len = base.len(self)?;
@@ -196,63 +164,26 @@ where
}
let offset = stride * index; // `Size` multiplication
// All fields have the same layout.
- let field_layout = base.layout.field(self, 0);
- base.offset(offset, field_layout, self)
+ let field_layout = base.layout().field(self, 0);
+ (offset, field_layout)
}
_ => span_bug!(
self.cur_span(),
"`project_index` called on non-array type {:?}",
- base.layout.ty
+ base.layout().ty
),
- }
- }
-
- /// Iterates over all fields of an array. Much more efficient than doing the
- /// same by repeatedly calling `operand_index`.
- pub fn operand_array_fields<'a>(
- &self,
- base: &'a OpTy<'tcx, Prov>,
- ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, OpTy<'tcx, Prov>>> + 'a> {
- let len = base.len(self)?; // also asserts that we have a type where this makes sense
- let abi::FieldsShape::Array { stride, .. } = base.layout.fields else {
- span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
};
- let field_layout = base.layout.field(self, 0);
- let dl = &self.tcx.data_layout;
- // `Size` multiplication
- Ok((0..len).map(move |i| base.offset(stride * i, field_layout, dl)))
- }
-
- /// Index into an array.
- pub fn mplace_index(
- &self,
- base: &MPlaceTy<'tcx, M::Provenance>,
- index: u64,
- ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
- Ok(self.operand_index(&base.into(), index)?.assert_mem_place())
- }
- pub fn place_index(
- &mut self,
- base: &PlaceTy<'tcx, M::Provenance>,
- index: u64,
- ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- // There's not a lot we can do here, since we cannot have a place to a part of a local. If
- // we are accessing the only element of a 1-element array, it's still the entire local...
- // that doesn't seem worth it.
- let base = self.force_allocation(base)?;
- Ok(self.mplace_index(&base, index)?.into())
+ base.offset(offset, field_layout, self)
}
- //# ConstantIndex support
-
- fn operand_constant_index(
+ fn project_constant_index<P: Projectable<'tcx, M::Provenance>>(
&self,
- base: &OpTy<'tcx, M::Provenance>,
+ base: &P,
offset: u64,
min_length: u64,
from_end: bool,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ ) -> InterpResult<'tcx, P> {
let n = base.len(self)?;
if n < min_length {
// This can only be reached in ConstProp and non-rustc-MIR.
@@ -267,32 +198,38 @@ where
offset
};
- self.operand_index(base, index)
+ self.project_index(base, index)
}
- fn place_constant_index(
- &mut self,
- base: &PlaceTy<'tcx, M::Provenance>,
- offset: u64,
- min_length: u64,
- from_end: bool,
- ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- let base = self.force_allocation(base)?;
- Ok(self
- .operand_constant_index(&base.into(), offset, min_length, from_end)?
- .assert_mem_place()
- .into())
+ /// Iterates over all fields of an array. Much more efficient than doing the
+ /// same by repeatedly calling `project_index`.
+ pub fn project_array_fields<'a, P: Projectable<'tcx, M::Provenance>>(
+ &self,
+ base: &'a P,
+ ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, P>> + 'a>
+ where
+ 'tcx: 'a,
+ {
+ let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
+ span_bug!(self.cur_span(), "project_array_fields: expected an array layout");
+ };
+ let len = base.len(self)?;
+ let field_layout = base.layout().field(self, 0);
+ let tcx: TyCtxt<'tcx> = *self.tcx;
+ // `Size` multiplication
+ Ok((0..len).map(move |i| {
+ base.offset_with_meta(stride * i, MemPlaceMeta::None, field_layout, &tcx)
+ }))
}
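A sketch of consuming the iterator (editor's addition; assumes an operand of type `[u64; N]` so each element reads as a scalar):

fn sum_u64_array<'mir, 'tcx, M: Machine<'mir, 'tcx>>(
    ecx: &InterpCx<'mir, 'tcx, M>,
    arr: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, u64> {
    let mut total = 0u64;
    // One stride computation per element, no per-element layout lookups.
    for elem in ecx.project_array_fields(arr)? {
        total = total.wrapping_add(ecx.read_scalar(&elem?)?.to_u64()?);
    }
    Ok(total)
}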
- //# Subslicing
-
- fn operand_subslice(
+ /// Subslicing
+ fn project_subslice<P: Projectable<'tcx, M::Provenance>>(
&self,
- base: &OpTy<'tcx, M::Provenance>,
+ base: &P,
from: u64,
to: u64,
from_end: bool,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ ) -> InterpResult<'tcx, P> {
let len = base.len(self)?; // also asserts that we have a type where this makes sense
let actual_to = if from_end {
if from.checked_add(to).map_or(true, |to| to > len) {
@@ -306,16 +243,20 @@ where
// Not using layout method because that works with usize, and does not work with slices
// (that have count 0 in their layout).
- let from_offset = match base.layout.fields {
+ let from_offset = match base.layout().fields {
abi::FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
_ => {
- span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout)
+ span_bug!(
+ self.cur_span(),
+ "unexpected layout of index access: {:#?}",
+ base.layout()
+ )
}
};
// Compute meta and new layout
let inner_len = actual_to.checked_sub(from).unwrap();
- let (meta, ty) = match base.layout.ty.kind() {
+ let (meta, ty) = match base.layout().ty.kind() {
// It is not nice to match on the type, but that seems to be the only way to
// implement this.
ty::Array(inner, _) => {
@@ -323,85 +264,43 @@ where
}
ty::Slice(..) => {
let len = Scalar::from_target_usize(inner_len, self);
- (MemPlaceMeta::Meta(len), base.layout.ty)
+ (MemPlaceMeta::Meta(len), base.layout().ty)
}
_ => {
- span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty)
+ span_bug!(
+ self.cur_span(),
+ "cannot subslice non-array type: `{:?}`",
+ base.layout().ty
+ )
}
};
let layout = self.layout_of(ty)?;
- base.offset_with_meta(from_offset, meta, layout, self)
- }
-
- pub fn place_subslice(
- &mut self,
- base: &PlaceTy<'tcx, M::Provenance>,
- from: u64,
- to: u64,
- from_end: bool,
- ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- let base = self.force_allocation(base)?;
- Ok(self.operand_subslice(&base.into(), from, to, from_end)?.assert_mem_place().into())
- }
-
- //# Applying a general projection
- /// Projects into a place.
- #[instrument(skip(self), level = "trace")]
- pub fn place_projection(
- &mut self,
- base: &PlaceTy<'tcx, M::Provenance>,
- proj_elem: mir::PlaceElem<'tcx>,
- ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- use rustc_middle::mir::ProjectionElem::*;
- Ok(match proj_elem {
- OpaqueCast(ty) => {
- let mut place = base.clone();
- place.layout = self.layout_of(ty)?;
- place
- }
- Field(field, _) => self.place_field(base, field.index())?,
- Downcast(_, variant) => self.place_downcast(base, variant)?,
- Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
- Index(local) => {
- let layout = self.layout_of(self.tcx.types.usize)?;
- let n = self.local_to_op(self.frame(), local, Some(layout))?;
- let n = self.read_target_usize(&n)?;
- self.place_index(base, n)?
- }
- ConstantIndex { offset, min_length, from_end } => {
- self.place_constant_index(base, offset, min_length, from_end)?
- }
- Subslice { from, to, from_end } => self.place_subslice(base, from, to, from_end)?,
- })
+ base.offset_with_meta(from_offset, meta, layout, self)
}
+ /// Applying a general projection
#[instrument(skip(self), level = "trace")]
- pub fn operand_projection(
- &self,
- base: &OpTy<'tcx, M::Provenance>,
- proj_elem: mir::PlaceElem<'tcx>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ pub fn project<P>(&self, base: &P, proj_elem: mir::PlaceElem<'tcx>) -> InterpResult<'tcx, P>
+ where
+ P: Projectable<'tcx, M::Provenance> + From<MPlaceTy<'tcx, M::Provenance>> + std::fmt::Debug,
+ {
use rustc_middle::mir::ProjectionElem::*;
Ok(match proj_elem {
- OpaqueCast(ty) => {
- let mut op = base.clone();
- op.layout = self.layout_of(ty)?;
- op
- }
- Field(field, _) => self.operand_field(base, field.index())?,
- Downcast(_, variant) => self.operand_downcast(base, variant)?,
- Deref => self.deref_operand(base)?.into(),
+ OpaqueCast(ty) => base.transmute(self.layout_of(ty)?, self)?,
+ Field(field, _) => self.project_field(base, field.index())?,
+ Downcast(_, variant) => self.project_downcast(base, variant)?,
+ Deref => self.deref_pointer(&base.to_op(self)?)?.into(),
Index(local) => {
let layout = self.layout_of(self.tcx.types.usize)?;
let n = self.local_to_op(self.frame(), local, Some(layout))?;
let n = self.read_target_usize(&n)?;
- self.operand_index(base, n)?
+ self.project_index(base, n)?
}
ConstantIndex { offset, min_length, from_end } => {
- self.operand_constant_index(base, offset, min_length, from_end)?
+ self.project_constant_index(base, offset, min_length, from_end)?
}
- Subslice { from, to, from_end } => self.operand_subslice(base, from, to, from_end)?,
+ Subslice { from, to, from_end } => self.project_subslice(base, from, to, from_end)?,
})
}
}
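An editor's sketch (not from the patch): the single generic `project` lets one loop serve all three base types, mirroring what `eval_place` in place.rs now does for `PlaceTy`:

fn apply_projections<'mir, 'tcx, M: Machine<'mir, 'tcx>>(
    ecx: &InterpCx<'mir, 'tcx, M>,
    mut base: OpTy<'tcx, M::Provenance>,
    mir_place: &mir::Place<'tcx>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
    for elem in mir_place.projection.iter() {
        base = ecx.project(&base, elem)?; // `P = OpTy` here; `PlaceTy`/`MPlaceTy` work too
    }
    Ok(base)
}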
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 619da8abb..0740894a4 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -8,7 +8,7 @@ use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::LayoutOf;
-use super::{ImmTy, InterpCx, Machine};
+use super::{ImmTy, InterpCx, Machine, Projectable};
use crate::util;
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
@@ -178,7 +178,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// The operand always has the same type as the result.
let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
let val = self.unary_op(un_op, &val)?;
- assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
+ assert_eq!(val.layout, dest.layout, "layout mismatch for result of {un_op:?}");
self.write_immediate(*val, &dest)?;
}
@@ -197,8 +197,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.get_place_alloc_mut(&dest)?;
} else {
// Write the src to the first element.
- let first = self.mplace_field(&dest, 0)?;
- self.copy_op(&src, &first.into(), /*allow_transmute*/ false)?;
+ let first = self.project_index(&dest, 0)?;
+ self.copy_op(&src, &first, /*allow_transmute*/ false)?;
// This is performance-sensitive code for big static/const arrays! So we
// avoid writing each operand individually and instead just make many copies
@@ -208,13 +208,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let rest_ptr = first_ptr.offset(elem_size, self)?;
// For the alignment of `rest_ptr`, we crucially do *not* use `first.align` as
// that place might be more aligned than its type mandates (a `u8` array could
- // be 4-aligned if it sits at the right spot in a struct). Instead we use
- // `first.layout.align`, i.e., the alignment given by the type.
+ // be 4-aligned if it sits at the right spot in a struct). We also have to
+ // factor in the element size.
self.mem_copy_repeatedly(
first_ptr,
- first.align,
+ dest.align,
rest_ptr,
- first.layout.align.abi,
+ dest.align.restrict_for_offset(elem_size),
elem_size,
length - 1,
/*nonoverlapping:*/ true,
@@ -224,8 +224,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Len(place) => {
let src = self.eval_place(place)?;
- let op = self.place_to_op(&src)?;
- let len = op.len(self)?;
+ let len = src.len(self)?;
self.write_scalar(Scalar::from_target_usize(len, self), &dest)?;
}
@@ -248,7 +247,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
AddressOf(_, place) => {
// Figure out whether this is an addr_of of an already raw place.
- let place_base_raw = if place.has_deref() {
+ let place_base_raw = if place.is_indirect_first_projection() {
let ty = self.frame().body.local_decls[place.local].ty;
ty.is_unsafe_ptr()
} else {
@@ -270,12 +269,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty)?;
let layout = self.layout_of(ty)?;
if let mir::NullOp::SizeOf | mir::NullOp::AlignOf = null_op && layout.is_unsized() {
- // FIXME: This should be a span_bug (#80742)
- self.tcx.sess.delay_span_bug(
+ span_bug!(
self.frame().current_span(),
- format!("{null_op:?} MIR operator called for unsized type {ty}"),
+ "{null_op:?} MIR operator called for unsized type {ty}",
);
- throw_inval!(SizeOfUnsizedType(ty));
}
let val = match null_op {
mir::NullOp::SizeOf => layout.size.bytes(),
@@ -302,8 +299,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Discriminant(place) => {
let op = self.eval_place_to_op(place, None)?;
- let discr_val = self.read_discriminant(&op)?.0;
- self.write_scalar(discr_val, &dest)?;
+ let variant = self.read_discriminant(&op)?;
+ let discr = self.discriminant_for_variant(op.layout, variant)?;
+ self.write_scalar(discr, &dest)?;
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 15823a597..3c03172bb 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -1,7 +1,8 @@
use std::borrow::Cow;
+use either::Either;
use rustc_ast::ast::InlineAsmOptions;
-use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
use rustc_middle::ty::Instance;
use rustc_middle::{
mir,
@@ -12,12 +13,63 @@ use rustc_target::abi::call::{ArgAbi, ArgAttribute, ArgAttributes, FnAbi, PassMo
use rustc_target::spec::abi::Abi;
use super::{
- FnVal, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemoryKind, OpTy, Operand,
- PlaceTy, Scalar, StackPopCleanup,
+ AllocId, FnVal, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemoryKind, OpTy,
+ Operand, PlaceTy, Provenance, Scalar, StackPopCleanup,
};
use crate::fluent_generated as fluent;
+/// An argument passed to a function.
+#[derive(Clone, Debug)]
+pub enum FnArg<'tcx, Prov: Provenance = AllocId> {
+ /// Pass a copy of the given operand.
+ Copy(OpTy<'tcx, Prov>),
+ /// Allow for the argument to be passed in-place: destroy the value originally stored at that place and
+ /// make the place inaccessible for the duration of the function call.
+ InPlace(PlaceTy<'tcx, Prov>),
+}
+
+impl<'tcx, Prov: Provenance> FnArg<'tcx, Prov> {
+ pub fn layout(&self) -> &TyAndLayout<'tcx> {
+ match self {
+ FnArg::Copy(op) => &op.layout,
+ FnArg::InPlace(place) => &place.layout,
+ }
+ }
+}
+
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Make a copy of the given fn_arg. Any `InPlace` is degraded to a copy; the
+ /// original memory is not protected.
+ pub fn copy_fn_arg(
+ &self,
+ arg: &FnArg<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ match arg {
+ FnArg::Copy(op) => Ok(op.clone()),
+ FnArg::InPlace(place) => self.place_to_op(&place),
+ }
+ }
+
+ /// Make a copy of the given fn_args. Any `InPlace` is degraded to a copy; the
+ /// original memory is not protected.
+ pub fn copy_fn_args(
+ &self,
+ args: &[FnArg<'tcx, M::Provenance>],
+ ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::Provenance>>> {
+ args.iter().map(|fn_arg| self.copy_fn_arg(fn_arg)).collect()
+ }
+
+ pub fn fn_arg_field(
+ &self,
+ arg: &FnArg<'tcx, M::Provenance>,
+ field: usize,
+ ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
+ Ok(match arg {
+ FnArg::Copy(op) => FnArg::Copy(self.project_field(op, field)?),
+ FnArg::InPlace(place) => FnArg::InPlace(self.project_field(place, field)?),
+ })
+ }
+
pub(super) fn eval_terminator(
&mut self,
terminator: &mir::Terminator<'tcx>,
@@ -68,14 +120,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let old_stack = self.frame_idx();
let old_loc = self.frame().loc;
let func = self.eval_operand(func, None)?;
- let args = self.eval_operands(args)?;
+ let args = self.eval_fn_call_arguments(args)?;
let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
let fn_sig =
self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
let extra_args = &args[fn_sig.inputs().len()..];
let extra_args =
- self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout.ty));
+ self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty));
let (fn_val, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
ty::FnPtr(_sig) => {
@@ -83,8 +135,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let fn_val = self.get_ptr_fn(fn_ptr)?;
(fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
}
- ty::FnDef(def_id, substs) => {
- let instance = self.resolve(def_id, substs)?;
+ ty::FnDef(def_id, args) => {
+ let instance = self.resolve(def_id, args)?;
(
FnVal::Instance(instance),
self.fn_abi_of_instance(instance, extra_args)?,
@@ -185,6 +237,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(())
}
+ /// Evaluate the arguments of a function call
+ pub(super) fn eval_fn_call_arguments(
+ &self,
+ ops: &[mir::Operand<'tcx>],
+ ) -> InterpResult<'tcx, Vec<FnArg<'tcx, M::Provenance>>> {
+ ops.iter()
+ .map(|op| {
+ Ok(match op {
+ mir::Operand::Move(place) => FnArg::InPlace(self.eval_place(*place)?),
+ _ => FnArg::Copy(self.eval_operand(op, None)?),
+ })
+ })
+ .collect()
+ }
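Editor's note (illustration only): `Operand::Move` is the sole source of `FnArg::InPlace`; `Copy` and `Constant` operands are evaluated eagerly into plain `OpTy`s. A hypothetical predicate mirroring the match above:

fn will_pass_in_place(op: &mir::Operand<'_>) -> bool {
    // Only a `Move` operand names a place that may be protected for the call.
    matches!(op, mir::Operand::Move(_))
}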
+
fn check_argument_compat(
caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
@@ -275,7 +342,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
fn pass_argument<'x, 'y>(
&mut self,
caller_args: &mut impl Iterator<
- Item = (&'x OpTy<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
+ Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
>,
callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
callee_arg: &PlaceTy<'tcx, M::Provenance>,
@@ -295,35 +362,38 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Now, check
if !Self::check_argument_compat(caller_abi, callee_abi) {
let callee_ty = format!("{}", callee_arg.layout.ty);
- let caller_ty = format!("{}", caller_arg.layout.ty);
+ let caller_ty = format!("{}", caller_arg.layout().ty);
throw_ub_custom!(
fluent::const_eval_incompatible_types,
callee_ty = callee_ty,
caller_ty = caller_ty,
)
}
+ // We work with a copy of the argument for now; if this is in-place argument passing, we
+ // will later protect the source it comes from. This means the callee cannot observe if we
+ // did in-place or by-copy argument passing, except for pointer equality tests.
+ let caller_arg_copy = self.copy_fn_arg(&caller_arg)?;
// Special handling for unsized parameters.
- if caller_arg.layout.is_unsized() {
+ if caller_arg_copy.layout.is_unsized() {
// `check_argument_compat` ensures that both have the same type, so we know they will use the metadata the same way.
- assert_eq!(caller_arg.layout.ty, callee_arg.layout.ty);
+ assert_eq!(caller_arg_copy.layout.ty, callee_arg.layout.ty);
// We have to properly pre-allocate the memory for the callee.
- // So let's tear down some wrappers.
+ // So let's tear down some abstractions.
// This all has to be in memory, there are no immediate unsized values.
- let src = caller_arg.assert_mem_place();
+ let src = caller_arg_copy.assert_mem_place();
// The destination cannot be one of these "spread args".
- let (dest_frame, dest_local) = callee_arg.assert_local();
+ let (dest_frame, dest_local, dest_offset) = callee_arg
+ .as_mplace_or_local()
+ .right()
+ .expect("callee fn arguments must be locals");
// We are just initializing things, so there can't be anything here yet.
assert!(matches!(
*self.local_to_op(&self.stack()[dest_frame], dest_local, None)?,
Operand::Immediate(Immediate::Uninit)
));
+ assert_eq!(dest_offset, None);
// Allocate enough memory to hold `src`.
- let Some((size, align)) = self.size_and_align_of_mplace(&src)? else {
- span_bug!(self.cur_span(), "unsized fn arg with `extern` type tail should not be allowed")
- };
- let ptr = self.allocate_ptr(size, align, MemoryKind::Stack)?;
- let dest_place =
- MPlaceTy::from_aligned_ptr_with_meta(ptr.into(), callee_arg.layout, src.meta);
+ let dest_place = self.allocate_dyn(src.layout, MemoryKind::Stack, src.meta)?;
// Update the local to be that new place.
*M::access_local_mut(self, dest_frame, dest_local)? = Operand::Indirect(*dest_place);
}
@@ -331,7 +401,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
// is true for all `copy_op`, but there are a lot of special cases for argument passing
// specifically.)
- self.copy_op(&caller_arg, callee_arg, /*allow_transmute*/ true)
+ self.copy_op(&caller_arg_copy, callee_arg, /*allow_transmute*/ true)?;
+ // If this was an in-place pass, protect the place it comes from for the duration of the call.
+ if let FnArg::InPlace(place) = caller_arg {
+ M::protect_in_place_function_argument(self, place)?;
+ }
+ Ok(())
}
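An editor's sketch of the `Machine` hook this relies on; the hook is added in the machine.rs part of this diff (not shown here), so the body below is a plausible default for a machine without an aliasing model, not the actual implementation:

// Inside an `impl Machine` block, for illustration only:
fn protect_in_place_function_argument(
    ecx: &mut InterpCx<'mir, 'tcx, Self>,
    place: &PlaceTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx> {
    // The callee must not be able to tell whether the argument was copied or
    // passed in place; without retagging, de-initializing the source is the
    // best available approximation.
    ecx.write_uninit(place)
}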
/// Call this function -- pushing the stack frame and initializing the arguments.
@@ -346,7 +421,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&mut self,
fn_val: FnVal<'tcx, M::ExtraFnVal>,
(caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>),
- args: &[OpTy<'tcx, M::Provenance>],
+ args: &[FnArg<'tcx, M::Provenance>],
with_caller_location: bool,
destination: &PlaceTy<'tcx, M::Provenance>,
target: Option<mir::BasicBlock>,
@@ -372,8 +447,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
match instance.def {
ty::InstanceDef::Intrinsic(def_id) => {
assert!(self.tcx.is_intrinsic(def_id));
- // caller_fn_abi is not relevant here, we interpret the arguments directly for each intrinsic.
- M::call_intrinsic(self, instance, args, destination, target, unwind)
+ // FIXME: Should `InPlace` arguments be reset to uninit?
+ M::call_intrinsic(
+ self,
+ instance,
+ &self.copy_fn_args(args)?,
+ destination,
+ target,
+ unwind,
+ )
}
ty::InstanceDef::VTableShim(..)
| ty::InstanceDef::ReifyShim(..)
@@ -385,10 +467,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
| ty::InstanceDef::ThreadLocalShim(..)
| ty::InstanceDef::Item(_) => {
// We need MIR for this fn
- let Some((body, instance)) =
- M::find_mir_or_eval_fn(self, instance, caller_abi, args, destination, target, unwind)? else {
- return Ok(());
- };
+ let Some((body, instance)) = M::find_mir_or_eval_fn(
+ self,
+ instance,
+ caller_abi,
+ args,
+ destination,
+ target,
+ unwind,
+ )?
+ else {
+ return Ok(());
+ };
// Compute callee information using the `instance` returned by
// `find_mir_or_eval_fn`.
@@ -409,6 +499,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
+ // Check that all target features required by the callee (i.e., from
+ // the attribute `#[target_feature(enable = ...)]`) are enabled at
+ // compile time.
+ self.check_fn_target_features(instance)?;
+
if !callee_fn_abi.can_unwind {
// The callee cannot unwind, so force the `Unreachable` unwind handling.
unwind = mir::UnwindAction::Unreachable;
@@ -428,7 +523,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
"caller ABI: {:?}, args: {:#?}",
caller_abi,
args.iter()
- .map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
+ .map(|arg| (
+ arg.layout().ty,
+ match arg {
+ FnArg::Copy(op) => format!("copy({:?})", *op),
+ FnArg::InPlace(place) => format!("in-place({:?})", *place),
+ }
+ ))
.collect::<Vec<_>>()
);
trace!(
@@ -449,7 +550,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// last incoming argument. These two iterators do not have the same type,
// so to keep the code paths uniform we accept an allocation
// (for RustCall ABI only).
- let caller_args: Cow<'_, [OpTy<'tcx, M::Provenance>]> =
+ let caller_args: Cow<'_, [FnArg<'tcx, M::Provenance>]> =
if caller_abi == Abi::RustCall && !args.is_empty() {
// Untuple
let (untuple_arg, args) = args.split_last().unwrap();
@@ -458,11 +559,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
args.iter()
.map(|a| Ok(a.clone()))
.chain(
- (0..untuple_arg.layout.fields.count())
- .map(|i| self.operand_field(untuple_arg, i)),
+ (0..untuple_arg.layout().fields.count())
+ .map(|i| self.fn_arg_field(untuple_arg, i)),
)
- .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::Provenance>>>>(
- )?,
+ .collect::<InterpResult<'_, Vec<_>>>()?,
)
} else {
// Plain arg passing
@@ -491,7 +591,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if Some(local) == body.spread_arg {
// Must be a tuple
for i in 0..dest.layout.fields.count() {
- let dest = self.place_field(&dest, i)?;
+ let dest = self.project_field(&dest, i)?;
let callee_abi = callee_args_abis.next().unwrap();
self.pass_argument(&mut caller_args, callee_abi, &dest)?;
}
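A plain-Rust sketch of what the untupling above and this `spread_arg` loop undo: under the "rust-call" ABI, closure arguments travel as a single trailing tuple that the interpreter spreads back into individual arguments.

fn apply(f: impl Fn(i32, i32) -> i32) -> i32 {
    // Conceptually this call packs (1, 2) into one tuple argument; the callee
    // side (`body.spread_arg`) splits it back into two locals.
    f(1, 2)
}

fn main() {
    assert_eq!(apply(|x, y| x + y), 3);
}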
@@ -523,6 +623,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
caller_ty = caller_ty,
)
}
+ // Ensure the return place is aligned and dereferenceable, and protect it for
+ // in-place return value passing.
+ if let Either::Left(mplace) = destination.as_mplace_or_local() {
+ self.check_mplace(&mplace)?;
+ } else {
+ // Nothing to do for locals; they are always properly allocated and aligned.
+ }
+ M::protect_in_place_function_argument(self, destination)?;
};
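`protect_in_place_function_argument` is a `Machine` hook (see the machine.rs hunks in this diff). A conservative default for a machine without an aliasing model might look like the sketch below; an aliasing-aware machine such as Miri would retag instead:

fn protect_in_place_function_argument(
    ecx: &mut InterpCx<'mir, 'tcx, Self>,
    place: &PlaceTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx> {
    // Without an aliasing model, the best "protection" is to reset the place
    // to uninit, so stale reads through old pointers are detected as UB.
    ecx.write_uninit(place)
}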
match res {
Err(err) => {
@@ -538,11 +646,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// We have to implement all "object safe receivers". So we have to go search for a
// pointer or `dyn Trait` type, but it could be wrapped in newtypes. So recursively
// unwrap those newtypes until we are there.
- let mut receiver = args[0].clone();
+ // An `InPlace` does nothing here; we keep the original receiver intact. We can't
+ // really pass the argument in-place anyway, and we are constructing a new
+ // `Immediate` receiver.
+ let mut receiver = self.copy_fn_arg(&args[0])?;
let receiver_place = loop {
match receiver.layout.ty.kind() {
ty::Ref(..) | ty::RawPtr(..) => {
- // We do *not* use `deref_operand` here: we don't want to conceptually
+ // We do *not* use `deref_pointer` here: we don't want to conceptually
// create a place that must be dereferenceable, since the receiver might
// be a raw pointer and (for `*const dyn Trait`) we don't need to
// actually access memory to resolve this method.
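A standalone example of the receiver shapes this loop peels; object-safe receivers may wrap the pointer in newtypes such as `Pin`:

use std::pin::Pin;

trait Speak {
    fn speak(self: Pin<&Self>) -> &'static str;
}

struct Dog;
impl Speak for Dog {
    fn speak(self: Pin<&Self>) -> &'static str { "woof" }
}

fn main() {
    let dog = Dog;
    let pinned: Pin<&dyn Speak> = Pin::new(&dog);
    // This virtual call is the case handled above: the interpreter unwraps the
    // `Pin` newtype until it finds the `&dyn Speak` reference inside.
    assert_eq!(pinned.speak(), "woof");
}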
@@ -562,7 +673,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Not there yet, search for the only non-ZST field.
let mut non_zst_field = None;
for i in 0..receiver.layout.fields.count() {
- let field = self.operand_field(&receiver, i)?;
+ let field = self.project_field(&receiver, i)?;
let zst =
field.layout.is_zst() && field.layout.align.abi.bytes() == 1;
if !zst {
@@ -588,12 +699,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (vptr, dyn_ty, adjusted_receiver) = if let ty::Dynamic(data, _, ty::DynStar) =
receiver_place.layout.ty.kind()
{
- let (recv, vptr) = self.unpack_dyn_star(&receiver_place.into())?;
+ let (recv, vptr) = self.unpack_dyn_star(&receiver_place)?;
let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
if dyn_trait != data.principal() {
throw_ub_custom!(fluent::const_eval_dyn_star_call_vtable_mismatch);
}
- let recv = recv.assert_mem_place(); // we passed an MPlaceTy to `unpack_dyn_star` so we definitely still have one
(vptr, dyn_ty, recv.ptr)
} else {
@@ -603,8 +713,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.tcx
.struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env);
let ty::Dynamic(data, _, ty::Dyn) = receiver_tail.kind() else {
- span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
- };
+ span_bug!(
+ self.cur_span(),
+ "dynamic call on non-`dyn` type {}",
+ receiver_tail
+ )
+ };
assert!(receiver_place.layout.is_unsized());
// Get the required information from the vtable.
@@ -622,7 +736,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Now determine the actual method to call. We can do that in two different ways and
// compare them to ensure everything fits.
- let Some(ty::VtblEntry::Method(fn_inst)) = self.get_vtable_entries(vptr)?.get(idx).copied() else {
+ let Some(ty::VtblEntry::Method(fn_inst)) =
+ self.get_vtable_entries(vptr)?.get(idx).copied()
+ else {
// FIXME(fee1-dead) these could be variants of the UB info enum instead of this
throw_ub_custom!(fluent::const_eval_dyn_call_not_a_method);
};
@@ -632,7 +748,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let trait_def_id = tcx.trait_of_item(def_id).unwrap();
let virtual_trait_ref =
- ty::TraitRef::from_method(tcx, trait_def_id, instance.substs);
+ ty::TraitRef::from_method(tcx, trait_def_id, instance.args);
let existential_trait_ref =
ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref);
let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty);
@@ -641,18 +757,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
tcx,
self.param_env,
def_id,
- instance.substs.rebase_onto(tcx, trait_def_id, concrete_trait_ref.substs),
+ instance.args.rebase_onto(tcx, trait_def_id, concrete_trait_ref.args),
)
.unwrap();
assert_eq!(fn_inst, concrete_method);
}
// Adjust receiver argument. Layout can be any (thin) ptr.
- args[0] = ImmTy::from_immediate(
- Scalar::from_maybe_pointer(adjusted_receiver, self).into(),
- self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, dyn_ty))?,
- )
- .into();
+ args[0] = FnArg::Copy(
+ ImmTy::from_immediate(
+ Scalar::from_maybe_pointer(adjusted_receiver, self).into(),
+ self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, dyn_ty))?,
+ )
+ .into(),
+ );
trace!("Patched receiver operand to {:#?}", args[0]);
// recurse with concrete function
self.eval_fn_call(
@@ -668,6 +786,31 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
+ fn check_fn_target_features(&self, instance: ty::Instance<'tcx>) -> InterpResult<'tcx, ()> {
+ let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+ if attrs
+ .target_features
+ .iter()
+ .any(|feature| !self.tcx.sess.target_features.contains(feature))
+ {
+ throw_ub_custom!(
+ fluent::const_eval_unavailable_target_features_for_fn,
+ unavailable_feats = attrs
+ .target_features
+ .iter()
+ .filter(|&feature| !self.tcx.sess.target_features.contains(feature))
+ .fold(String::new(), |mut s, feature| {
+ if !s.is_empty() {
+ s.push_str(", ");
+ }
+ s.push_str(feature.as_str());
+ s
+ }),
+ );
+ }
+ Ok(())
+ }
+
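The fold builds the comma-separated feature list by hand to avoid intermediate allocations. If a temporary Vec is acceptable, a filter-plus-join is equivalent (standalone sketch with hypothetical inputs):

fn unavailable_feats(required: &[&str], enabled: &[&str]) -> String {
    required
        .iter()
        .copied()
        .filter(|f| !enabled.contains(f))
        .collect::<Vec<_>>()
        .join(", ")
}

fn main() {
    assert_eq!(unavailable_feats(&["avx2", "sse4.2"], &["sse4.2"]), "avx2");
}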
fn drop_in_place(
&mut self,
place: &PlaceTy<'tcx, M::Provenance>,
@@ -688,7 +831,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
ty::Dynamic(_, _, ty::DynStar) => {
// Dropping a `dyn*`. Need to find actual drop fn.
- self.unpack_dyn_star(&place.into())?.0.assert_mem_place()
+ self.unpack_dyn_star(&place)?.0
}
_ => {
debug_assert_eq!(
@@ -701,16 +844,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
- let arg = ImmTy::from_immediate(
- place.to_ref(self),
- self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, place.layout.ty))?,
- );
+ let arg = self.mplace_to_ref(&place)?;
let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?);
self.eval_fn_call(
FnVal::Instance(instance),
(Abi::Rust, fn_abi),
- &[arg.into()],
+ &[FnArg::Copy(arg.into())],
false,
&ret.into(),
Some(target),
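At the source level, the MIR `Drop` terminator handled by `drop_in_place` corresponds to running the drop glue for a place through a reference (hence the `mplace_to_ref` above). A plain-Rust illustration of that operation:

use std::mem::ManuallyDrop;

struct Noisy;
impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropped");
    }
}

fn main() {
    let mut x = ManuallyDrop::new(Noisy);
    // Run the drop glue for the place, by reference.
    // Safety: `x` is initialized and never used again after this.
    unsafe { ManuallyDrop::drop(&mut x) };
}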
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index 22bdd4d2c..b33194423 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -33,12 +33,12 @@ where
match *ty.kind() {
ty::Param(_) => ControlFlow::Break(FoundParam),
- ty::Closure(def_id, substs)
- | ty::Generator(def_id, substs, ..)
- | ty::FnDef(def_id, substs) => {
+ ty::Closure(def_id, args)
+ | ty::Generator(def_id, args, ..)
+ | ty::FnDef(def_id, args) => {
let instance = ty::InstanceDef::Item(def_id);
let unused_params = self.tcx.unused_generic_params(instance);
- for (index, subst) in substs.into_iter().enumerate() {
+ for (index, subst) in args.into_iter().enumerate() {
let index = index
.try_into()
.expect("more generic parameters than can fit into a `u32`");
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 21c655988..d3f05af1c 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -25,13 +25,17 @@ use rustc_target::abi::{
use std::hash::Hash;
-// for the validation errors
-use super::UndefinedBehaviorInfo::*;
use super::{
AllocId, CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy,
- Machine, MemPlaceMeta, OpTy, Pointer, Scalar, ValueVisitor,
+ Machine, MemPlaceMeta, OpTy, Pointer, Projectable, Scalar, ValueVisitor,
};
+// for the validation errors
+use super::InterpError::UndefinedBehavior as Ub;
+use super::InterpError::Unsupported as Unsup;
+use super::UndefinedBehaviorInfo::*;
+use super::UnsupportedOpInfo::*;
+
macro_rules! throw_validation_failure {
($where:expr, $kind: expr) => {{
let where_ = &$where;
@@ -43,7 +47,7 @@ macro_rules! throw_validation_failure {
None
};
- throw_ub!(Validation(ValidationErrorInfo { path, kind: $kind }))
+ throw_ub!(ValidationError(ValidationErrorInfo { path, kind: $kind }))
}};
}
@@ -85,16 +89,16 @@ macro_rules! try_validation {
Ok(x) => x,
// We catch the error and turn it into a validation failure. We are okay with
// allocation here as this can only slow down builds that fail anyway.
- Err(e) => match e.into_parts() {
+ Err(e) => match e.kind() {
$(
- (InterpError::UndefinedBehavior($($p)|+), _) =>
+ $($p)|+ =>
throw_validation_failure!(
$where,
$kind
)
),+,
#[allow(unreachable_patterns)]
- (e, rest) => Err::<!, _>($crate::interpret::InterpErrorInfo::from_parts(e, rest))?,
+ _ => Err::<!, _>(e)?,
}
}
}};
@@ -136,19 +140,19 @@ pub struct RefTracking<T, PATH = ()> {
pub todo: Vec<(T, PATH)>,
}
-impl<T: Copy + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH> {
+impl<T: Clone + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH> {
pub fn empty() -> Self {
RefTracking { seen: FxHashSet::default(), todo: vec![] }
}
pub fn new(op: T) -> Self {
let mut ref_tracking_for_consts =
- RefTracking { seen: FxHashSet::default(), todo: vec![(op, PATH::default())] };
+ RefTracking { seen: FxHashSet::default(), todo: vec![(op.clone(), PATH::default())] };
ref_tracking_for_consts.seen.insert(op);
ref_tracking_for_consts
}
pub fn track(&mut self, op: T, path: impl FnOnce() -> PATH) {
- if self.seen.insert(op) {
+ if self.seen.insert(op.clone()) {
trace!("Recursing below ptr {:#?}", op);
let path = path();
// Remember to come back to this later.
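A minimal standalone re-implementation of the `RefTracking` worklist, showing why relaxing `T: Copy` to `T: Clone` requires the two explicit clones above: each tracked value is stored once in `seen` and once in `todo`.

use std::collections::HashSet;
use std::hash::Hash;

struct RefTracking<T> {
    seen: HashSet<T>,
    todo: Vec<(T, Vec<usize>)>, // (value, path to it)
}

impl<T: Clone + Eq + Hash> RefTracking<T> {
    fn new(root: T) -> Self {
        let mut rt = RefTracking { seen: HashSet::new(), todo: vec![(root.clone(), vec![])] };
        rt.seen.insert(root);
        rt
    }
    fn track(&mut self, val: T, path: impl FnOnce() -> Vec<usize>) {
        // Clone only when the value is new; the path is computed lazily.
        if self.seen.insert(val.clone()) {
            let path = path();
            self.todo.push((val, path));
        }
    }
}

fn main() {
    let mut rt = RefTracking::new(String::from("root"));
    rt.track(String::from("child"), || vec![0]);
    assert_eq!(rt.todo.len(), 2);
}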
@@ -164,14 +168,14 @@ fn write_path(out: &mut String, path: &[PathElem]) {
for elem in path.iter() {
match elem {
- Field(name) => write!(out, ".{}", name),
+ Field(name) => write!(out, ".{name}"),
EnumTag => write!(out, ".<enum-tag>"),
- Variant(name) => write!(out, ".<enum-variant({})>", name),
+ Variant(name) => write!(out, ".<enum-variant({name})>"),
GeneratorTag => write!(out, ".<generator-tag>"),
GeneratorState(idx) => write!(out, ".<generator-state({})>", idx.index()),
- CapturedVar(name) => write!(out, ".<captured-var({})>", name),
- TupleElem(idx) => write!(out, ".{}", idx),
- ArrayElem(idx) => write!(out, "[{}]", idx),
+ CapturedVar(name) => write!(out, ".<captured-var({name})>"),
+ TupleElem(idx) => write!(out, ".{idx}"),
+ ArrayElem(idx) => write!(out, "[{idx}]"),
// `.<deref>` does not match Rust syntax, but it is more readable for long paths -- and
// some of the other items here also are not Rust syntax. Actually we can't
// even use the usual syntax because we are just showing the projections,
@@ -294,7 +298,13 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
Ok(try_validation!(
self.ecx.read_immediate(op),
self.path,
- InvalidUninitBytes(None) => Uninit { expected }
+ Ub(InvalidUninitBytes(None)) =>
+ Uninit { expected },
+ // The `Unsup` cases can only occur during CTFE
+ Unsup(ReadPointerAsInt(_)) =>
+ PointerAsInt { expected },
+ Unsup(ReadPartialPointer(_)) =>
+ PartialPointer,
))
}
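A source-level trigger for the new `PointerAsInt` case: a const whose final value stores provenance at integer type. Evaluation succeeds (the transmute just copies bytes), but validation then reads the `usize` and finds a pointer. This is expected to be rejected; the exact diagnostic wording may differ:

const ADDR: usize = unsafe {
    // CTFE cannot turn a pointer into a concrete integer, so validating this
    // constant reports a pointer-as-int error instead of accepting a value.
    std::mem::transmute::<&u8, usize>(&42u8)
};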
@@ -319,8 +329,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let (_ty, _trait) = try_validation!(
self.ecx.get_ptr_vtable(vtable),
self.path,
- DanglingIntPointer(..) |
- InvalidVTablePointer(..) => InvalidVTablePtr { value: format!("{vtable}") }
+ Ub(DanglingIntPointer(..) | InvalidVTablePointer(..)) =>
+ InvalidVTablePtr { value: format!("{vtable}") }
);
// FIXME: check if the type/trait match what ty::Dynamic says?
}
@@ -345,6 +355,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
value: &OpTy<'tcx, M::Provenance>,
ptr_kind: PointerKind,
) -> InterpResult<'tcx> {
+ // Not using `deref_pointer` since we do the dereferenceable check ourselves below.
let place = self.ecx.ref_to_mplace(&self.read_immediate(value, ptr_kind.into())?)?;
// Handle wide pointers.
// Check metadata early, for better diagnostics
@@ -355,7 +366,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let size_and_align = try_validation!(
self.ecx.size_and_align_of_mplace(&place),
self.path,
- InvalidMeta(msg) => match msg {
+ Ub(InvalidMeta(msg)) => match msg {
InvalidMetaKind::SliceTooBig => InvalidMetaSliceTooLarge { ptr_kind },
InvalidMetaKind::TooBig => InvalidMetaTooLarge { ptr_kind },
}
@@ -374,23 +385,23 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
),
self.path,
- AlignmentCheckFailed { required, has } => UnalignedPtr {
+ Ub(AlignmentCheckFailed { required, has }) => UnalignedPtr {
ptr_kind,
required_bytes: required.bytes(),
found_bytes: has.bytes()
},
- DanglingIntPointer(0, _) => NullPtr { ptr_kind },
- DanglingIntPointer(i, _) => DanglingPtrNoProvenance {
+ Ub(DanglingIntPointer(0, _)) => NullPtr { ptr_kind },
+ Ub(DanglingIntPointer(i, _)) => DanglingPtrNoProvenance {
ptr_kind,
// FIXME this says "null pointer" when null but we need to translate
- pointer: format!("{}", Pointer::<Option<AllocId>>::from_addr_invalid(i))
+ pointer: format!("{}", Pointer::<Option<AllocId>>::from_addr_invalid(*i))
},
- PointerOutOfBounds { .. } => DanglingPtrOutOfBounds {
+ Ub(PointerOutOfBounds { .. }) => DanglingPtrOutOfBounds {
ptr_kind
},
// This cannot happen during const-eval (because interning already detects
// dangling pointers), but it can happen in Miri.
- PointerUseAfterFree(..) => DanglingPtrUseAfterFree {
+ Ub(PointerUseAfterFree(..)) => DanglingPtrUseAfterFree {
ptr_kind,
},
);
@@ -462,6 +473,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
/// Check if this is a value of primitive type, and if yes check the validity of the value
/// at that type. Return `true` if the type is indeed primitive.
+ ///
+ /// Note that not all of these have `FieldsShape::Primitive`, e.g. wide references.
fn try_visit_primitive(
&mut self,
value: &OpTy<'tcx, M::Provenance>,
@@ -474,7 +487,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
try_validation!(
value.to_bool(),
self.path,
- InvalidBool(..) => ValidationErrorKind::InvalidBool {
+ Ub(InvalidBool(..)) => ValidationErrorKind::InvalidBool {
value: format!("{value:x}"),
}
);
@@ -485,7 +498,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
try_validation!(
value.to_char(),
self.path,
- InvalidChar(..) => ValidationErrorKind::InvalidChar {
+ Ub(InvalidChar(..)) => ValidationErrorKind::InvalidChar {
value: format!("{value:x}"),
}
);
@@ -494,7 +507,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
// NOTE: Keep this in sync with the array optimization for int/float
// types below!
- let value = self.read_scalar(
+ self.read_scalar(
value,
if matches!(ty.kind(), ty::Float(..)) {
ExpectedKind::Float
@@ -502,20 +515,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
ExpectedKind::Int
},
)?;
- // As a special exception we *do* match on a `Scalar` here, since we truly want
- // to know its underlying representation (and *not* cast it to an integer).
- if matches!(value, Scalar::Ptr(..)) {
- throw_validation_failure!(
- self.path,
- ExpectedNonPtr { value: format!("{value:x}") }
- )
- }
Ok(true)
}
ty::RawPtr(..) => {
- // We are conservative with uninit for integers, but try to
- // actually enforce the strict rules for raw pointers (mostly because
- // that lets us re-use `ref_to_mplace`).
let place =
self.ecx.ref_to_mplace(&self.read_immediate(value, ExpectedKind::RawPtr)?)?;
if place.layout.is_unsized() {
@@ -546,10 +548,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let _fn = try_validation!(
self.ecx.get_ptr_fn(ptr),
self.path,
- DanglingIntPointer(..) |
- InvalidFunctionPointer(..) => InvalidFnPtr {
- value: format!("{ptr}"),
- },
+ Ub(DanglingIntPointer(..) | InvalidFunctionPointer(..)) =>
+ InvalidFnPtr { value: format!("{ptr}") },
);
// FIXME: Check if the signature matches
} else {
@@ -657,13 +657,13 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
Ok(try_validation!(
this.ecx.read_discriminant(op),
this.path,
- InvalidTag(val) => InvalidEnumTag {
+ Ub(InvalidTag(val)) => InvalidEnumTag {
value: format!("{val:x}"),
},
-
- InvalidUninitBytes(None) => UninitEnumTag,
- )
- .1)
+ Ub(UninhabitedEnumVariantRead(_)) => UninhabitedEnumVariant,
+ // Uninit / bad provenance are not possible since the field was previously
+ // checked at its integer type.
+ ))
})
}
@@ -733,60 +733,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
}
}
- // Recursively walk the value at its type.
- self.walk_value(op)?;
-
- // *After* all of this, check the ABI. We need to check the ABI to handle
- // types like `NonNull` where the `Scalar` info is more restrictive than what
- // the fields say (`rustc_layout_scalar_valid_range_start`).
- // But in most cases, this will just propagate what the fields say,
- // and then we want the error to point at the field -- so, first recurse,
- // then check ABI.
- //
- // FIXME: We could avoid some redundant checks here. For newtypes wrapping
- // scalars, we do the same check on every "level" (e.g., first we check
- // MyNewtype and then the scalar in there).
- match op.layout.abi {
- Abi::Uninhabited => {
- let ty = op.layout.ty;
- throw_validation_failure!(self.path, UninhabitedVal { ty });
- }
- Abi::Scalar(scalar_layout) => {
- if !scalar_layout.is_uninit_valid() {
- // There is something to check here.
- let scalar = self.read_scalar(op, ExpectedKind::InitScalar)?;
- self.visit_scalar(scalar, scalar_layout)?;
- }
- }
- Abi::ScalarPair(a_layout, b_layout) => {
- // We can only proceed if *both* scalars need to be initialized.
- // FIXME: find a way to also check ScalarPair when one side can be uninit but
- // the other must be init.
- if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
- let (a, b) =
- self.read_immediate(op, ExpectedKind::InitScalar)?.to_scalar_pair();
- self.visit_scalar(a, a_layout)?;
- self.visit_scalar(b, b_layout)?;
- }
- }
- Abi::Vector { .. } => {
- // No checks here, we assume layout computation gets this right.
- // (This is harder to check since Miri does not represent these as `Immediate`. We
- // also cannot use field projections since this might be a newtype around a vector.)
- }
- Abi::Aggregate { .. } => {
- // Nothing to do.
- }
- }
-
- Ok(())
- }
-
- fn visit_aggregate(
- &mut self,
- op: &OpTy<'tcx, M::Provenance>,
- fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
- ) -> InterpResult<'tcx> {
+ // Recursively walk the value at its type. Apply optimizations for some large types.
match op.layout.ty.kind() {
ty::Str => {
let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
@@ -794,7 +741,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
try_validation!(
self.ecx.read_bytes_ptr_strip_provenance(mplace.ptr, Size::from_bytes(len)),
self.path,
- InvalidUninitBytes(..) => { UninitStr },
+ Ub(InvalidUninitBytes(..)) => Uninit { expected: ExpectedKind::Str },
+ Unsup(ReadPointerAsInt(_)) => PointerAsInt { expected: ExpectedKind::Str }
);
}
ty::Array(tys, ..) | ty::Slice(tys)
@@ -806,6 +754,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
if matches!(tys.kind(), ty::Int(..) | ty::Uint(..) | ty::Float(..))
=>
{
+ let expected = if tys.is_integral() { ExpectedKind::Int } else { ExpectedKind::Float };
// Optimized handling for arrays of integer/float type.
// This is the length of the array/slice.
@@ -824,7 +773,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
Left(mplace) => mplace,
Right(imm) => match *imm {
Immediate::Uninit =>
- throw_validation_failure!(self.path, UninitVal),
+ throw_validation_failure!(self.path, Uninit { expected }),
Immediate::Scalar(..) | Immediate::ScalarPair(..) =>
bug!("arrays/slices can never have Scalar/ScalarPair layout"),
}
@@ -850,17 +799,21 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// For some errors we might be able to provide extra information.
// (This custom logic does not fit the `try_validation!` macro.)
match err.kind() {
- err_ub!(InvalidUninitBytes(Some((_alloc_id, access)))) => {
+ Ub(InvalidUninitBytes(Some((_alloc_id, access)))) | Unsup(ReadPointerAsInt(Some((_alloc_id, access)))) => {
// Some byte was uninitialized, determine which
// element that byte belongs to so we can
// provide an index.
let i = usize::try_from(
- access.uninit.start.bytes() / layout.size.bytes(),
+ access.bad.start.bytes() / layout.size.bytes(),
)
.unwrap();
self.path.push(PathElem::ArrayElem(i));
- throw_validation_failure!(self.path, UninitVal)
+ if matches!(err.kind(), Ub(InvalidUninitBytes(_))) {
+ throw_validation_failure!(self.path, Uninit { expected })
+ } else {
+ throw_validation_failure!(self.path, PointerAsInt { expected })
+ }
}
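The index recovery above is plain integer division; a worked standalone example:

fn failing_index(bad_start_bytes: u64, elem_size_bytes: u64) -> usize {
    // The first bad byte at offset 13 in an array of 4-byte elements falls
    // inside element 13 / 4 = 3, so `[3]` is appended to the path.
    usize::try_from(bad_start_bytes / elem_size_bytes).unwrap()
}

fn main() {
    assert_eq!(failing_index(13, 4), 3);
}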
// Propagate upwards (that will also check for unexpected errors).
@@ -874,12 +827,58 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// ZST type, so either validation fails for all elements or none.
ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(*tys)?.is_zst() => {
// Validate just the first element (if any).
- self.walk_aggregate(op, fields.take(1))?
+ if op.len(self.ecx)? > 0 {
+ self.visit_field(op, 0, &self.ecx.project_index(op, 0)?)?;
+ }
}
_ => {
- self.walk_aggregate(op, fields)? // default handler
+ self.walk_value(op)?; // default handler
+ }
+ }
+
+ // *After* all of this, check the ABI. We need to check the ABI to handle
+ // types like `NonNull` where the `Scalar` info is more restrictive than what
+ // the fields say (`rustc_layout_scalar_valid_range_start`).
+ // But in most cases, this will just propagate what the fields say,
+ // and then we want the error to point at the field -- so, first recurse,
+ // then check ABI.
+ //
+ // FIXME: We could avoid some redundant checks here. For newtypes wrapping
+ // scalars, we do the same check on every "level" (e.g., first we check
+ // MyNewtype and then the scalar in there).
+ match op.layout.abi {
+ Abi::Uninhabited => {
+ let ty = op.layout.ty;
+ throw_validation_failure!(self.path, UninhabitedVal { ty });
+ }
+ Abi::Scalar(scalar_layout) => {
+ if !scalar_layout.is_uninit_valid() {
+ // There is something to check here.
+ let scalar = self.read_scalar(op, ExpectedKind::InitScalar)?;
+ self.visit_scalar(scalar, scalar_layout)?;
+ }
+ }
+ Abi::ScalarPair(a_layout, b_layout) => {
+ // We can only proceed if *both* scalars need to be initialized.
+ // FIXME: find a way to also check ScalarPair when one side can be uninit but
+ // the other must be init.
+ if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
+ let (a, b) =
+ self.read_immediate(op, ExpectedKind::InitScalar)?.to_scalar_pair();
+ self.visit_scalar(a, a_layout)?;
+ self.visit_scalar(b, b_layout)?;
+ }
+ }
+ Abi::Vector { .. } => {
+ // No checks here, we assume layout computation gets this right.
+ // (This is harder to check since Miri does not represent these as `Immediate`. We
+ // also cannot use field projections since this might be a newtype around a vector.)
+ }
+ Abi::Aggregate { .. } => {
+ // Nothing to do.
}
}
+
Ok(())
}
}
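The kind of type the check-ABI-after-recursing comment refers to: a wrapper whose scalar range is stricter than its field's type (internally `rustc_layout_scalar_valid_range_start`; `NonZeroU32` is its stable face):

use std::num::NonZeroU32;

fn main() {
    // The inner field is a plain u32, for which 0 is valid; only the wrapper's
    // scalar layout excludes 0, which is what the outer ABI check enforces.
    assert!(NonZeroU32::new(0).is_none());
    assert_eq!(NonZeroU32::new(7).unwrap().get(), 7);
}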
@@ -900,17 +899,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Run it.
match visitor.visit_value(&op) {
Ok(()) => Ok(()),
- // Pass through validation failures.
- Err(err) if matches!(err.kind(), err_ub!(Validation { .. })) => Err(err),
- // Complain about any other kind of UB error -- those are bad because we'd like to
+ // Pass through validation failures and "invalid program" issues.
+ Err(err)
+ if matches!(
+ err.kind(),
+ err_ub!(ValidationError { .. }) | InterpError::InvalidProgram(_)
+ ) =>
+ {
+ Err(err)
+ }
+ // Complain about any other kind of error -- those are bad because we'd like to
// report them in a way that shows *where* in the value the issue lies.
- Err(err) if matches!(err.kind(), InterpError::UndefinedBehavior(_)) => {
+ Err(err) => {
let (err, backtrace) = err.into_parts();
backtrace.print_backtrace();
bug!("Unexpected Undefined Behavior error during validation: {err:?}");
}
- // Pass through everything else.
- Err(err) => Err(err),
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index 7a1445939..531e2bd3e 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -1,544 +1,202 @@
//! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound
//! types until we arrive at the leaves, with custom handling for primitive types.
+use rustc_index::IndexVec;
use rustc_middle::mir::interpret::InterpResult;
use rustc_middle::ty;
-use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::FieldIdx;
use rustc_target::abi::{FieldsShape, VariantIdx, Variants};
use std::num::NonZeroUsize;
-use super::{InterpCx, MPlaceTy, Machine, OpTy, PlaceTy};
+use super::{InterpCx, MPlaceTy, Machine, Projectable};
-/// A thing that we can project into, and that has a layout.
-/// This wouldn't have to depend on `Machine` but with the current type inference,
-/// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
-pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Sized {
- /// Gets this value's layout.
- fn layout(&self) -> TyAndLayout<'tcx>;
+/// How to traverse a value and what to do when we are at the leaves.
+pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
+ type V: Projectable<'tcx, M::Provenance> + From<MPlaceTy<'tcx, M::Provenance>>;
- /// Makes this into an `OpTy`, in a cheap way that is good for reading.
- fn to_op_for_read(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
-
- /// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
- fn to_op_for_proj(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- self.to_op_for_read(ecx)
- }
-
- /// Creates this from an `OpTy`.
- ///
- /// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
-
- /// Projects to the given enum variant.
- fn project_downcast(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self>;
-
- /// Projects to the n-th field.
- fn project_field(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self>;
-}
-
-/// A thing that we can project into given *mutable* access to `ecx`, and that has a layout.
-/// This wouldn't have to depend on `Machine` but with the current type inference,
-/// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
-pub trait ValueMut<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Sized {
- /// Gets this value's layout.
- fn layout(&self) -> TyAndLayout<'tcx>;
-
- /// Makes this into an `OpTy`, in a cheap way that is good for reading.
- fn to_op_for_read(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
-
- /// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
- fn to_op_for_proj(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
-
- /// Creates this from an `OpTy`.
- ///
- /// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
-
- /// Projects to the given enum variant.
- fn project_downcast(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self>;
-
- /// Projects to the n-th field.
- fn project_field(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self>;
-}
-
-// We cannot have a general impl which shows that Value implies ValueMut. (When we do, it says we
-// cannot `impl ValueMut for PlaceTy` because some downstream crate could `impl Value for PlaceTy`.)
-// So we have some copy-paste here. (We could have a macro but since we only have 2 types with this
-// double-impl, that would barely make the code shorter, if at all.)
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::Provenance> {
- #[inline(always)]
- fn layout(&self) -> TyAndLayout<'tcx> {
- self.layout
- }
-
- #[inline(always)]
- fn to_op_for_read(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- Ok(self.clone())
- }
-
- #[inline(always)]
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
- op.clone()
- }
-
- #[inline(always)]
- fn project_downcast(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self> {
- ecx.operand_downcast(self, variant)
- }
-
- #[inline(always)]
- fn project_field(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self> {
- ecx.operand_field(self, field)
- }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
- for OpTy<'tcx, M::Provenance>
-{
- #[inline(always)]
- fn layout(&self) -> TyAndLayout<'tcx> {
- self.layout
- }
-
- #[inline(always)]
- fn to_op_for_read(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- Ok(self.clone())
- }
-
- #[inline(always)]
- fn to_op_for_proj(
- &self,
- _ecx: &mut InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- Ok(self.clone())
- }
-
- #[inline(always)]
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
- op.clone()
- }
-
- #[inline(always)]
- fn project_downcast(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self> {
- ecx.operand_downcast(self, variant)
- }
-
- #[inline(always)]
- fn project_field(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self> {
- ecx.operand_field(self, field)
- }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
- for MPlaceTy<'tcx, M::Provenance>
-{
- #[inline(always)]
- fn layout(&self) -> TyAndLayout<'tcx> {
- self.layout
- }
-
- #[inline(always)]
- fn to_op_for_read(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- Ok(self.into())
- }
-
- #[inline(always)]
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
- // assert is justified because our `to_op_for_read` only ever produces `Indirect` operands.
- op.assert_mem_place()
- }
-
- #[inline(always)]
- fn project_downcast(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self> {
- ecx.mplace_downcast(self, variant)
- }
-
- #[inline(always)]
- fn project_field(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self> {
- ecx.mplace_field(self, field)
- }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
- for MPlaceTy<'tcx, M::Provenance>
-{
- #[inline(always)]
- fn layout(&self) -> TyAndLayout<'tcx> {
- self.layout
- }
-
- #[inline(always)]
- fn to_op_for_read(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- Ok(self.into())
- }
-
- #[inline(always)]
- fn to_op_for_proj(
- &self,
- _ecx: &mut InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- Ok(self.into())
- }
-
- #[inline(always)]
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
- // assert is justified because our `to_op_for_proj` only ever produces `Indirect` operands.
- op.assert_mem_place()
- }
-
- #[inline(always)]
- fn project_downcast(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self> {
- ecx.mplace_downcast(self, variant)
- }
-
- #[inline(always)]
- fn project_field(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self> {
- ecx.mplace_field(self, field)
- }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
- for PlaceTy<'tcx, M::Provenance>
-{
- #[inline(always)]
- fn layout(&self) -> TyAndLayout<'tcx> {
- self.layout
- }
-
- #[inline(always)]
- fn to_op_for_read(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- // No need for `force_allocation` since we are just going to read from this.
- ecx.place_to_op(self)
- }
+ /// The visitor must have an `InterpCx` in it.
+ fn ecx(&self) -> &InterpCx<'mir, 'tcx, M>;
+ /// `read_discriminant` can be hooked for better error messages.
#[inline(always)]
- fn to_op_for_proj(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- // We `force_allocation` here so that `from_op` below can work.
- Ok(ecx.force_allocation(self)?.into())
+ fn read_discriminant(&mut self, v: &Self::V) -> InterpResult<'tcx, VariantIdx> {
+ Ok(self.ecx().read_discriminant(&v.to_op(self.ecx())?)?)
}
- #[inline(always)]
- fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
- // assert is justified because our `to_op` only ever produces `Indirect` operands.
- op.assert_mem_place().into()
- }
-
- #[inline(always)]
- fn project_downcast(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- variant: VariantIdx,
- ) -> InterpResult<'tcx, Self> {
- ecx.place_downcast(self, variant)
- }
-
- #[inline(always)]
- fn project_field(
- &self,
- ecx: &mut InterpCx<'mir, 'tcx, M>,
- field: usize,
- ) -> InterpResult<'tcx, Self> {
- ecx.place_field(self, field)
- }
-}
-
-macro_rules! make_value_visitor {
- ($visitor_trait:ident, $value_trait:ident, $($mutability:ident)?) => {
- /// How to traverse a value and what to do when we are at the leaves.
- pub trait $visitor_trait<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
- type V: $value_trait<'mir, 'tcx, M>;
-
- /// The visitor must have an `InterpCx` in it.
- fn ecx(&$($mutability)? self)
- -> &$($mutability)? InterpCx<'mir, 'tcx, M>;
-
- /// `read_discriminant` can be hooked for better error messages.
- #[inline(always)]
- fn read_discriminant(
- &mut self,
- op: &OpTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, VariantIdx> {
- Ok(self.ecx().read_discriminant(op)?.1)
- }
-
- // Recursive actions, ready to be overloaded.
- /// Visits the given value, dispatching as appropriate to more specialized visitors.
- #[inline(always)]
- fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
- {
- self.walk_value(v)
- }
- /// Visits the given value as a union. No automatic recursion can happen here.
- #[inline(always)]
- fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx>
- {
- Ok(())
- }
- /// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
- /// The type of `v` will be a raw pointer, but this is a field of `Box<T>` and the
- /// pointee type is the actual `T`.
- #[inline(always)]
- fn visit_box(&mut self, _v: &Self::V) -> InterpResult<'tcx>
- {
- Ok(())
+ /// This function provides the chance to change the order in which fields are visited for
+ /// `FieldsShape::Arbitrary`: the order of fields will be
+ /// `(0..num_fields).map(aggregate_field_order)`.
+ ///
+ /// The default means we iterate in source declaration order; alternatively, this can do an
+ /// inverse lookup in `memory_index` to use memory field order instead.
+ #[inline(always)]
+ fn aggregate_field_order(_memory_index: &IndexVec<FieldIdx, u32>, idx: usize) -> usize {
+ idx
+ }
+
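A standalone sketch of the inverse-lookup alternative mentioned above: `memory_index` maps source field order to memory order, so visiting in memory order means inverting that permutation.

fn field_at_memory_slot(memory_index: &[u32], slot: usize) -> usize {
    memory_index
        .iter()
        .position(|&s| s as usize == slot)
        .expect("memory_index is a permutation")
}

fn main() {
    // Fields declared as 0, 1, 2 but laid out in memory as 2, 0, 1:
    let memory_index = [1u32, 2, 0];
    let order: Vec<_> = (0..3).map(|s| field_at_memory_slot(&memory_index, s)).collect();
    assert_eq!(order, vec![2, 0, 1]);
}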
+ // Recursive actions, ready to be overloaded.
+ /// Visits the given value, dispatching as appropriate to more specialized visitors.
+ #[inline(always)]
+ fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
+ self.walk_value(v)
+ }
+ /// Visits the given value as a union. No automatic recursion can happen here.
+ #[inline(always)]
+ fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx> {
+ Ok(())
+ }
+ /// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
+ /// The type of `v` will be a raw pointer, but this is a field of `Box<T>` and the
+ /// pointee type is the actual `T`.
+ #[inline(always)]
+ fn visit_box(&mut self, _v: &Self::V) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Called each time we recurse down to a field of a "product-like" aggregate
+ /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
+ /// and new (inner) value.
+ /// This gives the visitor the chance to track the stack of nested fields that
+ /// we are descending through.
+ #[inline(always)]
+ fn visit_field(
+ &mut self,
+ _old_val: &Self::V,
+ _field: usize,
+ new_val: &Self::V,
+ ) -> InterpResult<'tcx> {
+ self.visit_value(new_val)
+ }
+ /// Called when recursing into an enum variant.
+ /// This gives the visitor the chance to track the stack of nested fields that
+ /// we are descending through.
+ #[inline(always)]
+ fn visit_variant(
+ &mut self,
+ _old_val: &Self::V,
+ _variant: VariantIdx,
+ new_val: &Self::V,
+ ) -> InterpResult<'tcx> {
+ self.visit_value(new_val)
+ }
+
+ fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
+ let ty = v.layout().ty;
+ trace!("walk_value: type: {ty}");
+
+ // Special treatment for special types, where the (static) layout is not sufficient.
+ match *ty.kind() {
+ // If it is a trait object, switch to the real type that was used to create it.
+ ty::Dynamic(_, _, ty::Dyn) => {
+ // Dyn types. This is unsized, and the actual dynamic type of the data is given by the
+ // vtable stored in the place metadata.
+ // unsized values are never immediate, so we can assert_mem_place
+ let op = v.to_op(self.ecx())?;
+ let dest = op.assert_mem_place();
+ let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0;
+ trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
+ // recurse with the inner type
+ return self.visit_field(&v, 0, &inner_mplace.into());
}
- /// Visits this value as an aggregate, you are getting an iterator yielding
- /// all the fields (still in an `InterpResult`, you have to do error handling yourself).
- /// Recurses into the fields.
- #[inline(always)]
- fn visit_aggregate(
- &mut self,
- v: &Self::V,
- fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
- ) -> InterpResult<'tcx> {
- self.walk_aggregate(v, fields)
+ ty::Dynamic(_, _, ty::DynStar) => {
+ // DynStar types. Very different from a dyn type (but strangely part of the
+ // same variant in `TyKind`): These are pairs where the 2nd component is the
+ // vtable, and the first component is the data (which must be ptr-sized).
+ let data = self.ecx().unpack_dyn_star(v)?.0;
+ return self.visit_field(&v, 0, &data);
}
-
- /// Called each time we recurse down to a field of a "product-like" aggregate
- /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
- /// and new (inner) value.
- /// This gives the visitor the chance to track the stack of nested fields that
- /// we are descending through.
- #[inline(always)]
- fn visit_field(
- &mut self,
- _old_val: &Self::V,
- _field: usize,
- new_val: &Self::V,
- ) -> InterpResult<'tcx> {
- self.visit_value(new_val)
+ // Slices do not need special handling here: they have `Array` field
+ // placement with length 0, so we enter the `Array` case below which
+ // indirectly uses the metadata to determine the actual length.
+
+ // However, `Box`... let's talk about `Box`.
+ ty::Adt(def, ..) if def.is_box() => {
+ // `Box` is a hybrid primitive-library-defined type that on the one hand is
+ // a dereferenceable pointer, on the other hand has *basically arbitrary
+ // user-defined layout* since the user controls the 'allocator' field. So it
+ // cannot be treated like a normal pointer, since it does not fit into an
+ // `Immediate`. Yeah, it is quite terrible. But many visitors want to do
+ // something with "all boxed pointers", so we handle this mess for them.
+ //
+ // When we hit a `Box`, we do not do the usual field recursion; instead,
+ // we (a) call `visit_box` on the pointer value, and (b) recurse on the
+ // allocator field. We also assert tons of things to ensure we do not miss
+ // any other fields.
+
+ // `Box` has two fields: the pointer we care about, and the allocator.
+ assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields");
+ let (unique_ptr, alloc) =
+ (self.ecx().project_field(v, 0)?, self.ecx().project_field(v, 1)?);
+ // Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`...
+ // (which means another 2 fields, the second of which is a `PhantomData`)
+ assert_eq!(unique_ptr.layout().fields.count(), 2);
+ let (nonnull_ptr, phantom) = (
+ self.ecx().project_field(&unique_ptr, 0)?,
+ self.ecx().project_field(&unique_ptr, 1)?,
+ );
+ assert!(
+ phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
+ "2nd field of `Unique` should be PhantomData but is {:?}",
+ phantom.layout().ty,
+ );
+ // ... that contains a `NonNull`... (thankfully, only a single field here)
+ assert_eq!(nonnull_ptr.layout().fields.count(), 1);
+ let raw_ptr = self.ecx().project_field(&nonnull_ptr, 0)?; // the actual raw ptr
+ // ... whose only field finally is a raw ptr we can dereference.
+ self.visit_box(&raw_ptr)?;
+
+ // The second `Box` field is the allocator, which we recursively check for validity
+ // like in regular structs.
+ self.visit_field(v, 1, &alloc)?;
+
+ // We visited all parts of this one.
+ return Ok(());
}
- /// Called when recursing into an enum variant.
- /// This gives the visitor the chance to track the stack of nested fields that
- /// we are descending through.
- #[inline(always)]
- fn visit_variant(
- &mut self,
- _old_val: &Self::V,
- _variant: VariantIdx,
- new_val: &Self::V,
- ) -> InterpResult<'tcx> {
- self.visit_value(new_val)
+ _ => {}
+ };
+
+ // Visit the fields of this value.
+ match &v.layout().fields {
+ FieldsShape::Primitive => {}
+ &FieldsShape::Union(fields) => {
+ self.visit_union(v, fields)?;
}
-
- // Default recursors. Not meant to be overloaded.
- fn walk_aggregate(
- &mut self,
- v: &Self::V,
- fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
- ) -> InterpResult<'tcx> {
- // Now iterate over it.
- for (idx, field_val) in fields.enumerate() {
- self.visit_field(v, idx, &field_val?)?;
+ FieldsShape::Arbitrary { offsets, memory_index } => {
+ for idx in 0..offsets.len() {
+ let idx = Self::aggregate_field_order(memory_index, idx);
+ let field = self.ecx().project_field(v, idx)?;
+ self.visit_field(v, idx, &field)?;
}
- Ok(())
}
- fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
- {
- let ty = v.layout().ty;
- trace!("walk_value: type: {ty}");
-
- // Special treatment for special types, where the (static) layout is not sufficient.
- match *ty.kind() {
- // If it is a trait object, switch to the real type that was used to create it.
- ty::Dynamic(_, _, ty::Dyn) => {
- // Dyn types. This is unsized, and the actual dynamic type of the data is given by the
- // vtable stored in the place metadata.
- // unsized values are never immediate, so we can assert_mem_place
- let op = v.to_op_for_read(self.ecx())?;
- let dest = op.assert_mem_place();
- let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0;
- trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
- // recurse with the inner type
- return self.visit_field(&v, 0, &$value_trait::from_op(&inner_mplace.into()));
- },
- ty::Dynamic(_, _, ty::DynStar) => {
- // DynStar types. Very different from a dyn type (but strangely part of the
- // same variant in `TyKind`): These are pairs where the 2nd component is the
- // vtable, and the first component is the data (which must be ptr-sized).
- let op = v.to_op_for_proj(self.ecx())?;
- let data = self.ecx().unpack_dyn_star(&op)?.0;
- return self.visit_field(&v, 0, &$value_trait::from_op(&data));
- }
- // Slices do not need special handling here: they have `Array` field
- // placement with length 0, so we enter the `Array` case below which
- // indirectly uses the metadata to determine the actual length.
-
- // However, `Box`... let's talk about `Box`.
- ty::Adt(def, ..) if def.is_box() => {
- // `Box` is a hybrid primitive-library-defined type that one the one hand is
- // a dereferenceable pointer, on the other hand has *basically arbitrary
- // user-defined layout* since the user controls the 'allocator' field. So it
- // cannot be treated like a normal pointer, since it does not fit into an
- // `Immediate`. Yeah, it is quite terrible. But many visitors want to do
- // something with "all boxed pointers", so we handle this mess for them.
- //
- // When we hit a `Box`, we do not do the usual `visit_aggregate`; instead,
- // we (a) call `visit_box` on the pointer value, and (b) recurse on the
- // allocator field. We also assert tons of things to ensure we do not miss
- // any other fields.
-
- // `Box` has two fields: the pointer we care about, and the allocator.
- assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields");
- let (unique_ptr, alloc) =
- (v.project_field(self.ecx(), 0)?, v.project_field(self.ecx(), 1)?);
- // Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`...
- // (which means another 2 fields, the second of which is a `PhantomData`)
- assert_eq!(unique_ptr.layout().fields.count(), 2);
- let (nonnull_ptr, phantom) = (
- unique_ptr.project_field(self.ecx(), 0)?,
- unique_ptr.project_field(self.ecx(), 1)?,
- );
- assert!(
- phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
- "2nd field of `Unique` should be PhantomData but is {:?}",
- phantom.layout().ty,
- );
- // ... that contains a `NonNull`... (gladly, only a single field here)
- assert_eq!(nonnull_ptr.layout().fields.count(), 1);
- let raw_ptr = nonnull_ptr.project_field(self.ecx(), 0)?; // the actual raw ptr
- // ... whose only field finally is a raw ptr we can dereference.
- self.visit_box(&raw_ptr)?;
-
- // The second `Box` field is the allocator, which we recursively check for validity
- // like in regular structs.
- self.visit_field(v, 1, &alloc)?;
-
- // We visited all parts of this one.
- return Ok(());
- }
- _ => {},
- };
-
- // Visit the fields of this value.
- match &v.layout().fields {
- FieldsShape::Primitive => {}
- &FieldsShape::Union(fields) => {
- self.visit_union(v, fields)?;
- }
- FieldsShape::Arbitrary { offsets, .. } => {
- // FIXME: We collect in a vec because otherwise there are lifetime
- // errors: Projecting to a field needs access to `ecx`.
- let fields: Vec<InterpResult<'tcx, Self::V>> =
- (0..offsets.len()).map(|i| {
- v.project_field(self.ecx(), i)
- })
- .collect();
- self.visit_aggregate(v, fields.into_iter())?;
- }
- FieldsShape::Array { .. } => {
- // Let's get an mplace (or immediate) first.
- // This might `force_allocate` if `v` is a `PlaceTy`, but `place_index` does that anyway.
- let op = v.to_op_for_proj(self.ecx())?;
- // Now we can go over all the fields.
- // This uses the *run-time length*, i.e., if we are a slice,
- // the dynamic info from the metadata is used.
- let iter = self.ecx().operand_array_fields(&op)?
- .map(|f| f.and_then(|f| {
- Ok($value_trait::from_op(&f))
- }));
- self.visit_aggregate(v, iter)?;
- }
+ FieldsShape::Array { .. } => {
+ for (idx, field) in self.ecx().project_array_fields(v)?.enumerate() {
+ self.visit_field(v, idx, &field?)?;
}
+ }
+ }
- match v.layout().variants {
- // If this is a multi-variant layout, find the right variant and proceed
- // with *its* fields.
- Variants::Multiple { .. } => {
- let op = v.to_op_for_read(self.ecx())?;
- let idx = self.read_discriminant(&op)?;
- let inner = v.project_downcast(self.ecx(), idx)?;
- trace!("walk_value: variant layout: {:#?}", inner.layout());
- // recurse with the inner type
- self.visit_variant(v, idx, &inner)
- }
- // For single-variant layouts, we already did anything there is to do.
- Variants::Single { .. } => Ok(())
- }
+ match v.layout().variants {
+ // If this is a multi-variant layout, find the right variant and proceed
+ // with *its* fields.
+ Variants::Multiple { .. } => {
+ let idx = self.read_discriminant(v)?;
+ // There are 3 cases where downcasts can turn a Scalar/ScalarPair into a different ABI which
+ // could be a problem for `ImmTy` (see layout_sanity_check):
+ // - variant.size == Size::ZERO: works fine because `ImmTy::offset` has a special case for
+ // zero-sized layouts.
+ // - variant.fields.count() == 0: works fine because `ImmTy::offset` has a special case for
+ // zero-field aggregates.
+ // - variant.abi.is_uninhabited(): triggers UB in `read_discriminant` so we never get here.
+ let inner = self.ecx().project_downcast(v, idx)?;
+ trace!("walk_value: variant layout: {:#?}", inner.layout());
+ // recurse with the inner type
+ self.visit_variant(v, idx, &inner)?;
}
+ // For single-variant layouts, we already did anything there is to do.
+ Variants::Single { .. } => {}
}
+
+ Ok(())
}
}
-
-make_value_visitor!(ValueVisitor, Value,);
-make_value_visitor!(MutValueVisitor, ValueMut, mut);
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
index 14540e8df..fae047bff 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -8,8 +8,9 @@ use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
-use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
+use rustc_middle::traits::BuiltinImplSource;
use rustc_middle::ty::{self, adjustment::PointerCoercion, Instance, InstanceDef, Ty, TyCtxt};
+use rustc_middle::ty::{GenericArgKind, GenericArgs};
use rustc_middle::ty::{TraitRef, TypeVisitableExt};
use rustc_mir_dataflow::{self, Analysis};
use rustc_span::{sym, Span, Symbol};
@@ -20,7 +21,7 @@ use std::mem;
use std::ops::Deref;
use super::ops::{self, NonConstOp, Status};
-use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop, NeedsNonConstDrop};
+use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop};
use super::resolver::FlowSensitiveAnalysis;
use super::{ConstCx, Qualif};
use crate::const_eval::is_unstable_const_fn;
@@ -33,7 +34,7 @@ type QualifResults<'mir, 'tcx, Q> =
pub struct Qualifs<'mir, 'tcx> {
has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>,
needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>,
- needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>,
+ // needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>,
}
impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
@@ -76,15 +77,17 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
local: Local,
location: Location,
) -> bool {
+ // FIXME(effects) replace with `NeedsNonconstDrop` after const traits work again
+ /*
let ty = ccx.body.local_decls[local].ty;
- if !NeedsNonConstDrop::in_any_value_of_ty(ccx, ty) {
+ if !NeedsDrop::in_any_value_of_ty(ccx, ty) {
return false;
}
let needs_non_const_drop = self.needs_non_const_drop.get_or_insert_with(|| {
let ConstCx { tcx, body, .. } = *ccx;
- FlowSensitiveAnalysis::new(NeedsNonConstDrop, ccx)
+ FlowSensitiveAnalysis::new(NeedsDrop, ccx)
.into_engine(tcx, &body)
.iterate_to_fixpoint()
.into_results_cursor(&body)
@@ -92,6 +95,9 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
needs_non_const_drop.seek_before_primary_effect(location);
needs_non_const_drop.get().contains(local)
+ */
+
+ self.needs_drop(ccx, local, location)
}
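The observable effect of this temporary downgrade: with `NeedsDrop` as the qualif, any droppable local is rejected in const contexts, even if its `Drop` could in principle be const. A sketch that is expected to fail with E0493:

struct S;
impl Drop for S {
    fn drop(&mut self) {}
}

const fn f() {
    // error[E0493]: destructor of `S` cannot be evaluated at compile-time
    let _s = S;
}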
/// Returns `true` if `local` is `HasMutInterior` at the given `Location`.
@@ -701,8 +707,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
let fn_ty = func.ty(body, tcx);
- let (mut callee, mut substs) = match *fn_ty.kind() {
- ty::FnDef(def_id, substs) => (def_id, substs),
+ let (mut callee, mut fn_args) = match *fn_ty.kind() {
+ ty::FnDef(def_id, fn_args) => (def_id, fn_args),
ty::FnPtr(_) => {
self.check_op(ops::FnCallIndirect);
@@ -721,7 +727,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
let infcx = tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new(&infcx);
- let predicates = tcx.predicates_of(callee).instantiate(tcx, substs);
+ let predicates = tcx.predicates_of(callee).instantiate(tcx, fn_args);
let cause = ObligationCause::new(
terminator.source_info.span,
self.body.source.def_id().expect_local(),
@@ -740,13 +746,14 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
}
// Attempting to call a trait method?
+ // FIXME(effects) do we need this?
if let Some(trait_id) = tcx.trait_of_item(callee) {
trace!("attempting to call a trait method");
if !self.tcx.features().const_trait_impl {
self.check_op(ops::FnCallNonConst {
caller,
callee,
- substs,
+ args: fn_args,
span: *fn_span,
call_source: *call_source,
feature: Some(sym::const_trait_impl),
@@ -754,8 +761,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
return;
}
- let trait_ref = TraitRef::from_method(tcx, trait_id, substs);
- let trait_ref = trait_ref.with_constness(ty::BoundConstness::ConstIfConst);
+ let trait_ref = TraitRef::from_method(tcx, trait_id, fn_args);
let obligation =
Obligation::new(tcx, ObligationCause::dummy(), param_env, trait_ref);
@@ -766,7 +772,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
};
match implsrc {
- Ok(Some(ImplSource::Param(_, ty::BoundConstness::ConstIfConst))) => {
+ Ok(Some(ImplSource::Param(_))) if tcx.features().effects => {
debug!(
"const_trait_impl: provided {:?} via where-clause in {:?}",
trait_ref, param_env
@@ -774,12 +780,11 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
return;
}
// Closure: Fn{Once|Mut}
- Ok(Some(ImplSource::Builtin(_)))
+ Ok(Some(ImplSource::Builtin(BuiltinImplSource::Misc, _)))
if trait_ref.self_ty().is_closure()
&& tcx.fn_trait_kind_from_def_id(trait_id).is_some() =>
{
- let ty::Closure(closure_def_id, substs) =
- *trait_ref.self_ty().kind()
+ let ty::Closure(closure_def_id, fn_args) = *trait_ref.self_ty().kind()
else {
unreachable!()
};
@@ -787,7 +792,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
self.check_op(ops::FnCallNonConst {
caller,
callee,
- substs,
+ args: fn_args,
span: *fn_span,
call_source: *call_source,
feature: None,
@@ -798,28 +803,29 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
}
Ok(Some(ImplSource::UserDefined(data))) => {
let callee_name = tcx.item_name(callee);
- if let Some(&did) = tcx
- .associated_item_def_ids(data.impl_def_id)
- .iter()
- .find(|did| tcx.item_name(**did) == callee_name)
- {
- // using internal substs is ok here, since this is only
- // used for the `resolve` call below
- substs = InternalSubsts::identity_for_item(tcx, did);
- callee = did;
- }
if let hir::Constness::NotConst = tcx.constness(data.impl_def_id) {
self.check_op(ops::FnCallNonConst {
caller,
callee,
- substs,
+ args: fn_args,
span: *fn_span,
call_source: *call_source,
feature: None,
});
return;
}
+
+ if let Some(&did) = tcx
+ .associated_item_def_ids(data.impl_def_id)
+ .iter()
+ .find(|did| tcx.item_name(**did) == callee_name)
+ {
+ // using internal args is ok here, since this is only
+ // used for the `resolve` call below
+ fn_args = GenericArgs::identity_for_item(tcx, did);
+ callee = did;
+ }
}
_ if !tcx.is_const_fn_raw(callee) => {
// At this point, it is only legal when the caller is in a trait
@@ -829,7 +835,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
&& tcx.has_attr(callee_trait, sym::const_trait)
&& Some(callee_trait) == tcx.trait_of_item(caller.to_def_id())
// Can only call methods when it's `<Self as TheTrait>::f`.
- && tcx.types.self_param == substs.type_at(0)
+ && tcx.types.self_param == fn_args.type_at(0)
{
nonconst_call_permission = true;
}
@@ -856,7 +862,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
self.check_op(ops::FnCallNonConst {
caller,
callee,
- substs,
+ args: fn_args,
span: *fn_span,
call_source: *call_source,
feature: None,
@@ -869,7 +875,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
// Resolve a trait method call to its concrete implementation, which may be in a
// `const` trait impl.
- let instance = Instance::resolve(tcx, param_env, callee, substs);
+ let instance = Instance::resolve(tcx, param_env, callee, fn_args);
debug!("Resolving ({:?}) -> {:?}", callee, instance);
if let Ok(Some(func)) = instance {
if let InstanceDef::Item(def) = func.def {
@@ -916,7 +922,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
self.check_op(ops::FnCallNonConst {
caller,
callee,
- substs,
+ args: fn_args,
span: *fn_span,
call_source: *call_source,
feature: None,
@@ -996,8 +1002,9 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
let mut err_span = self.span;
let ty_of_dropped_place = dropped_place.ty(self.body, self.tcx).ty;
+ // FIXME(effects) replace with `NeedsNonConstDrop` once we fix const traits
let ty_needs_non_const_drop =
- qualifs::NeedsNonConstDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place);
+ qualifs::NeedsDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place);
debug!(?ty_of_dropped_place, ?ty_needs_non_const_drop);
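
The check.rs hunks above swap the precise `NeedsNonConstDrop` qualif for the coarser `NeedsDrop` until the effects rework lands. A minimal standalone sketch of why that fallback is sound (toy types, not rustc's internals): anything with a non-const destructor also needs drop, so the coarser check can only reject more.

    #[derive(Clone, Copy)]
    enum Ty {
        Int,
        ConstDropAdt,    // has a (hypothetical) `const Drop` impl
        NonConstDropAdt, // has an ordinary `Drop` impl
    }

    fn needs_drop(ty: Ty) -> bool {
        matches!(ty, Ty::ConstDropAdt | Ty::NonConstDropAdt)
    }

    fn needs_non_const_drop(ty: Ty) -> bool {
        matches!(ty, Ty::NonConstDropAdt)
    }

    fn main() {
        for ty in [Ty::Int, Ty::ConstDropAdt, Ty::NonConstDropAdt] {
            // The coarser check never accepts what the precise one rejects.
            assert!(!needs_non_const_drop(ty) || needs_drop(ty));
        }
    }
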
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
index 8ebfee887..e51082e1e 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
@@ -68,11 +68,11 @@ impl<'mir, 'tcx> ConstCx<'mir, 'tcx> {
pub fn fn_sig(&self) -> PolyFnSig<'tcx> {
let did = self.def_id().to_def_id();
if self.tcx.is_closure(did) {
- let ty = self.tcx.type_of(did).subst_identity();
- let ty::Closure(_, substs) = ty.kind() else { bug!("type_of closure not ty::Closure") };
- substs.as_closure().sig()
+ let ty = self.tcx.type_of(did).instantiate_identity();
+ let ty::Closure(_, args) = ty.kind() else { bug!("type_of closure not ty::Closure") };
+ args.as_closure().sig()
} else {
- self.tcx.fn_sig(did).subst_identity()
+ self.tcx.fn_sig(did).instantiate_identity()
}
}
}
@@ -127,15 +127,8 @@ fn is_parent_const_stable_trait(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
let hir_id = tcx.local_def_id_to_hir_id(local_def_id);
let Some(parent) = tcx.hir().opt_parent_id(hir_id) else { return false };
- let parent_def = tcx.hir().get(parent);
-
- if !matches!(
- parent_def,
- hir::Node::Item(hir::Item {
- kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const, .. }),
- ..
- })
- ) {
+
+ if !tcx.is_const_trait_impl_raw(parent.owner.def_id.to_def_id()) {
return false;
}
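
The `subst_identity` to `instantiate_identity` renames above are mechanical, but the pattern is worth a toy model (illustrative only; rustc's real `EarlyBinder` is richer):

    // Toy model: an `EarlyBinder` marks a value whose generic parameters are
    // still unsubstituted. Instantiating with the identity arguments is a
    // no-op unwrap; instantiating with concrete arguments would rewrite it.
    struct EarlyBinder<T>(T);

    impl<T> EarlyBinder<T> {
        fn bind(value: T) -> Self {
            EarlyBinder(value)
        }

        fn instantiate_identity(self) -> T {
            self.0
        }
    }

    fn main() {
        let sig = EarlyBinder::bind("fn(T) -> T");
        assert_eq!(sig.instantiate_identity(), "fn(T) -> T");
    }
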
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
index 4eb278252..1f3cda35c 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
@@ -9,9 +9,9 @@ use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
use rustc_middle::mir::{self, CallSource};
use rustc_middle::ty::print::with_no_trimmed_paths;
-use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
use rustc_middle::ty::TraitRef;
use rustc_middle::ty::{suggest_constraining_type_param, Adt, Closure, FnDef, FnPtr, Param, Ty};
+use rustc_middle::ty::{GenericArgKind, GenericArgsRef};
use rustc_middle::util::{call_kind, CallDesugaringKind, CallKind};
use rustc_session::parse::feature_err;
use rustc_span::symbol::sym;
@@ -98,7 +98,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallIndirect {
pub struct FnCallNonConst<'tcx> {
pub caller: LocalDefId,
pub callee: DefId,
- pub substs: SubstsRef<'tcx>,
+ pub args: GenericArgsRef<'tcx>,
pub span: Span,
pub call_source: CallSource,
pub feature: Option<Symbol>,
@@ -110,11 +110,11 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
ccx: &ConstCx<'_, 'tcx>,
_: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let FnCallNonConst { caller, callee, substs, span, call_source, feature } = *self;
+ let FnCallNonConst { caller, callee, args, span, call_source, feature } = *self;
let ConstCx { tcx, param_env, .. } = *ccx;
let diag_trait = |err, self_ty: Ty<'_>, trait_id| {
- let trait_ref = TraitRef::from_method(tcx, trait_id, substs);
+ let trait_ref = TraitRef::from_method(tcx, trait_id, args);
match self_ty.kind() {
Param(param_ty) => {
@@ -145,8 +145,11 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
let implsrc = selcx.select(&obligation);
if let Ok(Some(ImplSource::UserDefined(data))) = implsrc {
- let span = tcx.def_span(data.impl_def_id);
- err.subdiagnostic(errors::NonConstImplNote { span });
+ // FIXME(effects) revisit this
+ if !tcx.is_const_trait_impl_raw(data.impl_def_id) {
+ let span = tcx.def_span(data.impl_def_id);
+ err.subdiagnostic(errors::NonConstImplNote { span });
+ }
}
}
_ => {}
@@ -154,7 +157,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
};
let call_kind =
- call_kind(tcx, ccx.param_env, callee, substs, span, call_source.from_hir_call(), None);
+ call_kind(tcx, ccx.param_env, callee, args, span, call_source.from_hir_call(), None);
debug!(?call_kind);
@@ -226,7 +229,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
let mut sugg = None;
if Some(trait_id) == ccx.tcx.lang_items().eq_trait() {
- match (substs[0].unpack(), substs[1].unpack()) {
+ match (args[0].unpack(), args[1].unpack()) {
(GenericArgKind::Type(self_ty), GenericArgKind::Type(rhs_ty))
if self_ty == rhs_ty
&& self_ty.is_ref()
@@ -297,7 +300,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
.create_err(errors::NonConstFmtMacroCall { span, kind: ccx.const_kind() }),
_ => ccx.tcx.sess.create_err(errors::NonConstFnCall {
span,
- def_path_str: ccx.tcx.def_path_str_with_substs(callee, substs),
+ def_path_str: ccx.tcx.def_path_str_with_args(callee, args),
kind: ccx.const_kind(),
}),
};
@@ -310,8 +313,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
if let Some(feature) = feature && ccx.tcx.sess.is_nightly_build() {
err.help(format!(
- "add `#![feature({})]` to the crate attributes to enable",
- feature,
+ "add `#![feature({feature})]` to the crate attributes to enable",
));
}
@@ -346,10 +348,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallUnstable {
err.help("const-stable functions can only call other const-stable functions");
} else if ccx.tcx.sess.is_nightly_build() {
if let Some(feature) = feature {
- err.help(format!(
- "add `#![feature({})]` to the crate attributes to enable",
- feature
- ));
+ err.help(format!("add `#![feature({feature})]` to the crate attributes to enable"));
}
}
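
The diagnostic rewrites above move to inline format arguments, stabilized in Rust 1.58; both spellings produce identical strings:

    fn main() {
        let feature = "const_trait_impl";
        // Pre-change style: positional argument.
        let old = format!("add `#![feature({})]` to the crate attributes to enable", feature);
        // Post-change style: inline format args.
        let new = format!("add `#![feature({feature})]` to the crate attributes to enable");
        assert_eq!(old, new);
    }
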
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
index 1f1640fd8..e3377bd10 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
@@ -5,7 +5,7 @@ use rustc_span::{symbol::sym, Span};
use super::check::Qualifs;
use super::ops::{self, NonConstOp};
-use super::qualifs::{NeedsNonConstDrop, Qualif};
+use super::qualifs::{NeedsDrop, Qualif};
use super::ConstCx;
/// Returns `true` if we should use the more precise live drop checker that runs after drop
@@ -82,7 +82,9 @@ impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
match &terminator.kind {
mir::TerminatorKind::Drop { place: dropped_place, .. } => {
let dropped_ty = dropped_place.ty(self.body, self.tcx).ty;
- if !NeedsNonConstDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
+
+ // FIXME(effects) use `NeedsNonConstDrop`
+ if !NeedsDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
// Instead of throwing a bug, we just return here. This is because we have to
// run custom `const Drop` impls.
return;
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
index 015a4aa94..b1b2859ef 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -7,7 +7,8 @@ use rustc_hir::LangItem;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::mir;
use rustc_middle::mir::*;
-use rustc_middle::ty::{self, subst::SubstsRef, AdtDef, Ty};
+use rustc_middle::traits::BuiltinImplSource;
+use rustc_middle::ty::{self, AdtDef, GenericArgsRef, Ty};
use rustc_trait_selection::traits::{
self, ImplSource, Obligation, ObligationCause, ObligationCtxt, SelectionContext,
};
@@ -22,7 +23,8 @@ pub fn in_any_value_of_ty<'tcx>(
ConstQualifs {
has_mut_interior: HasMutInterior::in_any_value_of_ty(cx, ty),
needs_drop: NeedsDrop::in_any_value_of_ty(cx, ty),
- needs_non_const_drop: NeedsNonConstDrop::in_any_value_of_ty(cx, ty),
+ // FIXME(effects)
+ needs_non_const_drop: NeedsDrop::in_any_value_of_ty(cx, ty),
custom_eq: CustomEq::in_any_value_of_ty(cx, ty),
tainted_by_errors,
}
@@ -72,7 +74,7 @@ pub trait Qualif {
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
adt: AdtDef<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> bool;
}
@@ -97,7 +99,7 @@ impl Qualif for HasMutInterior {
fn in_adt_inherently<'tcx>(
_cx: &ConstCx<'_, 'tcx>,
adt: AdtDef<'tcx>,
- _: SubstsRef<'tcx>,
+ _: GenericArgsRef<'tcx>,
) -> bool {
// Exactly one type, `UnsafeCell`, has the `HasMutInterior` qualif inherently.
// It arises structurally for all other types.
@@ -127,7 +129,7 @@ impl Qualif for NeedsDrop {
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
adt: AdtDef<'tcx>,
- _: SubstsRef<'tcx>,
+ _: GenericArgsRef<'tcx>,
) -> bool {
adt.has_dtor(cx.tcx)
}
@@ -153,12 +155,12 @@ impl Qualif for NeedsNonConstDrop {
return false;
}
+ // FIXME(effects) constness
let obligation = Obligation::new(
cx.tcx,
ObligationCause::dummy_with_span(cx.body.span),
cx.param_env,
- ty::TraitRef::from_lang_item(cx.tcx, LangItem::Destruct, cx.body.span, [ty])
- .with_constness(ty::BoundConstness::ConstIfConst),
+ ty::TraitRef::from_lang_item(cx.tcx, LangItem::Destruct, cx.body.span, [ty]),
);
let infcx = cx.tcx.infer_ctxt().build();
@@ -172,7 +174,7 @@ impl Qualif for NeedsNonConstDrop {
if !matches!(
impl_src,
- ImplSource::Builtin(_) | ImplSource::Param(_, ty::BoundConstness::ConstIfConst)
+ ImplSource::Builtin(BuiltinImplSource::Misc, _) | ImplSource::Param(_)
) {
// If our const destruct candidate is not ConstDestruct or implied by the param env,
// then it's bad
@@ -193,7 +195,7 @@ impl Qualif for NeedsNonConstDrop {
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
adt: AdtDef<'tcx>,
- _: SubstsRef<'tcx>,
+ _: GenericArgsRef<'tcx>,
) -> bool {
adt.has_non_const_dtor(cx.tcx)
}
@@ -221,9 +223,9 @@ impl Qualif for CustomEq {
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
def: AdtDef<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> bool {
- let ty = Ty::new_adt(cx.tcx, def, substs);
+ let ty = Ty::new_adt(cx.tcx, def, args);
!ty.is_structural_eq_shallow(cx.tcx)
}
}
@@ -276,9 +278,9 @@ where
Rvalue::Aggregate(kind, operands) => {
// Return early if we know that the struct or enum being constructed is always
// qualified.
- if let AggregateKind::Adt(adt_did, _, substs, ..) = **kind {
+ if let AggregateKind::Adt(adt_did, _, args, ..) = **kind {
let def = cx.tcx.adt_def(adt_did);
- if Q::in_adt_inherently(cx, def, substs) {
+ if Q::in_adt_inherently(cx, def, args) {
return true;
}
if def.is_union() && Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)) {
@@ -360,7 +362,7 @@ where
ConstantKind::Val(..) => None,
};
- if let Some(mir::UnevaluatedConst { def, substs: _, promoted }) = uneval {
+ if let Some(mir::UnevaluatedConst { def, args: _, promoted }) = uneval {
// Use qualifs of the type for the promoted. Promoteds in MIR body should be possible
// only for `NeedsNonConstDrop` with precise drop checking. This is the only const
// check performed after the promotion. Verify that with an assertion.
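
The `Qualif` trait keeps its structural scheme while `SubstsRef` becomes `GenericArgsRef`: a qualif holds for an aggregate if the ADT carries it inherently or any operand does. A compressed sketch with strings standing in for ADTs and the `ConstCx` plumbing omitted:

    trait Qualif {
        fn in_adt_inherently(adt: &str) -> bool;
    }

    struct HasMutInterior;

    impl Qualif for HasMutInterior {
        fn in_adt_inherently(adt: &str) -> bool {
            // Exactly one ADT, `UnsafeCell`, carries this qualif inherently.
            adt == "UnsafeCell"
        }
    }

    fn in_aggregate<Q: Qualif>(adt: &str, operand_qualifs: &[bool]) -> bool {
        // Inherent qualif short-circuits; otherwise it arises structurally.
        Q::in_adt_inherently(adt) || operand_qualifs.iter().copied().any(|q| q)
    }

    fn main() {
        assert!(in_aggregate::<HasMutInterior>("UnsafeCell", &[false]));
        assert!(!in_aggregate::<HasMutInterior>("Vec", &[false]));
        assert!(in_aggregate::<HasMutInterior>("Vec", &[true]));
    }
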
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
index 3a869f7f5..a137f84b7 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
@@ -4,10 +4,12 @@
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::visit::Visitor;
-use rustc_middle::mir::{self, BasicBlock, Local, Location, Statement, StatementKind};
+use rustc_middle::mir::{
+ self, BasicBlock, CallReturnPlaces, Local, Location, Statement, StatementKind, TerminatorEdges,
+};
use rustc_mir_dataflow::fmt::DebugWithContext;
use rustc_mir_dataflow::JoinSemiLattice;
-use rustc_mir_dataflow::{Analysis, AnalysisDomain, CallReturnPlaces};
+use rustc_mir_dataflow::{Analysis, AnalysisDomain};
use std::fmt;
use std::marker::PhantomData;
@@ -345,13 +347,14 @@ where
self.transfer_function(state).visit_statement(statement, location);
}
- fn apply_terminator_effect(
+ fn apply_terminator_effect<'mir>(
&mut self,
state: &mut Self::Domain,
- terminator: &mir::Terminator<'tcx>,
+ terminator: &'mir mir::Terminator<'tcx>,
location: Location,
- ) {
+ ) -> TerminatorEdges<'mir, 'tcx> {
self.transfer_function(state).visit_terminator(terminator, location);
+ terminator.edges()
}
fn apply_call_return_effect(
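
The resolver now implements the updated dataflow interface: `apply_terminator_effect` returns the CFG edges the engine should propagate along, which is why the terminator borrow gains the `'mir` lifetime. A simplified stand-in for that shape (not `rustc_mir_dataflow`'s real definitions):

    struct Terminator;

    enum TerminatorEdges<'mir> {
        // The real enum has more variants; two suffice to show the lifetime.
        None,
        Single(&'mir Terminator),
    }

    trait Analysis {
        type Domain;

        // The returned edges borrow from the terminator, hence `'mir`.
        fn apply_terminator_effect<'mir>(
            &mut self,
            state: &mut Self::Domain,
            terminator: &'mir Terminator,
        ) -> TerminatorEdges<'mir>;
    }

    struct NoOp;

    impl Analysis for NoOp {
        type Domain = ();

        fn apply_terminator_effect<'mir>(
            &mut self,
            _state: &mut (),
            terminator: &'mir Terminator,
        ) -> TerminatorEdges<'mir> {
            TerminatorEdges::Single(terminator)
        }
    }

    fn main() {
        let term = Terminator;
        match NoOp.apply_terminator_effect(&mut (), &term) {
            TerminatorEdges::Single(_) => println!("propagate along the terminator's edges"),
            TerminatorEdges::None => println!("no outgoing edges"),
        }
    }
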
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
index 1b39a76e4..d79c65f1d 100644
--- a/compiler/rustc_const_eval/src/transform/promote_consts.rs
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -16,7 +16,7 @@ use rustc_hir as hir;
use rustc_middle::mir;
use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
-use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::GenericArgs;
use rustc_middle::ty::{self, List, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::Span;
@@ -759,11 +759,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
let (mut rvalue, source_info) = {
let statement = &mut self.source[loc.block].statements[loc.statement_index];
let StatementKind::Assign(box (_, rhs)) = &mut statement.kind else {
- span_bug!(
- statement.source_info.span,
- "{:?} is not an assignment",
- statement
- );
+ span_bug!(statement.source_info.span, "{:?} is not an assignment", statement);
};
(
@@ -845,8 +841,8 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
let mut promoted_operand = |ty, span| {
promoted.span = span;
promoted.local_decls[RETURN_PLACE] = LocalDecl::new(ty, span);
- let substs = tcx.erase_regions(InternalSubsts::identity_for_item(tcx, def));
- let uneval = mir::UnevaluatedConst { def, substs, promoted: Some(promoted_id) };
+ let args = tcx.erase_regions(GenericArgs::identity_for_item(tcx, def));
+ let uneval = mir::UnevaluatedConst { def, args, promoted: Some(promoted_id) };
Operand::Constant(Box::new(Constant {
span,
@@ -859,7 +855,9 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
let local_decls = &mut self.source.local_decls;
let loc = candidate.location;
let statement = &mut blocks[loc.block].statements[loc.statement_index];
- let StatementKind::Assign(box (_, Rvalue::Ref(region, borrow_kind, place))) = &mut statement.kind else {
+ let StatementKind::Assign(box (_, Rvalue::Ref(region, borrow_kind, place))) =
+ &mut statement.kind
+ else {
bug!()
};
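
The promote_consts hunks above are mostly formatting churn around `let`-`else`, plus the `GenericArgs` rename. As a quick refresher on the construct itself (an illustrative function, not compiler code), the `else` arm must diverge, which is what lets the binding escape:

    fn first_word(s: &str) -> &str {
        let Some(word) = s.split_whitespace().next() else {
            panic!("{s:?} contains no words");
        };
        word
    }

    fn main() {
        assert_eq!(first_word("const eval"), "const");
    }
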
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index 4cc923cd9..783b52d00 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -18,6 +18,7 @@ use rustc_mir_dataflow::impls::MaybeStorageLive;
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_mir_dataflow::{Analysis, ResultsCursor};
use rustc_target::abi::{Size, FIRST_VARIANT};
+use rustc_target::spec::abi::Abi;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum EdgeKind {
@@ -58,25 +59,48 @@ impl<'tcx> MirPass<'tcx> for Validator {
.iterate_to_fixpoint()
.into_results_cursor(body);
- let mut checker = TypeChecker {
+ let can_unwind = if mir_phase <= MirPhase::Runtime(RuntimePhase::Initial) {
+ // In this case `AbortUnwindingCalls` hasn't yet been executed.
+ true
+ } else if !tcx.def_kind(def_id).is_fn_like() {
+ true
+ } else {
+ let body_ty = tcx.type_of(def_id).skip_binder();
+ let body_abi = match body_ty.kind() {
+ ty::FnDef(..) => body_ty.fn_sig(tcx).abi(),
+ ty::Closure(..) => Abi::RustCall,
+ ty::Generator(..) => Abi::Rust,
+ _ => {
+ span_bug!(body.span, "unexpected body ty: {:?} phase {:?}", body_ty, mir_phase)
+ }
+ };
+
+ ty::layout::fn_can_unwind(tcx, Some(def_id), body_abi)
+ };
+
+ let mut cfg_checker = CfgChecker {
when: &self.when,
body,
tcx,
- param_env,
mir_phase,
unwind_edge_count: 0,
reachable_blocks: traversal::reachable_as_bitset(body),
storage_liveness,
place_cache: FxHashSet::default(),
value_cache: FxHashSet::default(),
+ can_unwind,
};
- checker.visit_body(body);
- checker.check_cleanup_control_flow();
+ cfg_checker.visit_body(body);
+ cfg_checker.check_cleanup_control_flow();
+
+ for (location, msg) in validate_types(tcx, self.mir_phase, param_env, body) {
+ cfg_checker.fail(location, msg);
+ }
if let MirPhase::Runtime(_) = body.phase {
if let ty::InstanceDef::Item(_) = body.source.instance {
if body.has_free_regions() {
- checker.fail(
+ cfg_checker.fail(
Location::START,
format!("Free regions in optimized {} MIR", body.phase.name()),
);
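
The new `can_unwind` computation above asks whether the body's ABI permits unwinding at all. A rough standalone sketch of that decision (the real `fn_can_unwind` also consults the panic strategy and per-target rules):

    enum Abi {
        Rust,
        RustCall, // closures
        C,
    }

    fn fn_can_unwind(abi: Abi) -> bool {
        // Plain `extern "C"` is nounwind; the Rust ABIs may unwind.
        matches!(abi, Abi::Rust | Abi::RustCall)
    }

    fn main() {
        assert!(fn_can_unwind(Abi::RustCall));
        assert!(!fn_can_unwind(Abi::C));
    }
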
@@ -86,20 +110,22 @@ impl<'tcx> MirPass<'tcx> for Validator {
}
}
-struct TypeChecker<'a, 'tcx> {
+struct CfgChecker<'a, 'tcx> {
when: &'a str,
body: &'a Body<'tcx>,
tcx: TyCtxt<'tcx>,
- param_env: ParamEnv<'tcx>,
mir_phase: MirPhase,
unwind_edge_count: usize,
reachable_blocks: BitSet<BasicBlock>,
storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive<'static>>,
place_cache: FxHashSet<PlaceRef<'tcx>>,
value_cache: FxHashSet<u128>,
+ // If `false`, then the MIR must not contain `UnwindAction::Continue` or
+ // `TerminatorKind::Resume`.
+ can_unwind: bool,
}
-impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+impl<'a, 'tcx> CfgChecker<'a, 'tcx> {
#[track_caller]
fn fail(&self, location: Location, msg: impl AsRef<str>) {
let span = self.body.source_info(location).span;
@@ -147,7 +173,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
}
} else {
- self.fail(location, format!("encountered jump to invalid basic block {:?}", bb))
+ self.fail(location, format!("encountered jump to invalid basic block {bb:?}"))
}
}
@@ -214,16 +240,13 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
stack.clear();
stack.insert(bb);
loop {
- let Some(parent)= parent[bb].take() else {
- break
- };
+ let Some(parent) = parent[bb].take() else { break };
let no_cycle = stack.insert(parent);
if !no_cycle {
self.fail(
Location { block: bb, statement_index: 0 },
format!(
- "Cleanup control flow violation: Cycle involving edge {:?} -> {:?}",
- bb, parent,
+ "Cleanup control flow violation: Cycle involving edge {bb:?} -> {parent:?}",
),
);
break;
@@ -238,47 +261,30 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
match unwind {
UnwindAction::Cleanup(unwind) => {
if is_cleanup {
- self.fail(location, "unwind on cleanup block");
+ self.fail(location, "`UnwindAction::Cleanup` in cleanup block");
}
self.check_edge(location, unwind, EdgeKind::Unwind);
}
UnwindAction::Continue => {
if is_cleanup {
- self.fail(location, "unwind on cleanup block");
+ self.fail(location, "`UnwindAction::Continue` in cleanup block");
+ }
+
+ if !self.can_unwind {
+ self.fail(location, "`UnwindAction::Continue` in no-unwind function");
}
}
UnwindAction::Unreachable | UnwindAction::Terminate => (),
}
}
-
- /// Check if src can be assigned into dest.
- /// This is not precise, it will accept some incorrect assignments.
- fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
- // Fast path before we normalize.
- if src == dest {
- // Equal types, all is good.
- return true;
- }
-
- // We sometimes have to use `defining_opaque_types` for subtyping
- // to succeed here and figuring out how exactly that should work
- // is annoying. It is harmless enough to just not validate anything
- // in that case. We still check this after analysis as all opaque
- // types have been revealed at this point.
- if (src, dest).has_opaque_types() {
- return true;
- }
-
- crate::util::is_subtype(self.tcx, self.param_env, src, dest)
- }
}
-impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
+impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
fn visit_local(&mut self, local: Local, context: PlaceContext, location: Location) {
if self.body.local_decls.get(local).is_none() {
self.fail(
location,
- format!("local {:?} has no corresponding declaration in `body.local_decls`", local),
+ format!("local {local:?} has no corresponding declaration in `body.local_decls`"),
);
}
@@ -293,11 +299,286 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
self.storage_liveness.seek_after_primary_effect(location);
let locals_with_storage = self.storage_liveness.get();
if !locals_with_storage.contains(local) {
- self.fail(location, format!("use of local {:?}, which has no storage here", local));
+ self.fail(location, format!("use of local {local:?}, which has no storage here"));
+ }
+ }
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ match &statement.kind {
+ StatementKind::Assign(box (dest, rvalue)) => {
+ // FIXME(JakobDegen): Check this for all rvalues, not just this one.
+ if let Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) = rvalue {
+ // The sides of an assignment must not alias. Currently this just checks whether
+ // the places are identical.
+ if dest == src {
+ self.fail(
+ location,
+ "encountered `Assign` statement with overlapping memory",
+ );
+ }
+ }
+ }
+ StatementKind::AscribeUserType(..) => {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(
+ location,
+ "`AscribeUserType` should have been removed after drop lowering phase",
+ );
+ }
+ }
+ StatementKind::FakeRead(..) => {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(
+ location,
+ "`FakeRead` should have been removed after drop lowering phase",
+ );
+ }
+ }
+ StatementKind::SetDiscriminant { .. } => {
+ if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(location, "`SetDiscriminant` is not allowed until deaggregation");
+ }
+ }
+ StatementKind::Deinit(..) => {
+ if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(location, "`Deinit` is not allowed until deaggregation");
+ }
+ }
+ StatementKind::Retag(kind, _) => {
+ // FIXME(JakobDegen) The validator should check that `self.mir_phase <
+ // DropsLowered`. However, this causes ICEs with generation of drop shims, which
+ // seem to fail to set their `MirPhase` correctly.
+ if matches!(kind, RetagKind::Raw | RetagKind::TwoPhase) {
+ self.fail(location, format!("explicit `{kind:?}` is forbidden"));
+ }
+ }
+ StatementKind::StorageLive(local) => {
+ // We check that the local is not live when entering a `StorageLive` for it.
+ // Technically, violating this restriction is only UB and not actually indicative
+ // of not well-formed MIR. This means that an optimization which turns MIR that
+ // already has UB into MIR that fails this check is not necessarily wrong. However,
+ // we have no such optimizations at the moment, and so we include this check anyway
+ // to help us catch bugs. If you happen to write an optimization that might cause
+ // this to incorrectly fire, feel free to remove this check.
+ if self.reachable_blocks.contains(location.block) {
+ self.storage_liveness.seek_before_primary_effect(location);
+ let locals_with_storage = self.storage_liveness.get();
+ if locals_with_storage.contains(*local) {
+ self.fail(
+ location,
+ format!("StorageLive({local:?}) which already has storage here"),
+ );
+ }
+ }
+ }
+ StatementKind::StorageDead(_)
+ | StatementKind::Intrinsic(_)
+ | StatementKind::Coverage(_)
+ | StatementKind::ConstEvalCounter
+ | StatementKind::PlaceMention(..)
+ | StatementKind::Nop => {}
+ }
+
+ self.super_statement(statement, location);
+ }
+
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ match &terminator.kind {
+ TerminatorKind::Goto { target } => {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ }
+ TerminatorKind::SwitchInt { targets, discr: _ } => {
+ for (_, target) in targets.iter() {
+ self.check_edge(location, target, EdgeKind::Normal);
+ }
+ self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
+
+ self.value_cache.clear();
+ self.value_cache.extend(targets.iter().map(|(value, _)| value));
+ let has_duplicates = targets.iter().len() != self.value_cache.len();
+ if has_duplicates {
+ self.fail(
+ location,
+ format!(
+ "duplicated values in `SwitchInt` terminator: {:?}",
+ terminator.kind,
+ ),
+ );
+ }
+ }
+ TerminatorKind::Drop { target, unwind, .. } => {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ self.check_unwind_edge(location, *unwind);
+ }
+ TerminatorKind::Call { args, destination, target, unwind, .. } => {
+ if let Some(target) = target {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ }
+ self.check_unwind_edge(location, *unwind);
+
+ // The call destination place and `Operand::Move` places used as arguments might be
+ // passed by reference to the callee. Consequently, they must not overlap.
+ // Currently this simply checks for duplicate places.
+ self.place_cache.clear();
+ self.place_cache.insert(destination.as_ref());
+ let mut has_duplicates = false;
+ for arg in args {
+ if let Operand::Move(place) = arg {
+ has_duplicates |= !self.place_cache.insert(place.as_ref());
+ }
+ }
+
+ if has_duplicates {
+ self.fail(
+ location,
+ format!(
+ "encountered overlapping memory in `Call` terminator: {:?}",
+ terminator.kind,
+ ),
+ );
+ }
+ }
+ TerminatorKind::Assert { target, unwind, .. } => {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ self.check_unwind_edge(location, *unwind);
+ }
+ TerminatorKind::Yield { resume, drop, .. } => {
+ if self.body.generator.is_none() {
+ self.fail(location, "`Yield` cannot appear outside generator bodies");
+ }
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(location, "`Yield` should have been replaced by generator lowering");
+ }
+ self.check_edge(location, *resume, EdgeKind::Normal);
+ if let Some(drop) = drop {
+ self.check_edge(location, *drop, EdgeKind::Normal);
+ }
+ }
+ TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(
+ location,
+ "`FalseEdge` should have been removed after drop elaboration",
+ );
+ }
+ self.check_edge(location, *real_target, EdgeKind::Normal);
+ self.check_edge(location, *imaginary_target, EdgeKind::Normal);
+ }
+ TerminatorKind::FalseUnwind { real_target, unwind } => {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(
+ location,
+ "`FalseUnwind` should have been removed after drop elaboration",
+ );
+ }
+ self.check_edge(location, *real_target, EdgeKind::Normal);
+ self.check_unwind_edge(location, *unwind);
+ }
+ TerminatorKind::InlineAsm { destination, unwind, .. } => {
+ if let Some(destination) = destination {
+ self.check_edge(location, *destination, EdgeKind::Normal);
+ }
+ self.check_unwind_edge(location, *unwind);
+ }
+ TerminatorKind::GeneratorDrop => {
+ if self.body.generator.is_none() {
+ self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
+ }
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ self.fail(
+ location,
+ "`GeneratorDrop` should have been replaced by generator lowering",
+ );
+ }
+ }
+ TerminatorKind::Resume => {
+ let bb = location.block;
+ if !self.body.basic_blocks[bb].is_cleanup {
+ self.fail(location, "Cannot `Resume` from non-cleanup basic block")
+ }
+ if !self.can_unwind {
+ self.fail(location, "Cannot `Resume` in a function that cannot unwind")
+ }
+ }
+ TerminatorKind::Terminate => {
+ let bb = location.block;
+ if !self.body.basic_blocks[bb].is_cleanup {
+ self.fail(location, "Cannot `Terminate` from non-cleanup basic block")
+ }
+ }
+ TerminatorKind::Return => {
+ let bb = location.block;
+ if self.body.basic_blocks[bb].is_cleanup {
+ self.fail(location, "Cannot `Return` from cleanup basic block")
+ }
}
+ TerminatorKind::Unreachable => {}
+ }
+
+ self.super_terminator(terminator, location);
+ }
+
+ fn visit_source_scope(&mut self, scope: SourceScope) {
+ if self.body.source_scopes.get(scope).is_none() {
+ self.tcx.sess.diagnostic().delay_span_bug(
+ self.body.span,
+ format!(
+ "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
+ self.body.source.instance, self.when, scope,
+ ),
+ );
}
}
+}
+
+pub fn validate_types<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mir_phase: MirPhase,
+ param_env: ty::ParamEnv<'tcx>,
+ body: &Body<'tcx>,
+) -> Vec<(Location, String)> {
+ let mut type_checker = TypeChecker { body, tcx, param_env, mir_phase, failures: Vec::new() };
+ type_checker.visit_body(body);
+ type_checker.failures
+}
+
+struct TypeChecker<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ mir_phase: MirPhase,
+ failures: Vec<(Location, String)>,
+}
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+ fn fail(&mut self, location: Location, msg: impl Into<String>) {
+ self.failures.push((location, msg.into()));
+ }
+
+ /// Check if src can be assigned into dest.
+ /// This is not precise; it will accept some incorrect assignments.
+ fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
+ // Fast path before we normalize.
+ if src == dest {
+ // Equal types, all is good.
+ return true;
+ }
+ // We sometimes have to use `defining_opaque_types` for subtyping
+ // to succeed here and figuring out how exactly that should work
+ // is annoying. It is harmless enough to just not validate anything
+ // in that case. We still check this after analysis as all opaque
+ // types have been revealed at this point.
+ if (src, dest).has_opaque_types() {
+ return true;
+ }
+
+ crate::util::is_subtype(self.tcx, self.param_env, src, dest)
+ }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
// This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
if self.tcx.sess.opts.unstable_opts.validate_mir
@@ -308,7 +589,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
let ty = place.ty(&self.body.local_decls, self.tcx).ty;
if !ty.is_copy_modulo_regions(self.tcx, self.param_env) {
- self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
+ self.fail(location, format!("`Operand::Copy` with non-`Copy` type {ty}"));
}
}
}
@@ -327,7 +608,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
ProjectionElem::Index(index) => {
let index_ty = self.body.local_decls[index].ty;
if index_ty != self.tcx.types.usize {
- self.fail(location, format!("bad index ({:?} != usize)", index_ty))
+ self.fail(location, format!("bad index ({index_ty:?} != usize)"))
}
}
ProjectionElem::Deref
@@ -338,30 +619,29 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
if base_ty.is_box() {
self.fail(
location,
- format!("{:?} dereferenced after ElaborateBoxDerefs", base_ty),
+ format!("{base_ty:?} dereferenced after ElaborateBoxDerefs"),
)
}
}
ProjectionElem::Field(f, ty) => {
let parent_ty = place_ref.ty(&self.body.local_decls, self.tcx);
- let fail_out_of_bounds = |this: &Self, location| {
- this.fail(location, format!("Out of bounds field {:?} for {:?}", f, parent_ty));
+ let fail_out_of_bounds = |this: &mut Self, location| {
+ this.fail(location, format!("Out of bounds field {f:?} for {parent_ty:?}"));
};
- let check_equal = |this: &Self, location, f_ty| {
+ let check_equal = |this: &mut Self, location, f_ty| {
if !this.mir_assign_valid_types(ty, f_ty) {
this.fail(
location,
format!(
- "Field projection `{:?}.{:?}` specified type `{:?}`, but actual type is `{:?}`",
- place_ref, f, ty, f_ty
+ "Field projection `{place_ref:?}.{f:?}` specified type `{ty:?}`, but actual type is `{f_ty:?}`"
)
)
}
};
let kind = match parent_ty.ty.kind() {
- &ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
- self.tcx.type_of(def_id).subst(self.tcx, substs).kind()
+ &ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
+ self.tcx.type_of(def_id).instantiate(self.tcx, args).kind()
}
kind => kind,
};
@@ -374,23 +654,23 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
};
check_equal(self, location, *f_ty);
}
- ty::Adt(adt_def, substs) => {
+ ty::Adt(adt_def, args) => {
let var = parent_ty.variant_index.unwrap_or(FIRST_VARIANT);
let Some(field) = adt_def.variant(var).fields.get(f) else {
fail_out_of_bounds(self, location);
return;
};
- check_equal(self, location, field.ty(self.tcx, substs));
+ check_equal(self, location, field.ty(self.tcx, args));
}
- ty::Closure(_, substs) => {
- let substs = substs.as_closure();
- let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
+ ty::Closure(_, args) => {
+ let args = args.as_closure();
+ let Some(&f_ty) = args.upvar_tys().get(f.as_usize()) else {
fail_out_of_bounds(self, location);
return;
};
check_equal(self, location, f_ty);
}
- &ty::Generator(def_id, substs, _) => {
+ &ty::Generator(def_id, args, _) => {
let f_ty = if let Some(var) = parent_ty.variant_index {
let gen_body = if def_id == self.body.source.def_id() {
self.body
@@ -399,7 +679,10 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
};
let Some(layout) = gen_body.generator_layout() else {
- self.fail(location, format!("No generator layout for {:?}", parent_ty));
+ self.fail(
+ location,
+ format!("No generator layout for {parent_ty:?}"),
+ );
return;
};
@@ -409,13 +692,17 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
};
let Some(f_ty) = layout.field_tys.get(local) else {
- self.fail(location, format!("Out of bounds local {:?} for {:?}", local, parent_ty));
+ self.fail(
+ location,
+ format!("Out of bounds local {local:?} for {parent_ty:?}"),
+ );
return;
};
- f_ty.ty
+ ty::EarlyBinder::bind(f_ty.ty).instantiate(self.tcx, args)
} else {
- let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
+ let Some(&f_ty) = args.as_generator().prefix_tys().get(f.index())
+ else {
fail_out_of_bounds(self, location);
return;
};
@@ -436,9 +723,9 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
fn visit_var_debug_info(&mut self, debuginfo: &VarDebugInfo<'tcx>) {
- let check_place = |place: Place<'_>| {
+ let check_place = |this: &mut Self, place: Place<'_>| {
if place.projection.iter().any(|p| !p.can_use_in_debuginfo()) {
- self.fail(
+ this.fail(
START_BLOCK.start_location(),
format!("illegal place {:?} in debuginfo for {:?}", place, debuginfo.name),
);
@@ -447,21 +734,15 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
match debuginfo.value {
VarDebugInfoContents::Const(_) => {}
VarDebugInfoContents::Place(place) => {
- check_place(place);
- if debuginfo.references != 0 && place.projection.last() == Some(&PlaceElem::Deref) {
- self.fail(
- START_BLOCK.start_location(),
- format!("debuginfo {:?}, has both ref and deref", debuginfo),
- );
- }
+ check_place(self, place);
}
VarDebugInfoContents::Composite { ty, ref fragments } => {
for f in fragments {
- check_place(f.contents);
+ check_place(self, f.contents);
if ty.is_union() || ty.is_enum() {
self.fail(
START_BLOCK.start_location(),
- format!("invalid type {:?} for composite debuginfo", ty),
+ format!("invalid type {ty:?} for composite debuginfo"),
);
}
if f.projection.iter().any(|p| !matches!(p, PlaceElem::Field(..))) {
@@ -488,7 +769,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
&& cntxt != PlaceContext::NonUse(NonUseContext::VarDebugInfo)
&& place.projection[1..].contains(&ProjectionElem::Deref)
{
- self.fail(location, format!("{:?}, has deref at the wrong place", place));
+ self.fail(location, format!("{place:?}, has deref at the wrong place"));
}
self.super_place(place, cntxt, location);
@@ -548,7 +829,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
Offset => {
check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..));
if b != self.tcx.types.isize && b != self.tcx.types.usize {
- self.fail(location, format!("Cannot offset by non-isize type {:?}", b));
+ self.fail(location, format!("Cannot offset by non-isize type {b:?}"));
}
}
Eq | Lt | Le | Ne | Ge | Gt => {
@@ -613,13 +894,12 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
self.fail(
location,
format!(
- "Cannot perform checked arithmetic on unequal types {:?} and {:?}",
- a, b
+ "Cannot perform checked arithmetic on unequal types {a:?} and {b:?}"
),
);
}
}
- _ => self.fail(location, format!("There is no checked version of {:?}", op)),
+ _ => self.fail(location, format!("There is no checked version of {op:?}")),
}
}
Rvalue::UnaryOp(op, operand) => {
@@ -714,7 +994,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
Rvalue::NullaryOp(NullOp::OffsetOf(fields), container) => {
- let fail_out_of_bounds = |this: &Self, location, field, ty| {
+ let fail_out_of_bounds = |this: &mut Self, location, field, ty| {
this.fail(location, format!("Out of bounds field {field:?} for {ty:?}"));
};
@@ -730,7 +1010,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
current_ty = self.tcx.normalize_erasing_regions(self.param_env, f_ty);
}
- ty::Adt(adt_def, substs) => {
+ ty::Adt(adt_def, args) => {
if adt_def.is_enum() {
self.fail(
location,
@@ -744,7 +1024,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
return;
};
- let f_ty = field.ty(self.tcx, substs);
+ let f_ty = field.ty(self.tcx, args);
current_ty = self.tcx.normalize_erasing_regions(self.param_env, f_ty);
}
_ => {
@@ -824,7 +1104,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
if !ty.is_bool() {
self.fail(
location,
- format!("`assume` argument must be `bool`, but got: `{}`", ty),
+ format!("`assume` argument must be `bool`, but got: `{ty}`"),
);
}
}
@@ -837,7 +1117,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
} else {
self.fail(
location,
- format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty),
+ format!("Expected src to be ptr in copy_nonoverlapping, got: {src_ty}"),
);
return;
};
@@ -847,19 +1127,19 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
} else {
self.fail(
location,
- format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty),
+ format!("Expected dst to be ptr in copy_nonoverlapping, got: {dst_ty}"),
);
return;
};
// since CopyNonOverlapping is parametrized by 1 type,
// we only need to check that they are equal and not keep an extra parameter.
if !self.mir_assign_valid_types(op_src_ty, op_dst_ty) {
- self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty));
+ self.fail(location, format!("bad arg ({op_src_ty:?} != {op_dst_ty:?})"));
}
let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
if op_cnt_ty != self.tcx.types.usize {
- self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
+ self.fail(location, format!("bad arg ({op_cnt_ty:?} != usize)"))
}
}
StatementKind::SetDiscriminant { place, .. } => {
@@ -871,8 +1151,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
self.fail(
location,
format!(
- "`SetDiscriminant` is only allowed on ADTs and generators, not {:?}",
- pty
+ "`SetDiscriminant` is only allowed on ADTs and generators, not {pty:?}"
),
);
}
@@ -887,29 +1166,11 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
// DropsLowered`. However, this causes ICEs with generation of drop shims, which
// seem to fail to set their `MirPhase` correctly.
if matches!(kind, RetagKind::Raw | RetagKind::TwoPhase) {
- self.fail(location, format!("explicit `{:?}` is forbidden", kind));
+ self.fail(location, format!("explicit `{kind:?}` is forbidden"));
}
}
- StatementKind::StorageLive(local) => {
- // We check that the local is not live when entering a `StorageLive` for it.
- // Technically, violating this restriction is only UB and not actually indicative
- // of not well-formed MIR. This means that an optimization which turns MIR that
- // already has UB into MIR that fails this check is not necessarily wrong. However,
- // we have no such optimizations at the moment, and so we include this check anyway
- // to help us catch bugs. If you happen to write an optimization that might cause
- // this to incorrectly fire, feel free to remove this check.
- if self.reachable_blocks.contains(location.block) {
- self.storage_liveness.seek_before_primary_effect(location);
- let locals_with_storage = self.storage_liveness.get();
- if locals_with_storage.contains(*local) {
- self.fail(
- location,
- format!("StorageLive({local:?}) which already has storage here"),
- );
- }
- }
- }
- StatementKind::StorageDead(_)
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
| StatementKind::Coverage(_)
| StatementKind::ConstEvalCounter
| StatementKind::PlaceMention(..)
@@ -921,9 +1182,6 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
match &terminator.kind {
- TerminatorKind::Goto { target } => {
- self.check_edge(location, *target, EdgeKind::Normal);
- }
TerminatorKind::SwitchInt { targets, discr } => {
let switch_ty = discr.ty(&self.body.local_decls, self.tcx);
@@ -937,164 +1195,49 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
other => bug!("unhandled type: {:?}", other),
});
- for (value, target) in targets.iter() {
+ for (value, _) in targets.iter() {
if Scalar::<()>::try_from_uint(value, size).is_none() {
self.fail(
location,
- format!("the value {:#x} is not a proper {:?}", value, switch_ty),
+ format!("the value {value:#x} is not a proper {switch_ty:?}"),
)
}
-
- self.check_edge(location, target, EdgeKind::Normal);
- }
- self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
-
- self.value_cache.clear();
- self.value_cache.extend(targets.iter().map(|(value, _)| value));
- let has_duplicates = targets.iter().len() != self.value_cache.len();
- if has_duplicates {
- self.fail(
- location,
- format!(
- "duplicated values in `SwitchInt` terminator: {:?}",
- terminator.kind,
- ),
- );
}
}
- TerminatorKind::Drop { target, unwind, .. } => {
- self.check_edge(location, *target, EdgeKind::Normal);
- self.check_unwind_edge(location, *unwind);
- }
- TerminatorKind::Call { func, args, destination, target, unwind, .. } => {
+ TerminatorKind::Call { func, .. } => {
let func_ty = func.ty(&self.body.local_decls, self.tcx);
match func_ty.kind() {
ty::FnPtr(..) | ty::FnDef(..) => {}
_ => self.fail(
location,
- format!("encountered non-callable type {} in `Call` terminator", func_ty),
+ format!("encountered non-callable type {func_ty} in `Call` terminator"),
),
}
- if let Some(target) = target {
- self.check_edge(location, *target, EdgeKind::Normal);
- }
- self.check_unwind_edge(location, *unwind);
-
- // The call destination place and Operand::Move place used as an argument might be
- // passed by a reference to the callee. Consequently they must be non-overlapping.
- // Currently this simply checks for duplicate places.
- self.place_cache.clear();
- self.place_cache.insert(destination.as_ref());
- let mut has_duplicates = false;
- for arg in args {
- if let Operand::Move(place) = arg {
- has_duplicates |= !self.place_cache.insert(place.as_ref());
- }
- }
-
- if has_duplicates {
- self.fail(
- location,
- format!(
- "encountered overlapping memory in `Call` terminator: {:?}",
- terminator.kind,
- ),
- );
- }
}
- TerminatorKind::Assert { cond, target, unwind, .. } => {
+ TerminatorKind::Assert { cond, .. } => {
let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
if cond_ty != self.tcx.types.bool {
self.fail(
location,
format!(
- "encountered non-boolean condition of type {} in `Assert` terminator",
- cond_ty
+ "encountered non-boolean condition of type {cond_ty} in `Assert` terminator"
),
);
}
- self.check_edge(location, *target, EdgeKind::Normal);
- self.check_unwind_edge(location, *unwind);
- }
- TerminatorKind::Yield { resume, drop, .. } => {
- if self.body.generator.is_none() {
- self.fail(location, "`Yield` cannot appear outside generator bodies");
- }
- if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
- self.fail(location, "`Yield` should have been replaced by generator lowering");
- }
- self.check_edge(location, *resume, EdgeKind::Normal);
- if let Some(drop) = drop {
- self.check_edge(location, *drop, EdgeKind::Normal);
- }
}
- TerminatorKind::FalseEdge { real_target, imaginary_target } => {
- if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
- self.fail(
- location,
- "`FalseEdge` should have been removed after drop elaboration",
- );
- }
- self.check_edge(location, *real_target, EdgeKind::Normal);
- self.check_edge(location, *imaginary_target, EdgeKind::Normal);
- }
- TerminatorKind::FalseUnwind { real_target, unwind } => {
- if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
- self.fail(
- location,
- "`FalseUnwind` should have been removed after drop elaboration",
- );
- }
- self.check_edge(location, *real_target, EdgeKind::Normal);
- self.check_unwind_edge(location, *unwind);
- }
- TerminatorKind::InlineAsm { destination, unwind, .. } => {
- if let Some(destination) = destination {
- self.check_edge(location, *destination, EdgeKind::Normal);
- }
- self.check_unwind_edge(location, *unwind);
- }
- TerminatorKind::GeneratorDrop => {
- if self.body.generator.is_none() {
- self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
- }
- if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
- self.fail(
- location,
- "`GeneratorDrop` should have been replaced by generator lowering",
- );
- }
- }
- TerminatorKind::Resume | TerminatorKind::Terminate => {
- let bb = location.block;
- if !self.body.basic_blocks[bb].is_cleanup {
- self.fail(
- location,
- "Cannot `Resume` or `Terminate` from non-cleanup basic block",
- )
- }
- }
- TerminatorKind::Return => {
- let bb = location.block;
- if self.body.basic_blocks[bb].is_cleanup {
- self.fail(location, "Cannot `Return` from cleanup basic block")
- }
- }
- TerminatorKind::Unreachable => {}
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::InlineAsm { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Resume
+ | TerminatorKind::Terminate
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable => {}
}
self.super_terminator(terminator, location);
}
-
- fn visit_source_scope(&mut self, scope: SourceScope) {
- if self.body.source_scopes.get(scope).is_none() {
- self.tcx.sess.diagnostic().delay_span_bug(
- self.body.span,
- format!(
- "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
- self.body.source.instance, self.when, scope,
- ),
- );
- }
- }
}
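
The net effect of the validator split above: CFG problems are reported on the spot by `CfgChecker`, while type problems are accumulated by `TypeChecker` and surfaced by whoever calls `validate_types`. A minimal sketch of that accumulate-then-report shape, with stand-in types:

    #[derive(Debug)]
    struct Location {
        block: usize,
        statement: usize,
    }

    struct TypeChecker {
        failures: Vec<(Location, String)>,
    }

    impl TypeChecker {
        fn fail(&mut self, location: Location, msg: impl Into<String>) {
            self.failures.push((location, msg.into()));
        }
    }

    fn validate_types() -> Vec<(Location, String)> {
        let mut checker = TypeChecker { failures: Vec::new() };
        // The real version visits a MIR body here and calls `fail` on mismatches.
        checker.fail(Location { block: 0, statement: 0 }, "example failure");
        checker.failures
    }

    fn main() {
        for (location, msg) in validate_types() {
            eprintln!("{location:?}: {msg}");
        }
    }
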
diff --git a/compiler/rustc_const_eval/src/util/compare_types.rs b/compiler/rustc_const_eval/src/util/compare_types.rs
index d6a2ffb75..83376c8e9 100644
--- a/compiler/rustc_const_eval/src/util/compare_types.rs
+++ b/compiler/rustc_const_eval/src/util/compare_types.rs
@@ -56,8 +56,16 @@ pub fn is_subtype<'tcx>(
// With `Reveal::All`, opaque types get normalized away, with `Reveal::UserFacing`
// we would get unification errors because we're unable to look into opaque types,
// even if they're constrained in our current function.
- //
- // It seems very unlikely that this hides any bugs.
- let _ = infcx.take_opaque_types();
+ for (key, ty) in infcx.take_opaque_types() {
+ let hidden_ty = tcx.type_of(key.def_id).instantiate(tcx, key.args);
+ if hidden_ty != ty.hidden_type.ty {
+ span_bug!(
+ ty.hidden_type.span,
+ "{}, {}",
+ tcx.type_of(key.def_id).instantiate(tcx, key.args),
+ ty.hidden_type.ty
+ );
+ }
+ }
errors.is_empty()
}
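
Rather than silently discarding inferred opaque types, `is_subtype` now insists every hidden type agrees with `type_of`. The same pattern in miniature, with strings standing in for `Ty`:

    fn check_hidden_types<'a>(
        inferred: &[(&'a str, &'a str)],
        expected: impl Fn(&str) -> &'a str,
    ) {
        for (opaque, hidden_ty) in inferred {
            let expected_ty = expected(opaque);
            // Mirrors the `span_bug!` above: any divergence is a compiler bug.
            assert_eq!(expected_ty, *hidden_ty, "hidden type for {opaque} diverged");
        }
    }

    fn main() {
        check_hidden_types(&[("Opaque<'_>", "i32")], |_| "i32");
    }
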
diff --git a/compiler/rustc_const_eval/src/util/type_name.rs b/compiler/rustc_const_eval/src/util/type_name.rs
index 4f01e0a24..14a840ad1 100644
--- a/compiler/rustc_const_eval/src/util/type_name.rs
+++ b/compiler/rustc_const_eval/src/util/type_name.rs
@@ -4,8 +4,7 @@ use rustc_hir::definitions::DisambiguatedDefPathData;
use rustc_middle::ty::{
self,
print::{PrettyPrinter, Print, Printer},
- subst::{GenericArg, GenericArgKind},
- Ty, TyCtxt,
+ GenericArg, GenericArgKind, Ty, TyCtxt,
};
use std::fmt::Write;
@@ -56,11 +55,11 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
}
// Types with identity (print the module path).
- ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), substs)
- | ty::FnDef(def_id, substs)
- | ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, substs, .. })
- | ty::Closure(def_id, substs)
- | ty::Generator(def_id, substs, _) => self.print_def_path(def_id, substs),
+ ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), args)
+ | ty::FnDef(def_id, args)
+ | ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, args, .. })
+ | ty::Closure(def_id, args)
+ | ty::Generator(def_id, args, _) => self.print_def_path(def_id, args),
ty::Foreign(def_id) => self.print_def_path(def_id, &[]),
ty::Alias(ty::Weak, _) => bug!("type_name: unexpected weak projection"),
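
The `type_name` arm above binds `(def_id, args)` once across five type kinds via an or-pattern, while `Foreign` supplies empty args (the `&[]` above). A toy reduction of the idiom:

    enum TyKind {
        Adt(u32, &'static [&'static str]),
        FnDef(u32, &'static [&'static str]),
        Foreign(u32),
    }

    fn def_path(kind: &TyKind) -> (u32, &'static [&'static str]) {
        match kind {
            // One arm serves every variant carrying both a def id and args.
            TyKind::Adt(def_id, args) | TyKind::FnDef(def_id, args) => (*def_id, *args),
            // Foreign items have no generic args.
            TyKind::Foreign(def_id) => (*def_id, &[]),
        }
    }

    fn main() {
        let fn_def = TyKind::FnDef(7, &["i32"]);
        assert_eq!(def_path(&fn_def), (7, &["i32"][..]));
    }
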