author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
commit     c23a457e72abe608715ac76f076f47dc42af07a5 (patch)
tree       2772049aaf84b5c9d0ed12ec8d86812f7a7904b6 /compiler/rustc_const_eval/src/interpret
parent     Releasing progress-linux version 1.73.0+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.74.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_const_eval/src/interpret')
-rw-r--r--  compiler/rustc_const_eval/src/interpret/cast.rs          | 138
-rw-r--r--  compiler/rustc_const_eval/src/interpret/discriminant.rs  |  20
-rw-r--r--  compiler/rustc_const_eval/src/interpret/eval_context.rs  | 277
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs        |  25
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs    |  45
-rw-r--r--  compiler/rustc_const_eval/src/interpret/machine.rs       |  60
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs        |   7
-rw-r--r--  compiler/rustc_const_eval/src/interpret/mod.rs           |  11
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operand.rs       | 338
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operator.rs      | 147
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs         | 384
-rw-r--r--  compiler/rustc_const_eval/src/interpret/projection.rs    |  89
-rw-r--r--  compiler/rustc_const_eval/src/interpret/step.rs          |   8
-rw-r--r--  compiler/rustc_const_eval/src/interpret/terminator.rs    | 445
-rw-r--r--  compiler/rustc_const_eval/src/interpret/traits.rs        |   2
-rw-r--r--  compiler/rustc_const_eval/src/interpret/util.rs          |   5
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs      |  20
-rw-r--r--  compiler/rustc_const_eval/src/interpret/visitor.rs       |   5
18 files changed, 1174 insertions, 852 deletions
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 98e853dc4..b9f88cf63 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -24,41 +24,44 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
cast_ty: Ty<'tcx>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
+ // `cast_ty` will often be the same as `dest.ty`, but not always, since subtyping is still
+ // possible.
+ let cast_layout =
+ if cast_ty == dest.layout.ty { dest.layout } else { self.layout_of(cast_ty)? };
// FIXME: In which cases should we trigger UB when the source is uninit?
match cast_kind {
CastKind::PointerCoercion(PointerCoercion::Unsize) => {
- let cast_ty = self.layout_of(cast_ty)?;
- self.unsize_into(src, cast_ty, dest)?;
+ self.unsize_into(src, cast_layout, dest)?;
}
CastKind::PointerExposeAddress => {
let src = self.read_immediate(src)?;
- let res = self.pointer_expose_address_cast(&src, cast_ty)?;
- self.write_immediate(res, dest)?;
+ let res = self.pointer_expose_address_cast(&src, cast_layout)?;
+ self.write_immediate(*res, dest)?;
}
CastKind::PointerFromExposedAddress => {
let src = self.read_immediate(src)?;
- let res = self.pointer_from_exposed_address_cast(&src, cast_ty)?;
- self.write_immediate(res, dest)?;
+ let res = self.pointer_from_exposed_address_cast(&src, cast_layout)?;
+ self.write_immediate(*res, dest)?;
}
CastKind::IntToInt | CastKind::IntToFloat => {
let src = self.read_immediate(src)?;
- let res = self.int_to_int_or_float(&src, cast_ty)?;
- self.write_immediate(res, dest)?;
+ let res = self.int_to_int_or_float(&src, cast_layout)?;
+ self.write_immediate(*res, dest)?;
}
CastKind::FloatToFloat | CastKind::FloatToInt => {
let src = self.read_immediate(src)?;
- let res = self.float_to_float_or_int(&src, cast_ty)?;
- self.write_immediate(res, dest)?;
+ let res = self.float_to_float_or_int(&src, cast_layout)?;
+ self.write_immediate(*res, dest)?;
}
CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
let src = self.read_immediate(src)?;
- let res = self.ptr_to_ptr(&src, cast_ty)?;
- self.write_immediate(res, dest)?;
+ let res = self.ptr_to_ptr(&src, cast_layout)?;
+ self.write_immediate(*res, dest)?;
}
CastKind::PointerCoercion(
@@ -84,10 +87,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)
.ok_or_else(|| err_inval!(TooGeneric))?;
- let fn_ptr = self.create_fn_alloc_ptr(FnVal::Instance(instance));
+ let fn_ptr = self.fn_ptr(FnVal::Instance(instance));
self.write_pointer(fn_ptr, dest)?;
}
- _ => span_bug!(self.cur_span(), "reify fn pointer on {:?}", src.layout.ty),
+ _ => span_bug!(self.cur_span(), "reify fn pointer on {}", src.layout.ty),
}
}
@@ -98,7 +101,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// No change to value
self.write_immediate(*src, dest)?;
}
- _ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {:?}", cast_ty),
+ _ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {}", cast_ty),
}
}
@@ -116,10 +119,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ty::ClosureKind::FnOnce,
)
.ok_or_else(|| err_inval!(TooGeneric))?;
- let fn_ptr = self.create_fn_alloc_ptr(FnVal::Instance(instance));
+ let fn_ptr = self.fn_ptr(FnVal::Instance(instance));
self.write_pointer(fn_ptr, dest)?;
}
- _ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
+ _ => span_bug!(self.cur_span(), "closure fn pointer on {}", src.layout.ty),
}
}
@@ -140,6 +143,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
CastKind::Transmute => {
assert!(src.layout.is_sized());
assert!(dest.layout.is_sized());
+ assert_eq!(cast_ty, dest.layout.ty); // we otherwise ignore `cast_ty` entirely...
if src.layout.size != dest.layout.size {
let src_bytes = src.layout.size.bytes();
let dest_bytes = dest.layout.size.bytes();
@@ -164,62 +168,61 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn int_to_int_or_float(
&self,
src: &ImmTy<'tcx, M::Provenance>,
- cast_ty: Ty<'tcx>,
- ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ cast_to: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_integral() || src.layout.ty.is_char() || src.layout.ty.is_bool());
- assert!(cast_ty.is_floating_point() || cast_ty.is_integral() || cast_ty.is_char());
+ assert!(cast_to.ty.is_floating_point() || cast_to.ty.is_integral() || cast_to.ty.is_char());
- Ok(self.cast_from_int_like(src.to_scalar(), src.layout, cast_ty)?.into())
+ Ok(ImmTy::from_scalar(
+ self.cast_from_int_like(src.to_scalar(), src.layout, cast_to.ty)?,
+ cast_to,
+ ))
}
/// Handles 'FloatToFloat' and 'FloatToInt' casts.
pub fn float_to_float_or_int(
&self,
src: &ImmTy<'tcx, M::Provenance>,
- cast_ty: Ty<'tcx>,
- ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ cast_to: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
use rustc_type_ir::sty::TyKind::*;
- match src.layout.ty.kind() {
+ let val = match src.layout.ty.kind() {
// Floating point
- Float(FloatTy::F32) => {
- return Ok(self.cast_from_float(src.to_scalar().to_f32()?, cast_ty).into());
- }
- Float(FloatTy::F64) => {
- return Ok(self.cast_from_float(src.to_scalar().to_f64()?, cast_ty).into());
- }
+ Float(FloatTy::F32) => self.cast_from_float(src.to_scalar().to_f32()?, cast_to.ty),
+ Float(FloatTy::F64) => self.cast_from_float(src.to_scalar().to_f64()?, cast_to.ty),
_ => {
- bug!("Can't cast 'Float' type into {:?}", cast_ty);
+ bug!("Can't cast 'Float' type into {}", cast_to.ty);
}
- }
+ };
+ Ok(ImmTy::from_scalar(val, cast_to))
}
/// Handles 'FnPtrToPtr' and 'PtrToPtr' casts.
pub fn ptr_to_ptr(
&self,
src: &ImmTy<'tcx, M::Provenance>,
- cast_ty: Ty<'tcx>,
- ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ cast_to: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_any_ptr());
- assert!(cast_ty.is_unsafe_ptr());
+ assert!(cast_to.ty.is_unsafe_ptr());
// Handle casting any ptr to raw ptr (might be a fat ptr).
- let dest_layout = self.layout_of(cast_ty)?;
- if dest_layout.size == src.layout.size {
+ if cast_to.size == src.layout.size {
// Thin or fat pointer that just has the ptr kind of the target type changed.
- return Ok(**src);
+ return Ok(ImmTy::from_immediate(**src, cast_to));
} else {
// Casting the metadata away from a fat ptr.
assert_eq!(src.layout.size, 2 * self.pointer_size());
- assert_eq!(dest_layout.size, self.pointer_size());
+ assert_eq!(cast_to.size, self.pointer_size());
assert!(src.layout.ty.is_unsafe_ptr());
return match **src {
- Immediate::ScalarPair(data, _) => Ok(data.into()),
+ Immediate::ScalarPair(data, _) => Ok(ImmTy::from_scalar(data, cast_to)),
Immediate::Scalar(..) => span_bug!(
self.cur_span(),
- "{:?} input to a fat-to-thin cast ({:?} -> {:?})",
+ "{:?} input to a fat-to-thin cast ({} -> {})",
*src,
src.layout.ty,
- cast_ty
+ cast_to.ty
),
Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
};
@@ -229,10 +232,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn pointer_expose_address_cast(
&mut self,
src: &ImmTy<'tcx, M::Provenance>,
- cast_ty: Ty<'tcx>,
- ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ cast_to: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert_matches!(src.layout.ty.kind(), ty::RawPtr(_) | ty::FnPtr(_));
- assert!(cast_ty.is_integral());
+ assert!(cast_to.ty.is_integral());
let scalar = src.to_scalar();
let ptr = scalar.to_pointer(self)?;
@@ -240,16 +243,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(ptr) => M::expose_ptr(self, ptr)?,
Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP.
};
- Ok(self.cast_from_int_like(scalar, src.layout, cast_ty)?.into())
+ Ok(ImmTy::from_scalar(self.cast_from_int_like(scalar, src.layout, cast_to.ty)?, cast_to))
}
pub fn pointer_from_exposed_address_cast(
&self,
src: &ImmTy<'tcx, M::Provenance>,
- cast_ty: Ty<'tcx>,
- ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ cast_to: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_integral());
- assert_matches!(cast_ty.kind(), ty::RawPtr(_));
+ assert_matches!(cast_to.ty.kind(), ty::RawPtr(_));
// First cast to usize.
let scalar = src.to_scalar();
@@ -258,12 +261,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Then turn address into pointer.
let ptr = M::ptr_from_addr_cast(&self, addr)?;
- Ok(Scalar::from_maybe_pointer(ptr, self).into())
+ Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(ptr, self), cast_to))
}
/// Low-level cast helper function. This works directly on scalars and can take 'int-like' input
/// type (basically everything with a scalar layout) to int/float/char types.
- pub fn cast_from_int_like(
+ fn cast_from_int_like(
&self,
scalar: Scalar<M::Provenance>, // input value (there is no ScalarTy so we separate data+layout)
src_layout: TyAndLayout<'tcx>,
@@ -298,7 +301,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
// Casts to bool are not permitted by rustc, no need to handle them here.
- _ => span_bug!(self.cur_span(), "invalid int to {:?} cast", cast_ty),
+ _ => span_bug!(self.cur_span(), "invalid int to {} cast", cast_ty),
})
}
@@ -331,7 +334,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// float -> f64
Float(FloatTy::F64) => Scalar::from_f64(f.convert(&mut false).value),
// That's it.
- _ => span_bug!(self.cur_span(), "invalid float to {:?} cast", dest_ty),
+ _ => span_bug!(self.cur_span(), "invalid float to {} cast", dest_ty),
}
}
@@ -351,7 +354,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
match (&src_pointee_ty.kind(), &dest_pointee_ty.kind()) {
(&ty::Array(_, length), &ty::Slice(_)) => {
- let ptr = self.read_scalar(src)?;
+ let ptr = self.read_pointer(src)?;
// u64 cast is from usize to u64, which is always good
let val = Immediate::new_slice(
ptr,
@@ -367,6 +370,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return self.write_immediate(*val, dest);
}
let (old_data, old_vptr) = val.to_scalar_pair();
+ let old_data = old_data.to_pointer(self)?;
let old_vptr = old_vptr.to_pointer(self)?;
let (ty, old_trait) = self.get_ptr_vtable(old_vptr)?;
if old_trait != data_a.principal() {
@@ -378,7 +382,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(_, &ty::Dynamic(data, _, ty::Dyn)) => {
// Initial cast from sized to dyn trait
let vtable = self.get_vtable_ptr(src_pointee_ty, data.principal())?;
- let ptr = self.read_scalar(src)?;
+ let ptr = self.read_pointer(src)?;
let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
self.write_immediate(val, dest)
}
@@ -389,7 +393,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
span_bug!(
self.cur_span(),
- "invalid pointer unsizing {:?} -> {:?}",
+ "invalid pointer unsizing {} -> {}",
src.layout.ty,
cast_ty
)
@@ -403,28 +407,32 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
cast_ty: TyAndLayout<'tcx>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
- trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout.ty, cast_ty.ty);
+ trace!("Unsizing {:?} of type {} into {}", *src, src.layout.ty, cast_ty.ty);
match (&src.layout.ty.kind(), &cast_ty.ty.kind()) {
(&ty::Ref(_, s, _), &ty::Ref(_, c, _) | &ty::RawPtr(TypeAndMut { ty: c, .. }))
| (&ty::RawPtr(TypeAndMut { ty: s, .. }), &ty::RawPtr(TypeAndMut { ty: c, .. })) => {
self.unsize_into_ptr(src, dest, *s, *c)
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
- assert_eq!(def_a, def_b);
+ assert_eq!(def_a, def_b); // implies same number of fields
- // unsizing of generic struct with pointer fields
- // Example: `Arc<T>` -> `Arc<Trait>`
- // here we need to increase the size of every &T thin ptr field to a fat ptr
+ // Unsizing of generic struct with pointer fields, like `Arc<T>` -> `Arc<Trait>`.
+ // There can be extra fields as long as they don't change their type or are 1-ZST.
+ // There might also be no field that actually needs unsizing.
+ let mut found_cast_field = false;
for i in 0..src.layout.fields.count() {
let cast_ty_field = cast_ty.field(self, i);
- if cast_ty_field.is_zst() {
- continue;
- }
let src_field = self.project_field(src, i)?;
let dst_field = self.project_field(dest, i)?;
- if src_field.layout.ty == cast_ty_field.ty {
+ if src_field.layout.is_1zst() && cast_ty_field.is_1zst() {
+ // Skip 1-ZST fields.
+ } else if src_field.layout.ty == cast_ty_field.ty {
self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
} else {
+ if found_cast_field {
+ span_bug!(self.cur_span(), "unsize_into: more than one field to cast");
+ }
+ found_cast_field = true;
self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
}
}
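
A minimal standalone sketch (not part of the patch) of the convention this file converges on: every cast helper now takes the precomputed destination layout (`cast_to: TyAndLayout`) and returns a typed `ImmTy` rather than a bare `Immediate`, so the value always travels with the layout it was computed for, and callers write it back with `self.write_immediate(*res, dest)`. The types below are toy stand-ins, not rustc's real `ImmTy`/`TyAndLayout`:

// Toy model: a value bundled with its layout, as the cast helpers now return.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Layout { size_bytes: u32 }

#[derive(Clone, Copy, Debug, PartialEq)]
struct ImmTy { bits: u128, layout: Layout }

impl ImmTy {
    fn from_scalar(bits: u128, layout: Layout) -> Self { ImmTy { bits, layout } }
}

// Like `int_to_int_or_float`: the destination layout comes in, and the result
// leaves paired with that layout, so a caller cannot mismatch value and type.
fn int_to_int(src: ImmTy, cast_to: Layout) -> ImmTy {
    let bits = cast_to.size_bytes * 8;
    let masked = if bits >= 128 { src.bits } else { src.bits & ((1u128 << bits) - 1) };
    ImmTy::from_scalar(masked, cast_to)
}

fn main() {
    let src = ImmTy::from_scalar(0x1_0000_00ff, Layout { size_bytes: 8 });
    let res = int_to_int(src, Layout { size_bytes: 1 });
    assert_eq!(res, ImmTy { bits: 0xff, layout: Layout { size_bytes: 1 } });
}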
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index 6c35fb01a..49e01728f 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -76,7 +76,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
ImmTy::from_uint(variant_index_relative, tag_layout);
- let tag_val = self.binary_op(
+ let tag_val = self.wrapping_binary_op(
mir::BinOp::Add,
&variant_index_relative_val,
&niche_start_val,
@@ -153,19 +153,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Figure out which discriminant and variant this corresponds to.
let index = match *tag_encoding {
TagEncoding::Direct => {
- let scalar = tag_val.to_scalar();
// Generate a specific error if `tag_val` is not an integer.
// (`tag_bits` itself is only used for error messages below.)
- let tag_bits = scalar
+ let tag_bits = tag_val
+ .to_scalar()
.try_to_int()
.map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
.assert_bits(tag_layout.size);
// Cast bits from tag layout to discriminant layout.
// After the checks we did above, this cannot fail, as
// discriminants are int-like.
- let discr_val =
- self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
- let discr_bits = discr_val.assert_bits(discr_layout.size);
+ let discr_val = self.int_to_int_or_float(&tag_val, discr_layout).unwrap();
+ let discr_bits = discr_val.to_scalar().assert_bits(discr_layout.size);
// Convert discriminant to variant index, and catch invalid discriminants.
let index = match *ty.kind() {
ty::Adt(adt, _) => {
@@ -208,7 +207,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
- self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
+ self.wrapping_binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
let variant_index_relative =
variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
// Check if this is in the range that indicates an actual discriminant.
@@ -247,9 +246,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
layout: TyAndLayout<'tcx>,
variant: VariantIdx,
- ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
let discr_layout = self.layout_of(layout.ty.discriminant_ty(*self.tcx))?;
- Ok(match layout.ty.discriminant_for_variant(*self.tcx, variant) {
+ let discr_value = match layout.ty.discriminant_for_variant(*self.tcx, variant) {
Some(discr) => {
// This type actually has discriminants.
assert_eq!(discr.ty, discr_layout.ty);
@@ -260,6 +259,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_eq!(variant.as_u32(), 0);
Scalar::from_uint(variant.as_u32(), discr_layout.size)
}
- })
+ };
+ Ok(ImmTy::from_scalar(discr_value, discr_layout))
}
}
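
The switch from `binary_op` to `wrapping_binary_op` in this file is deliberate: niche tags live in modular arithmetic over the tag type, so encoding adds `niche_start` and decoding subtracts it, with wraparound expected rather than treated as overflow. The same computation with plain `u8` values (a sketch, not interpreter code):

fn tag_for_variant(variant_index_relative: u8, niche_start: u8) -> u8 {
    // encoding: may wrap past u8::MAX, which is fine
    variant_index_relative.wrapping_add(niche_start)
}

fn variant_for_tag(tag: u8, niche_start: u8) -> u8 {
    // decoding: the inverse, also wrapping
    tag.wrapping_sub(niche_start)
}

fn main() {
    let niche_start = 254;
    let tag = tag_for_variant(3, niche_start); // 257 mod 256 = 1
    assert_eq!(tag, 1);
    assert_eq!(variant_for_tag(tag, niche_start), 3);
}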
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 3ac6f07e8..af7dfbef2 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -7,13 +7,13 @@ use hir::CRATE_HIR_ID;
use rustc_hir::{self as hir, def_id::DefId, definitions::DefPathData};
use rustc_index::IndexVec;
use rustc_middle::mir;
-use rustc_middle::mir::interpret::{ErrorHandled, InterpError, InvalidMetaKind, ReportedErrorInfo};
+use rustc_middle::mir::interpret::{ErrorHandled, InvalidMetaKind, ReportedErrorInfo};
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{
self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
TyAndLayout,
};
-use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, Variance};
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_session::Limit;
use rustc_span::Span;
@@ -21,12 +21,12 @@ use rustc_target::abi::{call::FnAbi, Align, HasDataLayout, Size, TargetDataLayou
use super::{
AllocId, GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace,
- MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, PointerArithmetic, Provenance,
- Scalar, StackPopJump,
+ MemPlaceMeta, Memory, MemoryKind, OpTy, Operand, Place, PlaceTy, Pointer, PointerArithmetic,
+ Projectable, Provenance, Scalar, StackPopJump,
};
-use crate::errors::{self, ErroneousConstUsed};
-use crate::fluent_generated as fluent;
+use crate::errors;
use crate::util;
+use crate::{fluent_generated as fluent, ReportErrorExt};
pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
/// Stores the `Machine` instance.
@@ -155,16 +155,26 @@ pub enum StackPopCleanup {
}
/// State of a local variable including a memoized layout
-#[derive(Clone, Debug)]
+#[derive(Clone)]
pub struct LocalState<'tcx, Prov: Provenance = AllocId> {
- pub value: LocalValue<Prov>,
- /// Don't modify if `Some`, this is only used to prevent computing the layout twice
- pub layout: Cell<Option<TyAndLayout<'tcx>>>,
+ value: LocalValue<Prov>,
+ /// Don't modify if `Some`, this is only used to prevent computing the layout twice.
+ /// Avoids computing the layout of locals that are never actually initialized.
+ layout: Cell<Option<TyAndLayout<'tcx>>>,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for LocalState<'_, Prov> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("LocalState")
+ .field("value", &self.value)
+ .field("ty", &self.layout.get().map(|l| l.ty))
+ .finish()
+ }
}
/// Current value of a local variable
#[derive(Copy, Clone, Debug)] // Miri debug-prints these
-pub enum LocalValue<Prov: Provenance = AllocId> {
+pub(super) enum LocalValue<Prov: Provenance = AllocId> {
/// This local is not currently alive, and cannot be used at all.
Dead,
/// A normal, live local.
@@ -175,10 +185,27 @@ pub enum LocalValue<Prov: Provenance = AllocId> {
Live(Operand<Prov>),
}
-impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
+ pub fn make_live_uninit(&mut self) {
+ self.value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+ }
+
+ /// This is a hack because Miri needs a way to visit all the provenance in a `LocalState`
+ /// without having a layout or `TyCtxt` available, and we want to keep the `Operand` type
+ /// private.
+ pub fn as_mplace_or_imm(
+ &self,
+ ) -> Option<Either<(Pointer<Option<Prov>>, MemPlaceMeta<Prov>), Immediate<Prov>>> {
+ match self.value {
+ LocalValue::Dead => None,
+ LocalValue::Live(Operand::Indirect(mplace)) => Some(Left((mplace.ptr, mplace.meta))),
+ LocalValue::Live(Operand::Immediate(imm)) => Some(Right(imm)),
+ }
+ }
+
/// Read the local's value or error if the local is not yet live or not live anymore.
- #[inline]
- pub fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
+ #[inline(always)]
+ pub(super) fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
match &self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
LocalValue::Live(val) => Ok(val),
@@ -188,10 +215,10 @@ impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
/// Overwrite the local. If the local can be overwritten in place, return a reference
/// to do so; otherwise return the `MemPlace` to consult instead.
///
- /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
- /// anywhere else. You may be invalidating machine invariants if you do!
- #[inline]
- pub fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
+ /// Note: Before calling this, call the `before_access_local_mut` machine hook! You may be
+ /// invalidating machine invariants otherwise!
+ #[inline(always)]
+ pub(super) fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
match &mut self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
LocalValue::Live(val) => Ok(val),
@@ -357,7 +384,7 @@ pub(super) fn mir_assign_valid_types<'tcx>(
// all normal lifetimes are erased, higher-ranked types with their
// late-bound lifetimes are still around and can lead to type
// differences.
- if util::is_subtype(tcx, param_env, src.ty, dest.ty) {
+ if util::relate_types(tcx, param_env, Variance::Covariant, src.ty, dest.ty) {
// Make sure the layout is equal, too -- just to be safe. Miri really
// needs layout equality. For performance reason we skip this check when
// the types are equal. Equal types *can* have different layouts when
@@ -389,7 +416,7 @@ pub(super) fn from_known_layout<'tcx>(
if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) {
span_bug!(
tcx.span,
- "expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
+ "expected type differs from actual type.\nexpected: {}\nactual: {}",
known_layout.ty,
check_layout.ty,
);
@@ -432,6 +459,27 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.map_or(CRATE_HIR_ID, |def_id| self.tcx.hir().local_def_id_to_hir_id(def_id))
}
+ /// Turn the given error into a human-readable string. Expects the string to be printed, so if
+ /// `RUSTC_CTFE_BACKTRACE` is set this will show a backtrace of the rustc internals that
+ /// triggered the error.
+ ///
+ /// This is NOT the preferred way to render an error; use `report` from `const_eval` instead.
+ /// However, this is useful when error messages appear in ICEs.
+ pub fn format_error(&self, e: InterpErrorInfo<'tcx>) -> String {
+ let (e, backtrace) = e.into_parts();
+ backtrace.print_backtrace();
+ // FIXME(fee1-dead), HACK: we want to use the error as the title, so we can just extract the
+ // label and arguments from the InterpError.
+ let handler = &self.tcx.sess.parse_sess.span_diagnostic;
+ #[allow(rustc::untranslatable_diagnostic)]
+ let mut diag = self.tcx.sess.struct_allow("");
+ let msg = e.diagnostic_message();
+ e.add_args(handler, &mut diag);
+ let s = handler.eagerly_translate_to_string(msg, diag.args());
+ diag.cancel();
+ s
+ }
+
#[inline(always)]
pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>] {
M::stack(self)
@@ -462,7 +510,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
#[inline(always)]
- pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
+ pub fn body(&self) -> &'mir mir::Body<'tcx> {
self.frame().body
}
@@ -508,7 +556,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
>(
&self,
value: T,
- ) -> Result<T, InterpError<'tcx>> {
+ ) -> Result<T, ErrorHandled> {
self.subst_from_frame_and_normalize_erasing_regions(self.frame(), value)
}
@@ -518,15 +566,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
value: T,
- ) -> Result<T, InterpError<'tcx>> {
+ ) -> Result<T, ErrorHandled> {
frame
.instance
- .try_subst_mir_and_normalize_erasing_regions(
+ .try_instantiate_mir_and_normalize_erasing_regions(
*self.tcx,
self.param_env,
ty::EarlyBinder::bind(value),
)
- .map_err(|_| err_inval!(TooGeneric))
+ .map_err(|_| ErrorHandled::TooGeneric(self.cur_span()))
}
/// The `args` are assumed to already be in our interpreter "universe" (param_env).
@@ -664,7 +712,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ty::Foreign(_) => Ok(None),
- _ => span_bug!(self.cur_span(), "size_and_align_of::<{:?}> not supported", layout.ty),
+ _ => span_bug!(self.cur_span(), "size_and_align_of::<{}> not supported", layout.ty),
}
}
#[inline]
@@ -672,7 +720,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
mplace: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Option<(Size, Align)>> {
- self.size_and_align_of(&mplace.meta, &mplace.layout)
+ self.size_and_align_of(&mplace.meta(), &mplace.layout)
}
#[instrument(skip(self, body, return_place, return_to_block), level = "debug")]
@@ -684,15 +732,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return_to_block: StackPopCleanup,
) -> InterpResult<'tcx> {
trace!("body: {:#?}", body);
+ let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
+ let locals = IndexVec::from_elem(dead_local, &body.local_decls);
// First push a stack frame so we have access to the local args
let pre_frame = Frame {
body,
loc: Right(body.span), // Span used for errors caused during preamble.
return_to_block,
return_place: return_place.clone(),
- // empty local array, we fill it in below, after we are inside the stack frame and
- // all methods actually know about the frame
- locals: IndexVec::new(),
+ locals,
instance,
tracing_span: SpanGuard::new(),
extra: (),
@@ -701,25 +749,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.stack_mut().push(frame);
// Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
- for ct in &body.required_consts {
- let span = ct.span;
- let ct = self.subst_from_current_frame_and_normalize_erasing_regions(ct.literal)?;
- self.eval_mir_constant(&ct, Some(span), None)?;
+ if M::POST_MONO_CHECKS {
+ // `ctfe_query` does some error message decoration that we want to be in effect here.
+ self.ctfe_query(None, |tcx| {
+ body.post_mono_checks(*tcx, self.param_env, |c| {
+ self.subst_from_current_frame_and_normalize_erasing_regions(c)
+ })
+ })?;
}
- // Most locals are initially dead.
- let dummy = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
- let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
-
- // Now mark those locals as live that have no `Storage*` annotations.
- let always_live = always_storage_live_locals(self.body());
- for local in locals.indices() {
- if always_live.contains(local) {
- locals[local].value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
- }
- }
// done
- self.frame_mut().locals = locals;
M::after_stack_push(self)?;
self.frame_mut().loc = Left(mir::Location::START);
@@ -756,6 +795,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
///
/// If `target` is `UnwindAction::Unreachable`, that indicates the function does not allow
/// unwinding, and doing so is UB.
+ #[cold] // usually we have normal returns, not unwinding
pub fn unwind_to_block(&mut self, target: mir::UnwindAction) -> InterpResult<'tcx> {
self.frame_mut().loc = match target {
mir::UnwindAction::Cleanup(block) => Left(mir::Location { block, statement_index: 0 }),
@@ -763,9 +803,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
mir::UnwindAction::Unreachable => {
throw_ub_custom!(fluent::const_eval_unreachable_unwind);
}
- mir::UnwindAction::Terminate => {
+ mir::UnwindAction::Terminate(reason) => {
self.frame_mut().loc = Right(self.frame_mut().body.span);
- M::abort(self, "panic in a function that cannot unwind".to_owned())?;
+ M::unwind_terminate(self, reason)?;
+ // This might have pushed a new stack frame, or it terminated execution.
+ // Either way, `loc` will not be updated.
+ return Ok(());
}
};
Ok(())
@@ -812,7 +855,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.expect("return place should always be live");
let dest = self.frame().return_place.clone();
let err = self.copy_op(&op, &dest, /*allow_transmute*/ true);
- trace!("return value: {:?}", self.dump_place(*dest));
+ trace!("return value: {:?}", self.dump_place(&dest));
// We delay actually short-circuiting on this error until *after* the stack frame is
// popped, since we want this error to be attributed to the caller, whose type defines
// this transmute.
@@ -865,6 +908,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
panic!("encountered StackPopCleanup::Root when unwinding!")
}
};
+ // This must be the very last thing that happens, since it can in fact push a new stack frame.
self.unwind_to_block(unwind)
} else {
// Follow the normal return edge.
@@ -881,12 +925,95 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- /// Mark a storage as live, killing the previous content.
- pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
- assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
+ /// In the current stack frame, mark all locals as live that are not arguments and don't have
+ /// `Storage*` annotations (this includes the return place).
+ pub fn storage_live_for_always_live_locals(&mut self) -> InterpResult<'tcx> {
+ self.storage_live(mir::RETURN_PLACE)?;
+
+ let body = self.body();
+ let always_live = always_storage_live_locals(body);
+ for local in body.vars_and_temps_iter() {
+ if always_live.contains(local) {
+ self.storage_live(local)?;
+ }
+ }
+ Ok(())
+ }
+
+ pub fn storage_live_dyn(
+ &mut self,
+ local: mir::Local,
+ meta: MemPlaceMeta<M::Provenance>,
+ ) -> InterpResult<'tcx> {
trace!("{:?} is now live", local);
- let local_val = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+ // We avoid `ty.is_trivially_sized` since that (a) cannot assume WF, so it recurses through
+ // all fields of a tuple, and (b) does something expensive for ADTs.
+ fn is_very_trivially_sized(ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+ | ty::Uint(_)
+ | ty::Int(_)
+ | ty::Bool
+ | ty::Float(_)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::RawPtr(..)
+ | ty::Char
+ | ty::Ref(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Array(..)
+ | ty::Closure(..)
+ | ty::Never
+ | ty::Error(_) => true,
+
+ ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => false,
+
+ ty::Tuple(tys) => tys.last().iter().all(|ty| is_very_trivially_sized(**ty)),
+
+ // We don't want to do any queries, so there is not much we can do with ADTs.
+ ty::Adt(..) => false,
+
+ ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false,
+
+ ty::Infer(ty::TyVar(_)) => false,
+
+ ty::Bound(..)
+ | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("`is_very_trivially_sized` applied to unexpected type: {}", ty)
+ }
+ }
+ }
+
+ // This is a hot function, we avoid computing the layout when possible.
+ // `unsized_` will be `None` for sized types and `Some(layout)` for unsized types.
+ let unsized_ = if is_very_trivially_sized(self.body().local_decls[local].ty) {
+ None
+ } else {
+ // We need the layout.
+ let layout = self.layout_of_local(self.frame(), local, None)?;
+ if layout.is_sized() { None } else { Some(layout) }
+ };
+
+ let local_val = LocalValue::Live(if let Some(layout) = unsized_ {
+ if !meta.has_meta() {
+ throw_unsup!(UnsizedLocal);
+ }
+ // Need to allocate some memory, since `Immediate::Uninit` cannot be unsized.
+ let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
+ Operand::Indirect(*dest_place.mplace())
+ } else {
+ assert!(!meta.has_meta()); // we're dropping the metadata
+ // Just make this an efficient immediate.
+ // Note that not calling `layout_of` here does have one real consequence:
+ // if the type is too big, we'll only notice this when the local is actually initialized,
+ // which is a bit too late -- we should ideally notice this already here, when the memory
+ // is conceptually allocated. But given how rare that error is and that this is a hot function,
+ // we accept this downside for now.
+ Operand::Immediate(Immediate::Uninit)
+ });
+
// StorageLive expects the local to be dead, and marks it live.
let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
if !matches!(old, LocalValue::Dead) {
@@ -895,6 +1022,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(())
}
+ /// Mark a storage as live, killing the previous content.
+ #[inline(always)]
+ pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+ self.storage_live_dyn(local, MemPlaceMeta::None)
+ }
+
pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
trace!("{:?} is now dead", local);
@@ -926,28 +1059,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
span: Option<Span>,
query: impl FnOnce(TyCtxtAt<'tcx>) -> Result<T, ErrorHandled>,
- ) -> InterpResult<'tcx, T> {
+ ) -> Result<T, ErrorHandled> {
// Use a precise span for better cycle errors.
query(self.tcx.at(span.unwrap_or_else(|| self.cur_span()))).map_err(|err| {
- match err {
- ErrorHandled::Reported(err) => {
- if !err.is_tainted_by_errors() && let Some(span) = span {
- // To make it easier to figure out where this error comes from, also add a note at the current location.
- self.tcx.sess.emit_note(ErroneousConstUsed { span });
- }
- err_inval!(AlreadyReported(err))
- }
- ErrorHandled::TooGeneric => err_inval!(TooGeneric),
- }
- .into()
+ err.emit_note(*self.tcx);
+ err
})
}
pub fn eval_global(
&self,
- gid: GlobalId<'tcx>,
- span: Option<Span>,
+ instance: ty::Instance<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ let gid = GlobalId { instance, promoted: None };
// For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
// and thus don't care about the parameter environment. While we could just use
// `self.param_env`, that would mean we invoke the query to evaluate the static
@@ -958,13 +1082,26 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} else {
self.param_env
};
- let val = self.ctfe_query(span, |tcx| tcx.eval_to_allocation_raw(param_env.and(gid)))?;
+ let val = self.ctfe_query(None, |tcx| tcx.eval_to_allocation_raw(param_env.and(gid)))?;
self.raw_const_to_mplace(val)
}
+ pub fn eval_mir_constant(
+ &self,
+ val: &mir::Const<'tcx>,
+ span: Option<Span>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ let const_val = self.ctfe_query(span, |tcx| val.eval(*tcx, self.param_env, span))?;
+ self.const_val_to_op(const_val, val.ty(), layout)
+ }
+
#[must_use]
- pub fn dump_place(&self, place: Place<M::Provenance>) -> PlacePrinter<'_, 'mir, 'tcx, M> {
- PlacePrinter { ecx: self, place }
+ pub fn dump_place(
+ &self,
+ place: &PlaceTy<'tcx, M::Provenance>,
+ ) -> PlacePrinter<'_, 'mir, 'tcx, M> {
+ PlacePrinter { ecx: self, place: *place.place() }
}
#[must_use]
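
Among the eval_context.rs changes, `storage_live_dyn` adds a query-free fast path: `is_very_trivially_sized` decides sizedness from the type's shape alone, and the (expensive) layout is computed only when that check is inconclusive. A standalone sketch of the idea with a toy `Ty` (not rustc's):

enum Ty {
    Int,
    Ref,
    Slice,
    Tuple(Vec<Ty>),
    Adt, // would need a query to answer; treat as "don't know"
}

fn is_very_trivially_sized(ty: &Ty) -> bool {
    match ty {
        Ty::Int | Ty::Ref => true,
        Ty::Slice => false,
        // A tuple's sizedness hinges on its last field (well-formedness
        // guarantees the earlier fields are sized); an empty tuple is sized.
        Ty::Tuple(tys) => tys.last().map_or(true, is_very_trivially_sized),
        Ty::Adt => false, // fall back to the real layout computation
    }
}

fn main() {
    assert!(is_very_trivially_sized(&Ty::Tuple(vec![Ty::Int, Ty::Ref])));
    assert!(!is_very_trivially_sized(&Ty::Tuple(vec![Ty::Int, Ty::Slice])));
}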
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 910c3ca5d..8c0009cfd 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -24,7 +24,7 @@ use rustc_middle::ty::{self, layout::TyAndLayout, Ty};
use rustc_ast::Mutability;
use super::{
- AllocId, Allocation, ConstAllocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy,
+ AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy, Projectable,
ValueVisitor,
};
use crate::const_eval;
@@ -177,7 +177,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
if let ty::Dynamic(_, _, ty::Dyn) =
tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
{
- let ptr = mplace.meta.unwrap_meta().to_pointer(&tcx)?;
+ let ptr = mplace.meta().unwrap_meta().to_pointer(&tcx)?;
if let Some(alloc_id) = ptr.provenance {
// Explicitly choose const mode here, since vtables are immutable, even
// if the reference of the fat pointer is mutable.
@@ -191,7 +191,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
}
// Check if we have encountered this pointer+layout combination before.
// Only recurse for allocation-backed pointers.
- if let Some(alloc_id) = mplace.ptr.provenance {
+ if let Some(alloc_id) = mplace.ptr().provenance {
// Compute the mode with which we intern this. Our goal here is to make as many
// statics as we can immutable so they can be placed in read-only memory by LLVM.
let ref_mode = match self.mode {
@@ -267,7 +267,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
// If there is no provenance in this allocation, it does not contain references
// that point to another allocation, and we can avoid the interning walk.
- if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
+ if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr(), size, align)? {
if !alloc.has_provenance() {
return Ok(false);
}
@@ -353,7 +353,7 @@ pub fn intern_const_alloc_recursive<
leftover_allocations,
// The outermost allocation must exist, because we allocated it with
// `Memory::allocate`.
- ret.ptr.provenance.unwrap(),
+ ret.ptr().provenance.unwrap(),
base_intern_mode,
Some(ret.layout.ty),
);
@@ -378,7 +378,8 @@ pub fn intern_const_alloc_recursive<
ecx.tcx.sess.delay_span_bug(
ecx.tcx.span,
format!(
- "error during interning should later cause validation failure: {error:?}"
+ "error during interning should later cause validation failure: {}",
+ ecx.format_error(error),
),
);
}
@@ -454,7 +455,7 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
{
/// A helper function that allocates memory for the layout given and gives you access to mutate
/// it. Once your own mutation code is done, the backing `Allocation` is removed from the
- /// current `Memory` and returned.
+ /// current `Memory` and interned as read-only into the global memory.
pub fn intern_with_temp_alloc(
&mut self,
layout: TyAndLayout<'tcx>,
@@ -462,11 +463,15 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
&mut InterpCx<'mir, 'tcx, M>,
&PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ()>,
- ) -> InterpResult<'tcx, ConstAllocation<'tcx>> {
+ ) -> InterpResult<'tcx, AllocId> {
+ // `allocate` picks a fresh AllocId that we will associate with its data below.
let dest = self.allocate(layout, MemoryKind::Stack)?;
f(self, &dest.clone().into())?;
- let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
+ let mut alloc = self.memory.alloc_map.remove(&dest.ptr().provenance.unwrap()).unwrap().1;
alloc.mutability = Mutability::Not;
- Ok(self.tcx.mk_const_alloc(alloc))
+ let alloc = self.tcx.mk_const_alloc(alloc);
+ let alloc_id = dest.ptr().provenance.unwrap(); // this was just allocated, it must have provenance
+ self.tcx.set_alloc_id_memory(alloc_id, alloc);
+ Ok(alloc_id)
}
}
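
The revised `intern_with_temp_alloc` returns an `AllocId` and publishes the frozen allocation into global memory via `set_alloc_id_memory`, instead of handing back the `ConstAllocation`. A toy model of that allocate, mutate, freeze, publish flow (simplified types, not rustc's memory machinery):

use std::collections::HashMap;

struct Alloc { bytes: Vec<u8>, mutable: bool }

fn intern_with_temp_alloc(
    globals: &mut HashMap<u64, Alloc>,
    next_id: &mut u64,
    size: usize,
    f: impl FnOnce(&mut Alloc),
) -> u64 {
    let id = { let i = *next_id; *next_id += 1; i }; // pick a fresh id
    let mut alloc = Alloc { bytes: vec![0; size], mutable: true };
    f(&mut alloc); // the caller fills in the temporary allocation
    alloc.mutable = false; // freeze: interned allocations are read-only
    globals.insert(id, alloc); // publish under the id
    id
}

fn main() {
    let (mut globals, mut next) = (HashMap::new(), 0);
    let id = intern_with_temp_alloc(&mut globals, &mut next, 4, |a| a.bytes[0] = 42);
    assert_eq!(globals[&id].bytes[0], 42);
    assert!(!globals[&id].mutable);
}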
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index f22cd919c..2c0ba9b26 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -5,10 +5,8 @@
use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
self,
- interpret::{
- Allocation, ConstAllocation, ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar,
- },
- BinOp, NonDivergingIntrinsic,
+ interpret::{Allocation, ConstAllocation, GlobalId, InterpResult, PointerArithmetic, Scalar},
+ BinOp, ConstValue, NonDivergingIntrinsic,
};
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
@@ -64,7 +62,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
sym::type_name => {
ensure_monomorphic_enough(tcx, tp_ty)?;
let alloc = alloc_type_name(tcx, tp_ty);
- ConstValue::Slice { data: alloc, start: 0, end: alloc.inner().len() }
+ ConstValue::Slice { data: alloc, meta: alloc.inner().size().bytes() }
}
sym::needs_drop => {
ensure_monomorphic_enough(tcx, tp_ty)?;
@@ -102,8 +100,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
| ty::Dynamic(_, _, _)
| ty::Closure(_, _)
| ty::Generator(_, _, _)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(_, _)
+ | ty::GeneratorWitness(..)
| ty::Never
| ty::Tuple(_)
| ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
@@ -125,15 +122,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> InterpResult<'tcx, bool> {
let instance_args = instance.args;
let intrinsic_name = self.tcx.item_name(instance.def_id());
-
- // First handle intrinsics without return place.
- let ret = match ret {
- None => match intrinsic_name {
- sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
- // Unsupported diverging intrinsic.
- _ => return Ok(false),
- },
- Some(p) => p,
+ let Some(ret) = ret else {
+ // We don't support any intrinsic without return place.
+ return Ok(false);
};
match intrinsic_name {
@@ -228,7 +219,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let place = self.deref_pointer(&args[0])?;
let variant = self.read_discriminant(&place)?;
let discr = self.discriminant_for_variant(place.layout, variant)?;
- self.write_scalar(discr, dest)?;
+ self.write_immediate(*discr, dest)?;
}
sym::exact_div => {
let l = self.read_immediate(&args[0])?;
@@ -315,7 +306,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let dist = {
// Addresses are unsigned, so this is a `usize` computation. We have to do the
// overflow check separately anyway.
- let (val, overflowed, _ty) = {
+ let (val, overflowed) = {
let a_offset = ImmTy::from_uint(a_offset, usize_layout);
let b_offset = ImmTy::from_uint(b_offset, usize_layout);
self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
@@ -332,7 +323,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// The signed form of the intrinsic allows this. If we interpret the
// difference as isize, we'll get the proper signed difference. If that
// seems *positive*, they were more than isize::MAX apart.
- let dist = val.to_target_isize(self)?;
+ let dist = val.to_scalar().to_target_isize(self)?;
if dist >= 0 {
throw_ub_custom!(
fluent::const_eval_offset_from_underflow,
@@ -342,7 +333,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
dist
} else {
// b >= a
- let dist = val.to_target_isize(self)?;
+ let dist = val.to_scalar().to_target_isize(self)?;
// If converting to isize produced a *negative* result, we had an overflow
// because they were more than isize::MAX apart.
if dist < 0 {
@@ -410,7 +401,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
};
- M::abort(self, msg)?;
+ M::panic_nounwind(self, &msg)?;
+ // Skip the `go_to_block` at the end.
+ return Ok(true);
}
}
sym::simd_insert => {
@@ -470,7 +463,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
_ => return Ok(false),
}
- trace!("{:?}", self.dump_place(**dest));
+ trace!("{:?}", self.dump_place(dest));
self.go_to_block(ret);
Ok(true)
}
@@ -510,9 +503,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Performs an exact division, resulting in undefined behavior where
// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
// First, check x % y != 0 (or if that computation overflows).
- let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
+ let (res, overflow) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
assert!(!overflow); // All overflow is UB, so this should never return on overflow.
- if res.assert_bits(a.layout.size) != 0 {
+ if res.to_scalar().assert_bits(a.layout.size) != 0 {
throw_ub_custom!(
fluent::const_eval_exact_div_has_remainder,
a = format!("{a}"),
@@ -530,7 +523,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
r: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
- let (val, overflowed, _ty) = self.overflowing_binary_op(mir_op, l, r)?;
+ let (val, overflowed) = self.overflowing_binary_op(mir_op, l, r)?;
Ok(if overflowed {
let size = l.layout.size;
let num_bits = size.bits();
@@ -562,7 +555,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
} else {
- val
+ val.to_scalar()
})
}
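
`overflowing_binary_op` now returns `(ImmTy, bool)` instead of a `(Scalar, bool, Ty)` triple; the saturating helper at the end of this hunk clamps on overflow. Roughly the same logic with plain `i8` arithmetic (a sketch, not the interpreter's code):

fn saturating_add_i8(l: i8, r: i8) -> i8 {
    let (val, overflowed) = l.overflowing_add(r);
    if overflowed {
        // the direction of the clamp follows the sign of the addend
        if r < 0 { i8::MIN } else { i8::MAX }
    } else {
        val
    }
}

fn main() {
    assert_eq!(saturating_add_i8(120, 10), i8::MAX);
    assert_eq!(saturating_add_i8(-120, -10), i8::MIN);
    assert_eq!(saturating_add_i8(1, 2), 3);
}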
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index e101785b6..aaa674a59 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -9,7 +9,7 @@ use std::hash::Hash;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir;
use rustc_middle::ty::layout::TyAndLayout;
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, TyCtxt};
use rustc_span::def_id::DefId;
use rustc_target::abi::{Align, Size};
use rustc_target::spec::abi::Abi as CallAbi;
@@ -18,7 +18,7 @@ use crate::const_eval::CheckAlignment;
use super::{
AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, FnArg, Frame, ImmTy, InterpCx,
- InterpResult, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar,
+ InterpResult, MPlaceTy, MemoryKind, OpTy, PlaceTy, Pointer, Provenance,
};
/// Data returned by Machine::stack_pop,
@@ -130,6 +130,9 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
/// Should the machine panic on allocation failures?
const PANIC_ON_ALLOC_FAIL: bool;
+ /// Should post-monomorphization checks be run when a stack frame is pushed?
+ const POST_MONO_CHECKS: bool = true;
+
/// Whether memory accesses should be alignment-checked.
fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment;
@@ -218,10 +221,14 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
unwind: mir::UnwindAction,
) -> InterpResult<'tcx>;
- /// Called to abort evaluation.
- fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _msg: String) -> InterpResult<'tcx, !> {
- throw_unsup_format!("aborting execution is not supported")
- }
+ /// Called to trigger a non-unwinding panic.
+ fn panic_nounwind(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: &str) -> InterpResult<'tcx>;
+
+ /// Called when unwinding reached a state where execution should be terminated.
+ fn unwind_terminate(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ reason: mir::UnwindTerminateReason,
+ ) -> InterpResult<'tcx>;
/// Called for all binary operations where the LHS has pointer type.
///
@@ -231,24 +238,24 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
bin_op: mir::BinOp,
left: &ImmTy<'tcx, Self::Provenance>,
right: &ImmTy<'tcx, Self::Provenance>,
- ) -> InterpResult<'tcx, (Scalar<Self::Provenance>, bool, Ty<'tcx>)>;
+ ) -> InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)>;
- /// Called to write the specified `local` from the `frame`.
+ /// Called before writing the specified `local` of the `frame`.
/// Since writing a ZST is not actually accessing memory or locals, this is never invoked
/// for ZST writes.
///
/// Due to borrow checker trouble, we indicate the `frame` as an index rather than an `&mut
/// Frame`.
- #[inline]
- fn access_local_mut<'a>(
- ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
- frame: usize,
- local: mir::Local,
- ) -> InterpResult<'tcx, &'a mut Operand<Self::Provenance>>
+ #[inline(always)]
+ fn before_access_local_mut<'a>(
+ _ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+ _frame: usize,
+ _local: mir::Local,
+ ) -> InterpResult<'tcx>
where
'tcx: 'mir,
{
- ecx.stack_mut()[frame].locals[local].access_mut()
+ Ok(())
}
/// Called before a basic block terminator is executed.
@@ -461,6 +468,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
/// Called immediately after a stack frame got popped, but before jumping back to the caller.
/// The `locals` have already been destroyed!
+ #[inline(always)]
fn after_stack_pop(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_frame: Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
@@ -470,6 +478,18 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
assert!(!unwinding);
Ok(StackPopJump::Normal)
}
+
+ /// Called immediately after actual memory was allocated for a local
+ /// but before the local's stack frame is updated to point to that memory.
+ #[inline(always)]
+ fn after_local_allocated(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _frame: usize,
+ _local: mir::Local,
+ _mplace: &MPlaceTy<'tcx, Self::Provenance>,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
}
/// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
@@ -500,6 +520,14 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
}
#[inline(always)]
+ fn unwind_terminate(
+ _ecx: &mut InterpCx<$mir, $tcx, Self>,
+ _reason: mir::UnwindTerminateReason,
+ ) -> InterpResult<$tcx> {
+ unreachable!("unwinding cannot happen during compile-time evaluation")
+ }
+
+ #[inline(always)]
fn call_extra_fn(
_ecx: &mut InterpCx<$mir, $tcx, Self>,
fn_val: !,
@@ -527,7 +555,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
def_id: DefId,
) -> InterpResult<$tcx, Pointer> {
// Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
- Ok(Pointer::new(ecx.tcx.create_static_alloc(def_id), Size::ZERO))
+ Ok(Pointer::new(ecx.tcx.reserve_and_set_static_alloc(def_id), Size::ZERO))
}
#[inline(always)]
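
The machine.rs refactor turns `access_local_mut` (a hook that performed the access) into `before_access_local_mut` (a notification with a no-op default), after which the interpreter does the access itself. A toy model of that hook-then-access shape (simplified signatures, not the real trait):

trait Machine {
    // default: most machines have nothing to do before a local is written
    fn before_access_local_mut(&mut self, _local: usize) -> Result<(), String> {
        Ok(())
    }
}

struct NoopMachine;
impl Machine for NoopMachine {}

struct Interp<M: Machine> { machine: M, locals: Vec<u64> }

impl<M: Machine> Interp<M> {
    fn access_local_mut(&mut self, local: usize) -> Result<&mut u64, String> {
        self.machine.before_access_local_mut(local)?; // notify the machine first
        Ok(&mut self.locals[local])                   // then perform the access here
    }
}

fn main() {
    let mut interp = Interp { machine: NoopMachine, locals: vec![0; 4] };
    *interp.access_local_mut(2).unwrap() = 7;
    assert_eq!(interp.locals[2], 7);
}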
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 11bffedf5..436c4d521 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -176,12 +176,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
M::adjust_alloc_base_pointer(self, ptr)
}
- pub fn create_fn_alloc_ptr(
- &mut self,
- fn_val: FnVal<'tcx, M::ExtraFnVal>,
- ) -> Pointer<M::Provenance> {
+ pub fn fn_ptr(&mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>) -> Pointer<M::Provenance> {
let id = match fn_val {
- FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
+ FnVal::Instance(instance) => self.tcx.reserve_and_set_fn_alloc(instance),
FnVal::Other(extra) => {
// FIXME(RalfJung): Should we have a cache here?
let id = self.tcx.reserve_alloc_id();
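
The rename `create_fn_alloc` to `reserve_and_set_fn_alloc` spells out the two-step protocol: reserve a fresh `AllocId`, then bind it to the function. A toy table showing that shape (assumed simplified types):

use std::collections::HashMap;

struct AllocTable { next: u64, map: HashMap<u64, &'static str> }

impl AllocTable {
    fn reserve_alloc_id(&mut self) -> u64 {
        let id = self.next; // step 1: reserve an id with no data behind it yet
        self.next += 1;
        id
    }

    fn reserve_and_set_fn_alloc(&mut self, f: &'static str) -> u64 {
        let id = self.reserve_alloc_id();
        self.map.insert(id, f); // step 2: immediately bind the id to the function
        id
    }
}

fn main() {
    let mut table = AllocTable { next: 0, map: HashMap::new() };
    let id = table.reserve_and_set_fn_alloc("my_fn");
    assert_eq!(table.map[&id], "my_fn");
}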
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index b0b553c45..69eb22028 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -20,16 +20,21 @@ mod visitor;
pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
-pub use self::eval_context::{Frame, FrameInfo, InterpCx, LocalState, LocalValue, StackPopCleanup};
+pub use self::eval_context::{Frame, FrameInfo, InterpCx, StackPopCleanup};
pub use self::intern::{intern_const_alloc_recursive, InternKind};
pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
-pub use self::operand::{ImmTy, Immediate, OpTy, Operand, Readable};
-pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy, Writeable};
+pub use self::operand::{ImmTy, Immediate, OpTy, Readable};
+pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
pub use self::projection::Projectable;
pub use self::terminator::FnArg;
pub use self::validity::{CtfeValidationMode, RefTracking};
pub use self::visitor::ValueVisitor;
+use self::{
+ operand::Operand,
+ place::{MemPlace, Place},
+};
+
pub(crate) use self::intrinsics::eval_nullary_intrinsic;
use eval_context::{from_known_layout, mir_assign_valid_types};
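
mod.rs stops re-exporting the raw `Operand`, `MemPlace`, and `Place` representations; only the typed wrappers (`OpTy`, `MPlaceTy`, `PlaceTy`) stay public, so invariants can be enforced at the wrapper boundary. A small sketch of that encapsulation pattern (toy types):

mod operand {
    pub(crate) enum Operand { Immediate(u64), Indirect(usize) }

    pub struct OpTy {
        pub(crate) op: Operand, // kept private: callers must go through methods
    }

    impl OpTy {
        pub fn immediate(v: u64) -> Self { OpTy { op: Operand::Immediate(v) } }
    }
}

pub use operand::OpTy; // the public surface
// `Operand` itself is deliberately not re-exported.

fn main() {
    let _op = OpTy::immediate(42);
}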
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 6e57a56b4..a32ea204f 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -8,15 +8,13 @@ use either::{Either, Left, Right};
use rustc_hir::def::Namespace;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
-use rustc_middle::ty::{ConstInt, Ty, ValTree};
+use rustc_middle::ty::{ConstInt, Ty, TyCtxt};
use rustc_middle::{mir, ty};
-use rustc_span::Span;
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
use super::{
- alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
- InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer,
- Projectable, Provenance, Scalar,
+ alloc_range, from_known_layout, mir_assign_valid_types, AllocId, Frame, InterpCx, InterpResult,
+ MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer, Projectable, Provenance, Scalar,
};
/// An `Immediate` represents a single immediate self-contained Rust value.
@@ -33,7 +31,7 @@ pub enum Immediate<Prov: Provenance = AllocId> {
/// A pair of two scalar value (must have `ScalarPair` ABI where both fields are
/// `Scalar::Initialized`).
ScalarPair(Scalar<Prov>, Scalar<Prov>),
- /// A value of fully uninitialized memory. Can have arbitrary size and layout.
+ /// A value of fully uninitialized memory. Can have arbitrary size and layout, but must be sized.
Uninit,
}
@@ -45,24 +43,30 @@ impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
}
impl<Prov: Provenance> Immediate<Prov> {
- pub fn from_pointer(p: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
- Immediate::Scalar(Scalar::from_pointer(p, cx))
+ pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
+ Immediate::Scalar(Scalar::from_pointer(ptr, cx))
}
- pub fn from_maybe_pointer(p: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
- Immediate::Scalar(Scalar::from_maybe_pointer(p, cx))
+ pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
+ Immediate::Scalar(Scalar::from_maybe_pointer(ptr, cx))
}
- pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
- Immediate::ScalarPair(val, Scalar::from_target_usize(len, cx))
+ pub fn new_slice(ptr: Pointer<Option<Prov>>, len: u64, cx: &impl HasDataLayout) -> Self {
+ Immediate::ScalarPair(
+ Scalar::from_maybe_pointer(ptr, cx),
+ Scalar::from_target_usize(len, cx),
+ )
}
pub fn new_dyn_trait(
- val: Scalar<Prov>,
+ val: Pointer<Option<Prov>>,
vtable: Pointer<Option<Prov>>,
cx: &impl HasDataLayout,
) -> Self {
- Immediate::ScalarPair(val, Scalar::from_maybe_pointer(vtable, cx))
+ Immediate::ScalarPair(
+ Scalar::from_maybe_pointer(val, cx),
+ Scalar::from_maybe_pointer(vtable, cx),
+ )
}
#[inline]
@@ -88,7 +92,7 @@ impl<Prov: Provenance> Immediate<Prov> {
// ScalarPair needs a type to interpret, so we often have an immediate and a type together
// as input for binary and cast operations.
-#[derive(Clone, Debug)]
+#[derive(Clone)]
pub struct ImmTy<'tcx, Prov: Provenance = AllocId> {
imm: Immediate<Prov>,
pub layout: TyAndLayout<'tcx>,
@@ -134,6 +138,16 @@ impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
}
}
+impl<Prov: Provenance> std::fmt::Debug for ImmTy<'_, Prov> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ // Printing `layout` results in too much noise; just print a nice version of the type.
+ f.debug_struct("ImmTy")
+ .field("imm", &self.imm)
+ .field("ty", &format_args!("{}", self.layout.ty))
+ .finish()
+ }
+}
+
impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
type Target = Immediate<Prov>;
#[inline(always)]
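
The `Debug` impl added above relies on a small formatting idiom: wrapping a field in `format_args!` makes `debug_struct` render it with `Display` instead of `Debug`, which is how the noisy `layout` field gets replaced by just the type name. A self-contained illustration of the same idiom (the `Wrapper` type here is hypothetical):

use std::fmt;

struct Wrapper {
    value: u32,
    type_name: String, // stands in for the full `TyAndLayout`
}

impl fmt::Debug for Wrapper {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Wrapper")
            .field("value", &self.value)
            // `format_args!` renders the field via `Display`, avoiding the
            // quotes and noise of its full `Debug` output.
            .field("ty", &format_args!("{}", self.type_name))
            .finish()
    }
}

fn main() {
    let w = Wrapper { value: 7, type_name: "u32".into() };
    println!("{w:?}"); // Wrapper { value: 7, ty: u32 }
}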
@@ -142,64 +156,30 @@ impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
}
}
-/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
-/// or still in memory. The latter is an optimization, to delay reading that chunk of
-/// memory and to avoid having to store arbitrary-sized data here.
-#[derive(Copy, Clone, Debug)]
-pub enum Operand<Prov: Provenance = AllocId> {
- Immediate(Immediate<Prov>),
- Indirect(MemPlace<Prov>),
-}
-
-#[derive(Clone, Debug)]
-pub struct OpTy<'tcx, Prov: Provenance = AllocId> {
- op: Operand<Prov>, // Keep this private; it helps enforce invariants.
- pub layout: TyAndLayout<'tcx>,
- /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
- /// it needs to have a different alignment than the field type would usually have.
- /// So we represent this here with a separate field that "overwrites" `layout.align`.
- /// This means `layout.align` should never be used for an `OpTy`!
- /// `None` means "alignment does not matter since this is a by-value operand"
- /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
- /// Also CTFE ignores alignment anyway, so this is for Miri only.
- pub align: Option<Align>,
-}
-
-impl<'tcx, Prov: Provenance> std::ops::Deref for OpTy<'tcx, Prov> {
- type Target = Operand<Prov>;
- #[inline(always)]
- fn deref(&self) -> &Operand<Prov> {
- &self.op
- }
-}
-
-impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
- #[inline(always)]
- fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
- OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout, align: Some(mplace.align) }
- }
-}
-
-impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
- #[inline(always)]
- fn from(val: ImmTy<'tcx, Prov>) -> Self {
- OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
- }
-}
-
impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline]
pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+ debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
ImmTy { imm: val.into(), layout }
}
- #[inline]
+ #[inline(always)]
pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+ debug_assert!(
+ match (imm, layout.abi) {
+ (Immediate::Scalar(..), Abi::Scalar(..)) => true,
+ (Immediate::ScalarPair(..), Abi::ScalarPair(..)) => true,
+ (Immediate::Uninit, _) if layout.is_sized() => true,
+ _ => false,
+ },
+ "immediate {imm:?} does not fit to layout {layout:?}",
+ );
ImmTy { imm, layout }
}
#[inline]
pub fn uninit(layout: TyAndLayout<'tcx>) -> Self {
+ debug_assert!(layout.is_sized(), "immediates must be sized");
ImmTy { imm: Immediate::Uninit, layout }
}
@@ -223,6 +203,12 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
}
#[inline]
+ pub fn from_bool(b: bool, tcx: TyCtxt<'tcx>) -> Self {
+ let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(tcx.types.bool)).unwrap();
+ Self::from_scalar(Scalar::from_bool(b), layout)
+ }
+
+ #[inline]
pub fn to_const_int(self) -> ConstInt {
assert!(self.layout.ty.is_integral());
let int = self.to_scalar().assert_int();
@@ -239,6 +225,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
// if the entire value is uninit, then so is the field (can happen in ConstProp)
(Immediate::Uninit, _) => Immediate::Uninit,
// the field contains no information, can be left uninit
+ // (Scalar/ScalarPair can contain ZSTs of any alignment, not just 1-ZSTs)
_ if layout.is_zst() => Immediate::Uninit,
// some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
// to detect those here and also give them no data
@@ -290,23 +277,21 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
self.layout
}
- fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
- assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never reach this here
- Ok(MemPlaceMeta::None)
+ #[inline(always)]
+ fn meta(&self) -> MemPlaceMeta<Prov> {
+ debug_assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never get here
+ MemPlaceMeta::None
}
- fn offset_with_meta(
+ fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway
- Ok(self.offset_(offset, layout, cx))
+ Ok(self.offset_(offset, layout, ecx))
}
fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
@@ -317,49 +302,95 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
}
}
+/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
+/// or still in memory. The latter is an optimization, to delay reading that chunk of
+/// memory and to avoid having to store arbitrary-sized data here.
+#[derive(Copy, Clone, Debug)]
+pub(super) enum Operand<Prov: Provenance = AllocId> {
+ Immediate(Immediate<Prov>),
+ Indirect(MemPlace<Prov>),
+}
+
+#[derive(Clone)]
+pub struct OpTy<'tcx, Prov: Provenance = AllocId> {
+ op: Operand<Prov>, // Keep this private; it helps enforce invariants.
+ pub layout: TyAndLayout<'tcx>,
+ /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
+ /// it needs to have a different alignment than the field type would usually have.
+ /// So we represent this here with a separate field that "overwrites" `layout.align`.
+ /// This means `layout.align` should never be used for an `OpTy`!
+ /// `None` means "alignment does not matter since this is a by-value operand"
+ /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
+ /// Also CTFE ignores alignment anyway, so this is for Miri only.
+ pub align: Option<Align>,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for OpTy<'_, Prov> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ // Printing `layout` results in too much noise; just print a nice version of the type.
+ f.debug_struct("OpTy")
+ .field("op", &self.op)
+ .field("ty", &format_args!("{}", self.layout.ty))
+ .finish()
+ }
+}
+
+impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+ #[inline(always)]
+ fn from(val: ImmTy<'tcx, Prov>) -> Self {
+ OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
+ }
+}
+
+impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+ #[inline(always)]
+ fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
+ OpTy {
+ op: Operand::Indirect(*mplace.mplace()),
+ layout: mplace.layout,
+ align: Some(mplace.align),
+ }
+ }
+}
+
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
- // Provided as inherent method since it doesn't need the `ecx` of `Projectable::meta`.
- pub fn meta(&self) -> InterpResult<'tcx, MemPlaceMeta<Prov>> {
- Ok(if self.layout.is_unsized() {
- if matches!(self.op, Operand::Immediate(_)) {
- // Unsized immediate OpTy cannot occur. We create a MemPlace for all unsized locals during argument passing.
- // However, ConstProp doesn't do that, so we can run into this nonsense situation.
- throw_inval!(ConstPropNonsense);
- }
- // There are no unsized immediates.
- self.assert_mem_place().meta
- } else {
- MemPlaceMeta::None
- })
+ #[inline(always)]
+ pub(super) fn op(&self) -> &Operand<Prov> {
+ &self.op
}
}
-impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
- fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
- self.meta()
+ #[inline]
+ fn meta(&self) -> MemPlaceMeta<Prov> {
+ match self.as_mplace_or_imm() {
+ Left(mplace) => mplace.meta(),
+ Right(_) => {
+ debug_assert!(self.layout.is_sized(), "unsized immediates are not a thing");
+ MemPlaceMeta::None
+ }
+ }
}
- fn offset_with_meta(
+ fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
match self.as_mplace_or_imm() {
- Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
+ Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, ecx)?.into()),
Right(imm) => {
- assert!(!meta.has_meta()); // no place to store metadata here
+ debug_assert!(layout.is_sized(), "unsized immediates are not a thing");
+ assert_matches!(meta, MemPlaceMeta::None); // no place to store metadata here
// Every part of an uninit is uninit.
- Ok(imm.offset(offset, layout, cx)?.into())
+ Ok(imm.offset_(offset, layout, ecx).into())
}
}
}
@@ -372,18 +403,19 @@ impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for OpTy<'tcx, Pr
}
}
+/// The `Readable` trait describes interpreter values that can be read from.
pub trait Readable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>>;
}
-impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for OpTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for OpTy<'tcx, Prov> {
#[inline(always)]
fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
self.as_mplace_or_imm()
}
}
-impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
#[inline(always)]
fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
Left(self.clone())
@@ -430,7 +462,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
alloc_range(Size::ZERO, size),
/*read_provenance*/ matches!(s, abi::Pointer(_)),
)?;
- Some(ImmTy { imm: scalar.into(), layout: mplace.layout })
+ Some(ImmTy::from_scalar(scalar, mplace.layout))
}
Abi::ScalarPair(
abi::Scalar::Initialized { value: a, .. },
@@ -450,7 +482,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
alloc_range(b_offset, b_size),
/*read_provenance*/ matches!(b, abi::Pointer(_)),
)?;
- Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout })
+ Some(ImmTy::from_immediate(Immediate::ScalarPair(a_val, b_val), mplace.layout))
}
_ => {
// Neither a scalar nor scalar pair.
@@ -496,11 +528,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Abi::Scalar(abi::Scalar::Initialized { .. })
| Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
) {
- span_bug!(
- self.cur_span(),
- "primitive read not possible for type: {:?}",
- op.layout().ty
- );
+ span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty);
}
let imm = self.read_immediate_raw(op)?.right().unwrap();
if matches!(*imm, Immediate::Uninit) {
@@ -545,7 +573,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Turn the wide MPlace into a string (must already be dereferenced!)
pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> {
let len = mplace.len(self)?;
- let bytes = self.read_bytes_ptr_strip_provenance(mplace.ptr, Size::from_bytes(len))?;
+ let bytes = self.read_bytes_ptr_strip_provenance(mplace.ptr(), Size::from_bytes(len))?;
let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
Ok(str)
}
@@ -587,6 +615,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
let layout = self.layout_of_local(frame, local, layout)?;
let op = *frame.locals[local].access()?;
+ if matches!(op, Operand::Immediate(_)) {
+ if layout.is_unsized() {
+ // ConstProp marks *all* locals as `Immediate::Uninit` since it cannot
+ // efficiently check whether they are sized. We have to catch that case here.
+ throw_inval!(ConstPropNonsense);
+ }
+ }
Ok(OpTy { op, layout, align: Some(layout.align.abi) })
}
@@ -600,16 +635,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
match place.as_mplace_or_local() {
Left(mplace) => Ok(mplace.into()),
Right((frame, local, offset)) => {
+ debug_assert!(place.layout.is_sized()); // only sized locals can ever be `Place::Local`.
let base = self.local_to_op(&self.stack()[frame], local, None)?;
- let mut field = if let Some(offset) = offset {
- // This got offset. We can be sure that the field is sized.
- base.offset(offset, place.layout, self)?
- } else {
- assert_eq!(place.layout, base.layout);
- // Unsized cases are possible here since an unsized local will be a
- // `Place::Local` until the first projection calls `place_to_op` to extract the
- // underlying mplace.
- base
+ let mut field = match offset {
+ Some(offset) => base.offset(offset, place.layout, self)?,
+ None => {
+ // In the common case this hasn't been projected.
+ debug_assert_eq!(place.layout, base.layout);
+ base
+ }
};
field.align = Some(place.align);
Ok(field)
@@ -634,7 +668,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
op = self.project(&op, elem)?
}
- trace!("eval_place_to_op: got {:?}", *op);
+ trace!("eval_place_to_op: got {:?}", op);
// Sanity-check the type we ended up with.
debug_assert!(
mir_assign_valid_types(
@@ -645,7 +679,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)?)?,
op.layout,
),
- "eval_place of a MIR place with type {:?} produced an interpreter operand with type {:?}",
+ "eval_place of a MIR place with type {:?} produced an interpreter operand with type {}",
mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
op.layout.ty,
);
@@ -668,7 +702,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Constant(constant) => {
let c =
- self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal)?;
+ self.subst_from_current_frame_and_normalize_erasing_regions(constant.const_)?;
// This can still fail:
// * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
@@ -677,61 +711,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.eval_mir_constant(&c, Some(constant.span), layout)?
}
};
- trace!("{:?}: {:?}", mir_op, *op);
+ trace!("{:?}: {:?}", mir_op, op);
Ok(op)
}
- fn eval_ty_constant(
- &self,
- val: ty::Const<'tcx>,
- span: Option<Span>,
- ) -> InterpResult<'tcx, ValTree<'tcx>> {
- Ok(match val.kind() {
- ty::ConstKind::Param(_) | ty::ConstKind::Placeholder(..) => {
- throw_inval!(TooGeneric)
- }
- // FIXME(generic_const_exprs): `ConstKind::Expr` should be able to be evaluated
- ty::ConstKind::Expr(_) => throw_inval!(TooGeneric),
- ty::ConstKind::Error(reported) => {
- throw_inval!(AlreadyReported(reported.into()))
- }
- ty::ConstKind::Unevaluated(uv) => {
- let instance = self.resolve(uv.def, uv.args)?;
- let cid = GlobalId { instance, promoted: None };
- self.ctfe_query(span, |tcx| tcx.eval_to_valtree(self.param_env.and(cid)))?
- .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
- }
- ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
- span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {val:?}")
- }
- ty::ConstKind::Value(valtree) => valtree,
- })
- }
-
- pub fn eval_mir_constant(
- &self,
- val: &mir::ConstantKind<'tcx>,
- span: Option<Span>,
- layout: Option<TyAndLayout<'tcx>>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- match *val {
- mir::ConstantKind::Ty(ct) => {
- let ty = ct.ty();
- let valtree = self.eval_ty_constant(ct, span)?;
- let const_val = self.tcx.valtree_to_const_val((ty, valtree));
- self.const_val_to_op(const_val, ty, layout)
- }
- mir::ConstantKind::Val(val, ty) => self.const_val_to_op(val, ty, layout),
- mir::ConstantKind::Unevaluated(uv, _) => {
- let instance = self.resolve(uv.def, uv.args)?;
- Ok(self.eval_global(GlobalId { instance, promoted: uv.promoted }, span)?.into())
- }
- }
- }
-
pub(crate) fn const_val_to_op(
&self,
- val_val: ConstValue<'tcx>,
+ val_val: mir::ConstValue<'tcx>,
ty: Ty<'tcx>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
@@ -744,25 +730,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
};
let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
let op = match val_val {
- ConstValue::ByRef { alloc, offset } => {
- let id = self.tcx.create_memory_alloc(alloc);
+ mir::ConstValue::Indirect { alloc_id, offset } => {
// We rely on mutability being set correctly in that allocation to prevent writes
// where none should happen.
- let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
+ let ptr = self.global_base_pointer(Pointer::new(alloc_id, offset))?;
Operand::Indirect(MemPlace::from_ptr(ptr.into()))
}
- ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
- ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
- ConstValue::Slice { data, start, end } => {
+ mir::ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
+ mir::ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
+ mir::ConstValue::Slice { data, meta } => {
// We rely on mutability being set correctly in `data` to prevent writes
// where none should happen.
- let ptr = Pointer::new(
- self.tcx.create_memory_alloc(data),
- Size::from_bytes(start), // offset: `start`
- );
+ let ptr = Pointer::new(self.tcx.reserve_and_set_memory_alloc(data), Size::ZERO);
Operand::Immediate(Immediate::new_slice(
- Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
- u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
+ self.global_base_pointer(ptr)?.into(),
+ meta,
self,
))
}
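
The final hunk of operand.rs pairs two changes: `mir::ConstValue::Slice` now stores a `meta` length instead of a `start`/`end` byte range, and `Immediate::new_slice` takes the pointer directly rather than a pre-built `Scalar`. A standalone sketch of the wide-pointer immediate this constructs, using simplified stand-in types (not rustc's):

#[derive(Debug)]
enum Scalar {
    Ptr(usize), // stand-in for a pointer with provenance
    Int(u64),   // stand-in for a target-usize
}

#[derive(Debug)]
struct ScalarPair(Scalar, Scalar); // stand-in for `Immediate::ScalarPair`

fn new_slice(ptr: usize, len: u64) -> ScalarPair {
    // Pointer first, length second -- the order `new_slice` keeps.
    ScalarPair(Scalar::Ptr(ptr), Scalar::Int(len))
}

fn main() {
    // A constant slice of length 5 at the allocation base: the diff replaces
    // the old `start`/`end` byte range with offset 0 plus the `meta` length.
    let imm = new_slice(0x1000, 5);
    println!("{imm:?}");
}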
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index eb0645780..b084864f3 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -1,7 +1,7 @@
use rustc_apfloat::Float;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
-use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, FloatTy, Ty};
use rustc_span::symbol::sym;
use rustc_target::abi::Abi;
@@ -20,9 +20,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
right: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
- let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
+ let (val, overflowed) = self.overflowing_binary_op(op, &left, &right)?;
debug_assert_eq!(
- Ty::new_tup(self.tcx.tcx, &[ty, self.tcx.types.bool]),
+ Ty::new_tup(self.tcx.tcx, &[val.layout.ty, self.tcx.types.bool]),
dest.layout.ty,
"type mismatch for result of {op:?}",
);
@@ -30,7 +30,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if let Abi::ScalarPair(..) = dest.layout.abi {
// We can use the optimized path and avoid `place_field` (which might do
// `force_allocation`).
- let pair = Immediate::ScalarPair(val, Scalar::from_bool(overflowed));
+ let pair = Immediate::ScalarPair(val.to_scalar(), Scalar::from_bool(overflowed));
self.write_immediate(pair, dest)?;
} else {
assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
@@ -38,7 +38,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// do a component-wise write here. This code path is slower than the above because
// `place_field` will have to `force_allocate` locals here.
let val_field = self.project_field(dest, 0)?;
- self.write_scalar(val, &val_field)?;
+ self.write_scalar(val.to_scalar(), &val_field)?;
let overflowed_field = self.project_field(dest, 1)?;
self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
}
@@ -54,9 +54,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
right: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
- let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
- assert_eq!(ty, dest.layout.ty, "type mismatch for result of {op:?}");
- self.write_scalar(val, dest)
+ let val = self.wrapping_binary_op(op, left, right)?;
+ assert_eq!(val.layout.ty, dest.layout.ty, "type mismatch for result of {op:?}");
+ self.write_immediate(*val, dest)
}
}
@@ -66,7 +66,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
bin_op: mir::BinOp,
l: char,
r: char,
- ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+ ) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
let res = match bin_op {
@@ -78,7 +78,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ge => l >= r,
_ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
};
- (Scalar::from_bool(res), false, self.tcx.types.bool)
+ (ImmTy::from_bool(res, *self.tcx), false)
}
fn binary_bool_op(
@@ -86,7 +86,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
bin_op: mir::BinOp,
l: bool,
r: bool,
- ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+ ) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
let res = match bin_op {
@@ -101,33 +101,33 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
BitXor => l ^ r,
_ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
};
- (Scalar::from_bool(res), false, self.tcx.types.bool)
+ (ImmTy::from_bool(res, *self.tcx), false)
}
fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>(
&self,
bin_op: mir::BinOp,
- ty: Ty<'tcx>,
+ layout: TyAndLayout<'tcx>,
l: F,
r: F,
- ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+ ) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
- let (val, ty) = match bin_op {
- Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
- Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
- Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
- Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
- Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
- Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
- Add => ((l + r).value.into(), ty),
- Sub => ((l - r).value.into(), ty),
- Mul => ((l * r).value.into(), ty),
- Div => ((l / r).value.into(), ty),
- Rem => ((l % r).value.into(), ty),
+ let val = match bin_op {
+ Eq => ImmTy::from_bool(l == r, *self.tcx),
+ Ne => ImmTy::from_bool(l != r, *self.tcx),
+ Lt => ImmTy::from_bool(l < r, *self.tcx),
+ Le => ImmTy::from_bool(l <= r, *self.tcx),
+ Gt => ImmTy::from_bool(l > r, *self.tcx),
+ Ge => ImmTy::from_bool(l >= r, *self.tcx),
+ Add => ImmTy::from_scalar((l + r).value.into(), layout),
+ Sub => ImmTy::from_scalar((l - r).value.into(), layout),
+ Mul => ImmTy::from_scalar((l * r).value.into(), layout),
+ Div => ImmTy::from_scalar((l / r).value.into(), layout),
+ Rem => ImmTy::from_scalar((l % r).value.into(), layout),
_ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
};
- (val, false, ty)
+ (val, false)
}
fn binary_int_op(
@@ -138,7 +138,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
left_layout: TyAndLayout<'tcx>,
r: u128,
right_layout: TyAndLayout<'tcx>,
- ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::BinOp::*;
let throw_ub_on_overflow = match bin_op {
@@ -200,19 +200,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
);
}
- return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
+ return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
}
// For the remaining ops, the types must be the same on both sides
if left_layout.ty != right_layout.ty {
span_bug!(
self.cur_span(),
- "invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
- bin_op,
- l,
- left_layout.ty,
- r,
- right_layout.ty,
+ "invalid asymmetric binary op {bin_op:?}: {l:?} ({l_ty}), {r:?} ({r_ty})",
+ l_ty = left_layout.ty,
+ r_ty = right_layout.ty,
)
}
@@ -230,7 +227,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if let Some(op) = op {
let l = self.sign_extend(l, left_layout) as i128;
let r = self.sign_extend(r, right_layout) as i128;
- return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
+ return Ok((ImmTy::from_bool(op(&l, &r), *self.tcx), false));
}
let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
Div if r == 0 => throw_ub!(DivisionByZero),
@@ -267,22 +264,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
}
- return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
+ return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
}
}
- let (val, ty) = match bin_op {
- Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
- Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
+ let val = match bin_op {
+ Eq => ImmTy::from_bool(l == r, *self.tcx),
+ Ne => ImmTy::from_bool(l != r, *self.tcx),
- Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
- Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
- Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
- Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
+ Lt => ImmTy::from_bool(l < r, *self.tcx),
+ Le => ImmTy::from_bool(l <= r, *self.tcx),
+ Gt => ImmTy::from_bool(l > r, *self.tcx),
+ Ge => ImmTy::from_bool(l >= r, *self.tcx),
- BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
- BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
- BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),
+ BitOr => ImmTy::from_uint(l | r, left_layout),
+ BitAnd => ImmTy::from_uint(l & r, left_layout),
+ BitXor => ImmTy::from_uint(l ^ r, left_layout),
Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => {
assert!(!left_layout.abi.is_signed());
@@ -304,12 +301,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
}
- return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
+ return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
}
_ => span_bug!(
self.cur_span(),
- "invalid binary op {:?}: {:?}, {:?} (both {:?})",
+ "invalid binary op {:?}: {:?}, {:?} (both {})",
bin_op,
l,
r,
@@ -317,7 +314,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
),
};
- Ok((val, false, ty))
+ Ok((val, false))
}
fn binary_ptr_op(
@@ -325,7 +322,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::BinOp::*;
match bin_op {
@@ -336,7 +333,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;
let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
- Ok((Scalar::from_maybe_pointer(offset_ptr, self), false, left.layout.ty))
+ Ok((
+ ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout),
+ false,
+ ))
}
// Fall back to machine hook so Miri can support more pointer ops.
@@ -344,16 +344,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- /// Returns the result of the specified operation, whether it overflowed, and
- /// the result type.
+ /// Returns the result of the specified operation, and whether it overflowed.
pub fn overflowing_binary_op(
&self,
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
trace!(
- "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
+ "Running binary op {:?}: {:?} ({}), {:?} ({})",
bin_op,
*left,
left.layout.ty,
@@ -376,15 +375,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
ty::Float(fty) => {
assert_eq!(left.layout.ty, right.layout.ty);
- let ty = left.layout.ty;
+ let layout = left.layout;
let left = left.to_scalar();
let right = right.to_scalar();
Ok(match fty {
FloatTy::F32 => {
- self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
+ self.binary_float_op(bin_op, layout, left.to_f32()?, right.to_f32()?)
}
FloatTy::F64 => {
- self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
+ self.binary_float_op(bin_op, layout, left.to_f64()?, right.to_f64()?)
}
})
}
@@ -392,7 +391,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// the RHS type can be different, e.g. for shifts -- but it has to be integral, too
assert!(
right.layout.ty.is_integral(),
- "Unexpected types for BinOp: {:?} {:?} {:?}",
+ "Unexpected types for BinOp: {} {:?} {}",
left.layout.ty,
bin_op,
right.layout.ty
@@ -407,7 +406,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// (Even when both sides are pointers, their type might differ, see issue #91636)
assert!(
right.layout.ty.is_any_ptr() || right.layout.ty.is_integral(),
- "Unexpected types for BinOp: {:?} {:?} {:?}",
+ "Unexpected types for BinOp: {} {:?} {}",
left.layout.ty,
bin_op,
right.layout.ty
@@ -417,22 +416,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
_ => span_bug!(
self.cur_span(),
- "Invalid MIR: bad LHS type for binop: {:?}",
+ "Invalid MIR: bad LHS type for binop: {}",
left.layout.ty
),
}
}
- /// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
#[inline]
- pub fn binary_op(
+ pub fn wrapping_binary_op(
&self,
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
- let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
- Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
+ let (val, _overflow) = self.overflowing_binary_op(bin_op, left, right)?;
+ Ok(val)
}
/// Returns the result of the specified operation, whether it overflowed, and
@@ -441,12 +439,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
un_op: mir::UnOp,
val: &ImmTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::UnOp::*;
let layout = val.layout;
let val = val.to_scalar();
- trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);
+ trace!("Running unary op {:?}: {:?} ({})", un_op, val, layout.ty);
match layout.ty.kind() {
ty::Bool => {
@@ -455,7 +453,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Not => !val,
_ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
};
- Ok((Scalar::from_bool(res), false, self.tcx.types.bool))
+ Ok((ImmTy::from_bool(res, *self.tcx), false))
}
ty::Float(fty) => {
let res = match (un_op, fty) {
@@ -463,7 +461,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
_ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
};
- Ok((res, false, layout.ty))
+ Ok((ImmTy::from_scalar(res, layout), false))
}
_ => {
assert!(layout.ty.is_integral());
@@ -482,17 +480,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(truncated, overflow || self.sign_extend(truncated, layout) != res)
}
};
- Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
+ Ok((ImmTy::from_uint(res, layout), overflow))
}
}
}
- pub fn unary_op(
+ #[inline]
+ pub fn wrapping_unary_op(
&self,
un_op: mir::UnOp,
val: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
- let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
- Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
+ let (val, _overflow) = self.overflowing_unary_op(un_op, val)?;
+ Ok(val)
}
}
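
Across operator.rs the binary and unary operations switch from returning `(Scalar, bool, Ty)` triples to `(ImmTy, bool)` pairs, so the result value carries its own layout; `binary_op` and `unary_op` are renamed to `wrapping_binary_op` and `wrapping_unary_op`, which simply discard the overflow flag. A standalone sketch of that result shape on a concrete `u8` addition (the `TypedVal` type is a hypothetical stand-in for `ImmTy`):

#[derive(Debug)]
struct TypedVal {
    bits: u128,
    ty: &'static str, // stands in for `TyAndLayout`
}

fn overflowing_add_u8(l: u8, r: u8) -> (TypedVal, bool) {
    let (val, overflowed) = l.overflowing_add(r);
    // The value and its type travel together; no separate `Ty` to return.
    (TypedVal { bits: val as u128, ty: "u8" }, overflowed)
}

// Mirrors `wrapping_binary_op`: same computation, overflow flag dropped.
fn wrapping_add_u8(l: u8, r: u8) -> TypedVal {
    overflowing_add_u8(l, r).0
}

fn main() {
    let (v, o) = overflowing_add_u8(200, 100);
    println!("{v:?}, overflowed: {o}"); // bits: 44, overflowed: true
    println!("{:?}", wrapping_add_u8(1, 2)); // bits: 3
}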
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index daadb7589..503004cbb 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -9,16 +9,15 @@ use either::{Either, Left, Right};
use rustc_ast::Mutability;
use rustc_index::IndexSlice;
use rustc_middle::mir;
-use rustc_middle::mir::interpret::PointerArithmetic;
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty;
-use rustc_target::abi::{self, Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT};
+use rustc_target::abi::{Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT};
use super::{
- alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
- ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
- Pointer, Projectable, Provenance, Readable, Scalar,
+ alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg, ImmTy,
+ Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand, Pointer,
+ PointerArithmetic, Projectable, Provenance, Readable, Scalar,
};
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -41,37 +40,17 @@ impl<Prov: Provenance> MemPlaceMeta<Prov> {
}
}
+ #[inline(always)]
pub fn has_meta(self) -> bool {
match self {
Self::Meta(_) => true,
Self::None => false,
}
}
-
- pub(crate) fn len<'tcx>(
- &self,
- layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
- ) -> InterpResult<'tcx, u64> {
- if layout.is_unsized() {
- // We need to consult `meta` metadata
- match layout.ty.kind() {
- ty::Slice(..) | ty::Str => self.unwrap_meta().to_target_usize(cx),
- _ => bug!("len not supported on unsized type {:?}", layout.ty),
- }
- } else {
- // Go through the layout. There are lots of types that support a length,
- // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
- match layout.fields {
- abi::FieldsShape::Array { count, .. } => Ok(count),
- _ => bug!("len not supported on sized type {:?}", layout.ty),
- }
- }
- }
}
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
-pub struct MemPlace<Prov: Provenance = AllocId> {
+pub(super) struct MemPlace<Prov: Provenance = AllocId> {
/// The pointer can be a pure integer, with the `None` provenance.
pub ptr: Pointer<Option<Prov>>,
/// Metadata for unsized places. Interpretation is up to the type.
@@ -80,66 +59,6 @@ pub struct MemPlace<Prov: Provenance = AllocId> {
pub meta: MemPlaceMeta<Prov>,
}
-/// A MemPlace with its layout. Constructing it is only possible in this module.
-#[derive(Clone, Hash, Eq, PartialEq, Debug)]
-pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
- mplace: MemPlace<Prov>,
- pub layout: TyAndLayout<'tcx>,
- /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
- /// it needs to have a different alignment than the field type would usually have.
- /// So we represent this here with a separate field that "overwrites" `layout.align`.
- /// This means `layout.align` should never be used for a `MPlaceTy`!
- pub align: Align,
-}
-
-impl<'tcx, Prov: Provenance> std::ops::Deref for MPlaceTy<'tcx, Prov> {
- type Target = MemPlace<Prov>;
- #[inline(always)]
- fn deref(&self) -> &MemPlace<Prov> {
- &self.mplace
- }
-}
-
-#[derive(Copy, Clone, Debug)]
-pub enum Place<Prov: Provenance = AllocId> {
- /// A place referring to a value allocated in the `Memory` system.
- Ptr(MemPlace<Prov>),
-
- /// To support alloc-free locals, we are able to write directly to a local. The offset indicates
- /// where in the local this place is located; if it is `None`, no projection has been applied.
- /// Such projections are meaningful even if the offset is 0, since they can change layouts.
- /// (Without that optimization, we'd just always be a `MemPlace`.)
- /// Note that this only stores the frame index, not the thread this frame belongs to -- that is
- /// implicit. This means a `Place` must never be moved across interpreter thread boundaries!
- Local { frame: usize, local: mir::Local, offset: Option<Size> },
-}
-
-#[derive(Clone, Debug)]
-pub struct PlaceTy<'tcx, Prov: Provenance = AllocId> {
- place: Place<Prov>, // Keep this private; it helps enforce invariants.
- pub layout: TyAndLayout<'tcx>,
- /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
- /// it needs to have a different alignment than the field type would usually have.
- /// So we represent this here with a separate field that "overwrites" `layout.align`.
- /// This means `layout.align` should never be used for a `PlaceTy`!
- pub align: Align,
-}
-
-impl<'tcx, Prov: Provenance> std::ops::Deref for PlaceTy<'tcx, Prov> {
- type Target = Place<Prov>;
- #[inline(always)]
- fn deref(&self) -> &Place<Prov> {
- &self.place
- }
-}
-
-impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
- #[inline(always)]
- fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
- PlaceTy { place: Place::Ptr(*mplace), layout: mplace.layout, align: mplace.align }
- }
-}
-
impl<Prov: Provenance> MemPlace<Prov> {
#[inline(always)]
pub fn from_ptr(ptr: Pointer<Option<Prov>>) -> Self {
@@ -157,7 +76,7 @@ impl<Prov: Provenance> MemPlace<Prov> {
}
/// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
- #[inline(always)]
+ #[inline]
pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
match self.meta {
MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
@@ -183,6 +102,28 @@ impl<Prov: Provenance> MemPlace<Prov> {
}
}
+/// A MemPlace with its layout. Constructing it is only possible in this module.
+#[derive(Clone, Hash, Eq, PartialEq)]
+pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
+ mplace: MemPlace<Prov>,
+ pub layout: TyAndLayout<'tcx>,
+ /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
+ /// it needs to have a different alignment than the field type would usually have.
+ /// So we represent this here with a separate field that "overwrites" `layout.align`.
+ /// This means `layout.align` should never be used for a `MPlaceTy`!
+ pub align: Align,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for MPlaceTy<'_, Prov> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ // Printing `layout` results in too much noise; just print a nice version of the type.
+ f.debug_struct("MPlaceTy")
+ .field("mplace", &self.mplace)
+ .field("ty", &format_args!("{}", self.layout.ty))
+ .finish()
+ }
+}
+
impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
/// Produces a MemPlace that works for ZST but nothing else.
/// Conceptually this is a new allocation, but it doesn't actually create an allocation so you
@@ -212,30 +153,48 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
align: layout.align.abi,
}
}
+
+ /// Adjust the provenance of the main pointer (metadata is unaffected).
+ pub fn map_provenance(self, f: impl FnOnce(Option<Prov>) -> Option<Prov>) -> Self {
+ MPlaceTy { mplace: self.mplace.map_provenance(f), ..self }
+ }
+
+ #[inline(always)]
+ pub(super) fn mplace(&self) -> &MemPlace<Prov> {
+ &self.mplace
+ }
+
+ #[inline(always)]
+ pub fn ptr(&self) -> Pointer<Option<Prov>> {
+ self.mplace.ptr
+ }
+
+ #[inline(always)]
+ pub fn to_ref(&self, cx: &impl HasDataLayout) -> Immediate<Prov> {
+ self.mplace.to_ref(cx)
+ }
}
-impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
- fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
- Ok(self.meta)
+ #[inline(always)]
+ fn meta(&self) -> MemPlaceMeta<Prov> {
+ self.mplace.meta
}
- fn offset_with_meta(
+ fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
Ok(MPlaceTy {
- mplace: self.mplace.offset_with_meta_(offset, meta, cx)?,
+ mplace: self.mplace.offset_with_meta_(offset, meta, ecx)?,
align: self.align.restrict_for_offset(offset),
layout,
})
@@ -249,31 +208,109 @@ impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for MPlaceTy<'tcx
}
}
-impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
+#[derive(Copy, Clone, Debug)]
+pub(super) enum Place<Prov: Provenance = AllocId> {
+ /// A place referring to a value allocated in the `Memory` system.
+ Ptr(MemPlace<Prov>),
+
+ /// To support alloc-free locals, we are able to write directly to a local. The offset indicates
+ /// where in the local this place is located; if it is `None`, no projection has been applied.
+ /// Such projections are meaningful even if the offset is 0, since they can change layouts.
+ /// (Without that optimization, we'd just always be a `MemPlace`.)
+ /// Note that this only stores the frame index, not the thread this frame belongs to -- that is
+ /// implicit. This means a `Place` must never be moved across interpreter thread boundaries!
+ ///
+ /// This variant shall not be used for unsized types -- those must always live in memory.
+ Local { frame: usize, local: mir::Local, offset: Option<Size> },
+}
+
+#[derive(Clone)]
+pub struct PlaceTy<'tcx, Prov: Provenance = AllocId> {
+ place: Place<Prov>, // Keep this private; it helps enforce invariants.
+ pub layout: TyAndLayout<'tcx>,
+ /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
+ /// it needs to have a different alignment than the field type would usually have.
+ /// So we represent this here with a separate field that "overwrites" `layout.align`.
+ /// This means `layout.align` should never be used for a `PlaceTy`!
+ pub align: Align,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for PlaceTy<'_, Prov> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ // Printing `layout` results in too much noise; just print a nice version of the type.
+ f.debug_struct("PlaceTy")
+ .field("place", &self.place)
+ .field("ty", &format_args!("{}", self.layout.ty))
+ .finish()
+ }
+}
+
+impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
+ PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout, align: mplace.align }
+ }
+}
+
+impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ pub(super) fn place(&self) -> &Place<Prov> {
+ &self.place
+ }
+
+ /// A place is either an mplace or some local.
+ #[inline(always)]
+ pub fn as_mplace_or_local(
+ &self,
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>)> {
+ match self.place {
+ Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout, align: self.align }),
+ Place::Local { frame, local, offset } => Right((frame, local, offset)),
+ }
+ }
+
+ #[inline(always)]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+ pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
+ self.as_mplace_or_local().left().unwrap_or_else(|| {
+ bug!(
+ "PlaceTy of type {} was a local when it was expected to be an MPlace",
+ self.layout.ty
+ )
+ })
+ }
+}
+
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
- fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
- ecx.place_meta(self)
+ #[inline]
+ fn meta(&self) -> MemPlaceMeta<Prov> {
+ match self.as_mplace_or_local() {
+ Left(mplace) => mplace.meta(),
+ Right(_) => {
+ debug_assert!(self.layout.is_sized(), "unsized locals should live in memory");
+ MemPlaceMeta::None
+ }
+ }
}
- fn offset_with_meta(
+ fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
Ok(match self.as_mplace_or_local() {
- Left(mplace) => mplace.offset_with_meta(offset, meta, layout, cx)?.into(),
+ Left(mplace) => mplace.offset_with_meta(offset, meta, layout, ecx)?.into(),
Right((frame, local, old_offset)) => {
+ debug_assert!(layout.is_sized(), "unsized locals should live in memory");
assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway...
- let new_offset = cx
+ let new_offset = ecx
.data_layout()
.offset(old_offset.unwrap_or(Size::ZERO).bytes(), offset.bytes())?;
PlaceTy {
@@ -301,11 +338,11 @@ impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for PlaceTy<'tcx,
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
#[inline(always)]
pub fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
- match **self {
+ match self.op() {
Operand::Indirect(mplace) => {
- Left(MPlaceTy { mplace, layout: self.layout, align: self.align.unwrap() })
+ Left(MPlaceTy { mplace: *mplace, layout: self.layout, align: self.align.unwrap() })
}
- Operand::Immediate(imm) => Right(ImmTy::from_immediate(imm, self.layout)),
+ Operand::Immediate(imm) => Right(ImmTy::from_immediate(*imm, self.layout)),
}
}
@@ -321,30 +358,7 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
}
}
-impl<'tcx, Prov: Provenance + 'static> PlaceTy<'tcx, Prov> {
- /// A place is either an mplace or some local.
- #[inline]
- pub fn as_mplace_or_local(
- &self,
- ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>)> {
- match **self {
- Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout, align: self.align }),
- Place::Local { frame, local, offset } => Right((frame, local, offset)),
- }
- }
-
- #[inline(always)]
- #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
- pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
- self.as_mplace_or_local().left().unwrap_or_else(|| {
- bug!(
- "PlaceTy of type {} was a local when it was expected to be an MPlace",
- self.layout.ty
- )
- })
- }
-}
-
+/// The `Writeable` trait describes interpreter values that can be written to.
pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
fn as_mplace_or_local(
&self,
@@ -356,7 +370,7 @@ pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>>;
}
-impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
#[inline(always)]
fn as_mplace_or_local(
&self,
@@ -375,7 +389,7 @@ impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for PlaceTy<'tcx, P
}
}
-impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
#[inline(always)]
fn as_mplace_or_local(
&self,
@@ -396,23 +410,9 @@ impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for MPlaceTy<'tcx,
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
where
- Prov: Provenance + 'static,
+ Prov: Provenance,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
- /// Get the metadata of the given place.
- pub(super) fn place_meta(
- &self,
- place: &PlaceTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
- if place.layout.is_unsized() {
- // For `Place::Local`, the metadata is stored with the local, not the place. So we have
- // to look that up first.
- self.place_to_op(place)?.meta()
- } else {
- Ok(MemPlaceMeta::None)
- }
- }
-
/// Take a value, which represents a (thin or wide) reference, and make it a place.
/// Alignment is just based on the type. This is the inverse of `mplace_to_ref()`.
///
@@ -444,7 +444,7 @@ where
&self,
mplace: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
- let imm = mplace.to_ref(self);
+ let imm = mplace.mplace.to_ref(self);
let layout = self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, mplace.layout.ty))?;
Ok(ImmTy::from_immediate(imm, layout))
}
@@ -460,7 +460,7 @@ where
trace!("deref to {} on {:?}", val.layout.ty, *val);
if val.layout.ty.is_box() {
- bug!("dereferencing {:?}", val.layout.ty);
+ bug!("dereferencing {}", val.layout.ty);
}
let mplace = self.ref_to_mplace(&val)?;
@@ -478,7 +478,7 @@ where
.size_and_align_of_mplace(&mplace)?
.unwrap_or((mplace.layout.size, mplace.layout.align.abi));
// Due to packed places, only `mplace.align` matters.
- self.get_ptr_alloc(mplace.ptr, size, mplace.align)
+ self.get_ptr_alloc(mplace.ptr(), size, mplace.align)
}
#[inline]
@@ -491,7 +491,7 @@ where
.size_and_align_of_mplace(&mplace)?
.unwrap_or((mplace.layout.size, mplace.layout.align.abi));
// Due to packed places, only `mplace.align` matters.
- self.get_ptr_alloc_mut(mplace.ptr, size, mplace.align)
+ self.get_ptr_alloc_mut(mplace.ptr(), size, mplace.align)
}
/// Check if this mplace is dereferenceable and sufficiently aligned.
@@ -502,7 +502,7 @@ where
// Due to packed places, only `mplace.align` matters.
let align =
if M::enforce_alignment(self).should_check() { mplace.align } else { Align::ONE };
- self.check_ptr_access_align(mplace.ptr, size, align, CheckInAllocMsg::DerefTest)?;
+ self.check_ptr_access_align(mplace.ptr(), size, align, CheckInAllocMsg::DerefTest)?;
Ok(())
}
@@ -537,8 +537,24 @@ where
frame: usize,
local: mir::Local,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- let layout = self.layout_of_local(&self.stack()[frame], local, None)?;
- let place = Place::Local { frame, local, offset: None };
+ // Other parts of the system rely on `Place::Local` never being unsized.
+ // So we eagerly check here if this local has an MPlace, and if so we use it.
+ let frame_ref = &self.stack()[frame];
+ let layout = self.layout_of_local(frame_ref, local, None)?;
+ let place = if layout.is_sized() {
+ // We can just always use the `Local` for sized values.
+ Place::Local { frame, local, offset: None }
+ } else {
+ // Unsized `Local` isn't okay (we cannot store the metadata).
+ match frame_ref.locals[local].access()? {
+ Operand::Immediate(_) => {
+ // ConstProp marks *all* locals as `Immediate::Uninit` since it cannot
+ // efficiently check whether they are sized. We have to catch that case here.
+ throw_inval!(ConstPropNonsense);
+ }
+ Operand::Indirect(mplace) => Place::Ptr(*mplace),
+ }
+ };
Ok(PlaceTy { place, layout, align: layout.align.abi })
}
@@ -555,7 +571,7 @@ where
place = self.project(&place, elem)?
}
- trace!("{:?}", self.dump_place(place.place));
+ trace!("{:?}", self.dump_place(&place));
// Sanity-check the type we ended up with.
debug_assert!(
mir_assign_valid_types(
@@ -566,7 +582,7 @@ where
)?)?,
place.layout,
),
- "eval_place of a MIR place with type {:?} produced an interpreter place with type {:?}",
+ "eval_place of a MIR place with type {:?} produced an interpreter place with type {}",
mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
place.layout.ty,
);
@@ -631,7 +647,8 @@ where
// just fall back to the indirect path.
dest.force_mplace(self)?
} else {
- match M::access_local_mut(self, frame, local)? {
+ M::before_access_local_mut(self, frame, local)?;
+ match self.stack_mut()[frame].locals[local].access_mut()? {
Operand::Immediate(local_val) => {
// Local can be updated in-place.
*local_val = src;
@@ -751,7 +768,8 @@ where
// FIXME: share the logic with `write_immediate_no_validate`.
dest.force_mplace(self)?
} else {
- match M::access_local_mut(self, frame, local)? {
+ M::before_access_local_mut(self, frame, local)?;
+ match self.stack_mut()[frame].locals[local].access_mut()? {
Operand::Immediate(local) => {
*local = Immediate::Uninit;
return Ok(());
@@ -782,6 +800,13 @@ where
dest: &impl Writeable<'tcx, M::Provenance>,
allow_transmute: bool,
) -> InterpResult<'tcx> {
+ // Generally, for a transmuting copy the data must be valid at both the old and the new type.
+ // But if the types are the same, the second validation below suffices.
+ if src.layout().ty != dest.layout().ty && M::enforce_validity(self, src.layout()) {
+ self.validate_operand(&src.to_op(self)?)?;
+ }
+
+ // Do the actual copy.
self.copy_op_no_validate(src, dest, allow_transmute)?;
if M::enforce_validity(self, dest.layout()) {
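
In other words, a transmuting `copy_op` now validates the data at the source type as well, while a same-typed copy still relies only on the destination validation that follows. A toy sketch of the control flow, with hypothetical print-based stand-ins for the validation and copy steps:

fn copy_op(src_ty: &str, dest_ty: &str, enforce_validity: bool) {
    // New: a transmute must also be valid at the *source* type.
    if src_ty != dest_ty && enforce_validity {
        println!("validate source as {src_ty}");
    }
    // ... the actual copy would happen here ...
    if enforce_validity {
        println!("validate destination as {dest_ty}");
    }
}

fn main() {
    copy_op("u32", "f32", true); // transmute: two validation passes
    copy_op("u32", "u32", true); // same type: the destination pass suffices
}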
@@ -810,7 +835,7 @@ where
if !allow_transmute && !layout_compat {
span_bug!(
self.cur_span(),
- "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
+ "type mismatch when copying!\nsrc: {},\ndest: {}",
src.layout().ty,
dest.layout().ty,
);
@@ -845,7 +870,7 @@ where
*src_val,
src.layout(),
dest_mem.align,
- *dest_mem,
+ dest_mem.mplace,
)
};
}
@@ -872,7 +897,12 @@ where
// (Or as the `Assign` docs put it, assignments "not producing primitives" must be
// non-overlapping.)
self.mem_copy(
- src.ptr, src.align, dest.ptr, dest.align, dest_size, /*nonoverlapping*/ true,
+ src.ptr(),
+ src.align,
+ dest.ptr(),
+ dest.align,
+ dest_size,
+ /*nonoverlapping*/ true,
)
}
@@ -887,7 +917,8 @@ where
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let mplace = match place.place {
Place::Local { frame, local, offset } => {
- let whole_local = match M::access_local_mut(self, frame, local)? {
+ M::before_access_local_mut(self, frame, local)?;
+ let whole_local = match self.stack_mut()[frame].locals[local].access_mut()? {
&mut Operand::Immediate(local_val) => {
// We need to make an allocation.
@@ -896,10 +927,8 @@ where
// that has different alignment than the outer field.
let local_layout =
self.layout_of_local(&self.stack()[frame], local, None)?;
- if local_layout.is_unsized() {
- throw_unsup_format!("unsized locals are not supported");
- }
- let mplace = *self.allocate(local_layout, MemoryKind::Stack)?;
+ assert!(local_layout.is_sized(), "unsized locals cannot be immediate");
+ let mplace = self.allocate(local_layout, MemoryKind::Stack)?;
// Preserve old value. (As an optimization, we can skip this if it was uninit.)
if !matches!(local_val, Immediate::Uninit) {
// We don't have to validate as we can assume the local was already
@@ -909,15 +938,16 @@ where
local_val,
local_layout,
local_layout.align.abi,
- mplace,
+ mplace.mplace,
)?;
}
+ M::after_local_allocated(self, frame, local, &mplace)?;
// Now we can call `access_mut` again, asserting it goes well, and actually
// overwrite things. This points to the entire allocation, not just the part
// the place refers to, i.e. we do this before we apply `offset`.
- *M::access_local_mut(self, frame, local).unwrap() =
- Operand::Indirect(mplace);
- mplace
+ *self.stack_mut()[frame].locals[local].access_mut().unwrap() =
+ Operand::Indirect(mplace.mplace);
+ mplace.mplace
}
&mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
};
@@ -1006,7 +1036,7 @@ where
pub fn raw_const_to_mplace(
&self,
- raw: ConstAlloc<'tcx>,
+ raw: mir::ConstAlloc<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
// This must be an allocation in `tcx`
let _ = self.tcx.global_alloc(raw.alloc_id);
@@ -1025,12 +1055,12 @@ where
matches!(mplace.layout.ty.kind(), ty::Dynamic(_, _, ty::Dyn)),
"`unpack_dyn_trait` only makes sense on `dyn*` types"
);
- let vtable = mplace.meta.unwrap_meta().to_pointer(self)?;
+ let vtable = mplace.meta().unwrap_meta().to_pointer(self)?;
let (ty, _) = self.get_ptr_vtable(vtable)?;
let layout = self.layout_of(ty)?;
let mplace = MPlaceTy {
- mplace: MemPlace { meta: MemPlaceMeta::None, ..**mplace },
+ mplace: MemPlace { meta: MemPlaceMeta::None, ..mplace.mplace },
layout,
align: layout.align.abi,
};
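
The `force_allocation` path above, which spills an immediate local into memory, follows a simple discipline: allocate backing memory, preserve the old value, then repoint the local at the whole allocation. A toy model of just that state change (hypothetical names, not rustc's types):

    // Stand-ins for `Operand::Immediate` / `Operand::Indirect`.
    enum Operand {
        Immediate(u64),  // the value lives directly in the stack frame
        Indirect(usize), // the value lives in allocation `memory[idx]`
    }

    fn force_allocation(local: &mut Operand, memory: &mut Vec<u64>) -> usize {
        match *local {
            // Already indirect: nothing to do.
            Operand::Indirect(idx) => idx,
            Operand::Immediate(val) => {
                // Allocate backing memory, preserving the old value.
                memory.push(val);
                let idx = memory.len() - 1;
                // The local now refers to the entire new allocation.
                *local = Operand::Indirect(idx);
                idx
            }
        }
    }
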
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 882097ad2..70df3d8fd 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -7,12 +7,13 @@
//! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually
//! implement the logic on OpTy, and MPlaceTy calls that.
+use std::marker::PhantomData;
+use std::ops::Range;
+
use rustc_middle::mir;
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty;
-use rustc_middle::ty::TyCtxt;
-use rustc_target::abi::HasDataLayout;
use rustc_target::abi::Size;
use rustc_target::abi::{self, VariantIdx};
@@ -24,44 +25,59 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
fn layout(&self) -> TyAndLayout<'tcx>;
/// Get the metadata of a wide value.
- fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>>;
+ fn meta(&self) -> MemPlaceMeta<Prov>;
+ /// Get the length of a slice/string/array stored here.
fn len<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, u64> {
- self.meta(ecx)?.len(self.layout(), ecx)
+ let layout = self.layout();
+ if layout.is_unsized() {
+ // We need to consult `meta` metadata
+ match layout.ty.kind() {
+ ty::Slice(..) | ty::Str => self.meta().unwrap_meta().to_target_usize(ecx),
+ _ => bug!("len not supported on unsized type {:?}", layout.ty),
+ }
+ } else {
+ // Go through the layout. There are lots of types that support a length,
+ // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
+ match layout.fields {
+ abi::FieldsShape::Array { count, .. } => Ok(count),
+ _ => bug!("len not supported on sized type {:?}", layout.ty),
+ }
+ }
}
/// Offset the value by the given amount, replacing the layout and metadata.
- fn offset_with_meta(
+ fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self>;
- fn offset(
+ #[inline]
+ fn offset<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
assert!(layout.is_sized());
- self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+ self.offset_with_meta(offset, MemPlaceMeta::None, layout, ecx)
}
- fn transmute(
+ #[inline]
+ fn transmute<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
+ assert!(self.layout().is_sized() && layout.is_sized());
assert_eq!(self.layout().size, layout.size);
- self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, cx)
+ self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, ecx)
}
/// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for
@@ -72,10 +88,30 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
}
+/// A type representing iteration over the elements of an array.
+pub struct ArrayIterator<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>> {
+ base: &'a P,
+ range: Range<u64>,
+ stride: Size,
+ field_layout: TyAndLayout<'tcx>,
+ _phantom: PhantomData<Prov>, // otherwise it says `Prov` is never used...
+}
+
+impl<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>> ArrayIterator<'tcx, 'a, Prov, P> {
+ /// Should be the same `ecx` on each call, and match the one used to create the iterator.
+ pub fn next<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &mut self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, Option<(u64, P)>> {
+ let Some(idx) = self.range.next() else { return Ok(None) };
+ Ok(Some((idx, self.base.offset(self.stride * idx, self.field_layout, ecx)?)))
+ }
+}
+
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
where
- Prov: Provenance + 'static,
+ Prov: Provenance,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
/// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
@@ -104,7 +140,7 @@ where
// But const-prop actually feeds us such nonsense MIR! (see test `const_prop/issue-86351.rs`)
throw_inval!(ConstPropNonsense);
}
- let base_meta = base.meta(self)?;
+ let base_meta = base.meta();
// Re-use parent metadata to determine dynamic field layout.
// With custom DSTs, this *will* execute user-defined code, but the same
// happens at run-time so that's okay.
@@ -132,7 +168,7 @@ where
base: &P,
variant: VariantIdx,
) -> InterpResult<'tcx, P> {
- assert!(!base.meta(self)?.has_meta());
+ assert!(!base.meta().has_meta());
// Downcasts only change the layout.
// (In particular, no check about whether this is even the active variant -- that's by design,
// see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
@@ -206,20 +242,13 @@ where
pub fn project_array_fields<'a, P: Projectable<'tcx, M::Provenance>>(
&self,
base: &'a P,
- ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, P>> + 'a>
- where
- 'tcx: 'a,
- {
+ ) -> InterpResult<'tcx, ArrayIterator<'tcx, 'a, M::Provenance, P>> {
let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
};
let len = base.len(self)?;
let field_layout = base.layout().field(self, 0);
- let tcx: TyCtxt<'tcx> = *self.tcx;
- // `Size` multiplication
- Ok((0..len).map(move |i| {
- base.offset_with_meta(stride * i, MemPlaceMeta::None, field_layout, &tcx)
- }))
+ Ok(ArrayIterator { base, range: 0..len, stride, field_layout, _phantom: PhantomData })
}
/// Subslicing
@@ -287,7 +316,11 @@ where
{
use rustc_middle::mir::ProjectionElem::*;
Ok(match proj_elem {
- OpaqueCast(ty) => base.transmute(self.layout_of(ty)?, self)?,
+ OpaqueCast(ty) => {
+ span_bug!(self.cur_span(), "OpaqueCast({ty}) encountered after borrowck")
+ }
+ // We don't want anything happening here; this is just a dummy.
+ Subtype(_) => base.transmute(base.layout(), self)?,
Field(field, _) => self.project_field(base, field.index())?,
Downcast(_, variant) => self.project_downcast(base, variant)?,
Deref => self.deref_pointer(&base.to_op(self)?)?.into(),
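
Since `offset_with_meta` now needs a full `InterpCx` rather than just a `HasDataLayout`, `project_array_fields` can no longer hand out an `impl Iterator` that captures the context. The new `ArrayIterator` instead receives `ecx` on every `next` call, lending-iterator style; `visitor.rs` below consumes it with a `while let` loop for the same reason. A self-contained toy of the pattern (illustrative names only):

    struct Ctx {
        stride: u64, // element stride in bytes
    }

    struct ArrayIter {
        range: std::ops::Range<u64>,
    }

    impl ArrayIter {
        // The caller passes the context to each call instead of the
        // iterator borrowing it for its entire lifetime.
        fn next(&mut self, ctx: &Ctx) -> Option<(u64, u64)> {
            let idx = self.range.next()?;
            Some((idx, ctx.stride * idx)) // (index, byte offset)
        }
    }

    fn main() {
        let ctx = Ctx { stride: 4 };
        let mut it = ArrayIter { range: 0..3 };
        while let Some((idx, offset)) = it.next(&ctx) {
            println!("element {idx} starts at byte {offset}");
        }
    }
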
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 0740894a4..284e13407 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -177,7 +177,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
UnaryOp(un_op, ref operand) => {
// The operand always has the same type as the result.
let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
- let val = self.unary_op(un_op, &val)?;
+ let val = self.wrapping_unary_op(un_op, &val)?;
assert_eq!(val.layout, dest.layout, "layout mismatch for result of {un_op:?}");
self.write_immediate(*val, &dest)?;
}
@@ -204,7 +204,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// avoid writing each operand individually and instead just make many copies
// of the first element.
let elem_size = first.layout.size;
- let first_ptr = first.ptr;
+ let first_ptr = first.ptr();
let rest_ptr = first_ptr.offset(elem_size, self)?;
// For the alignment of `rest_ptr`, we crucially do *not* use `first.align` as
// that place might be more aligned than its type mandates (a `u8` array could
@@ -301,11 +301,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let op = self.eval_place_to_op(place, None)?;
let variant = self.read_discriminant(&op)?;
let discr = self.discriminant_for_variant(op.layout, variant)?;
- self.write_scalar(discr, &dest)?;
+ self.write_immediate(*discr, &dest)?;
}
}
- trace!("{:?}", self.dump_place(*dest));
+ trace!("{:?}", self.dump_place(&dest));
Ok(())
}
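
The `Repeat` hunk above depends on writing the first element once and then duplicating it across the rest of the array. One simple way to express such replication, sketched here in isolation (the interpreter's actual copy helper may batch the copies differently):

    /// Fill `buf` by replicating its first `elem_size` bytes.
    fn replicate_first(buf: &mut [u8], elem_size: usize) {
        assert!(elem_size > 0 && buf.len() % elem_size == 0);
        let mut filled = elem_size;
        while filled < buf.len() {
            // Copy as much of the already-initialized prefix as fits,
            // roughly doubling the initialized region each iteration.
            let n = filled.min(buf.len() - filled);
            buf.copy_within(..n, filled);
            filled += n;
        }
    }

    fn main() {
        let mut buf = [1, 2, 0, 0, 0, 0];
        replicate_first(&mut buf, 2);
        assert_eq!(buf, [1, 2, 1, 2, 1, 2]);
    }
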
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 3c03172bb..578dd6622 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -2,19 +2,25 @@ use std::borrow::Cow;
use either::Either;
use rustc_ast::ast::InlineAsmOptions;
-use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
-use rustc_middle::ty::Instance;
use rustc_middle::{
mir,
- ty::{self, Ty},
+ ty::{
+ self,
+ layout::{FnAbiOf, IntegerExt, LayoutOf, TyAndLayout},
+ AdtDef, Instance, Ty,
+ },
+};
+use rustc_span::sym;
+use rustc_target::abi::{self, FieldIdx};
+use rustc_target::abi::{
+ call::{ArgAbi, FnAbi, PassMode},
+ Integer,
};
-use rustc_target::abi;
-use rustc_target::abi::call::{ArgAbi, ArgAttribute, ArgAttributes, FnAbi, PassMode};
use rustc_target::spec::abi::Abi;
use super::{
- AllocId, FnVal, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemoryKind, OpTy,
- Operand, PlaceTy, Provenance, Scalar, StackPopCleanup,
+ AllocId, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Projectable,
+ Provenance, Scalar, StackPopCleanup,
};
use crate::fluent_generated as fluent;
@@ -92,14 +98,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
for (const_int, target) in targets.iter() {
// Compare using MIR BinOp::Eq, to also support pointer values.
// (Avoiding `self.binary_op` as that does some redundant layout computation.)
- let res = self
- .overflowing_binary_op(
- mir::BinOp::Eq,
- &discr,
- &ImmTy::from_uint(const_int, discr.layout),
- )?
- .0;
- if res.to_bool()? {
+ let res = self.wrapping_binary_op(
+ mir::BinOp::Eq,
+ &discr,
+ &ImmTy::from_uint(const_int, discr.layout),
+ )?;
+ if res.to_scalar().to_bool()? {
target_block = target;
break;
}
@@ -145,7 +149,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
_ => span_bug!(
terminator.source_info.span,
- "invalid callee of type {:?}",
+ "invalid callee of type {}",
func.layout.ty
),
};
@@ -196,15 +200,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- Terminate => {
- // FIXME: maybe should call `panic_no_unwind` lang item instead.
- M::abort(self, "panic in a function that cannot unwind".to_owned())?;
+ UnwindTerminate(reason) => {
+ M::unwind_terminate(self, reason)?;
}
// When we encounter Resume, we've finished unwinding
// cleanup for the current stack frame. We pop it in order
// to continue unwinding the next frame
- Resume => {
+ UnwindResume => {
trace!("unwinding: resuming from cleanup");
// By definition, a Resume terminator means
// that we're unwinding
@@ -252,90 +255,172 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.collect()
}
- fn check_argument_compat(
- caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
- callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
- ) -> bool {
- // Heuristic for type comparison.
- let layout_compat = || {
- if caller_abi.layout.ty == callee_abi.layout.ty {
- // No question
- return true;
- }
- if caller_abi.layout.is_unsized() || callee_abi.layout.is_unsized() {
- // No, no, no. We require the types to *exactly* match for unsized arguments. If
- // these are somehow unsized "in a different way" (say, `dyn Trait` vs `[i32]`),
- // then who knows what happens.
- return false;
+ /// Find the wrapped inner type of a transparent wrapper.
+ /// Must not be called on 1-ZSTs (as they don't have a uniquely defined "wrapped field").
+ ///
+ /// We work with `TyAndLayout` here since that makes it much easier to iterate over all fields.
+ fn unfold_transparent(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ may_unfold: impl Fn(AdtDef<'tcx>) -> bool,
+ ) -> TyAndLayout<'tcx> {
+ match layout.ty.kind() {
+ ty::Adt(adt_def, _) if adt_def.repr().transparent() && may_unfold(*adt_def) => {
+ assert!(!adt_def.is_enum());
+ // Find the non-1-ZST field, and recurse.
+ let (_, field) = layout.non_1zst_field(self).unwrap();
+ self.unfold_transparent(field, may_unfold)
}
- if caller_abi.layout.size != callee_abi.layout.size
- || caller_abi.layout.align.abi != callee_abi.layout.align.abi
- {
- // This cannot go well...
- return false;
+ // Not a transparent type, no further unfolding.
+ _ => layout,
+ }
+ }
+
+ /// Unwrap types that are guaranteed a null-pointer-optimization
+ fn unfold_npo(&self, layout: TyAndLayout<'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+ // Check if this is `Option` wrapping some type.
+ let inner = match layout.ty.kind() {
+ ty::Adt(def, args) if self.tcx.is_diagnostic_item(sym::Option, def.did()) => {
+ args[0].as_type().unwrap()
}
- // The rest *should* be okay, but we are extra conservative.
- match (caller_abi.layout.abi, callee_abi.layout.abi) {
- // Different valid ranges are okay (once we enforce validity,
- // that will take care to make it UB to leave the range, just
- // like for transmute).
- (abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => {
- caller.primitive() == callee.primitive()
- }
- (
- abi::Abi::ScalarPair(caller1, caller2),
- abi::Abi::ScalarPair(callee1, callee2),
- ) => {
- caller1.primitive() == callee1.primitive()
- && caller2.primitive() == callee2.primitive()
- }
- // Be conservative
- _ => false,
+ _ => {
+ // Not an `Option`.
+ return Ok(layout);
}
};
- // When comparing the PassMode, we have to be smart about comparing the attributes.
- let arg_attr_compat = |a1: &ArgAttributes, a2: &ArgAttributes| {
- // There's only one regular attribute that matters for the call ABI: InReg.
- // Everything else is things like noalias, dereferenceable, nonnull, ...
- // (This also applies to pointee_size, pointee_align.)
- if a1.regular.contains(ArgAttribute::InReg) != a2.regular.contains(ArgAttribute::InReg)
- {
- return false;
+ let inner = self.layout_of(inner)?;
+ // Check if the inner type is one of the NPO-guaranteed ones.
+ // For that we first unpeel transparent *structs* (but not unions).
+ let is_npo = |def: AdtDef<'tcx>| {
+ self.tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
+ };
+ let inner = self.unfold_transparent(inner, /* may_unfold */ |def| {
+ // Stop at NPO types so that we don't miss that attribute in the check below!
+ def.is_struct() && !is_npo(def)
+ });
+ Ok(match inner.ty.kind() {
+ ty::Ref(..) | ty::FnPtr(..) => {
+ // Option<&T> behaves like &T, and same for fn()
+ inner
}
- // We also compare the sign extension mode -- this could let the callee make assumptions
- // about bits that conceptually were not even passed.
- if a1.arg_ext != a2.arg_ext {
- return false;
+ ty::Adt(def, _) if is_npo(*def) => {
+ // Once we found a `nonnull_optimization_guaranteed` type, further strip off
+ // newtype structs from it to find the underlying ABI type.
+ self.unfold_transparent(inner, /* may_unfold */ |def| def.is_struct())
}
- return true;
- };
- let mode_compat = || match (&caller_abi.mode, &callee_abi.mode) {
- (PassMode::Ignore, PassMode::Ignore) => true,
- (PassMode::Direct(a1), PassMode::Direct(a2)) => arg_attr_compat(a1, a2),
- (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => {
- arg_attr_compat(a1, a2) && arg_attr_compat(b1, b2)
+ _ => {
+ // Everything else we do not unfold.
+ layout
}
- (PassMode::Cast(c1, pad1), PassMode::Cast(c2, pad2)) => c1 == c2 && pad1 == pad2,
- (
- PassMode::Indirect { attrs: a1, extra_attrs: None, on_stack: s1 },
- PassMode::Indirect { attrs: a2, extra_attrs: None, on_stack: s2 },
- ) => arg_attr_compat(a1, a2) && s1 == s2,
- (
- PassMode::Indirect { attrs: a1, extra_attrs: Some(e1), on_stack: s1 },
- PassMode::Indirect { attrs: a2, extra_attrs: Some(e2), on_stack: s2 },
- ) => arg_attr_compat(a1, a2) && arg_attr_compat(e1, e2) && s1 == s2,
- _ => false,
+ })
+ }
+
+ /// Check if these two layouts look like they are fn-ABI-compatible.
+ /// (We also compare the `PassMode`, so this doesn't have to check everything. But it turns out
+ /// that only checking the `PassMode` is insufficient.)
+ fn layout_compat(
+ &self,
+ caller: TyAndLayout<'tcx>,
+ callee: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, bool> {
+ // Fast path: equal types are definitely compatible.
+ if caller.ty == callee.ty {
+ return Ok(true);
+ }
+ // 1-ZSTs are compatible with all 1-ZSTs (and with nothing else).
+ if caller.is_1zst() || callee.is_1zst() {
+ return Ok(caller.is_1zst() && callee.is_1zst());
+ }
+ // Unfold newtypes and NPO optimizations.
+ let unfold = |layout: TyAndLayout<'tcx>| {
+ self.unfold_npo(self.unfold_transparent(layout, /* may_unfold */ |_def| true))
};
+ let caller = unfold(caller)?;
+ let callee = unfold(callee)?;
+ // Now see if these inner types are compatible.
+
+ // Compatible pointer types. For thin pointers, we have to accept even non-`repr(transparent)`
+ // things as compatible due to `DispatchFromDyn`. For instance, `Rc<i32>` and `*mut i32`
+ // must be compatible. So we just accept everything with Pointer ABI as compatible,
+ // even if this will accept some code that is not stably guaranteed to work.
+ // This also handles function pointers.
+ let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.abi {
+ abi::Abi::Scalar(s) => match s.primitive() {
+ abi::Primitive::Pointer(addr_space) => Some(addr_space),
+ _ => None,
+ },
+ _ => None,
+ };
+ if let (Some(caller), Some(callee)) = (thin_pointer(caller), thin_pointer(callee)) {
+ return Ok(caller == callee);
+ }
+ // For wide pointers we have to get the pointee type.
+ let pointee_ty = |ty: Ty<'tcx>| -> InterpResult<'tcx, Option<Ty<'tcx>>> {
+ // We cannot use `builtin_deref` here since we need to reject `Box<T, MyAlloc>`.
+ Ok(Some(match ty.kind() {
+ ty::Ref(_, ty, _) => *ty,
+ ty::RawPtr(mt) => mt.ty,
+ // We should only accept `Box` with the default allocator.
+ // It's hard to test for that though so we accept every 1-ZST allocator.
+ ty::Adt(def, args)
+ if def.is_box()
+ && self.layout_of(args[1].expect_ty()).is_ok_and(|l| l.is_1zst()) =>
+ {
+ args[0].expect_ty()
+ }
+ _ => return Ok(None),
+ }))
+ };
+ if let (Some(caller), Some(callee)) = (pointee_ty(caller.ty)?, pointee_ty(callee.ty)?) {
+ // This is okay if they have the same metadata type.
+ let meta_ty = |ty: Ty<'tcx>| {
+ let (meta, only_if_sized) = ty.ptr_metadata_ty(*self.tcx, |ty| ty);
+ assert!(
+ !only_if_sized,
+ "there should be no more 'maybe has that metadata' types during interpretation"
+ );
+ meta
+ };
+ return Ok(meta_ty(caller) == meta_ty(callee));
+ }
+
+ // Compatible integer types (in particular, usize vs ptr-sized-u32/u64).
+ let int_ty = |ty: Ty<'tcx>| {
+ Some(match ty.kind() {
+ ty::Int(ity) => (Integer::from_int_ty(&self.tcx, *ity), /* signed */ true),
+ ty::Uint(uty) => (Integer::from_uint_ty(&self.tcx, *uty), /* signed */ false),
+ _ => return None,
+ })
+ };
+ if let (Some(caller), Some(callee)) = (int_ty(caller.ty), int_ty(callee.ty)) {
+ // This is okay if they are the same integer type.
+ return Ok(caller == callee);
+ }
+
+ // Fall back to exact equality.
+ // FIXME: We are missing the rules for "repr(C) wrapping compatible types".
+ Ok(caller == callee)
+ }
- if layout_compat() && mode_compat() {
- return true;
+ fn check_argument_compat(
+ &self,
+ caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ ) -> InterpResult<'tcx, bool> {
+ // We do not want to accept things as ABI-compatible that just "happen to be" compatible on the current target,
+ // so we implement a type-based check that reflects the guaranteed rules for ABI compatibility.
+ if self.layout_compat(caller_abi.layout, callee_abi.layout)? {
+ // Ensure that our checks imply actual ABI compatibility for this concrete call.
+ assert!(caller_abi.eq_abi(&callee_abi));
+ return Ok(true);
+ } else {
+ trace!(
+ "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
+ caller_abi,
+ callee_abi
+ );
+ return Ok(false);
}
- trace!(
- "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
- caller_abi,
- callee_abi
- );
- return false;
}
/// Initialize a single callee argument, checking the types for compatibility.
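
A compact toy version of the rules just added, over a hypothetical type model instead of `TyAndLayout`: transparent wrappers unfold first, thin pointers match thin pointers, integers must agree in width and signedness, and everything else falls back to equality.

    #[derive(Clone, PartialEq)]
    enum AbiTy {
        Int { bits: u32, signed: bool },
        ThinPtr,
        Transparent(Box<AbiTy>), // a repr(transparent) newtype over one field
    }

    fn unfold_transparent(ty: &AbiTy) -> &AbiTy {
        match ty {
            AbiTy::Transparent(inner) => unfold_transparent(inner),
            other => other,
        }
    }

    fn abi_compat(caller: &AbiTy, callee: &AbiTy) -> bool {
        let (caller, callee) = (unfold_transparent(caller), unfold_transparent(callee));
        match (caller, callee) {
            // All thin pointers are mutually compatible (cf. `DispatchFromDyn`).
            (AbiTy::ThinPtr, AbiTy::ThinPtr) => true,
            // Same width and signedness, e.g. `usize` vs `u64` on 64-bit targets.
            (AbiTy::Int { bits: b1, signed: s1 }, AbiTy::Int { bits: b2, signed: s2 }) => {
                b1 == b2 && s1 == s2
            }
            // Fall back to exact equality.
            _ => caller == callee,
        }
    }
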
@@ -345,63 +430,58 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
>,
callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
- callee_arg: &PlaceTy<'tcx, M::Provenance>,
+ callee_arg: &mir::Place<'tcx>,
+ callee_ty: Ty<'tcx>,
+ already_live: bool,
) -> InterpResult<'tcx>
where
'tcx: 'x,
'tcx: 'y,
{
+ assert_eq!(callee_ty, callee_abi.layout.ty);
if matches!(callee_abi.mode, PassMode::Ignore) {
- // This one is skipped.
+ // This one is skipped. Still must be made live though!
+ if !already_live {
+ self.storage_live(callee_arg.as_local().unwrap())?;
+ }
return Ok(());
}
// Find next caller arg.
let Some((caller_arg, caller_abi)) = caller_args.next() else {
throw_ub_custom!(fluent::const_eval_not_enough_caller_args);
};
- // Now, check
- if !Self::check_argument_compat(caller_abi, callee_abi) {
- let callee_ty = format!("{}", callee_arg.layout.ty);
- let caller_ty = format!("{}", caller_arg.layout().ty);
- throw_ub_custom!(
- fluent::const_eval_incompatible_types,
- callee_ty = callee_ty,
- caller_ty = caller_ty,
- )
+ assert_eq!(caller_arg.layout().layout, caller_abi.layout.layout);
+ // Sadly we cannot assert that `caller_arg.layout().ty` and `caller_abi.layout.ty` are
+ // equal; in closures the types sometimes differ. We just hope that `caller_abi` is the
+ // right type to print to the user.
+
+ // Check compatibility
+ if !self.check_argument_compat(caller_abi, callee_abi)? {
+ throw_ub!(AbiMismatchArgument {
+ caller_ty: caller_abi.layout.ty,
+ callee_ty: callee_abi.layout.ty
+ });
}
// We work with a copy of the argument for now; if this is in-place argument passing, we
// will later protect the source it comes from. This means the callee cannot observe if we
+ // did in-place or by-copy argument passing, except for pointer equality tests.
let caller_arg_copy = self.copy_fn_arg(&caller_arg)?;
- // Special handling for unsized parameters.
- if caller_arg_copy.layout.is_unsized() {
- // `check_argument_compat` ensures that both have the same type, so we know they will use the metadata the same way.
- assert_eq!(caller_arg_copy.layout.ty, callee_arg.layout.ty);
- // We have to properly pre-allocate the memory for the callee.
- // So let's tear down some abstractions.
- // This all has to be in memory, there are no immediate unsized values.
- let src = caller_arg_copy.assert_mem_place();
- // The destination cannot be one of these "spread args".
- let (dest_frame, dest_local, dest_offset) = callee_arg
- .as_mplace_or_local()
- .right()
- .expect("callee fn arguments must be locals");
- // We are just initializing things, so there can't be anything here yet.
- assert!(matches!(
- *self.local_to_op(&self.stack()[dest_frame], dest_local, None)?,
- Operand::Immediate(Immediate::Uninit)
- ));
- assert_eq!(dest_offset, None);
- // Allocate enough memory to hold `src`.
- let dest_place = self.allocate_dyn(src.layout, MemoryKind::Stack, src.meta)?;
- // Update the local to be that new place.
- *M::access_local_mut(self, dest_frame, dest_local)? = Operand::Indirect(*dest_place);
+ if !already_live {
+ let local = callee_arg.as_local().unwrap();
+ let meta = caller_arg_copy.meta();
+ // `check_argument_compat` ensures that if metadata is needed, both have the same type,
+ // so we know they will use the metadata the same way.
+ assert!(!meta.has_meta() || caller_arg_copy.layout.ty == callee_ty);
+
+ self.storage_live_dyn(local, meta)?;
}
+ // Now we can finally actually evaluate the callee place.
+ let callee_arg = self.eval_place(*callee_arg)?;
// We allow some transmutes here.
// FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
// is true for all `copy_op`, but there are a lot of special cases for argument passing
// specifically.)
- self.copy_op(&caller_arg_copy, callee_arg, /*allow_transmute*/ true)?;
+ self.copy_op(&caller_arg_copy, &callee_arg, /*allow_transmute*/ true)?;
// If this was an in-place pass, protect the place it comes from for the duration of the call.
if let FnArg::InPlace(place) = caller_arg {
M::protect_in_place_function_argument(self, place)?;
@@ -584,21 +664,50 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// taking into account the `spread_arg`. If we could write
// this is a single iterator (that handles `spread_arg`), then
// `pass_argument` would be the loop body. It takes care to
- // not advance `caller_iter` for ZSTs.
+ // not advance `caller_iter` for ignored arguments.
let mut callee_args_abis = callee_fn_abi.args.iter();
for local in body.args_iter() {
- let dest = self.eval_place(mir::Place::from(local))?;
+ // Construct the destination place for this argument. At this point all
+ // locals are still dead, so we cannot construct a `PlaceTy`.
+ let dest = mir::Place::from(local);
+ // `layout_of_local` does more than just the substitution we need to get the
+ // type, but the result gets cached so this avoids calling the substitution
+ // query *again* the next time this local is accessed.
+ let ty = self.layout_of_local(self.frame(), local, None)?.ty;
if Some(local) == body.spread_arg {
+ // Make the local live once, then fill in the value field by field.
+ self.storage_live(local)?;
// Must be a tuple
- for i in 0..dest.layout.fields.count() {
- let dest = self.project_field(&dest, i)?;
+ let ty::Tuple(fields) = ty.kind() else {
+ span_bug!(self.cur_span(), "non-tuple type for `spread_arg`: {ty}")
+ };
+ for (i, field_ty) in fields.iter().enumerate() {
+ let dest = dest.project_deeper(
+ &[mir::ProjectionElem::Field(
+ FieldIdx::from_usize(i),
+ field_ty,
+ )],
+ *self.tcx,
+ );
let callee_abi = callee_args_abis.next().unwrap();
- self.pass_argument(&mut caller_args, callee_abi, &dest)?;
+ self.pass_argument(
+ &mut caller_args,
+ callee_abi,
+ &dest,
+ field_ty,
+ /* already_live */ true,
+ )?;
}
} else {
- // Normal argument
+ // Normal argument. Cannot mark it as live yet, it might be unsized!
let callee_abi = callee_args_abis.next().unwrap();
- self.pass_argument(&mut caller_args, callee_abi, &dest)?;
+ self.pass_argument(
+ &mut caller_args,
+ callee_abi,
+ &dest,
+ ty,
+ /* already_live */ false,
+ )?;
}
}
// If the callee needs a caller location, pretend we consume one more argument from the ABI.
@@ -614,14 +723,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
throw_ub_custom!(fluent::const_eval_too_many_caller_args);
}
// Don't forget to check the return type!
- if !Self::check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret) {
- let callee_ty = format!("{}", callee_fn_abi.ret.layout.ty);
- let caller_ty = format!("{}", caller_fn_abi.ret.layout.ty);
- throw_ub_custom!(
- fluent::const_eval_incompatible_return_types,
- callee_ty = callee_ty,
- caller_ty = caller_ty,
- )
+ if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret)? {
+ throw_ub!(AbiMismatchReturn {
+ caller_ty: caller_fn_abi.ret.layout.ty,
+ callee_ty: callee_fn_abi.ret.layout.ty
+ });
}
// Ensure the return place is aligned and dereferenceable, and protect it for
// in-place return value passing.
@@ -631,6 +737,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Nothing to do for locals, they are always properly allocated and aligned.
}
M::protect_in_place_function_argument(self, destination)?;
+
+ // Don't forget to mark "initially live" locals as live.
+ self.storage_live_for_always_live_locals()?;
};
match res {
Err(err) => {
@@ -640,7 +749,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(()) => Ok(()),
}
}
- // cannot use the shim here, because that will only result in infinite recursion
+ // `InstanceDef::Virtual` does not have callable MIR. Calls to `Virtual` instances must be
+ // codegen'd / interpreted as virtual calls through the vtable.
ty::InstanceDef::Virtual(def_id, idx) => {
let mut args = args.to_vec();
// We have to implement all "object safe receivers". So we have to go search for a
@@ -671,26 +781,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
_ => {
// Not there yet, search for the only non-ZST field.
- let mut non_zst_field = None;
- for i in 0..receiver.layout.fields.count() {
- let field = self.project_field(&receiver, i)?;
- let zst =
- field.layout.is_zst() && field.layout.align.abi.bytes() == 1;
- if !zst {
- assert!(
- non_zst_field.is_none(),
- "multiple non-ZST fields in dyn receiver type {}",
- receiver.layout.ty
- );
- non_zst_field = Some(field);
- }
- }
- receiver = non_zst_field.unwrap_or_else(|| {
- panic!(
- "no non-ZST fields in dyn receiver type {}",
- receiver.layout.ty
- )
- });
+ // (The rules for `DispatchFromDyn` ensure there's exactly one such field.)
+ let (idx, _) = receiver.layout.non_1zst_field(self).expect(
+ "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
+ );
+ receiver = self.project_field(&receiver, idx)?;
}
}
};
@@ -705,7 +800,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
throw_ub_custom!(fluent::const_eval_dyn_star_call_vtable_mismatch);
}
- (vptr, dyn_ty, recv.ptr)
+ (vptr, dyn_ty, recv.ptr())
} else {
// Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`.
// (For that reason we also cannot use `unpack_dyn_trait`.)
@@ -722,7 +817,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert!(receiver_place.layout.is_unsized());
// Get the required information from the vtable.
- let vptr = receiver_place.meta.unwrap_meta().to_pointer(self)?;
+ let vptr = receiver_place.meta().unwrap_meta().to_pointer(self)?;
let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
if dyn_trait != data.principal() {
throw_ub_custom!(fluent::const_eval_dyn_call_vtable_mismatch);
@@ -731,7 +826,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// It might be surprising that we use a pointer as the receiver even if this
// is a by-val case; this works because by-val passing of an unsized `dyn
// Trait` to a function is actually desugared to a pointer.
- (vptr, dyn_ty, receiver_place.ptr)
+ (vptr, dyn_ty, receiver_place.ptr())
};
// Now determine the actual method to call. We can do that in two different ways and
@@ -764,18 +859,26 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
// Adjust receiver argument. Layout can be any (thin) ptr.
+ let receiver_ty = Ty::new_mut_ptr(self.tcx.tcx, dyn_ty);
args[0] = FnArg::Copy(
ImmTy::from_immediate(
Scalar::from_maybe_pointer(adjusted_receiver, self).into(),
- self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, dyn_ty))?,
+ self.layout_of(receiver_ty)?,
)
.into(),
);
trace!("Patched receiver operand to {:#?}", args[0]);
+ // Need to also adjust the type in the ABI. Strangely, the layout there is actually
+ // already fine! Just the type is bogus. This is due to what `force_thin_self_ptr`
+ // does in `fn_abi_new_uncached`; supposedly, codegen relies on having the bogus
+ // type, so we just patch this up locally.
+ let mut caller_fn_abi = caller_fn_abi.clone();
+ caller_fn_abi.args[0].layout.ty = receiver_ty;
+
// recurse with concrete function
self.eval_fn_call(
FnVal::Instance(fn_inst),
- (caller_abi, caller_fn_abi),
+ (caller_abi, &caller_fn_abi),
&args,
with_caller_location,
destination,
@@ -818,7 +921,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
target: mir::BasicBlock,
unwind: mir::UnwindAction,
) -> InterpResult<'tcx> {
- trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
+ trace!("drop_in_place: {:?},\n instance={:?}", place, instance);
// We take the address of the object. This may well be unaligned, which is fine
// for us here. However, unaligned accesses will probably make the actual drop
// implementation fail -- a problem shared by rustc.
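
The `unfold_npo` step earlier in this file leans on a layout guarantee that can be observed directly in stable Rust: `Option` around a non-nullable pointer-like type has the same size (and ABI) as the wrapped type, with `None` taking the null/zero representation.

    use std::mem::size_of;
    use std::num::NonZeroU32;

    fn main() {
        // Option<&T> behaves like &T, and the same holds for fn pointers...
        assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
        assert_eq!(size_of::<Option<fn()>>(), size_of::<fn()>());
        // ...and for types marked `rustc_nonnull_optimization_guaranteed`,
        // such as NonZeroU32.
        assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<NonZeroU32>());
    }
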
diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs
index fa15d466a..a9ca268a2 100644
--- a/compiler/rustc_const_eval/src/interpret/traits.rs
+++ b/compiler/rustc_const_eval/src/interpret/traits.rs
@@ -27,7 +27,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ensure_monomorphic_enough(*self.tcx, ty)?;
ensure_monomorphic_enough(*self.tcx, poly_trait_ref)?;
- let vtable_symbolic_allocation = self.tcx.create_vtable_alloc(ty, poly_trait_ref);
+ let vtable_symbolic_allocation = self.tcx.reserve_and_set_vtable_alloc(ty, poly_trait_ref);
let vtable_ptr = self.global_base_pointer(Pointer::from(vtable_symbolic_allocation))?;
Ok(vtable_ptr.into())
}
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index b33194423..eb639ded7 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -4,7 +4,7 @@ use rustc_middle::ty::{
};
use std::ops::ControlFlow;
-/// Checks whether a type contains generic parameters which require substitution.
+/// Checks whether a type contains generic parameters which must be instantiated.
///
/// In case it does, returns a `TooGeneric` const eval error. Note that due to polymorphization
/// types may be "concrete enough" even though they still contain generic parameters in
@@ -43,7 +43,8 @@ where
.try_into()
.expect("more generic parameters than can fit into a `u32`");
// Only recurse when generic parameters in fns, closures and generators
- // are used and require substitution.
+ // are used and have to be instantiated.
+ //
// Just in case there are closures or generators within this subst,
// recurse.
if unused_params.is_used(index) && subst.has_param() {
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index d3f05af1c..3e023a896 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -360,7 +360,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// Handle wide pointers.
// Check metadata early, for better diagnostics
if place.layout.is_unsized() {
- self.check_wide_ptr_meta(place.meta, place.layout)?;
+ self.check_wide_ptr_meta(place.meta(), place.layout)?;
}
// Make sure this is dereferenceable and all.
let size_and_align = try_validation!(
@@ -379,7 +379,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
try_validation!(
self.ecx.check_ptr_access_align(
- place.ptr,
+ place.ptr(),
size,
align,
CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
@@ -414,7 +414,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
if let Some(ref_tracking) = self.ref_tracking.as_deref_mut() {
// Proceed recursively even for ZST, no reason to skip them!
// `!` is a ZST and we want to validate it.
- if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr) {
+ if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr()) {
// Let's see what kind of memory this points to.
let alloc_kind = self.ecx.tcx.try_get_global_alloc(alloc_id);
match alloc_kind {
@@ -521,7 +521,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let place =
self.ecx.ref_to_mplace(&self.read_immediate(value, ExpectedKind::RawPtr)?)?;
if place.layout.is_unsized() {
- self.check_wide_ptr_meta(place.meta, place.layout)?;
+ self.check_wide_ptr_meta(place.meta(), place.layout)?;
}
Ok(true)
}
@@ -583,7 +583,6 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
| ty::Bound(..)
| ty::Param(..)
| ty::Alias(..)
- | ty::GeneratorWitnessMIR(..)
| ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty),
}
}
@@ -739,7 +738,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
let len = mplace.len(self.ecx)?;
try_validation!(
- self.ecx.read_bytes_ptr_strip_provenance(mplace.ptr, Size::from_bytes(len)),
+ self.ecx.read_bytes_ptr_strip_provenance(mplace.ptr(), Size::from_bytes(len)),
self.path,
Ub(InvalidUninitBytes(..)) => Uninit { expected: ExpectedKind::Str },
Unsup(ReadPointerAsInt(_)) => PointerAsInt { expected: ExpectedKind::Str }
@@ -789,7 +788,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// to reject those pointers, we just do not have the machinery to
// talk about parts of a pointer.
// We also accept uninit, for consistency with the slow path.
- let alloc = self.ecx.get_ptr_alloc(mplace.ptr, size, mplace.align)?.expect("we already excluded size 0");
+ let alloc = self.ecx.get_ptr_alloc(mplace.ptr(), size, mplace.align)?.expect("we already excluded size 0");
match alloc.get_bytes_strip_provenance() {
// In the happy case, we needn't check anything else.
@@ -911,9 +910,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Complain about any other kind of error -- those are bad because we'd like to
// report them in a way that shows *where* in the value the issue lies.
Err(err) => {
- let (err, backtrace) = err.into_parts();
- backtrace.print_backtrace();
- bug!("Unexpected Undefined Behavior error during validation: {err:?}");
+ bug!(
+ "Unexpected Undefined Behavior error during validation: {}",
+ self.format_error(err)
+ );
}
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index 531e2bd3e..fc21ad1f1 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -170,8 +170,9 @@ pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
}
}
FieldsShape::Array { .. } => {
- for (idx, field) in self.ecx().project_array_fields(v)?.enumerate() {
- self.visit_field(v, idx, &field?)?;
+ let mut iter = self.ecx().project_array_fields(v)?;
+ while let Some((idx, field)) = iter.next(self.ecx())? {
+ self.visit_field(v, idx.try_into().unwrap(), &field)?;
}
}
}