path: root/compiler/rustc_const_eval/src/interpret
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-07 05:48:48 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-07 05:48:48 +0000
commit     ef24de24a82fe681581cc130f342363c47c0969a (patch)
tree       0d494f7e1a38b95c92426f58fe6eaa877303a86c /compiler/rustc_const_eval/src/interpret
parent     Releasing progress-linux version 1.74.1+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.75.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_const_eval/src/interpret')
-rw-r--r--  compiler/rustc_const_eval/src/interpret/cast.rs                          41
-rw-r--r--  compiler/rustc_const_eval/src/interpret/discriminant.rs                  19
-rw-r--r--  compiler/rustc_const_eval/src/interpret/eval_context.rs                  92
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs                        42
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs                    47
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs   128
-rw-r--r--  compiler/rustc_const_eval/src/interpret/machine.rs                       25
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs                       182
-rw-r--r--  compiler/rustc_const_eval/src/interpret/mod.rs                            6
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operand.rs                      123
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operator.rs                      32
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs                        279
-rw-r--r--  compiler/rustc_const_eval/src/interpret/projection.rs                    36
-rw-r--r--  compiler/rustc_const_eval/src/interpret/step.rs                          15
-rw-r--r--  compiler/rustc_const_eval/src/interpret/terminator.rs                    21
-rw-r--r--  compiler/rustc_const_eval/src/interpret/util.rs                           6
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs                      62
17 files changed, 570 insertions, 586 deletions
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index b9f88cf63..f4cb12c8d 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -8,7 +8,7 @@ use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{IntegerExt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, FloatTy, Ty, TypeAndMut};
use rustc_target::abi::Integer;
-use rustc_type_ir::sty::TyKind::*;
+use rustc_type_ir::TyKind::*;
use super::{
util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy,
@@ -145,16 +145,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert!(dest.layout.is_sized());
assert_eq!(cast_ty, dest.layout.ty); // we otherwise ignore `cast_ty` entirely...
if src.layout.size != dest.layout.size {
- let src_bytes = src.layout.size.bytes();
- let dest_bytes = dest.layout.size.bytes();
- let src_ty = format!("{}", src.layout.ty);
- let dest_ty = format!("{}", dest.layout.ty);
throw_ub_custom!(
fluent::const_eval_invalid_transmute,
- src_bytes = src_bytes,
- dest_bytes = dest_bytes,
- src = src_ty,
- dest = dest_ty,
+ src_bytes = src.layout.size.bytes(),
+ dest_bytes = dest.layout.size.bytes(),
+ src = src.layout.ty,
+ dest = dest.layout.ty,
);
}
@@ -185,7 +181,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
src: &ImmTy<'tcx, M::Provenance>,
cast_to: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
- use rustc_type_ir::sty::TyKind::*;
+ use rustc_type_ir::TyKind::*;
let val = match src.layout.ty.kind() {
// Floating point
@@ -310,7 +306,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
where
F: Float + Into<Scalar<M::Provenance>> + FloatConvert<Single> + FloatConvert<Double>,
{
- use rustc_type_ir::sty::TyKind::*;
+ use rustc_type_ir::TyKind::*;
+
+ fn adjust_nan<
+ 'mir,
+ 'tcx: 'mir,
+ M: Machine<'mir, 'tcx>,
+ F1: rustc_apfloat::Float + FloatConvert<F2>,
+ F2: rustc_apfloat::Float,
+ >(
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ f1: F1,
+ f2: F2,
+ ) -> F2 {
+ if f2.is_nan() { M::generate_nan(ecx, &[f1]) } else { f2 }
+ }
+
match *dest_ty.kind() {
// float -> uint
Uint(t) => {
@@ -330,9 +341,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Scalar::from_int(v, size)
}
// float -> f32
- Float(FloatTy::F32) => Scalar::from_f32(f.convert(&mut false).value),
+ Float(FloatTy::F32) => {
+ Scalar::from_f32(adjust_nan(self, f, f.convert(&mut false).value))
+ }
// float -> f64
- Float(FloatTy::F64) => Scalar::from_f64(f.convert(&mut false).value),
+ Float(FloatTy::F64) => {
+ Scalar::from_f64(adjust_nan(self, f, f.convert(&mut false).value))
+ }
// That's it.
_ => span_bug!(self.cur_span(), "invalid float to {} cast", dest_ty),
}
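
The new `adjust_nan` helper routes float-to-float cast results through
`M::generate_nan` whenever the result is a NaN, because IEEE 754 leaves the bit
pattern of a produced NaN unspecified. A minimal standalone sketch of the same
pattern in plain Rust (hypothetical `generate_nan` closure, not the
interpreter's `Machine` hook):

    // Sketch: post-process a float result, replacing NaNs with whatever
    // NaN the "machine" prefers (here: the default quiet NaN).
    fn adjust_nan(result: f32, generate_nan: impl Fn() -> f32) -> f32 {
        if result.is_nan() { generate_nan() } else { result }
    }

    fn main() {
        let casted = f64::NAN as f32; // stays NaN; the payload is unspecified
        let fixed = adjust_nan(casted, || f32::NAN);
        assert!(fixed.is_nan());
    }
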
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index 49e01728f..fd1736703 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -1,7 +1,8 @@
-//! Functions for reading and writing discriminants of multi-variant layouts (enums and generators).
+//! Functions for reading and writing discriminants of multi-variant layouts (enums and coroutines).
-use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
-use rustc_middle::{mir, ty};
+use rustc_middle::mir;
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
+use rustc_middle::ty::{self, Ty};
use rustc_target::abi::{self, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
@@ -170,11 +171,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ty::Adt(adt, _) => {
adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
}
- ty::Generator(def_id, args, _) => {
- let args = args.as_generator();
+ ty::Coroutine(def_id, args, _) => {
+ let args = args.as_coroutine();
args.discriminants(def_id, *self.tcx).find(|(_, var)| var.val == discr_bits)
}
- _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
+ _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-coroutine"),
}
.ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
// Return the cast value, and the index.
@@ -244,11 +245,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn discriminant_for_variant(
&self,
- layout: TyAndLayout<'tcx>,
+ ty: Ty<'tcx>,
variant: VariantIdx,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
- let discr_layout = self.layout_of(layout.ty.discriminant_ty(*self.tcx))?;
- let discr_value = match layout.ty.discriminant_for_variant(*self.tcx, variant) {
+ let discr_layout = self.layout_of(ty.discriminant_ty(*self.tcx))?;
+ let discr_value = match ty.discriminant_for_variant(*self.tcx, variant) {
Some(discr) => {
// This type actually has discriminants.
assert_eq!(discr.ty, discr_layout.ty);
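
`discriminant_for_variant` now takes a `Ty` instead of a full `TyAndLayout`:
the discriminant of a variant is a property of the type alone, so no layout
information is needed. A surface-level illustration in plain Rust (standard
language semantics, not interpreter API):

    fn main() {
        enum Version { V1 = 1, V7 = 7 }
        // The discriminant value is fixed by the type definition; reading
        // it for a known variant requires no layout computation.
        assert_eq!(Version::V7 as i32, 7);
    }
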
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index af7dfbef2..07cab5e34 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -595,6 +595,50 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
+ /// Walks up the callstack from the intrinsic's callsite, searching for the first callsite in a
+ /// frame which is not `#[track_caller]`. This is the fancy version of `cur_span`.
+ pub(crate) fn find_closest_untracked_caller_location(&self) -> Span {
+ for frame in self.stack().iter().rev() {
+ debug!("find_closest_untracked_caller_location: checking frame {:?}", frame.instance);
+
+ // Assert that the frame we look at is actually executing code currently
+ // (`loc` is `Right` when we are unwinding and the frame does not require cleanup).
+ let loc = frame.loc.left().unwrap();
+
+ // This could be a non-`Call` terminator (such as `Drop`), or not a terminator at all
+ // (such as `box`). Use the normal span by default.
+ let mut source_info = *frame.body.source_info(loc);
+
+ // If this is a `Call` terminator, use the `fn_span` instead.
+ let block = &frame.body.basic_blocks[loc.block];
+ if loc.statement_index == block.statements.len() {
+ debug!(
+ "find_closest_untracked_caller_location: got terminator {:?} ({:?})",
+ block.terminator(),
+ block.terminator().kind,
+ );
+ if let mir::TerminatorKind::Call { fn_span, .. } = block.terminator().kind {
+ source_info.span = fn_span;
+ }
+ }
+
+ let caller_location = if frame.instance.def.requires_caller_location(*self.tcx) {
+ // We use `Err(())` as indication that we should continue up the call stack since
+ // this is a `#[track_caller]` function.
+ Some(Err(()))
+ } else {
+ None
+ };
+ if let Ok(span) =
+ frame.body.caller_location_span(source_info, caller_location, *self.tcx, Ok)
+ {
+ return span;
+ }
+ }
+
+ span_bug!(self.cur_span(), "no non-`#[track_caller]` frame found")
+ }
+
#[inline(always)]
pub fn layout_of_local(
&self,
@@ -750,12 +794,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
if M::POST_MONO_CHECKS {
- // `ctfe_query` does some error message decoration that we want to be in effect here.
- self.ctfe_query(None, |tcx| {
- body.post_mono_checks(*tcx, self.param_env, |c| {
- self.subst_from_current_frame_and_normalize_erasing_regions(c)
- })
- })?;
+ for &const_ in &body.required_consts {
+ let c =
+ self.subst_from_current_frame_and_normalize_erasing_regions(const_.const_)?;
+ c.eval(*self.tcx, self.param_env, Some(const_.span)).map_err(|err| {
+ err.emit_note(*self.tcx);
+ err
+ })?;
+ }
}
// done
@@ -961,8 +1007,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
| ty::RawPtr(..)
| ty::Char
| ty::Ref(..)
- | ty::Generator(..)
- | ty::GeneratorWitness(..)
+ | ty::Coroutine(..)
+ | ty::CoroutineWitness(..)
| ty::Array(..)
| ty::Closure(..)
| ty::Never
@@ -1008,7 +1054,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Just make this an efficient immediate.
// Note that not calling `layout_of` here does have one real consequence:
// if the type is too big, we'll only notice this when the local is actually initialized,
- // which is a bit too late -- we should ideally notice this alreayd here, when the memory
+ // which is a bit too late -- we should ideally notice this already here, when the memory
// is conceptually allocated. But given how rare that error is and that this is a hot function,
// we accept this downside for now.
Operand::Immediate(Immediate::Uninit)
@@ -1054,14 +1100,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(())
}
- /// Call a query that can return `ErrorHandled`. If `span` is `Some`, point to that span when an error occurs.
+ /// Call a query that can return `ErrorHandled`. Should be used for statics and other globals.
+ /// (`mir::Const`/`ty::Const` have `eval` methods that can be used directly instead.)
pub fn ctfe_query<T>(
&self,
- span: Option<Span>,
query: impl FnOnce(TyCtxtAt<'tcx>) -> Result<T, ErrorHandled>,
) -> Result<T, ErrorHandled> {
// Use a precise span for better cycle errors.
- query(self.tcx.at(span.unwrap_or_else(|| self.cur_span()))).map_err(|err| {
+ query(self.tcx.at(self.cur_span())).map_err(|err| {
err.emit_note(*self.tcx);
err
})
@@ -1072,17 +1118,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
instance: ty::Instance<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let gid = GlobalId { instance, promoted: None };
- // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
- // and thus don't care about the parameter environment. While we could just use
- // `self.param_env`, that would mean we invoke the query to evaluate the static
- // with different parameter environments, thus causing the static to be evaluated
- // multiple times.
- let param_env = if self.tcx.is_static(gid.instance.def_id()) {
- ty::ParamEnv::reveal_all()
+ let val = if self.tcx.is_static(gid.instance.def_id()) {
+ let alloc_id = self.tcx.reserve_and_set_static_alloc(gid.instance.def_id());
+
+ let ty = instance.ty(self.tcx.tcx, self.param_env);
+ mir::ConstAlloc { alloc_id, ty }
} else {
- self.param_env
+ self.ctfe_query(|tcx| tcx.eval_to_allocation_raw(self.param_env.and(gid)))?
};
- let val = self.ctfe_query(None, |tcx| tcx.eval_to_allocation_raw(param_env.and(gid)))?;
self.raw_const_to_mplace(val)
}
@@ -1092,7 +1135,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
span: Option<Span>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- let const_val = self.ctfe_query(span, |tcx| val.eval(*tcx, self.param_env, span))?;
+ let const_val = val.eval(*self.tcx, self.param_env, span).map_err(|err| {
+ // FIXME: somehow this is reachable even when POST_MONO_CHECKS is on.
+ // Are we not always populating `required_consts`?
+ err.emit_note(*self.tcx);
+ err
+ })?;
self.const_val_to_op(const_val, val.ty(), layout)
}
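
The frame-push path now iterates `body.required_consts` and evaluates each
constant directly, instead of routing through `ctfe_query`, so
post-monomorphization failures surface as soon as the frame is pushed. For
context, a plain-Rust example of a constant whose evaluation can only be
checked after monomorphization (standard const-eval behavior, independent of
this file):

    // Whether `CHECK` evaluates successfully depends on the concrete `T`,
    // so it can only be verified once `T` is known.
    struct NonZeroSized<T>(T);
    impl<T> NonZeroSized<T> {
        const CHECK: () = assert!(std::mem::size_of::<T>() > 0, "zero-sized type");
    }

    fn with_check<T>() {
        let _ = NonZeroSized::<T>::CHECK; // lands in the MIR's required consts
    }

    fn main() {
        with_check::<u32>(); // evaluates fine
        // with_check::<()>(); // would be a post-monomorphization const error
    }
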
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 8c0009cfd..3d90e95c0 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -161,7 +161,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
#[inline(always)]
fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
- &self.ecx
+ self.ecx
}
fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
@@ -259,7 +259,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
// to avoid could be expensive: on the potentially larger types, arrays and slices,
// rather than on all aggregates unconditionally.
if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
- let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
+ let Some((size, _align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
// We do the walk if we can't determine the size of the mplace: we may be
// dealing with extern types here in the future.
return Ok(true);
@@ -267,7 +267,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
// If there is no provenance in this allocation, it does not contain references
// that point to another allocation, and we can avoid the interning walk.
- if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr(), size, align)? {
+ if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr(), size)? {
if !alloc.has_provenance() {
return Ok(false);
}
@@ -450,6 +450,42 @@ pub fn intern_const_alloc_recursive<
Ok(())
}
+/// Intern `alloc_id`. This function assumes that `alloc_id` references no other allocation.
+#[instrument(level = "debug", skip(ecx))]
+pub fn intern_const_alloc_for_constprop<
+ 'mir,
+ 'tcx: 'mir,
+ T,
+ M: CompileTimeMachine<'mir, 'tcx, T>,
+>(
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ alloc_id: AllocId,
+) -> InterpResult<'tcx, ()> {
+ // Move allocation to `tcx`.
+ let Some((_, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) else {
+ // Pointer not found in local memory map. It is either a pointer to the global
+ // map, or dangling.
+ if ecx.tcx.try_get_global_alloc(alloc_id).is_none() {
+ throw_ub!(DeadLocal)
+ }
+ // The constant is already in global memory. Do nothing.
+ return Ok(());
+ };
+
+ alloc.mutability = Mutability::Not;
+
+ // We are not doing recursive interning, so we don't currently support provenance.
+ // (If this assertion ever triggers, we should just implement a
+ // proper recursive interning loop.)
+ assert!(alloc.provenance().ptrs().is_empty());
+
+ // Link the alloc id to the actual allocation
+ let alloc = ecx.tcx.mk_const_alloc(alloc);
+ ecx.tcx.set_alloc_id_memory(alloc_id, alloc);
+
+ Ok(())
+}
+
impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
InterpCx<'mir, 'tcx, M>
{
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 2c0ba9b26..b23cafc19 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -13,7 +13,7 @@ use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
-use rustc_target::abi::{Abi, Align, Primitive, Size};
+use rustc_target::abi::{Abi, Primitive, Size};
use super::{
util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
@@ -22,8 +22,6 @@ use super::{
use crate::fluent_generated as fluent;
-mod caller_location;
-
fn numeric_intrinsic<Prov>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Prov> {
let size = match kind {
Primitive::Int(integer, _) => integer.size(),
@@ -99,8 +97,8 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
| ty::FnPtr(_)
| ty::Dynamic(_, _, _)
| ty::Closure(_, _)
- | ty::Generator(_, _, _)
- | ty::GeneratorWitness(..)
+ | ty::Coroutine(_, _, _)
+ | ty::CoroutineWitness(..)
| ty::Never
| ty::Tuple(_)
| ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
@@ -130,8 +128,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
match intrinsic_name {
sym::caller_location => {
let span = self.find_closest_untracked_caller_location();
- let location = self.alloc_caller_location_for_span(span);
- self.write_immediate(location.to_ref(self), dest)?;
+ let val = self.tcx.span_as_caller_location(span);
+ let val =
+ self.const_val_to_op(val, self.tcx.caller_location_ty(), Some(dest.layout))?;
+ self.copy_op(&val, dest, /* allow_transmute */ false)?;
}
sym::min_align_of_val | sym::size_of_val => {
@@ -164,7 +164,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::type_name => Ty::new_static_str(self.tcx.tcx),
_ => bug!(),
};
- let val = self.ctfe_query(None, |tcx| {
+ let val = self.ctfe_query(|tcx| {
tcx.const_eval_global_id(self.param_env, gid, Some(tcx.span))
})?;
let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
@@ -218,7 +218,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::discriminant_value => {
let place = self.deref_pointer(&args[0])?;
let variant = self.read_discriminant(&place)?;
- let discr = self.discriminant_for_variant(place.layout, variant)?;
+ let discr = self.discriminant_for_variant(place.layout.ty, variant)?;
self.write_immediate(*discr, dest)?;
}
sym::exact_div => {
@@ -349,10 +349,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Check that the range between them is dereferenceable ("in-bounds or one past the
// end of the same allocation"). This is like the check in ptr_offset_inbounds.
let min_ptr = if dist >= 0 { b } else { a };
- self.check_ptr_access_align(
+ self.check_ptr_access(
min_ptr,
Size::from_bytes(dist.unsigned_abs()),
- Align::ONE,
CheckInAllocMsg::OffsetFromTest,
)?;
@@ -500,6 +499,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
b: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
+ assert_eq!(a.layout.ty, b.layout.ty);
+ assert!(matches!(a.layout.ty.kind(), ty::Int(..) | ty::Uint(..)));
+
// Performs an exact division, resulting in undefined behavior where
// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
// First, check x % y != 0 (or if that computation overflows).
@@ -522,7 +524,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
l: &ImmTy<'tcx, M::Provenance>,
r: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ assert_eq!(l.layout.ty, r.layout.ty);
+ assert!(matches!(l.layout.ty.kind(), ty::Int(..) | ty::Uint(..)));
assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
+
let (val, overflowed) = self.overflowing_binary_op(mir_op, l, r)?;
Ok(if overflowed {
let size = l.layout.size;
@@ -565,16 +570,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn ptr_offset_inbounds(
&self,
ptr: Pointer<Option<M::Provenance>>,
- pointee_ty: Ty<'tcx>,
- offset_count: i64,
+ offset_bytes: i64,
) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
- // We cannot overflow i64 as a type's size must be <= isize::MAX.
- let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
- // The computed offset, in bytes, must not overflow an isize.
- // `checked_mul` enforces a too small bound, but no actual allocation can be big enough for
- // the difference to be noticeable.
- let offset_bytes =
- offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
// The offset being in bounds cannot rely on "wrapping around" the address space.
// So, first rule out overflows in the pointer arithmetic.
let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
@@ -583,10 +580,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// pointers to be properly aligned (unlike a read/write operation).
let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
// This call handles checking for integer/null pointers.
- self.check_ptr_access_align(
+ self.check_ptr_access(
min_ptr,
Size::from_bytes(offset_bytes.unsigned_abs()),
- Align::ONE,
CheckInAllocMsg::PointerArithmeticTest,
)?;
Ok(offset_ptr)
@@ -615,7 +611,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let src = self.read_pointer(src)?;
let dst = self.read_pointer(dst)?;
- self.mem_copy(src, align, dst, align, size, nonoverlapping)
+ self.check_ptr_align(src, align)?;
+ self.check_ptr_align(dst, align)?;
+
+ self.mem_copy(src, dst, size, nonoverlapping)
}
pub(crate) fn write_bytes_intrinsic(
@@ -671,7 +670,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
size|
-> InterpResult<'tcx, &[u8]> {
let ptr = this.read_pointer(op)?;
- let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
+ let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
// zero-sized access
return Ok(&[]);
};
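
The `caller_location` intrinsic no longer hand-allocates a
`core::panic::Location` (the deleted `caller_location.rs` below did exactly
that); it now asks the compiler for the value via
`tcx.span_as_caller_location` and copies it into the destination. What the
intrinsic implements, at the surface level, is `Location::caller()`:

    use std::panic::Location;

    #[track_caller]
    fn where_am_i() -> &'static Location<'static> {
        // Lowers to the `caller_location` intrinsic. Because this function
        // is `#[track_caller]`, it reports the caller's span, not this line;
        // `find_closest_untracked_caller_location` implements that walk.
        Location::caller()
    }

    fn main() {
        let loc = where_am_i();
        println!("{}:{}:{}", loc.file(), loc.line(), loc.column());
    }
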
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
deleted file mode 100644
index 948bec746..000000000
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
+++ /dev/null
@@ -1,128 +0,0 @@
-use rustc_ast::Mutability;
-use rustc_hir::lang_items::LangItem;
-use rustc_middle::mir::TerminatorKind;
-use rustc_middle::ty::layout::LayoutOf;
-use rustc_span::{Span, Symbol};
-
-use crate::interpret::{
- intrinsics::{InterpCx, Machine},
- MPlaceTy, MemoryKind, Scalar,
-};
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
- /// Walks up the callstack from the intrinsic's callsite, searching for the first callsite in a
- /// frame which is not `#[track_caller]`.
- pub(crate) fn find_closest_untracked_caller_location(&self) -> Span {
- for frame in self.stack().iter().rev() {
- debug!("find_closest_untracked_caller_location: checking frame {:?}", frame.instance);
-
- // Assert that the frame we look at is actually executing code currently
- // (`loc` is `Right` when we are unwinding and the frame does not require cleanup).
- let loc = frame.loc.left().unwrap();
-
- // This could be a non-`Call` terminator (such as `Drop`), or not a terminator at all
- // (such as `box`). Use the normal span by default.
- let mut source_info = *frame.body.source_info(loc);
-
- // If this is a `Call` terminator, use the `fn_span` instead.
- let block = &frame.body.basic_blocks[loc.block];
- if loc.statement_index == block.statements.len() {
- debug!(
- "find_closest_untracked_caller_location: got terminator {:?} ({:?})",
- block.terminator(),
- block.terminator().kind
- );
- if let TerminatorKind::Call { fn_span, .. } = block.terminator().kind {
- source_info.span = fn_span;
- }
- }
-
- // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
- // If so, the starting `source_info.span` is in the innermost inlined
- // function, and will be replaced with outer callsite spans as long
- // as the inlined functions were `#[track_caller]`.
- loop {
- let scope_data = &frame.body.source_scopes[source_info.scope];
-
- if let Some((callee, callsite_span)) = scope_data.inlined {
- // Stop inside the most nested non-`#[track_caller]` function,
- // before ever reaching its caller (which is irrelevant).
- if !callee.def.requires_caller_location(*self.tcx) {
- return source_info.span;
- }
- source_info.span = callsite_span;
- }
-
- // Skip past all of the parents with `inlined: None`.
- match scope_data.inlined_parent_scope {
- Some(parent) => source_info.scope = parent,
- None => break,
- }
- }
-
- // Stop inside the most nested non-`#[track_caller]` function,
- // before ever reaching its caller (which is irrelevant).
- if !frame.instance.def.requires_caller_location(*self.tcx) {
- return source_info.span;
- }
- }
-
- span_bug!(self.cur_span(), "no non-`#[track_caller]` frame found")
- }
-
- /// Allocate a `const core::panic::Location` with the provided filename and line/column numbers.
- pub(crate) fn alloc_caller_location(
- &mut self,
- filename: Symbol,
- line: u32,
- col: u32,
- ) -> MPlaceTy<'tcx, M::Provenance> {
- let loc_details = self.tcx.sess.opts.unstable_opts.location_detail;
- // This can fail if rustc runs out of memory right here. Trying to emit an error would be
- // pointless, since that would require allocating more memory than these short strings.
- let file = if loc_details.file {
- self.allocate_str(filename.as_str(), MemoryKind::CallerLocation, Mutability::Not)
- .unwrap()
- } else {
- // FIXME: This creates a new allocation each time. It might be preferable to
- // perform this allocation only once, and re-use the `MPlaceTy`.
- // See https://github.com/rust-lang/rust/pull/89920#discussion_r730012398
- self.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not).unwrap()
- };
- let line = if loc_details.line { Scalar::from_u32(line) } else { Scalar::from_u32(0) };
- let col = if loc_details.column { Scalar::from_u32(col) } else { Scalar::from_u32(0) };
-
- // Allocate memory for `CallerLocation` struct.
- let loc_ty = self
- .tcx
- .type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None))
- .instantiate(*self.tcx, self.tcx.mk_args(&[self.tcx.lifetimes.re_erased.into()]));
- let loc_layout = self.layout_of(loc_ty).unwrap();
- let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
-
- // Initialize fields.
- self.write_immediate(file.to_ref(self), &self.project_field(&location, 0).unwrap())
- .expect("writing to memory we just allocated cannot fail");
- self.write_scalar(line, &self.project_field(&location, 1).unwrap())
- .expect("writing to memory we just allocated cannot fail");
- self.write_scalar(col, &self.project_field(&location, 2).unwrap())
- .expect("writing to memory we just allocated cannot fail");
-
- location
- }
-
- pub(crate) fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) {
- let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
- let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
- (
- Symbol::intern(&caller.file.name.prefer_remapped().to_string_lossy()),
- u32::try_from(caller.line).unwrap(),
- u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
- )
- }
-
- pub fn alloc_caller_location_for_span(&mut self, span: Span) -> MPlaceTy<'tcx, M::Provenance> {
- let (file, line, column) = self.location_triple_for_span(span);
- self.alloc_caller_location(file, line, column)
- }
-}
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index aaa674a59..61fe9151d 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -6,16 +6,15 @@ use std::borrow::{Borrow, Cow};
use std::fmt::Debug;
use std::hash::Hash;
+use rustc_apfloat::{Float, FloatConvert};
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, TyCtxt};
use rustc_span::def_id::DefId;
-use rustc_target::abi::{Align, Size};
+use rustc_target::abi::Size;
use rustc_target::spec::abi::Abi as CallAbi;
-use crate::const_eval::CheckAlignment;
-
use super::{
AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, FnArg, Frame, ImmTy, InterpCx,
InterpResult, MPlaceTy, MemoryKind, OpTy, PlaceTy, Pointer, Provenance,
@@ -134,7 +133,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
const POST_MONO_CHECKS: bool = true;
/// Whether memory accesses should be alignment-checked.
- fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment;
+ fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
/// Whether, when checking alignment, we should look at the actual address and thus support
/// custom alignment logic based on whatever the integer address happens to be.
@@ -142,13 +141,6 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
/// If this returns true, Provenance::OFFSET_IS_ADDR must be true.
fn use_addr_for_alignment_check(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
- fn alignment_check_failed(
- ecx: &InterpCx<'mir, 'tcx, Self>,
- has: Align,
- required: Align,
- check: CheckAlignment,
- ) -> InterpResult<'tcx, ()>;
-
/// Whether to enforce the validity invariant for a specific layout.
fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool;
@@ -240,6 +232,16 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
right: &ImmTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)>;
+ /// Generate the NaN returned by a float operation, given the list of inputs.
+ /// (This is all inputs, not just NaN inputs!)
+ fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(
+ _ecx: &InterpCx<'mir, 'tcx, Self>,
+ _inputs: &[F1],
+ ) -> F2 {
+ // By default we always return the preferred NaN.
+ F2::NAN
+ }
+
/// Called before writing the specified `local` of the `frame`.
/// Since writing a ZST is not actually accessing memory or locals, this is never invoked
/// for ZST reads.
@@ -434,6 +436,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
place: &PlaceTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx> {
// Without an aliasing model, all we can do is put `Uninit` into the place.
+ // Conveniently this also ensures that the place actually points to suitable memory.
ecx.write_uninit(place)
}
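
The new `generate_nan` hook exists because IEEE 754 only requires that a
NaN-producing operation return *some* NaN; the exact bit pattern is left to
the implementation, so a machine may want to choose it (the default above
returns the preferred quiet NaN). The underlying freedom is visible even in
plain Rust:

    fn main() {
        // 0.0 / 0.0 must produce a NaN, but its bit pattern is not pinned
        // down by IEEE 754; that freedom is what `generate_nan` models.
        let nan = 0.0f32 / 0.0f32;
        assert!(nan.is_nan());
        println!("{:#010x}", nan.to_bits()); // e.g. 0x7fc00000 or 0xffc00000
    }
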
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 436c4d521..16905e93b 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -18,13 +18,12 @@ use rustc_middle::mir::display_allocation;
use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
use rustc_target::abi::{Align, HasDataLayout, Size};
-use crate::const_eval::CheckAlignment;
use crate::fluent_generated as fluent;
use super::{
- alloc_range, AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg,
- GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance,
- Scalar,
+ alloc_range, AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckAlignMsg,
+ CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak, Misalignment, Pointer,
+ PointerArithmetic, Provenance, Scalar,
};
#[derive(Debug, PartialEq, Copy, Clone)]
@@ -259,14 +258,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
None => self.get_alloc_raw(alloc_id)?.size(),
};
// This will also call the access hooks.
- self.mem_copy(
- ptr,
- Align::ONE,
- new_ptr.into(),
- Align::ONE,
- old_size.min(new_size),
- /*nonoverlapping*/ true,
- )?;
+ self.mem_copy(ptr, new_ptr.into(), old_size.min(new_size), /*nonoverlapping*/ true)?;
self.deallocate_ptr(ptr, old_size_and_align, kind)?;
Ok(new_ptr)
@@ -368,13 +360,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
- align: Align,
) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
self.check_and_deref_ptr(
ptr,
size,
- align,
- M::enforce_alignment(self),
CheckInAllocMsg::MemoryAccessTest,
|alloc_id, offset, prov| {
let (size, align) = self
@@ -384,43 +373,31 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)
}
- /// Check if the given pointer points to live memory of given `size` and `align`
- /// (ignoring `M::enforce_alignment`). The caller can control the error message for the
- /// out-of-bounds case.
+ /// Check if the given pointer points to live memory of the given `size`.
+ /// The caller can control the error message for the out-of-bounds case.
#[inline(always)]
- pub fn check_ptr_access_align(
+ pub fn check_ptr_access(
&self,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
- align: Align,
msg: CheckInAllocMsg,
) -> InterpResult<'tcx> {
- self.check_and_deref_ptr(
- ptr,
- size,
- align,
- CheckAlignment::Error,
- msg,
- |alloc_id, _, _| {
- let (size, align) = self.get_live_alloc_size_and_align(alloc_id, msg)?;
- Ok((size, align, ()))
- },
- )?;
+ self.check_and_deref_ptr(ptr, size, msg, |alloc_id, _, _| {
+ let (size, align) = self.get_live_alloc_size_and_align(alloc_id, msg)?;
+ Ok((size, align, ()))
+ })?;
Ok(())
}
/// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
/// to the allocation it points to. Supports both shared and mutable references, as the actual
- /// checking is offloaded to a helper closure. `align` defines whether and which alignment check
- /// is done.
+ /// checking is offloaded to a helper closure.
///
/// If this returns `None`, the size is 0; it can however return `Some` even for size 0.
fn check_and_deref_ptr<T>(
&self,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
- align: Align,
- check: CheckAlignment,
msg: CheckInAllocMsg,
alloc_size: impl FnOnce(
AllocId,
@@ -435,14 +412,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if size.bytes() > 0 || addr == 0 {
throw_ub!(DanglingIntPointer(addr, msg));
}
- // Must be aligned.
- if check.should_check() {
- self.check_offset_align(addr, align, check)?;
- }
None
}
Ok((alloc_id, offset, prov)) => {
- let (alloc_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
+ let (alloc_size, _alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
// Test bounds. This also ensures non-null.
// It is sufficient to check this for the end pointer. Also check for overflow!
if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
@@ -458,20 +431,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if M::Provenance::OFFSET_IS_ADDR {
assert_ne!(ptr.addr(), Size::ZERO);
}
- // Test align. Check this last; if both bounds and alignment are violated
- // we want the error to be about the bounds.
- if check.should_check() {
- if M::use_addr_for_alignment_check(self) {
- // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
- self.check_offset_align(ptr.addr().bytes(), align, check)?;
- } else {
- // Check allocation alignment and offset alignment.
- if alloc_align.bytes() < align.bytes() {
- M::alignment_check_failed(self, alloc_align, align, check)?;
- }
- self.check_offset_align(offset.bytes(), align, check)?;
- }
- }
// We can still be zero-sized in this branch, in which case we have to
// return `None`.
@@ -480,19 +439,65 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
})
}
- fn check_offset_align(
+ pub(super) fn check_misalign(
&self,
- offset: u64,
- align: Align,
- check: CheckAlignment,
+ misaligned: Option<Misalignment>,
+ msg: CheckAlignMsg,
) -> InterpResult<'tcx> {
- if offset % align.bytes() == 0 {
- Ok(())
- } else {
- // The biggest power of two through which `offset` is divisible.
- let offset_pow2 = 1 << offset.trailing_zeros();
- M::alignment_check_failed(self, Align::from_bytes(offset_pow2).unwrap(), align, check)
+ if let Some(misaligned) = misaligned {
+ throw_ub!(AlignmentCheckFailed(misaligned, msg))
}
+ Ok(())
+ }
+
+ pub(super) fn is_ptr_misaligned(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ align: Align,
+ ) -> Option<Misalignment> {
+ if !M::enforce_alignment(self) || align.bytes() == 1 {
+ return None;
+ }
+
+ #[inline]
+ fn offset_misalignment(offset: u64, align: Align) -> Option<Misalignment> {
+ if offset % align.bytes() == 0 {
+ None
+ } else {
+ // The biggest power of two through which `offset` is divisible.
+ let offset_pow2 = 1 << offset.trailing_zeros();
+ Some(Misalignment { has: Align::from_bytes(offset_pow2).unwrap(), required: align })
+ }
+ }
+
+ match self.ptr_try_get_alloc_id(ptr) {
+ Err(addr) => offset_misalignment(addr, align),
+ Ok((alloc_id, offset, _prov)) => {
+ let (_size, alloc_align, _kind) = self.get_alloc_info(alloc_id);
+ if M::use_addr_for_alignment_check(self) {
+ // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
+ offset_misalignment(ptr.addr().bytes(), align)
+ } else {
+ // Check allocation alignment and offset alignment.
+ if alloc_align.bytes() < align.bytes() {
+ Some(Misalignment { has: alloc_align, required: align })
+ } else {
+ offset_misalignment(offset.bytes(), align)
+ }
+ }
+ }
+ }
+ }
+
+ /// Checks a pointer for misalignment.
+ ///
+ /// The error assumes this is checking the pointer used directly for an access.
+ pub fn check_ptr_align(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ align: Align,
+ ) -> InterpResult<'tcx> {
+ self.check_misalign(self.is_ptr_misaligned(ptr, align), CheckAlignMsg::AccessedPtr)
}
}
@@ -536,7 +541,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
// We don't give a span -- statics don't need that, they cannot be generic or associated.
- let val = self.ctfe_query(None, |tcx| tcx.eval_static_initializer(def_id))?;
+ let val = self.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
(val, Some(def_id))
}
};
@@ -550,17 +555,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)
}
- /// Get the base address for the bytes in an `Allocation` specified by the
- /// `AllocID` passed in; error if no such allocation exists.
- ///
- /// It is up to the caller to take sufficient care when using this address:
- /// there could be provenance or uninit memory in there, and other memory
- /// accesses could invalidate the exposed pointer.
- pub fn alloc_base_addr(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
- let alloc = self.get_alloc_raw(id)?;
- Ok(alloc.base_addr())
- }
-
/// Gives raw access to the `Allocation`, without bounds or alignment checks.
/// The caller is responsible for calling the access hooks!
///
@@ -598,19 +592,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- /// "Safe" (bounds and align-checked) allocation access.
+ /// Bounds-checked *but not align-checked* allocation access.
pub fn get_ptr_alloc<'a>(
&'a self,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
- align: Align,
) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
{
let ptr_and_alloc = self.check_and_deref_ptr(
ptr,
size,
- align,
- M::enforce_alignment(self),
CheckInAllocMsg::MemoryAccessTest,
|alloc_id, offset, prov| {
let alloc = self.get_alloc_raw(alloc_id)?;
@@ -671,15 +662,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok((alloc, &mut self.machine))
}
- /// "Safe" (bounds and align-checked) allocation access.
+ /// Bounds-checked *but not align-checked* allocation access.
pub fn get_ptr_alloc_mut<'a>(
&'a mut self,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
- align: Align,
) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
{
- let parts = self.get_ptr_access(ptr, size, align)?;
+ let parts = self.get_ptr_access(ptr, size)?;
if let Some((alloc_id, offset, prov)) = parts {
let tcx = *self.tcx;
// FIXME: can we somehow avoid looking up the allocation twice here?
@@ -1021,7 +1011,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Pr
}
/// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
- pub(crate) fn has_provenance(&self) -> bool {
+ pub fn has_provenance(&self) -> bool {
!self.alloc.provenance().range_empty(self.range, &self.tcx)
}
}
@@ -1036,7 +1026,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ptr: Pointer<Option<M::Provenance>>,
size: Size,
) -> InterpResult<'tcx, &[u8]> {
- let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
+ let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
// zero-sized access
return Ok(&[]);
};
@@ -1062,7 +1052,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_eq!(lower, len, "can only write iterators with a precise length");
let size = Size::from_bytes(len);
- let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size, Align::ONE)? else {
+ let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size)? else {
// zero-sized access
assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
return Ok(());
@@ -1087,29 +1077,25 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn mem_copy(
&mut self,
src: Pointer<Option<M::Provenance>>,
- src_align: Align,
dest: Pointer<Option<M::Provenance>>,
- dest_align: Align,
size: Size,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
- self.mem_copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
+ self.mem_copy_repeatedly(src, dest, size, 1, nonoverlapping)
}
pub fn mem_copy_repeatedly(
&mut self,
src: Pointer<Option<M::Provenance>>,
- src_align: Align,
dest: Pointer<Option<M::Provenance>>,
- dest_align: Align,
size: Size,
num_copies: u64,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
let tcx = self.tcx;
// We need to do our own bounds-checks.
- let src_parts = self.get_ptr_access(src, size, src_align)?;
- let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
+ let src_parts = self.get_ptr_access(src, size)?;
+ let dest_parts = self.get_ptr_access(dest, size * num_copies)?; // `Size` multiplication
// FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
// and once below to get the underlying `&[mut] Allocation`.
@@ -1249,6 +1235,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Turning a "maybe pointer" into a proper pointer (and some information
/// about where it points), or an absolute address.
+ ///
+ /// The result must be used immediately; it is not allowed to convert
+ /// the returned data back into a `Pointer` and store that in machine state.
+ /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
+ /// we don't have an operation to turn it back into `M::Provenance`.)
pub fn ptr_try_get_alloc_id(
&self,
ptr: Pointer<Option<M::Provenance>>,
@@ -1267,6 +1258,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
/// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
+ ///
+ /// The result must be used immediately; it is not allowed to convert
+ /// the returned data back into a `Pointer` and store that in machine state.
+ /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
+ /// we don't have an operation to turn it back into `M::Provenance`.)
#[inline(always)]
pub fn ptr_get_alloc_id(
&self,
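
Alignment checking is now decoupled from memory accesses: `is_ptr_misaligned`
computes an optional `Misalignment` and `check_misalign` turns it into an
error, with `check_ptr_align` as the convenience wrapper. The heart of
`offset_misalignment` is computing the largest power of two that divides the
offset; in isolation (a sketch mirroring the code above):

    // The alignment a pointer at `offset` actually "has" is the largest
    // power of two dividing the offset. The caller handles the aligned
    // case (offset % align == 0) first, so `offset` is nonzero here.
    fn actual_alignment(offset: u64) -> u64 {
        assert!(offset != 0);
        1 << offset.trailing_zeros()
    }

    fn main() {
        assert_eq!(actual_alignment(12), 4); // 12 = 4 * 3: only 4-aligned
        assert_eq!(actual_alignment(8), 8);  // an 8-aligned offset
    }
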
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index 69eb22028..7d286d103 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -21,12 +21,14 @@ mod visitor;
pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
pub use self::eval_context::{Frame, FrameInfo, InterpCx, StackPopCleanup};
-pub use self::intern::{intern_const_alloc_recursive, InternKind};
+pub use self::intern::{
+ intern_const_alloc_for_constprop, intern_const_alloc_recursive, InternKind,
+};
pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
pub use self::operand::{ImmTy, Immediate, OpTy, Readable};
pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
-pub use self::projection::Projectable;
+pub use self::projection::{OffsetMode, Projectable};
pub use self::terminator::FnArg;
pub use self::validity::{CtfeValidationMode, RefTracking};
pub use self::visitor::ValueVisitor;
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index a32ea204f..255dd1eba 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -10,11 +10,12 @@ use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
use rustc_middle::ty::{ConstInt, Ty, TyCtxt};
use rustc_middle::{mir, ty};
-use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
+use rustc_target::abi::{self, Abi, HasDataLayout, Size};
use super::{
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, Frame, InterpCx, InterpResult,
- MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer, Projectable, Provenance, Scalar,
+ MPlaceTy, Machine, MemPlace, MemPlaceMeta, OffsetMode, PlaceTy, Pointer, Projectable,
+ Provenance, Scalar,
};
/// An `Immediate` represents a single immediate self-contained Rust value.
@@ -43,12 +44,16 @@ impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
}
impl<Prov: Provenance> Immediate<Prov> {
- pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
- Immediate::Scalar(Scalar::from_pointer(ptr, cx))
- }
-
- pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
- Immediate::Scalar(Scalar::from_maybe_pointer(ptr, cx))
+ pub fn new_pointer_with_meta(
+ ptr: Pointer<Option<Prov>>,
+ meta: MemPlaceMeta<Prov>,
+ cx: &impl HasDataLayout,
+ ) -> Self {
+ let ptr = Scalar::from_maybe_pointer(ptr, cx);
+ match meta {
+ MemPlaceMeta::None => Immediate::from(ptr),
+ MemPlaceMeta::Meta(meta) => Immediate::ScalarPair(ptr, meta),
+ }
}
pub fn new_slice(ptr: Pointer<Option<Prov>>, len: u64, cx: &impl HasDataLayout) -> Self {
@@ -102,10 +107,10 @@ impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
/// Helper function for printing a scalar to a FmtPrinter
fn p<'a, 'tcx, Prov: Provenance>(
- cx: FmtPrinter<'a, 'tcx>,
+ cx: &mut FmtPrinter<'a, 'tcx>,
s: Scalar<Prov>,
ty: Ty<'tcx>,
- ) -> Result<FmtPrinter<'a, 'tcx>, std::fmt::Error> {
+ ) -> Result<(), std::fmt::Error> {
match s {
Scalar::Int(int) => cx.pretty_print_const_scalar_int(int, ty, true),
Scalar::Ptr(ptr, _sz) => {
@@ -120,8 +125,9 @@ impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
match self.imm {
Immediate::Scalar(s) => {
if let Some(ty) = tcx.lift(self.layout.ty) {
- let cx = FmtPrinter::new(tcx, Namespace::ValueNS);
- f.write_str(&p(cx, s, ty)?.into_buffer())?;
+ let s =
+ FmtPrinter::print_string(tcx, Namespace::ValueNS, |cx| p(cx, s, ty))?;
+ f.write_str(&s)?;
return Ok(());
}
write!(f, "{:x}: {}", s, self.layout.ty)
@@ -163,6 +169,16 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
ImmTy { imm: val.into(), layout }
}
+ #[inline]
+ pub fn from_scalar_pair(a: Scalar<Prov>, b: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+ debug_assert!(
+ matches!(layout.abi, Abi::ScalarPair(..)),
+ "`ImmTy::from_scalar_pair` on non-scalar-pair layout"
+ );
+ let imm = Immediate::ScalarPair(a, b);
+ ImmTy { imm, layout }
+ }
+
#[inline(always)]
pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(
@@ -219,6 +235,17 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
/// given layout.
// Not called `offset` to avoid confusion with the trait method.
fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
+ debug_assert!(layout.is_sized(), "unsized immediates are not a thing");
+ // `ImmTy` has already been checked to be in-bounds, so we can just check directly if this
+ // remains in-bounds. This cannot actually be violated since projections are type-checked
+ // and bounds-checked.
+ assert!(
+ offset + layout.size <= self.layout.size,
+ "attempting to project to field at offset {} with size {} into immediate with layout {:#?}",
+ offset.bytes(),
+ layout.size.bytes(),
+ self.layout,
+ );
// This makes several assumptions about what layouts we will encounter; we match what
// codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
let inner_val: Immediate<_> = match (**self, self.layout.abi) {
@@ -286,6 +313,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
+ _mode: OffsetMode,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
ecx: &InterpCx<'mir, 'tcx, M>,
@@ -315,14 +343,6 @@ pub(super) enum Operand<Prov: Provenance = AllocId> {
pub struct OpTy<'tcx, Prov: Provenance = AllocId> {
op: Operand<Prov>, // Keep this private; it helps enforce invariants.
pub layout: TyAndLayout<'tcx>,
- /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
- /// it needs to have a different alignment than the field type would usually have.
- /// So we represent this here with a separate field that "overwrites" `layout.align`.
- /// This means `layout.align` should never be used for an `OpTy`!
- /// `None` means "alignment does not matter since this is a by-value operand"
- /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
- /// Also CTFE ignores alignment anyway, so this is for Miri only.
- pub align: Option<Align>,
}
impl<Prov: Provenance> std::fmt::Debug for OpTy<'_, Prov> {
@@ -338,18 +358,14 @@ impl<Prov: Provenance> std::fmt::Debug for OpTy<'_, Prov> {
impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
#[inline(always)]
fn from(val: ImmTy<'tcx, Prov>) -> Self {
- OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
+ OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
}
}
impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
#[inline(always)]
fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
- OpTy {
- op: Operand::Indirect(*mplace.mplace()),
- layout: mplace.layout,
- align: Some(mplace.align),
- }
+ OpTy { op: Operand::Indirect(*mplace.mplace()), layout: mplace.layout }
}
}
@@ -380,14 +396,14 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
+ mode: OffsetMode,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
match self.as_mplace_or_imm() {
- Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, ecx)?.into()),
+ Left(mplace) => Ok(mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into()),
Right(imm) => {
- debug_assert!(layout.is_sized(), "unsized immediates are not a thing");
assert_matches!(meta, MemPlaceMeta::None); // no place to store metadata here
// Every part of an uninit is uninit.
Ok(imm.offset_(offset, layout, ecx).into())
@@ -622,7 +638,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
throw_inval!(ConstPropNonsense);
}
}
- Ok(OpTy { op, layout, align: Some(layout.align.abi) })
+ Ok(OpTy { op, layout })
}
/// Every place can be read from, so we can turn them into an operand.
@@ -637,16 +653,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Right((frame, local, offset)) => {
debug_assert!(place.layout.is_sized()); // only sized locals can ever be `Place::Local`.
let base = self.local_to_op(&self.stack()[frame], local, None)?;
- let mut field = match offset {
+ Ok(match offset {
Some(offset) => base.offset(offset, place.layout, self)?,
None => {
// In the common case this hasn't been projected.
debug_assert_eq!(place.layout, base.layout);
base
}
- };
- field.align = Some(place.align);
- Ok(field)
+ })
}
}
}
@@ -670,19 +684,24 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
trace!("eval_place_to_op: got {:?}", op);
// Sanity-check the type we ended up with.
- debug_assert!(
- mir_assign_valid_types(
+ if cfg!(debug_assertions) {
+ let normalized_place_ty = self.subst_from_current_frame_and_normalize_erasing_regions(
+ mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
+ )?;
+ if !mir_assign_valid_types(
*self.tcx,
self.param_env,
- self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
- mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
- )?)?,
+ self.layout_of(normalized_place_ty)?,
op.layout,
- ),
- "eval_place of a MIR place with type {:?} produced an interpreter operand with type {}",
- mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
- op.layout.ty,
- );
+ ) {
+ span_bug!(
+ self.cur_span(),
+ "eval_place of a MIR place with type {} produced an interpreter operand with type {}",
+ normalized_place_ty,
+ op.layout.ty,
+ )
+ }
+ }
Ok(op)
}
@@ -729,27 +748,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
})
};
let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
- let op = match val_val {
+ let imm = match val_val {
mir::ConstValue::Indirect { alloc_id, offset } => {
// We rely on mutability being set correctly in that allocation to prevent writes
// where none should happen.
let ptr = self.global_base_pointer(Pointer::new(alloc_id, offset))?;
- Operand::Indirect(MemPlace::from_ptr(ptr.into()))
+ return Ok(self.ptr_to_mplace(ptr.into(), layout).into());
}
- mir::ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
- mir::ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
+ mir::ConstValue::Scalar(x) => adjust_scalar(x)?.into(),
+ mir::ConstValue::ZeroSized => Immediate::Uninit,
mir::ConstValue::Slice { data, meta } => {
// We rely on mutability being set correctly in `data` to prevent writes
// where none should happen.
let ptr = Pointer::new(self.tcx.reserve_and_set_memory_alloc(data), Size::ZERO);
- Operand::Immediate(Immediate::new_slice(
- self.global_base_pointer(ptr)?.into(),
- meta,
- self,
- ))
+ Immediate::new_slice(self.global_base_pointer(ptr)?.into(), meta, self)
}
};
- Ok(OpTy { op, layout, align: Some(layout.align.abi) })
+ Ok(OpTy { op: Operand::Immediate(imm), layout })
}
}
@@ -762,6 +777,6 @@ mod size_asserts {
static_assert_size!(Immediate, 48);
static_assert_size!(ImmTy<'_>, 64);
static_assert_size!(Operand, 56);
- static_assert_size!(OpTy<'_>, 80);
+ static_assert_size!(OpTy<'_>, 72);
// tidy-alphabetical-end
}
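
Removing the `align` field shrinks `OpTy` from 80 to 72 bytes (see the size
assertion above). The field existed because a place projected into a
`repr(packed)` struct can be less aligned than its type normally requires;
plain Rust shows the situation the field used to track:

    // In a packed struct, `b` sits at offset 1 even though `u32` normally
    // requires 4-byte alignment; places for such fields are under-aligned.
    #[repr(C, packed)]
    struct P {
        a: u8,
        b: u32,
    }

    fn main() {
        assert_eq!(std::mem::align_of::<u32>(), 4);
        assert_eq!(std::mem::align_of::<P>(), 1); // every field is 1-aligned
    }
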
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index b084864f3..a3ba9530f 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -1,7 +1,7 @@
-use rustc_apfloat::Float;
+use rustc_apfloat::{Float, FloatConvert};
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
-use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, FloatTy, Ty};
use rustc_span::symbol::sym;
use rustc_target::abi::Abi;
@@ -104,7 +104,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(ImmTy::from_bool(res, *self.tcx), false)
}
- fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>(
+ fn binary_float_op<F: Float + FloatConvert<F> + Into<Scalar<M::Provenance>>>(
&self,
bin_op: mir::BinOp,
layout: TyAndLayout<'tcx>,
@@ -113,6 +113,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
+ // Performs appropriate non-deterministic adjustments of NaN results.
+ let adjust_nan = |f: F| -> F {
+ if f.is_nan() { M::generate_nan(self, &[l, r]) } else { f }
+ };
+
let val = match bin_op {
Eq => ImmTy::from_bool(l == r, *self.tcx),
Ne => ImmTy::from_bool(l != r, *self.tcx),
@@ -120,11 +125,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Le => ImmTy::from_bool(l <= r, *self.tcx),
Gt => ImmTy::from_bool(l > r, *self.tcx),
Ge => ImmTy::from_bool(l >= r, *self.tcx),
- Add => ImmTy::from_scalar((l + r).value.into(), layout),
- Sub => ImmTy::from_scalar((l - r).value.into(), layout),
- Mul => ImmTy::from_scalar((l * r).value.into(), layout),
- Div => ImmTy::from_scalar((l / r).value.into(), layout),
- Rem => ImmTy::from_scalar((l % r).value.into(), layout),
+ Add => ImmTy::from_scalar(adjust_nan((l + r).value).into(), layout),
+ Sub => ImmTy::from_scalar(adjust_nan((l - r).value).into(), layout),
+ Mul => ImmTy::from_scalar(adjust_nan((l * r).value).into(), layout),
+ Div => ImmTy::from_scalar(adjust_nan((l / r).value).into(), layout),
+ Rem => ImmTy::from_scalar(adjust_nan((l % r).value).into(), layout),
_ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
};
(val, false)
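
A sketch of the NaN-adjustment pattern above, with plain `f64` standing in for `rustc_apfloat` and a fixed quiet NaN in place of the `M::generate_nan` machine hook (which Miri uses to randomize NaN payloads):

    fn adjust_nan(f: f64) -> f64 {
        // Hardware may produce NaNs with varying sign/payload bits, so the
        // interpreter funnels every NaN result through the Machine.
        if f.is_nan() { f64::NAN } else { f }
    }

    fn main() {
        let (l, r) = (f64::INFINITY, f64::NEG_INFINITY);
        assert!(adjust_nan(l + r).is_nan()); // inf + -inf is NaN; payload is machine-chosen
        assert_eq!(adjust_nan(1.0 / 2.0), 0.5); // non-NaN results pass through unchanged
    }
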
@@ -332,7 +337,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let offset_count = right.to_scalar().to_target_isize(self)?;
let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;
- let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
+ // We cannot overflow i64 as a type's size must be <= isize::MAX.
+ let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
+ // The computed offset, in bytes, must not overflow an isize.
+ // `checked_mul` enforces a bound that is slightly too small, but no actual
+ // allocation can be big enough for the difference to be noticeable.
+ let offset_bytes =
+ offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
+
+ let offset_ptr = self.ptr_offset_inbounds(ptr, offset_bytes)?;
Ok((
ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout),
false,
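
A standalone sketch of the overflow-checked offset computation above, with plain integers standing in for the interpreter's `Scalar` and `Size` types:

    fn offset_bytes(offset_count: i64, pointee_size: u64) -> Result<i64, &'static str> {
        // A type's size never exceeds isize::MAX, so this conversion cannot fail in
        // rustc; here we surface it as an error for completeness.
        let pointee_size = i64::try_from(pointee_size).map_err(|_| "type too large")?;
        // Reject products outside i64, mirroring the PointerArithOverflow error above.
        offset_count.checked_mul(pointee_size).ok_or("pointer arithmetic overflow")
    }

    fn main() {
        assert_eq!(offset_bytes(3, 8), Ok(24));
        assert!(offset_bytes(i64::MAX, 2).is_err());
    }
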
@@ -456,6 +469,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok((ImmTy::from_bool(res, *self.tcx), false))
}
ty::Float(fty) => {
+ // No NaN adjustment here; `-` is a bitwise operation!
let res = match (un_op, fty) {
(Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
(Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
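
Why unary negation needs no adjustment: IEEE 754 negation only flips the sign bit, so even a NaN input produces a fully deterministic result. A small demonstration, assuming IEEE 754 `negate` semantics for `f32`:

    fn main() {
        let nan = f32::from_bits(0x7fc0_1234); // a quiet NaN with a payload
        let neg = -nan;
        // Only the sign bit (bit 31) changed; the payload is untouched.
        assert_eq!(neg.to_bits(), nan.to_bits() ^ 0x8000_0000);
    }
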
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 503004cbb..09ffdec7d 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -15,9 +15,9 @@ use rustc_middle::ty::Ty;
use rustc_target::abi::{Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT};
use super::{
- alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg, ImmTy,
- Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand, Pointer,
- PointerArithmetic, Projectable, Provenance, Readable, Scalar,
+ alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckAlignMsg, ImmTy,
+ Immediate, InterpCx, InterpResult, Machine, MemoryKind, Misalignment, OffsetMode, OpTy,
+ Operand, Pointer, PointerArithmetic, Projectable, Provenance, Readable, Scalar,
};
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -57,19 +57,11 @@ pub(super) struct MemPlace<Prov: Provenance = AllocId> {
/// Must not be present for sized types, but can be missing for unsized types
/// (e.g., `extern type`).
pub meta: MemPlaceMeta<Prov>,
+ /// Stores whether this place was created based on a sufficiently aligned pointer.
+ misaligned: Option<Misalignment>,
}
impl<Prov: Provenance> MemPlace<Prov> {
- #[inline(always)]
- pub fn from_ptr(ptr: Pointer<Option<Prov>>) -> Self {
- MemPlace { ptr, meta: MemPlaceMeta::None }
- }
-
- #[inline(always)]
- pub fn from_ptr_with_meta(ptr: Pointer<Option<Prov>>, meta: MemPlaceMeta<Prov>) -> Self {
- MemPlace { ptr, meta }
- }
-
/// Adjust the provenance of the main pointer (metadata is unaffected).
pub fn map_provenance(self, f: impl FnOnce(Option<Prov>) -> Option<Prov>) -> Self {
MemPlace { ptr: self.ptr.map_provenance(f), ..self }
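
The new `misaligned` field records the alignment verdict once, when the place is created from a pointer, replacing the old `align` field that every consumer had to thread through. A minimal sketch of the idea, with hypothetical stand-in types using plain `u64` addresses:

    #[derive(Copy, Clone, Debug)]
    struct Misalignment {
        required: u64,
        has: u64,
    }

    struct MemPlace {
        addr: u64,
        // `None` = known sufficiently aligned; `Some` records the failed requirement
        // so the error can be raised later, at the first actual access.
        misaligned: Option<Misalignment>,
    }

    fn place_from_ptr(addr: u64, required: u64) -> MemPlace {
        let has = 1u64 << addr.trailing_zeros().min(63);
        let misaligned = (has < required).then_some(Misalignment { required, has });
        MemPlace { addr, misaligned }
    }

    fn main() {
        assert!(place_from_ptr(8, 8).misaligned.is_none());
        assert!(place_from_ptr(4, 8).misaligned.is_some());
    }
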
@@ -78,27 +70,32 @@ impl<Prov: Provenance> MemPlace<Prov> {
/// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
#[inline]
pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
- match self.meta {
- MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
- MemPlaceMeta::Meta(meta) => {
- Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx), meta)
- }
- }
+ Immediate::new_pointer_with_meta(self.ptr, self.meta, cx)
}
#[inline]
// Not called `offset_with_meta` to avoid confusion with the trait method.
- fn offset_with_meta_<'tcx>(
+ fn offset_with_meta_<'mir, 'tcx, M: Machine<'mir, 'tcx, Provenance = Prov>>(
self,
offset: Size,
+ mode: OffsetMode,
meta: MemPlaceMeta<Prov>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
debug_assert!(
!meta.has_meta() || self.meta.has_meta(),
"cannot use `offset_with_meta` to add metadata to a place"
);
- Ok(MemPlace { ptr: self.ptr.offset(offset, cx)?, meta })
+ if offset > ecx.data_layout().max_size_of_val() {
+ throw_ub!(PointerArithOverflow);
+ }
+ let ptr = match mode {
+ OffsetMode::Inbounds => {
+ ecx.ptr_offset_inbounds(self.ptr, offset.bytes().try_into().unwrap())?
+ }
+ OffsetMode::Wrapping => self.ptr.wrapping_offset(offset, ecx),
+ };
+ Ok(MemPlace { ptr, meta, misaligned: self.misaligned })
}
}
@@ -107,11 +104,6 @@ impl<Prov: Provenance> MemPlace<Prov> {
pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
mplace: MemPlace<Prov>,
pub layout: TyAndLayout<'tcx>,
- /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
- /// it needs to have a different alignment than the field type would usually have.
- /// So we represent this here with a separate field that "overwrites" `layout.align`.
- /// This means `layout.align` should never be used for a `MPlaceTy`!
- pub align: Align,
}
impl<Prov: Provenance> std::fmt::Debug for MPlaceTy<'_, Prov> {
@@ -133,25 +125,7 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
assert!(layout.is_zst());
let align = layout.align.abi;
let ptr = Pointer::from_addr_invalid(align.bytes()); // no provenance, absolute address
- MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align }
- }
-
- #[inline]
- pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
- MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
- }
-
- #[inline]
- pub fn from_aligned_ptr_with_meta(
- ptr: Pointer<Option<Prov>>,
- layout: TyAndLayout<'tcx>,
- meta: MemPlaceMeta<Prov>,
- ) -> Self {
- MPlaceTy {
- mplace: MemPlace::from_ptr_with_meta(ptr, meta),
- layout,
- align: layout.align.abi,
- }
+ MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None, misaligned: None }, layout }
}
/// Adjust the provenance of the main pointer (metadata is unaffected).
@@ -189,15 +163,12 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
+ mode: OffsetMode,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
- Ok(MPlaceTy {
- mplace: self.mplace.offset_with_meta_(offset, meta, ecx)?,
- align: self.align.restrict_for_offset(offset),
- layout,
- })
+ Ok(MPlaceTy { mplace: self.mplace.offset_with_meta_(offset, mode, meta, ecx)?, layout })
}
fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
@@ -228,11 +199,6 @@ pub(super) enum Place<Prov: Provenance = AllocId> {
pub struct PlaceTy<'tcx, Prov: Provenance = AllocId> {
place: Place<Prov>, // Keep this private; it helps enforce invariants.
pub layout: TyAndLayout<'tcx>,
- /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
- /// it needs to have a different alignment than the field type would usually have.
- /// So we represent this here with a separate field that "overwrites" `layout.align`.
- /// This means `layout.align` should never be used for a `PlaceTy`!
- pub align: Align,
}
impl<Prov: Provenance> std::fmt::Debug for PlaceTy<'_, Prov> {
@@ -248,7 +214,7 @@ impl<Prov: Provenance> std::fmt::Debug for PlaceTy<'_, Prov> {
impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
#[inline(always)]
fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
- PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout, align: mplace.align }
+ PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout }
}
}
@@ -264,7 +230,7 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
&self,
) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>)> {
match self.place {
- Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout, align: self.align }),
+ Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout }),
Place::Local { frame, local, offset } => Right((frame, local, offset)),
}
}
@@ -301,27 +267,27 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
+ mode: OffsetMode,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
Ok(match self.as_mplace_or_local() {
- Left(mplace) => mplace.offset_with_meta(offset, meta, layout, ecx)?.into(),
+ Left(mplace) => mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into(),
Right((frame, local, old_offset)) => {
debug_assert!(layout.is_sized(), "unsized locals should live in memory");
assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway...
- let new_offset = ecx
- .data_layout()
- .offset(old_offset.unwrap_or(Size::ZERO).bytes(), offset.bytes())?;
- PlaceTy {
- place: Place::Local {
- frame,
- local,
- offset: Some(Size::from_bytes(new_offset)),
- },
- align: self.align.restrict_for_offset(offset),
- layout,
- }
+ // `Place::Local` are always in-bounds of their surrounding local, so we can just
+ // check directly if this remains in-bounds. This cannot actually be violated since
+ // projections are type-checked and bounds-checked.
+ assert!(offset + layout.size <= self.layout.size);
+
+ let new_offset = Size::from_bytes(
+ ecx.data_layout()
+ .offset(old_offset.unwrap_or(Size::ZERO).bytes(), offset.bytes())?,
+ );
+
+ PlaceTy { place: Place::Local { frame, local, offset: Some(new_offset) }, layout }
}
})
}
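
The assertion added above relies on projections being type- and bounds-checked: a field projection can never reach past the place it projects from. A toy version with plain numbers standing in for `Size`:

    fn project_local_offset(old_offset: u64, offset: u64, field_size: u64, base_size: u64) -> u64 {
        // Type-checked, bounds-checked MIR projections can never violate this.
        assert!(offset + field_size <= base_size, "projection escapes the local");
        old_offset + offset
    }

    fn main() {
        // Project to a 4-byte field at offset 8 of a 16-byte local.
        assert_eq!(project_local_offset(0, 8, 4, 16), 8);
    }
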
@@ -339,9 +305,7 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
#[inline(always)]
pub fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
match self.op() {
- Operand::Indirect(mplace) => {
- Left(MPlaceTy { mplace: *mplace, layout: self.layout, align: self.align.unwrap() })
- }
+ Operand::Indirect(mplace) => Left(MPlaceTy { mplace: *mplace, layout: self.layout }),
Operand::Immediate(imm) => Right(ImmTy::from_immediate(*imm, self.layout)),
}
}
@@ -362,7 +326,7 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
fn as_mplace_or_local(
&self,
- ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>;
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, TyAndLayout<'tcx>)>;
fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
@@ -374,10 +338,9 @@ impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
#[inline(always)]
fn as_mplace_or_local(
&self,
- ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>
- {
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, TyAndLayout<'tcx>)> {
self.as_mplace_or_local()
- .map_right(|(frame, local, offset)| (frame, local, offset, self.align, self.layout))
+ .map_right(|(frame, local, offset)| (frame, local, offset, self.layout))
}
#[inline(always)]
@@ -393,8 +356,7 @@ impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
#[inline(always)]
fn as_mplace_or_local(
&self,
- ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>
- {
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, TyAndLayout<'tcx>)> {
Left(self.clone())
}
@@ -413,6 +375,25 @@ where
Prov: Provenance,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
+ pub fn ptr_with_meta_to_mplace(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ meta: MemPlaceMeta<M::Provenance>,
+ layout: TyAndLayout<'tcx>,
+ ) -> MPlaceTy<'tcx, M::Provenance> {
+ let misaligned = self.is_ptr_misaligned(ptr, layout.align.abi);
+ MPlaceTy { mplace: MemPlace { ptr, meta, misaligned }, layout }
+ }
+
+ pub fn ptr_to_mplace(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ layout: TyAndLayout<'tcx>,
+ ) -> MPlaceTy<'tcx, M::Provenance> {
+ assert!(layout.is_sized());
+ self.ptr_with_meta_to_mplace(ptr, MemPlaceMeta::None, layout)
+ }
+
/// Take a value, which represents a (thin or wide) reference, and make it a place.
/// Alignment is just based on the type. This is the inverse of `mplace_to_ref()`.
///
@@ -434,7 +415,8 @@ where
// `ref_to_mplace` is called on raw pointers even if they don't actually get dereferenced;
// we hence can't call `size_and_align_of` since that asserts more validity than we want.
- Ok(MPlaceTy::from_aligned_ptr_with_meta(ptr.to_pointer(self)?, layout, meta))
+ let ptr = ptr.to_pointer(self)?;
+ Ok(self.ptr_with_meta_to_mplace(ptr, meta, layout))
}
/// Turn a mplace into a (thin or wide) mutable raw pointer, pointing to the same space.
@@ -464,7 +446,6 @@ where
}
let mplace = self.ref_to_mplace(&val)?;
- self.check_mplace(&mplace)?;
Ok(mplace)
}
@@ -477,8 +458,11 @@ where
let (size, _align) = self
.size_and_align_of_mplace(&mplace)?
.unwrap_or((mplace.layout.size, mplace.layout.align.abi));
- // Due to packed places, only `mplace.align` matters.
- self.get_ptr_alloc(mplace.ptr(), size, mplace.align)
+ // We check alignment separately, and *after* checking everything else.
+ // If an access is both OOB and misaligned, we want to see the bounds error.
+ let a = self.get_ptr_alloc(mplace.ptr(), size)?;
+ self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn)?;
+ Ok(a)
}
#[inline]
@@ -490,20 +474,13 @@ where
let (size, _align) = self
.size_and_align_of_mplace(&mplace)?
.unwrap_or((mplace.layout.size, mplace.layout.align.abi));
- // Due to packed places, only `mplace.align` matters.
- self.get_ptr_alloc_mut(mplace.ptr(), size, mplace.align)
- }
-
- /// Check if this mplace is dereferenceable and sufficiently aligned.
- pub fn check_mplace(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
- let (size, _align) = self
- .size_and_align_of_mplace(&mplace)?
- .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
- // Due to packed places, only `mplace.align` matters.
- let align =
- if M::enforce_alignment(self).should_check() { mplace.align } else { Align::ONE };
- self.check_ptr_access_align(mplace.ptr(), size, align, CheckInAllocMsg::DerefTest)?;
- Ok(())
+ // We check alignment separately, and raise that error *after* checking everything else.
+ // If an access is both OOB and misaligned, we want to see the bounds error.
+ // However, we have to call `check_misalign` first to make the borrow checker happy.
+ let misalign_err = self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn);
+ let a = self.get_ptr_alloc_mut(mplace.ptr(), size)?;
+ misalign_err?;
+ Ok(a)
}
/// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
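
The borrow-checker dance in `get_place_alloc_mut` above, sketched with a toy evaluator: the alignment verdict is computed first (while `self` is only borrowed immutably) but reported only after the bounds check, so an access that is both out-of-bounds and misaligned surfaces the bounds error:

    struct Ecx {
        mem: Vec<u8>,
    }

    impl Ecx {
        fn check_misalign(&self, addr: usize, align: usize) -> Result<(), String> {
            if addr % align != 0 { Err(format!("misaligned access at {addr}")) } else { Ok(()) }
        }

        fn get_mut(&mut self, addr: usize, size: usize, align: usize) -> Result<&mut [u8], String> {
            // Compute the alignment verdict first (immutable borrow ends here)...
            let misalign_err = self.check_misalign(addr, align);
            // ...then do the bounds-checked access; its error takes priority...
            let alloc = self.mem.get_mut(addr..addr + size).ok_or("out-of-bounds".to_string())?;
            // ...and only afterwards report any misalignment.
            misalign_err?;
            Ok(alloc)
        }
    }

    fn main() {
        let mut ecx = Ecx { mem: vec![0; 16] };
        // Both OOB and misaligned: the bounds error is the one reported.
        assert_eq!(ecx.get_mut(15, 4, 4).unwrap_err(), "out-of-bounds");
        assert!(ecx.get_mut(2, 4, 4).unwrap_err().starts_with("misaligned"));
    }
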
@@ -518,8 +495,8 @@ where
let (len, e_ty) = mplace.layout.ty.simd_size_and_type(*self.tcx);
let array = Ty::new_array(self.tcx.tcx, e_ty, len);
let layout = self.layout_of(array)?;
- assert_eq!(layout.size, mplace.layout.size);
- Ok((MPlaceTy { layout, ..*mplace }, len))
+ let mplace = mplace.transmute(layout, self)?;
+ Ok((mplace, len))
}
/// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
@@ -555,7 +532,7 @@ where
Operand::Indirect(mplace) => Place::Ptr(*mplace),
}
};
- Ok(PlaceTy { place, layout, align: layout.align.abi })
+ Ok(PlaceTy { place, layout })
}
/// Computes a place. You should only use this if you intend to write into this
@@ -573,19 +550,24 @@ where
trace!("{:?}", self.dump_place(&place));
// Sanity-check the type we ended up with.
- debug_assert!(
- mir_assign_valid_types(
+ if cfg!(debug_assertions) {
+ let normalized_place_ty = self.subst_from_current_frame_and_normalize_erasing_regions(
+ mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
+ )?;
+ if !mir_assign_valid_types(
*self.tcx,
self.param_env,
- self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
- mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
- )?)?,
+ self.layout_of(normalized_place_ty)?,
place.layout,
- ),
- "eval_place of a MIR place with type {:?} produced an interpreter place with type {}",
- mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
- place.layout.ty,
- );
+ ) {
+ span_bug!(
+ self.cur_span(),
+ "eval_place of a MIR place with type {} produced an interpreter place with type {}",
+ normalized_place_ty,
+ place.layout.ty,
+ )
+ }
+ }
Ok(place)
}
@@ -640,7 +622,7 @@ where
// See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
// but not factored as a separate function.
let mplace = match dest.as_mplace_or_local() {
- Right((frame, local, offset, align, layout)) => {
+ Right((frame, local, offset, layout)) => {
if offset.is_some() {
// This has been projected to a part of this local. We could have complicated
// logic to still keep this local as an `Operand`... but it's much easier to
@@ -681,7 +663,7 @@ where
}
Operand::Indirect(mplace) => {
// The local is in memory, go on below.
- MPlaceTy { mplace: *mplace, align, layout }
+ MPlaceTy { mplace: *mplace, layout }
}
}
}
@@ -690,7 +672,7 @@ where
};
// This is already in memory, write there.
- self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.align, mplace.mplace)
+ self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.mplace)
}
/// Write an immediate to memory.
@@ -700,7 +682,6 @@ where
&mut self,
value: Immediate<M::Provenance>,
layout: TyAndLayout<'tcx>,
- align: Align,
dest: MemPlace<M::Provenance>,
) -> InterpResult<'tcx> {
// Note that it is really important that the type here is the right one, and matches the
@@ -709,9 +690,7 @@ where
// wrong type.
let tcx = *self.tcx;
- let Some(mut alloc) =
- self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout, align })?
- else {
+ let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout })? else {
// zero-sized access
return Ok(());
};
@@ -729,9 +708,6 @@ where
alloc.write_scalar(alloc_range(Size::ZERO, size), scalar)
}
Immediate::ScalarPair(a_val, b_val) => {
- // We checked `ptr_align` above, so all fields will have the alignment they need.
- // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
- // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
let Abi::ScalarPair(a, b) = layout.abi else {
span_bug!(
self.cur_span(),
@@ -760,7 +736,7 @@ where
) -> InterpResult<'tcx> {
let mplace = match dest.as_mplace_or_local() {
Left(mplace) => mplace,
- Right((frame, local, offset, align, layout)) => {
+ Right((frame, local, offset, layout)) => {
if offset.is_some() {
// This has been projected to a part of this local. We could have complicated
// logic to still keep this local as an `Operand`... but it's much easier to
@@ -776,7 +752,7 @@ where
}
Operand::Indirect(mplace) => {
// The local is in memory, go on below.
- MPlaceTy { mplace: *mplace, layout, align }
+ MPlaceTy { mplace: *mplace, layout }
}
}
}
@@ -869,7 +845,6 @@ where
self.write_immediate_to_mplace_no_validate(
*src_val,
src.layout(),
- dest_mem.align,
dest_mem.mplace,
)
};
@@ -896,14 +871,12 @@ where
// type does not have Scalar/ScalarPair layout.
// (Or as the `Assign` docs put it, assignments "not producing primitives" must be
// non-overlapping.)
- self.mem_copy(
- src.ptr(),
- src.align,
- dest.ptr(),
- dest.align,
- dest_size,
- /*nonoverlapping*/ true,
- )
+ // We check alignment separately, and *after* checking everything else.
+ // If an access is both OOB and misaligned, we want to see the bounds error.
+ self.mem_copy(src.ptr(), dest.ptr(), dest_size, /*nonoverlapping*/ true)?;
+ self.check_misalign(src.mplace.misaligned, CheckAlignMsg::BasedOn)?;
+ self.check_misalign(dest.mplace.misaligned, CheckAlignMsg::BasedOn)?;
+ Ok(())
}
/// Ensures that a place is in memory, and returns where it is.
@@ -937,7 +910,6 @@ where
self.write_immediate_to_mplace_no_validate(
local_val,
local_layout,
- local_layout.align.abi,
mplace.mplace,
)?;
}
@@ -952,7 +924,13 @@ where
&mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
};
if let Some(offset) = offset {
- whole_local.offset_with_meta_(offset, MemPlaceMeta::None, self)?
+ // This offset is always inbounds, no need to check it again.
+ whole_local.offset_with_meta_(
+ offset,
+ OffsetMode::Wrapping,
+ MemPlaceMeta::None,
+ self,
+ )?
} else {
// Preserve wide place metadata, do not call `offset`.
whole_local
@@ -961,7 +939,7 @@ where
Place::Ptr(mplace) => mplace,
};
// Return with the original layout, so that the caller can go on
- Ok(MPlaceTy { mplace, layout: place.layout, align: place.align })
+ Ok(MPlaceTy { mplace, layout: place.layout })
}
pub fn allocate_dyn(
@@ -974,7 +952,7 @@ where
span_bug!(self.cur_span(), "cannot allocate space for `extern` type, size is not known")
};
let ptr = self.allocate_ptr(size, align, kind)?;
- Ok(MPlaceTy::from_aligned_ptr_with_meta(ptr.into(), layout, meta))
+ Ok(self.ptr_with_meta_to_mplace(ptr.into(), meta, layout))
}
pub fn allocate(
@@ -986,7 +964,7 @@ where
self.allocate_dyn(layout, kind, MemPlaceMeta::None)
}
- /// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation.
+ /// Returns a wide MPlace of type `str` to a new 1-aligned allocation.
pub fn allocate_str(
&mut self,
str: &str,
@@ -995,15 +973,8 @@ where
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl)?;
let meta = Scalar::from_target_usize(u64::try_from(str.len()).unwrap(), self);
- let mplace = MemPlace { ptr: ptr.into(), meta: MemPlaceMeta::Meta(meta) };
-
- let ty = Ty::new_ref(
- self.tcx.tcx,
- self.tcx.lifetimes.re_static,
- ty::TypeAndMut { ty: self.tcx.types.str_, mutbl },
- );
- let layout = self.layout_of(ty).unwrap();
- Ok(MPlaceTy { mplace, layout, align: layout.align.abi })
+ let layout = self.layout_of(self.tcx.types.str_).unwrap();
+ Ok(self.ptr_with_meta_to_mplace(ptr.into(), MemPlaceMeta::Meta(meta), layout))
}
/// Writes the aggregate to the destination.
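
With the change above, `allocate_str` builds the wide place directly: a 1-aligned data pointer plus an explicit length as `MemPlaceMeta`, paired with the layout of `str` itself rather than `&'static str`. A sketch of what such a wide place carries, using raw pointers as hypothetical stand-ins for the interpreter's `Pointer` and `Scalar`:

    struct WideStrPlace {
        ptr: *const u8, // the 1-aligned data pointer
        len: usize,     // the `MemPlaceMeta::Meta` length, in bytes
    }

    fn allocate_str(s: &str) -> (Vec<u8>, WideStrPlace) {
        let alloc = s.as_bytes().to_vec(); // the backing allocation
        let place = WideStrPlace { ptr: alloc.as_ptr(), len: s.len() };
        (alloc, place) // keep the allocation alive alongside the place
    }

    fn main() {
        let (_alloc, place) = allocate_str("hello");
        assert_eq!(place.len, 5);
    }
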
@@ -1042,7 +1013,7 @@ where
let _ = self.tcx.global_alloc(raw.alloc_id);
let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
let layout = self.layout_of(raw.ty)?;
- Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
+ Ok(self.ptr_to_mplace(ptr.into(), layout))
}
/// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
@@ -1058,12 +1029,10 @@ where
let vtable = mplace.meta().unwrap_meta().to_pointer(self)?;
let (ty, _) = self.get_ptr_vtable(vtable)?;
let layout = self.layout_of(ty)?;
-
- let mplace = MPlaceTy {
- mplace: MemPlace { meta: MemPlaceMeta::None, ..mplace.mplace },
- layout,
- align: layout.align.abi,
- };
+ // This is a kind of transmute, from a place with unsized type and metadata to
+ // a place with sized type and no metadata.
+ let mplace =
+ MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..mplace.mplace }, layout };
Ok((mplace, vtable))
}
@@ -1095,10 +1064,10 @@ mod size_asserts {
use super::*;
use rustc_data_structures::static_assert_size;
// tidy-alphabetical-start
- static_assert_size!(MemPlace, 40);
+ static_assert_size!(MemPlace, 48);
static_assert_size!(MemPlaceMeta, 24);
static_assert_size!(MPlaceTy<'_>, 64);
- static_assert_size!(Place, 40);
+ static_assert_size!(Place, 48);
static_assert_size!(PlaceTy<'_>, 64);
// tidy-alphabetical-end
}
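
The asserts grow by one word here because `MemPlace` (and hence `Place`) now carries the `misaligned` verdict. For reference, rustc's `static_assert_size!` is essentially a compile-time array-length equality check; a minimal version of the same pattern (the real macro lives in rustc_data_structures, and these asserts are typically gated to 64-bit targets):

    macro_rules! static_assert_size {
        ($ty:ty, $size:expr) => {
            // Array lengths must match at compile time, so any size drift fails the build.
            const _: [(); $size] = [(); ::std::mem::size_of::<$ty>()];
        };
    }

    static_assert_size!(u64, 8);
    // static_assert_size!(u64, 16); // would fail to compile

    fn main() {}
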
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 70df3d8fd..6694c43c9 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -19,6 +19,15 @@ use rustc_target::abi::{self, VariantIdx};
use super::{InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Provenance, Scalar};
+/// Describes the constraints placed on offset-projections.
+#[derive(Copy, Clone, Debug)]
+pub enum OffsetMode {
+ /// The offset has to be inbounds, like `ptr::offset`.
+ Inbounds,
+ /// No constraints, just wrap around the edge of the address space.
+ Wrapping,
+}
+
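
The two modes map onto the familiar raw-pointer operations: `Inbounds` behaves like `ptr::add`/`ptr::offset` (UB to leave the allocation), `Wrapping` like `ptr::wrapping_add` (always defined to compute, checked only on use). A small demonstration:

    fn main() {
        let arr = [0u8; 4];
        let p = arr.as_ptr();
        // Inbounds: must stay within (or one past the end of) the allocation.
        let end = unsafe { p.add(4) }; // one-past-the-end is still inbounds
        assert_eq!(end as usize, p as usize + 4);
        // Wrapping: always fine to compute; only dereferencing it would be UB.
        let far = p.wrapping_add(usize::MAX);
        let _ = far;
    }
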
/// A thing that we can project into, and that has a layout.
pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
/// Get the layout.
@@ -53,12 +62,12 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
+ mode: OffsetMode,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self>;
- #[inline]
fn offset<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
@@ -66,10 +75,9 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
assert!(layout.is_sized());
- self.offset_with_meta(offset, MemPlaceMeta::None, layout, ecx)
+ self.offset_with_meta(offset, OffsetMode::Inbounds, MemPlaceMeta::None, layout, ecx)
}
- #[inline]
fn transmute<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
layout: TyAndLayout<'tcx>,
@@ -77,7 +85,7 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
) -> InterpResult<'tcx, Self> {
assert!(self.layout().is_sized() && layout.is_sized());
assert_eq!(self.layout().size, layout.size);
- self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, ecx)
+ self.offset_with_meta(Size::ZERO, OffsetMode::Wrapping, MemPlaceMeta::None, layout, ecx)
}
/// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for
@@ -104,7 +112,17 @@ impl<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>> ArrayIterator<'tcx,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Option<(u64, P)>> {
let Some(idx) = self.range.next() else { return Ok(None) };
- Ok(Some((idx, self.base.offset(self.stride * idx, self.field_layout, ecx)?)))
+ // We use `Wrapping` here since the offset has already been checked when the iterator was created.
+ Ok(Some((
+ idx,
+ self.base.offset_with_meta(
+ self.stride * idx,
+ OffsetMode::Wrapping,
+ MemPlaceMeta::None,
+ self.field_layout,
+ ecx,
+ )?,
+ )))
}
}
@@ -159,7 +177,7 @@ where
(MemPlaceMeta::None, offset)
};
- base.offset_with_meta(offset, meta, field_layout, self)
+ base.offset_with_meta(offset, OffsetMode::Inbounds, meta, field_layout, self)
}
/// Downcasting to an enum variant.
@@ -248,6 +266,10 @@ where
};
let len = base.len(self)?;
let field_layout = base.layout().field(self, 0);
+ // Ensure that all the offsets are in-bounds once, up-front.
+ debug!("project_array_fields: {base:?} {len}");
+ base.offset(len * stride, self.layout_of(self.tcx.types.unit).unwrap(), self)?;
+ // Create the iterator.
Ok(ArrayIterator { base, range: 0..len, stride, field_layout, _phantom: PhantomData })
}
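
The up-front `base.offset(len * stride, ...)` check above is what lets the iterator use `OffsetMode::Wrapping` per element. A toy version of the same hoisting, with plain offsets standing in for places:

    fn project_array_fields(
        base: u64,
        len: u64,
        stride: u64,
        limit: u64, // plays the role of the allocation's extent
    ) -> Result<impl Iterator<Item = u64>, &'static str> {
        // One up-front in-bounds check for the whole array...
        let total = len.checked_mul(stride).ok_or("overflow")?;
        if base + total > limit {
            return Err("out of bounds");
        }
        // ...so the per-element offsets below need no further checking.
        Ok((0..len).map(move |i| base + i * stride))
    }

    fn main() {
        let offsets: Vec<u64> = project_array_fields(0, 4, 8, 64).unwrap().collect();
        assert_eq!(offsets, [0, 8, 16, 24]);
    }
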
@@ -305,7 +327,7 @@ where
};
let layout = self.layout_of(ty)?;
- base.offset_with_meta(from_offset, meta, layout, self)
+ base.offset_with_meta(from_offset, OffsetMode::Inbounds, meta, layout, self)
}
/// Applying a general projection
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 284e13407..b6993d939 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -206,15 +206,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let elem_size = first.layout.size;
let first_ptr = first.ptr();
let rest_ptr = first_ptr.offset(elem_size, self)?;
- // For the alignment of `rest_ptr`, we crucially do *not* use `first.align` as
- // that place might be more aligned than its type mandates (a `u8` array could
- // be 4-aligned if it sits at the right spot in a struct). We have to also factor
- // in element size.
+ // No alignment requirement since `copy_op` above already checked it.
self.mem_copy_repeatedly(
first_ptr,
- dest.align,
rest_ptr,
- dest.align.restrict_for_offset(elem_size),
elem_size,
length - 1,
/*nonoverlapping:*/ true,
@@ -268,7 +263,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
NullaryOp(ref null_op, ty) => {
let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty)?;
let layout = self.layout_of(ty)?;
- if let mir::NullOp::SizeOf | mir::NullOp::AlignOf = null_op && layout.is_unsized() {
+ if let mir::NullOp::SizeOf | mir::NullOp::AlignOf = null_op
+ && layout.is_unsized()
+ {
span_bug!(
self.frame().current_span(),
"{null_op:?} MIR operator called for unsized type {ty}",
@@ -278,7 +275,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
mir::NullOp::SizeOf => layout.size.bytes(),
mir::NullOp::AlignOf => layout.align.abi.bytes(),
mir::NullOp::OffsetOf(fields) => {
- layout.offset_of_subfield(self, fields.iter().map(|f| f.index())).bytes()
+ layout.offset_of_subfield(self, fields.iter()).bytes()
}
};
self.write_scalar(Scalar::from_target_usize(val, self), &dest)?;
@@ -300,7 +297,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Discriminant(place) => {
let op = self.eval_place_to_op(place, None)?;
let variant = self.read_discriminant(&op)?;
- let discr = self.discriminant_for_variant(op.layout, variant)?;
+ let discr = self.discriminant_for_variant(op.layout.ty, variant)?;
self.write_immediate(*discr, &dest)?;
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 578dd6622..b54c66814 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -1,6 +1,5 @@
use std::borrow::Cow;
-use either::Either;
use rustc_ast::ast::InlineAsmOptions;
use rustc_middle::{
mir,
@@ -219,7 +218,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Unreachable => throw_ub!(Unreachable),
// These should never occur for MIR we actually run.
- FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | GeneratorDrop => span_bug!(
+ FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | CoroutineDrop => span_bug!(
terminator.source_info.span,
"{:#?} should have been eliminated by MIR pass",
terminator.kind
@@ -729,13 +728,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
callee_ty: callee_fn_abi.ret.layout.ty
});
}
- // Ensure the return place is aligned and dereferenceable, and protect it for
- // in-place return value passing.
- if let Either::Left(mplace) = destination.as_mplace_or_local() {
- self.check_mplace(&mplace)?;
- } else {
- // Nothing to do for locals, they are always properly allocated and aligned.
- }
+ // Protect return place for in-place return value passing.
M::protect_in_place_function_argument(self, destination)?;
// Don't forget to mark "initially live" locals as live.
@@ -890,11 +883,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
fn check_fn_target_features(&self, instance: ty::Instance<'tcx>) -> InterpResult<'tcx, ()> {
+ // Calling functions with `#[target_feature]` is not unsafe on WASM, see #84988
let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
- if attrs
- .target_features
- .iter()
- .any(|feature| !self.tcx.sess.target_features.contains(feature))
+ if !self.tcx.sess.target.is_like_wasm
+ && attrs
+ .target_features
+ .iter()
+ .any(|feature| !self.tcx.sess.target_features.contains(feature))
{
throw_ub_custom!(
fluent::const_eval_unavailable_target_features_for_fn,
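
The shape of the gate above, sketched with plain slices standing in for the codegen-attrs query and the session's target-feature set:

    fn check_fn_target_features(
        is_like_wasm: bool,
        fn_features: &[&str],
        enabled: &[&str],
    ) -> Result<(), &'static str> {
        if !is_like_wasm && fn_features.iter().any(|f| !enabled.contains(f)) {
            return Err("calling a function that requires unavailable target features");
        }
        Ok(())
    }

    fn main() {
        assert!(check_fn_target_features(false, &["avx2"], &["sse2"]).is_err());
        // On WASM-like targets the check is skipped (rust-lang/rust#84988).
        assert!(check_fn_target_features(true, &["simd128"], &[]).is_ok());
    }
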
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index eb639ded7..416443f5f 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -34,7 +34,7 @@ where
match *ty.kind() {
ty::Param(_) => ControlFlow::Break(FoundParam),
ty::Closure(def_id, args)
- | ty::Generator(def_id, args, ..)
+ | ty::Coroutine(def_id, args, ..)
| ty::FnDef(def_id, args) => {
let instance = ty::InstanceDef::Item(def_id);
let unused_params = self.tcx.unused_generic_params(instance);
@@ -42,10 +42,10 @@ where
let index = index
.try_into()
.expect("more generic parameters than can fit into a `u32`");
- // Only recurse when generic parameters in fns, closures and generators
+ // Only recurse when generic parameters in fns, closures and coroutines
// are used and have to be instantiated.
//
- // Just in case there are closures or generators within this subst,
+ // Just in case there are closures or coroutines within this subst,
// recurse.
if unused_params.is_used(index) && subst.has_param() {
return subst.visit_with(self);
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 3e023a896..d21fef58f 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -13,7 +13,7 @@ use rustc_ast::Mutability;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
use rustc_middle::mir::interpret::{
- ExpectedKind, InterpError, InvalidMetaKind, PointerKind, ValidationErrorInfo,
+ ExpectedKind, InterpError, InvalidMetaKind, Misalignment, PointerKind, ValidationErrorInfo,
ValidationErrorKind, ValidationErrorKind::*,
};
use rustc_middle::ty;
@@ -112,13 +112,13 @@ macro_rules! try_validation {
pub enum PathElem {
Field(Symbol),
Variant(Symbol),
- GeneratorState(VariantIdx),
+ CoroutineState(VariantIdx),
CapturedVar(Symbol),
ArrayElem(usize),
TupleElem(usize),
Deref,
EnumTag,
- GeneratorTag,
+ CoroutineTag,
DynDowncast,
}
@@ -171,8 +171,8 @@ fn write_path(out: &mut String, path: &[PathElem]) {
Field(name) => write!(out, ".{name}"),
EnumTag => write!(out, ".<enum-tag>"),
Variant(name) => write!(out, ".<enum-variant({name})>"),
- GeneratorTag => write!(out, ".<generator-tag>"),
- GeneratorState(idx) => write!(out, ".<generator-state({})>", idx.index()),
+ CoroutineTag => write!(out, ".<coroutine-tag>"),
+ CoroutineState(idx) => write!(out, ".<coroutine-state({})>", idx.index()),
CapturedVar(name) => write!(out, ".<captured-var({name})>"),
TupleElem(idx) => write!(out, ".{idx}"),
ArrayElem(idx) => write!(out, "[{idx}]"),
@@ -206,7 +206,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
if tag_field == field {
return match layout.ty.kind() {
ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag,
- ty::Generator(..) => PathElem::GeneratorTag,
+ ty::Coroutine(..) => PathElem::CoroutineTag,
_ => bug!("non-variant type {:?}", layout.ty),
};
}
@@ -216,8 +216,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// Now we know we are projecting to a field, so figure out which one.
match layout.ty.kind() {
- // generators and closures.
- ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
+ // coroutines and closures.
+ ty::Closure(def_id, _) | ty::Coroutine(def_id, _, _) => {
let mut name = None;
// FIXME this should be more descriptive i.e. CapturePlace instead of CapturedVar
// https://github.com/rust-lang/project-rfc-2229/issues/46
@@ -225,7 +225,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let captures = self.ecx.tcx.closure_captures(local_def_id);
if let Some(captured_place) = captures.get(field) {
// Sometimes the index is beyond the number of upvars (seen
- // for a generator).
+ // for a coroutine).
let var_hir_id = captured_place.get_root_variable();
let node = self.ecx.tcx.hir().get(var_hir_id);
if let hir::Node::Pat(pat) = node {
@@ -355,7 +355,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
value: &OpTy<'tcx, M::Provenance>,
ptr_kind: PointerKind,
) -> InterpResult<'tcx> {
- // Not using `deref_pointer` since we do the dereferenceable check ourselves below.
+ // Not using `deref_pointer` since we want to use our `read_immediate` wrapper.
let place = self.ecx.ref_to_mplace(&self.read_immediate(value, ptr_kind.into())?)?;
// Handle wide pointers.
// Check metadata early, for better diagnostics
@@ -378,18 +378,12 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
.unwrap_or_else(|| (place.layout.size, place.layout.align.abi));
// Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
try_validation!(
- self.ecx.check_ptr_access_align(
+ self.ecx.check_ptr_access(
place.ptr(),
size,
- align,
CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
),
self.path,
- Ub(AlignmentCheckFailed { required, has }) => UnalignedPtr {
- ptr_kind,
- required_bytes: required.bytes(),
- found_bytes: has.bytes()
- },
Ub(DanglingIntPointer(0, _)) => NullPtr { ptr_kind },
Ub(DanglingIntPointer(i, _)) => DanglingPtrNoProvenance {
ptr_kind,
@@ -405,6 +399,18 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
ptr_kind,
},
);
+ try_validation!(
+ self.ecx.check_ptr_align(
+ place.ptr(),
+ align,
+ ),
+ self.path,
+ Ub(AlignmentCheckFailed(Misalignment { required, has }, _msg)) => UnalignedPtr {
+ ptr_kind,
+ required_bytes: required.bytes(),
+ found_bytes: has.bytes()
+ },
+ );
// Do not allow pointers to uninhabited types.
if place.layout.abi.is_uninhabited() {
let ty = place.layout.ty;
@@ -574,7 +580,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
| ty::Str
| ty::Dynamic(..)
| ty::Closure(..)
- | ty::Generator(..) => Ok(false),
+ | ty::Coroutine(..) => Ok(false),
// Some types only occur during typechecking, they have no layout.
// We should not see them here and we could not check them anyway.
ty::Error(_)
@@ -583,7 +589,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
| ty::Bound(..)
| ty::Param(..)
| ty::Alias(..)
- | ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty),
+ | ty::CoroutineWitness(..) => bug!("Encountered invalid type {:?}", ty),
}
}
@@ -645,7 +651,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
#[inline(always)]
fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
- &self.ecx
+ self.ecx
}
fn read_discriminant(
@@ -686,8 +692,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
) -> InterpResult<'tcx> {
let name = match old_op.layout.ty.kind() {
ty::Adt(adt, _) => PathElem::Variant(adt.variant(variant_id).name),
- // Generators also have variants
- ty::Generator(..) => PathElem::GeneratorState(variant_id),
+ // Coroutines also have variants
+ ty::Coroutine(..) => PathElem::CoroutineState(variant_id),
_ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty),
};
self.with_elem(name, move |this| this.visit_value(new_op))
@@ -781,14 +787,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// Optimization: we just check the entire range at once.
// NOTE: Keep this in sync with the handling of integer and float
// types above, in `visit_primitive`.
- // In run-time mode, we accept pointers in here. This is actually more
- // permissive than a per-element check would be, e.g., we accept
- // a &[u8] that contains a pointer even though bytewise checking would
- // reject it. However, that's good: We don't inherently want
- // to reject those pointers, we just do not have the machinery to
- // talk about parts of a pointer.
- // We also accept uninit, for consistency with the slow path.
- let alloc = self.ecx.get_ptr_alloc(mplace.ptr(), size, mplace.align)?.expect("we already excluded size 0");
+ // No need for an alignment check here; this is not an actual memory access.
+ let alloc = self.ecx.get_ptr_alloc(mplace.ptr(), size)?.expect("we already excluded size 0");
match alloc.get_bytes_strip_provenance() {
// In the happy case, we needn't check anything else.
@@ -929,7 +929,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// - no pointers to statics.
/// - no `UnsafeCell` or non-ZST `&mut`.
#[inline(always)]
- pub fn const_validate_operand(
+ pub(crate) fn const_validate_operand(
&self,
op: &OpTy<'tcx, M::Provenance>,
path: Vec<PathElem>,