summaryrefslogtreecommitdiffstats
path: root/compiler/rustc_const_eval/src/interpret
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 12:18:25 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 12:18:25 +0000
commit5363f350887b1e5b5dd21a86f88c8af9d7fea6da (patch)
tree35ca005eb6e0e9a1ba3bb5dbc033209ad445dc17 /compiler/rustc_const_eval/src/interpret
parentAdding debian version 1.66.0+dfsg1-1. (diff)
downloadrustc-5363f350887b1e5b5dd21a86f88c8af9d7fea6da.tar.xz
rustc-5363f350887b1e5b5dd21a86f88c8af9d7fea6da.zip
Merging upstream version 1.67.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_const_eval/src/interpret')
-rw-r--r--compiler/rustc_const_eval/src/interpret/eval_context.rs108
-rw-r--r--compiler/rustc_const_eval/src/interpret/intern.rs4
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics.rs26
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs4
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs198
-rw-r--r--compiler/rustc_const_eval/src/interpret/machine.rs20
-rw-r--r--compiler/rustc_const_eval/src/interpret/memory.rs48
-rw-r--r--compiler/rustc_const_eval/src/interpret/operand.rs109
-rw-r--r--compiler/rustc_const_eval/src/interpret/place.rs56
-rw-r--r--compiler/rustc_const_eval/src/interpret/projection.rs12
-rw-r--r--compiler/rustc_const_eval/src/interpret/step.rs54
-rw-r--r--compiler/rustc_const_eval/src/interpret/traits.rs2
-rw-r--r--compiler/rustc_const_eval/src/interpret/validity.rs24
-rw-r--r--compiler/rustc_const_eval/src/interpret/visitor.rs2
14 files changed, 270 insertions, 397 deletions
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index a9063ad31..0b2809f1d 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -2,10 +2,12 @@ use std::cell::Cell;
use std::fmt;
use std::mem;
+use either::{Either, Left, Right};
+
use rustc_hir::{self as hir, def_id::DefId, definitions::DefPathData};
use rustc_index::vec::IndexVec;
use rustc_middle::mir;
-use rustc_middle::mir::interpret::{InterpError, InvalidProgramInfo};
+use rustc_middle::mir::interpret::{ErrorHandled, InterpError, InvalidProgramInfo};
use rustc_middle::ty::layout::{
self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
TyAndLayout,
@@ -15,7 +17,7 @@ use rustc_middle::ty::{
};
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_session::Limit;
-use rustc_span::{Pos, Span};
+use rustc_span::Span;
use rustc_target::abi::{call::FnAbi, Align, HasDataLayout, Size, TargetDataLayout};
use super::{
@@ -23,7 +25,7 @@ use super::{
MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, PointerArithmetic, Provenance,
Scalar, StackPopJump,
};
-use crate::transform::validate::equal_up_to_regions;
+use crate::util;
pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
/// Stores the `Machine` instance.
@@ -121,13 +123,12 @@ pub struct Frame<'mir, 'tcx, Prov: Provenance = AllocId, Extra = ()> {
////////////////////////////////////////////////////////////////////////////////
// Current position within the function
////////////////////////////////////////////////////////////////////////////////
- /// If this is `Err`, we are not currently executing any particular statement in
+ /// If this is `Right`, we are not currently executing any particular statement in
/// this frame (can happen e.g. during frame initialization, and during unwinding on
/// frames without cleanup code).
- /// We basically abuse `Result` as `Either`.
///
/// Needs to be public because ConstProp does unspeakable things to it.
- pub loc: Result<mir::Location, Span>,
+ pub loc: Either<mir::Location, Span>,
}
/// What we store about a frame in an interpreter backtrace.
@@ -227,25 +228,24 @@ impl<'mir, 'tcx, Prov: Provenance> Frame<'mir, 'tcx, Prov> {
impl<'mir, 'tcx, Prov: Provenance, Extra> Frame<'mir, 'tcx, Prov, Extra> {
/// Get the current location within the Frame.
///
- /// If this is `Err`, we are not currently executing any particular statement in
+ /// If this is `Left`, we are not currently executing any particular statement in
/// this frame (can happen e.g. during frame initialization, and during unwinding on
/// frames without cleanup code).
- /// We basically abuse `Result` as `Either`.
///
/// Used by priroda.
- pub fn current_loc(&self) -> Result<mir::Location, Span> {
+ pub fn current_loc(&self) -> Either<mir::Location, Span> {
self.loc
}
/// Return the `SourceInfo` of the current instruction.
pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
- self.loc.ok().map(|loc| self.body.source_info(loc))
+ self.loc.left().map(|loc| self.body.source_info(loc))
}
pub fn current_span(&self) -> Span {
match self.loc {
- Ok(loc) => self.body.source_info(loc).span,
- Err(span) => span,
+ Left(loc) => self.body.source_info(loc).span,
+ Right(span) => span,
}
}
}
@@ -256,25 +256,13 @@ impl<'tcx> fmt::Display for FrameInfo<'tcx> {
if tcx.def_key(self.instance.def_id()).disambiguated_data.data
== DefPathData::ClosureExpr
{
- write!(f, "inside closure")?;
+ write!(f, "inside closure")
} else {
// Note: this triggers a `good_path_bug` state, which means that if we ever get here
// we must emit a diagnostic. We should never display a `FrameInfo` unless we
// actually want to emit a warning or error to the user.
- write!(f, "inside `{}`", self.instance)?;
- }
- if !self.span.is_dummy() {
- let sm = tcx.sess.source_map();
- let lo = sm.lookup_char_pos(self.span.lo());
- write!(
- f,
- " at {}:{}:{}",
- sm.filename_for_diagnostics(&lo.file.name),
- lo.line,
- lo.col.to_usize() + 1
- )?;
+ write!(f, "inside `{}`", self.instance)
}
- Ok(())
})
}
}
@@ -354,8 +342,8 @@ pub(super) fn mir_assign_valid_types<'tcx>(
// Type-changing assignments can happen when subtyping is used. While
// all normal lifetimes are erased, higher-ranked types with their
// late-bound lifetimes are still around and can lead to type
- // differences. So we compare ignoring lifetimes.
- if equal_up_to_regions(tcx, param_env, src.ty, dest.ty) {
+ // differences.
+ if util::is_subtype(tcx, param_env, src.ty, dest.ty) {
// Make sure the layout is equal, too -- just to be safe. Miri really
// needs layout equality. For performance reason we skip this check when
// the types are equal. Equal types *can* have different layouts when
@@ -572,7 +560,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
metadata: &MemPlaceMeta<M::Provenance>,
layout: &TyAndLayout<'tcx>,
) -> InterpResult<'tcx, Option<(Size, Align)>> {
- if !layout.is_unsized() {
+ if layout.is_sized() {
return Ok(Some((layout.size, layout.align.abi)));
}
match layout.ty.kind() {
@@ -598,7 +586,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// the last field). Can't have foreign types here, how would we
// adjust alignment and size for them?
let field = layout.field(self, layout.fields.count() - 1);
- let Some((unsized_size, unsized_align)) = self.size_and_align_of(metadata, &field)? else {
+ let Some((unsized_size, mut unsized_align)) = self.size_and_align_of(metadata, &field)? else {
// A field with an extern type. We don't know the actual dynamic size
// or the alignment.
return Ok(None);
@@ -614,6 +602,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Return the sum of sizes and max of aligns.
let size = sized_size + unsized_size; // `Size` addition
+ // Packed types ignore the alignment of their fields.
+ if let ty::Adt(def, _) = layout.ty.kind() {
+ if def.repr().packed() {
+ unsized_align = sized_align;
+ }
+ }
+
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
let align = sized_align.max(unsized_align);
@@ -669,10 +664,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return_to_block: StackPopCleanup,
) -> InterpResult<'tcx> {
trace!("body: {:#?}", body);
+ // Clobber previous return place contents, nobody is supposed to be able to see them any more
+ // This also checks dereferenceable, but not align. We rely on all constructed places being
+ // sufficiently aligned (in particular we rely on `deref_operand` checking alignment).
+ self.write_uninit(return_place)?;
// first push a stack frame so we have access to the local substs
let pre_frame = Frame {
body,
- loc: Err(body.span), // Span used for errors caused during preamble.
+ loc: Right(body.span), // Span used for errors caused during preamble.
return_to_block,
return_place: return_place.clone(),
// empty local array, we fill it in below, after we are inside the stack frame and
@@ -689,12 +688,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
for ct in &body.required_consts {
let span = ct.span;
let ct = self.subst_from_current_frame_and_normalize_erasing_regions(ct.literal)?;
- self.const_to_op(&ct, None).map_err(|err| {
- // If there was an error, set the span of the current frame to this constant.
- // Avoiding doing this when evaluation succeeds.
- self.frame_mut().loc = Err(span);
- err
- })?;
+ self.eval_mir_constant(&ct, Some(span), None)?;
}
// Most locals are initially dead.
@@ -711,7 +705,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// done
self.frame_mut().locals = locals;
M::after_stack_push(self)?;
- self.frame_mut().loc = Ok(mir::Location::START);
+ self.frame_mut().loc = Left(mir::Location::START);
let span = info_span!("frame", "{}", instance);
self.frame_mut().tracing_span.enter(span);
@@ -722,7 +716,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Jump to the given block.
#[inline]
pub fn go_to_block(&mut self, target: mir::BasicBlock) {
- self.frame_mut().loc = Ok(mir::Location { block: target, statement_index: 0 });
+ self.frame_mut().loc = Left(mir::Location { block: target, statement_index: 0 });
}
/// *Return* to the given `target` basic block.
@@ -748,8 +742,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// unwinding, and doing so is UB.
pub fn unwind_to_block(&mut self, target: StackPopUnwind) -> InterpResult<'tcx> {
self.frame_mut().loc = match target {
- StackPopUnwind::Cleanup(block) => Ok(mir::Location { block, statement_index: 0 }),
- StackPopUnwind::Skip => Err(self.frame_mut().body.span),
+ StackPopUnwind::Cleanup(block) => Left(mir::Location { block, statement_index: 0 }),
+ StackPopUnwind::Skip => Right(self.frame_mut().body.span),
StackPopUnwind::NotAllowed => {
throw_ub_format!("unwinding past a stack frame that does not allow unwinding")
}
@@ -781,8 +775,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_eq!(
unwinding,
match self.frame().loc {
- Ok(loc) => self.body().basic_blocks[loc.block].is_cleanup,
- Err(_) => true,
+ Left(loc) => self.body().basic_blocks[loc.block].is_cleanup,
+ Right(_) => true,
}
);
if unwinding && self.frame_idx() == 0 {
@@ -905,9 +899,32 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(())
}
- pub fn eval_to_allocation(
+ /// Call a query that can return `ErrorHandled`. If `span` is `Some`, point to that span when an error occurs.
+ pub fn ctfe_query<T>(
+ &self,
+ span: Option<Span>,
+ query: impl FnOnce(TyCtxtAt<'tcx>) -> Result<T, ErrorHandled>,
+ ) -> InterpResult<'tcx, T> {
+ // Use a precise span for better cycle errors.
+ query(self.tcx.at(span.unwrap_or_else(|| self.cur_span()))).map_err(|err| {
+ match err {
+ ErrorHandled::Reported(err) => {
+ if let Some(span) = span {
+ // To make it easier to figure out where this error comes from, also add a note at the current location.
+ self.tcx.sess.span_note_without_error(span, "erroneous constant used");
+ }
+ err_inval!(AlreadyReported(err))
+ }
+ ErrorHandled::TooGeneric => err_inval!(TooGeneric),
+ }
+ .into()
+ })
+ }
+
+ pub fn eval_global(
&self,
gid: GlobalId<'tcx>,
+ span: Option<Span>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
// For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
// and thus don't care about the parameter environment. While we could just use
@@ -920,8 +937,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.param_env
};
let param_env = param_env.with_const();
- // Use a precise span for better cycle errors.
- let val = self.tcx.at(self.cur_span()).eval_to_allocation_raw(param_env.and(gid))?;
+ let val = self.ctfe_query(span, |tcx| tcx.eval_to_allocation_raw(param_env.and(gid)))?;
self.raw_const_to_mplace(val)
}
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 6809a42dc..458cc6180 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -134,7 +134,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval:
alloc.mutability = Mutability::Not;
};
// link the alloc id to the actual allocation
- leftover_allocations.extend(alloc.provenance().iter().map(|&(_, alloc_id)| alloc_id));
+ leftover_allocations.extend(alloc.provenance().ptrs().iter().map(|&(_, alloc_id)| alloc_id));
let alloc = tcx.intern_const_alloc(alloc);
tcx.set_alloc_id_memory(alloc_id, alloc);
None
@@ -439,7 +439,7 @@ pub fn intern_const_alloc_recursive<
}
let alloc = tcx.intern_const_alloc(alloc);
tcx.set_alloc_id_memory(alloc_id, alloc);
- for &(_, alloc_id) in alloc.inner().provenance().iter() {
+ for &(_, alloc_id) in alloc.inner().provenance().ptrs().iter() {
if leftover_allocations.insert(alloc_id) {
todo.push(alloc_id);
}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 8637d6a77..7940efcd2 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -7,7 +7,9 @@ use std::convert::TryFrom;
use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
self,
- interpret::{ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar},
+ interpret::{
+ Allocation, ConstAllocation, ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar,
+ },
BinOp, NonDivergingIntrinsic,
};
use rustc_middle::ty;
@@ -23,7 +25,6 @@ use super::{
};
mod caller_location;
-mod type_name;
fn numeric_intrinsic<Prov>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Prov> {
let size = match kind {
@@ -42,6 +43,13 @@ fn numeric_intrinsic<Prov>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<
Scalar::from_uint(bits_out, size)
}
+/// Directly returns an `Allocation` containing an absolute path representation of the given type.
+pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> {
+ let path = crate::util::type_name(tcx, ty);
+ let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes());
+ tcx.intern_const_alloc(alloc)
+}
+
/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
pub(crate) fn eval_nullary_intrinsic<'tcx>(
@@ -55,7 +63,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
Ok(match name {
sym::type_name => {
ensure_monomorphic_enough(tcx, tp_ty)?;
- let alloc = type_name::alloc_type_name(tcx, tp_ty);
+ let alloc = alloc_type_name(tcx, tp_ty);
ConstValue::Slice { data: alloc, start: 0, end: alloc.inner().len() }
}
sym::needs_drop => {
@@ -169,8 +177,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::type_name => self.tcx.mk_static_str(),
_ => bug!(),
};
- let val =
- self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
+ let val = self.ctfe_query(None, |tcx| {
+ tcx.const_eval_global_id(self.param_env, gid, Some(tcx.span))
+ })?;
let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
self.copy_op(&val, dest, /*allow_transmute*/ false)?;
}
@@ -234,6 +243,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let discr_val = self.read_discriminant(&place.into())?.0;
self.write_scalar(discr_val, dest)?;
}
+ sym::exact_div => {
+ let l = self.read_immediate(&args[0])?;
+ let r = self.read_immediate(&args[1])?;
+ self.exact_div(&l, &r, dest)?;
+ }
sym::unchecked_shl
| sym::unchecked_shr
| sym::unchecked_add
@@ -705,7 +719,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
- assert!(!layout.is_unsized());
+ assert!(layout.is_sized());
let get_bytes = |this: &InterpCx<'mir, 'tcx, M>,
op: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
index 0e3867557..7d94a22c4 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
@@ -19,8 +19,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
debug!("find_closest_untracked_caller_location: checking frame {:?}", frame.instance);
// Assert that the frame we look at is actually executing code currently
- // (`loc` is `Err` when we are unwinding and the frame does not require cleanup).
- let loc = frame.loc.unwrap();
+ // (`loc` is `Right` when we are unwinding and the frame does not require cleanup).
+ let loc = frame.loc.left().unwrap();
// This could be a non-`Call` terminator (such as `Drop`), or not a terminator at all
// (such as `box`). Use the normal span by default.
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
deleted file mode 100644
index ffdb8de5b..000000000
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
+++ /dev/null
@@ -1,198 +0,0 @@
-use rustc_data_structures::intern::Interned;
-use rustc_hir::def_id::CrateNum;
-use rustc_hir::definitions::DisambiguatedDefPathData;
-use rustc_middle::mir::interpret::{Allocation, ConstAllocation};
-use rustc_middle::ty::{
- self,
- print::{with_no_verbose_constants, PrettyPrinter, Print, Printer},
- subst::{GenericArg, GenericArgKind},
- Ty, TyCtxt,
-};
-use std::fmt::Write;
-
-struct AbsolutePathPrinter<'tcx> {
- tcx: TyCtxt<'tcx>,
- path: String,
-}
-
-impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
- type Error = std::fmt::Error;
-
- type Path = Self;
- type Region = Self;
- type Type = Self;
- type DynExistential = Self;
- type Const = Self;
-
- fn tcx(&self) -> TyCtxt<'tcx> {
- self.tcx
- }
-
- fn print_region(self, _region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
- Ok(self)
- }
-
- fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
- match *ty.kind() {
- // Types without identity.
- ty::Bool
- | ty::Char
- | ty::Int(_)
- | ty::Uint(_)
- | ty::Float(_)
- | ty::Str
- | ty::Array(_, _)
- | ty::Slice(_)
- | ty::RawPtr(_)
- | ty::Ref(_, _, _)
- | ty::FnPtr(_)
- | ty::Never
- | ty::Tuple(_)
- | ty::Dynamic(_, _, _) => self.pretty_print_type(ty),
-
- // Placeholders (all printed as `_` to uniformize them).
- ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error(_) => {
- write!(self, "_")?;
- Ok(self)
- }
-
- // Types with identity (print the module path).
- ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), substs)
- | ty::FnDef(def_id, substs)
- | ty::Opaque(def_id, substs)
- | ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs })
- | ty::Closure(def_id, substs)
- | ty::Generator(def_id, substs, _) => self.print_def_path(def_id, substs),
- ty::Foreign(def_id) => self.print_def_path(def_id, &[]),
-
- ty::GeneratorWitness(_) => bug!("type_name: unexpected `GeneratorWitness`"),
- }
- }
-
- fn print_const(self, ct: ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
- self.pretty_print_const(ct, false)
- }
-
- fn print_dyn_existential(
- mut self,
- predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
- ) -> Result<Self::DynExistential, Self::Error> {
- let mut first = true;
- for p in predicates {
- if !first {
- write!(self, "+")?;
- }
- first = false;
- self = p.print(self)?;
- }
- Ok(self)
- }
-
- fn path_crate(mut self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
- self.path.push_str(self.tcx.crate_name(cnum).as_str());
- Ok(self)
- }
-
- fn path_qualified(
- self,
- self_ty: Ty<'tcx>,
- trait_ref: Option<ty::TraitRef<'tcx>>,
- ) -> Result<Self::Path, Self::Error> {
- self.pretty_path_qualified(self_ty, trait_ref)
- }
-
- fn path_append_impl(
- self,
- print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
- _disambiguated_data: &DisambiguatedDefPathData,
- self_ty: Ty<'tcx>,
- trait_ref: Option<ty::TraitRef<'tcx>>,
- ) -> Result<Self::Path, Self::Error> {
- self.pretty_path_append_impl(
- |mut cx| {
- cx = print_prefix(cx)?;
-
- cx.path.push_str("::");
-
- Ok(cx)
- },
- self_ty,
- trait_ref,
- )
- }
-
- fn path_append(
- mut self,
- print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
- disambiguated_data: &DisambiguatedDefPathData,
- ) -> Result<Self::Path, Self::Error> {
- self = print_prefix(self)?;
-
- write!(self.path, "::{}", disambiguated_data.data).unwrap();
-
- Ok(self)
- }
-
- fn path_generic_args(
- mut self,
- print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
- args: &[GenericArg<'tcx>],
- ) -> Result<Self::Path, Self::Error> {
- self = print_prefix(self)?;
- let args =
- args.iter().cloned().filter(|arg| !matches!(arg.unpack(), GenericArgKind::Lifetime(_)));
- if args.clone().next().is_some() {
- self.generic_delimiters(|cx| cx.comma_sep(args))
- } else {
- Ok(self)
- }
- }
-}
-
-impl<'tcx> PrettyPrinter<'tcx> for AbsolutePathPrinter<'tcx> {
- fn should_print_region(&self, _region: ty::Region<'_>) -> bool {
- false
- }
- fn comma_sep<T>(mut self, mut elems: impl Iterator<Item = T>) -> Result<Self, Self::Error>
- where
- T: Print<'tcx, Self, Output = Self, Error = Self::Error>,
- {
- if let Some(first) = elems.next() {
- self = first.print(self)?;
- for elem in elems {
- self.path.push_str(", ");
- self = elem.print(self)?;
- }
- }
- Ok(self)
- }
-
- fn generic_delimiters(
- mut self,
- f: impl FnOnce(Self) -> Result<Self, Self::Error>,
- ) -> Result<Self, Self::Error> {
- write!(self, "<")?;
-
- self = f(self)?;
-
- write!(self, ">")?;
-
- Ok(self)
- }
-}
-
-impl Write for AbsolutePathPrinter<'_> {
- fn write_str(&mut self, s: &str) -> std::fmt::Result {
- self.path.push_str(s);
- Ok(())
- }
-}
-
-/// Directly returns an `Allocation` containing an absolute path representation of the given type.
-pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> {
- let path = with_no_verbose_constants!(
- AbsolutePathPrinter { tcx, path: String::new() }.print_type(ty).unwrap().path
- );
- let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes());
- tcx.intern_const_alloc(alloc)
-}
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index 351152eba..0604d5ee6 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -373,9 +373,21 @@ pub trait Machine<'mir, 'tcx>: Sized {
Ok(())
}
- /// Executes a retagging operation.
+ /// Executes a retagging operation for a single pointer.
+ /// Returns the possibly adjusted pointer.
#[inline]
- fn retag(
+ fn retag_ptr_value(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _kind: mir::RetagKind,
+ val: &ImmTy<'tcx, Self::Provenance>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>> {
+ Ok(val.clone())
+ }
+
+ /// Executes a retagging operation on a compound value.
+ /// Replaces all pointers stored in the given place.
+ #[inline]
+ fn retag_place_contents(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_kind: mir::RetagKind,
_place: &PlaceTy<'tcx, Self::Provenance>,
@@ -417,8 +429,8 @@ pub trait Machine<'mir, 'tcx>: Sized {
}
}
-// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
-// (CTFE and ConstProp) use the same instance. Here, we share that code.
+/// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
+/// (CTFE and ConstProp) use the same instance. Here, we share that code.
pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
type Provenance = AllocId;
type ProvenanceExtra = ();
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index e5e015c1e..528c1cb06 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -112,7 +112,7 @@ pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
#[derive(Copy, Clone)]
-pub struct AllocRef<'a, 'tcx, Prov, Extra> {
+pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra> {
alloc: &'a Allocation<Prov, Extra>,
range: AllocRange,
tcx: TyCtxt<'tcx>,
@@ -120,7 +120,7 @@ pub struct AllocRef<'a, 'tcx, Prov, Extra> {
}
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
-pub struct AllocRefMut<'a, 'tcx, Prov, Extra> {
+pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra> {
alloc: &'a mut Allocation<Prov, Extra>,
range: AllocRange,
tcx: TyCtxt<'tcx>,
@@ -302,8 +302,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.into());
};
- debug!(?alloc);
-
if alloc.mutability == Mutability::Not {
throw_ub_format!("deallocating immutable allocation {alloc_id:?}");
}
@@ -503,8 +501,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
throw_unsup!(ReadExternStatic(def_id));
}
- // Use a precise span for better cycle errors.
- (self.tcx.at(self.cur_span()).eval_static_initializer(def_id)?, Some(def_id))
+ // We don't give a span -- statics don't need that, they cannot be generic or associated.
+ let val = self.ctfe_query(None, |tcx| tcx.eval_static_initializer(def_id))?;
+ (val, Some(def_id))
}
};
M::before_access_global(*self.tcx, &self.machine, id, alloc, def_id, is_write)?;
@@ -683,7 +682,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Use size and align of the type.
let ty = self.tcx.type_of(def_id);
let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
- assert!(!layout.is_unsized());
+ assert!(layout.is_sized());
(layout.size, layout.align.abi, AllocKind::LiveData)
}
Some(GlobalAlloc::Memory(alloc)) => {
@@ -797,7 +796,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// This is a new allocation, add the allocation it points to `todo`.
if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
todo.extend(
- alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()),
+ alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
);
}
}
@@ -833,7 +832,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
allocs_to_print: &mut VecDeque<AllocId>,
alloc: &Allocation<Prov, Extra>,
) -> std::fmt::Result {
- for alloc_id in alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()) {
+ for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
+ {
allocs_to_print.push_back(alloc_id);
}
write!(fmt, "{}", display_allocation(tcx, alloc))
@@ -962,7 +962,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
/// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
pub(crate) fn has_provenance(&self) -> bool {
- self.alloc.range_has_provenance(&self.tcx, self.range)
+ !self.alloc.provenance().range_empty(self.range, &self.tcx)
}
}
@@ -1060,7 +1060,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Source alloc preparations and access hooks.
let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
- // Zero-sized *source*, that means dst is also zero-sized and we have nothing to do.
+ // Zero-sized *source*, that means dest is also zero-sized and we have nothing to do.
return Ok(());
};
let src_alloc = self.get_alloc_raw(src_alloc_id)?;
@@ -1079,22 +1079,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return Ok(());
};
- // Checks provenance edges on the src, which needs to happen before
- // `prepare_provenance_copy`.
- if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.start, Size::ZERO)) {
- throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.start)));
- }
- if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.end(), Size::ZERO)) {
- throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.end())));
- }
+ // Prepare getting source provenance.
let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
// first copy the provenance to a temporary buffer, because
// `get_bytes_mut` will clear the provenance, which is correct,
// since we don't want to keep any provenance at the target.
- let provenance =
- src_alloc.prepare_provenance_copy(self, src_range, dest_offset, num_copies);
+ // This will also error if copying partial provenance is not supported.
+ let provenance = src_alloc
+ .provenance()
+ .prepare_copy(src_range, dest_offset, num_copies, self)
+ .map_err(|e| e.to_interp_error(dest_alloc_id))?;
// Prepare a copy of the initialization mask.
- let compressed = src_alloc.compress_uninit_range(src_range);
+ let init = src_alloc.init_mask().prepare_copy(src_range);
// Destination alloc preparations and access hooks.
let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
@@ -1111,7 +1107,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.map_err(|e| e.to_interp_error(dest_alloc_id))?
.as_mut_ptr();
- if compressed.no_bytes_init() {
+ if init.no_bytes_init() {
// Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
// is marked as uninitialized but we otherwise omit changing the byte representation which may
// be arbitrary for uninitialized bytes.
@@ -1160,13 +1156,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
// now fill in all the "init" data
- dest_alloc.mark_compressed_init_range(
- &compressed,
+ dest_alloc.init_mask_apply_copy(
+ init,
alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
num_copies,
);
// copy the provenance to the destination
- dest_alloc.mark_provenance_range(provenance);
+ dest_alloc.provenance_apply_copy(provenance);
Ok(())
}
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 0c212cf59..221e359d2 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -1,11 +1,14 @@
//! Functions concerning immediate values and operands, and reading from operands.
//! All high-level functions to read from memory work on operands as sources.
+use either::{Either, Left, Right};
+
use rustc_hir::def::Namespace;
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
-use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty};
+use rustc_middle::ty::{ConstInt, Ty, ValTree};
use rustc_middle::{mir, ty};
+use rustc_span::Span;
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
@@ -260,9 +263,9 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
- match self.try_as_mplace() {
- Ok(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
- Err(imm) => {
+ match self.as_mplace_or_imm() {
+ Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
+ Right(imm) => {
assert!(
matches!(*imm, Immediate::Uninit),
"Scalar/ScalarPair cannot be offset into"
@@ -280,7 +283,7 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
- assert!(!layout.is_unsized());
+ assert!(layout.is_sized());
self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
}
}
@@ -352,8 +355,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Try returning an immediate for the operand. If the layout does not permit loading this as an
/// immediate, return where in memory we can find the data.
- /// Note that for a given layout, this operation will either always fail or always
- /// succeed! Whether it succeeds depends on whether the layout can be represented
+    /// Note that for a given layout, this operation will either always return Left or always
+    /// return Right! Whether it returns Left depends on whether the layout can be represented
/// in an `Immediate`, not on which data is stored there currently.
///
/// This is an internal function that should not usually be used; call `read_immediate` instead.
@@ -361,22 +364,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn read_immediate_raw(
&self,
src: &OpTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::Provenance>, MPlaceTy<'tcx, M::Provenance>>> {
- Ok(match src.try_as_mplace() {
- Ok(ref mplace) => {
+ ) -> InterpResult<'tcx, Either<MPlaceTy<'tcx, M::Provenance>, ImmTy<'tcx, M::Provenance>>> {
+ Ok(match src.as_mplace_or_imm() {
+ Left(ref mplace) => {
if let Some(val) = self.read_immediate_from_mplace_raw(mplace)? {
- Ok(val)
+ Right(val)
} else {
- Err(*mplace)
+ Left(*mplace)
}
}
- Err(val) => Ok(val),
+ Right(val) => Right(val),
})
}
/// Read an immediate from a place, asserting that that is possible with the given layout.
///
- /// If this suceeds, the `ImmTy` is never `Uninit`.
+ /// If this succeeds, the `ImmTy` is never `Uninit`.
#[inline(always)]
pub fn read_immediate(
&self,
@@ -389,7 +392,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) {
span_bug!(self.cur_span(), "primitive read not possible for type: {:?}", op.layout.ty);
}
- let imm = self.read_immediate_raw(op)?.unwrap();
+ let imm = self.read_immediate_raw(op)?.right().unwrap();
if matches!(*imm, Immediate::Uninit) {
throw_ub!(InvalidUninitBytes(None));
}
@@ -431,9 +434,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Basically we just transmute this place into an array following simd_size_and_type.
// This only works in memory, but repr(simd) types should never be immediates anyway.
assert!(op.layout.ty.is_simd());
- match op.try_as_mplace() {
- Ok(mplace) => self.mplace_to_simd(&mplace),
- Err(imm) => match *imm {
+ match op.as_mplace_or_imm() {
+ Left(mplace) => self.mplace_to_simd(&mplace),
+ Right(imm) => match *imm {
Immediate::Uninit => {
throw_ub!(InvalidUninitBytes(None))
}
@@ -527,14 +530,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,
Constant(ref constant) => {
- let val =
+ let c =
self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal)?;
// This can still fail:
// * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
// checked yet.
// * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
- self.const_to_op(&val, layout)?
+ self.eval_mir_constant(&c, Some(constant.span), layout)?
}
};
trace!("{:?}: {:?}", mir_op, *op);
@@ -549,9 +552,37 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ops.iter().map(|op| self.eval_operand(op, None)).collect()
}
- pub fn const_to_op(
+ fn eval_ty_constant(
+ &self,
+ val: ty::Const<'tcx>,
+ span: Option<Span>,
+ ) -> InterpResult<'tcx, ValTree<'tcx>> {
+ Ok(match val.kind() {
+ ty::ConstKind::Param(_) | ty::ConstKind::Placeholder(..) => {
+ throw_inval!(TooGeneric)
+ }
+ // FIXME(generic_const_exprs): `ConstKind::Expr` should be able to be evaluated
+ ty::ConstKind::Expr(_) => throw_inval!(TooGeneric),
+ ty::ConstKind::Error(reported) => {
+ throw_inval!(AlreadyReported(reported))
+ }
+ ty::ConstKind::Unevaluated(uv) => {
+ let instance = self.resolve(uv.def, uv.substs)?;
+ let cid = GlobalId { instance, promoted: None };
+ self.ctfe_query(span, |tcx| tcx.eval_to_valtree(self.param_env.and(cid)))?
+ .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
+ }
+ ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
+ span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {val:?}")
+ }
+ ty::ConstKind::Value(valtree) => valtree,
+ })
+ }
+
+ pub fn eval_mir_constant(
&self,
val: &mir::ConstantKind<'tcx>,
+ span: Option<Span>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
// FIXME(const_prop): normalization needed b/c const prop lint in
@@ -563,44 +594,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let val = self.tcx.normalize_erasing_regions(self.param_env, *val);
match val {
mir::ConstantKind::Ty(ct) => {
- match ct.kind() {
- ty::ConstKind::Param(_) | ty::ConstKind::Placeholder(..) => {
- throw_inval!(TooGeneric)
- }
- ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => {
- throw_inval!(AlreadyReported(reported))
- }
- ty::ConstKind::Unevaluated(uv) => {
- // NOTE: We evaluate to a `ValTree` here as a check to ensure
- // we're working with valid constants, even though we never need it.
- let instance = self.resolve(uv.def, uv.substs)?;
- let cid = GlobalId { instance, promoted: None };
- let _valtree = self
- .tcx
- .eval_to_valtree(self.param_env.and(cid))?
- .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"));
-
- Ok(self.eval_to_allocation(cid)?.into())
- }
- ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
- span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {ct:?}")
- }
- ty::ConstKind::Value(valtree) => {
- let ty = ct.ty();
- let const_val = self.tcx.valtree_to_const_val((ty, valtree));
- self.const_val_to_op(const_val, ty, layout)
- }
- }
+ let ty = ct.ty();
+ let valtree = self.eval_ty_constant(ct, span)?;
+ let const_val = self.tcx.valtree_to_const_val((ty, valtree));
+ self.const_val_to_op(const_val, ty, layout)
}
mir::ConstantKind::Val(val, ty) => self.const_val_to_op(val, ty, layout),
mir::ConstantKind::Unevaluated(uv, _) => {
let instance = self.resolve(uv.def, uv.substs)?;
- Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
+ Ok(self.eval_global(GlobalId { instance, promoted: uv.promoted }, span)?.into())
}
}
}
- pub(crate) fn const_val_to_op(
+ pub(super) fn const_val_to_op(
&self,
val_val: ConstValue<'tcx>,
ty: Ty<'tcx>,
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index b0625b5f4..c47cfe8bb 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -2,6 +2,8 @@
//! into a place.
//! All high-level functions to write to memory work on places as destinations.
+use either::{Either, Left, Right};
+
use rustc_ast::Mutability;
use rustc_middle::mir;
use rustc_middle::ty;
@@ -201,7 +203,7 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
- assert!(!layout.is_unsized());
+ assert!(layout.is_sized());
self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
}
@@ -252,36 +254,36 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
// These are defined here because they produce a place.
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
#[inline(always)]
- pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+ pub fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
match **self {
Operand::Indirect(mplace) => {
- Ok(MPlaceTy { mplace, layout: self.layout, align: self.align.unwrap() })
+ Left(MPlaceTy { mplace, layout: self.layout, align: self.align.unwrap() })
}
- Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)),
+ Operand::Immediate(imm) => Right(ImmTy::from_immediate(imm, self.layout)),
}
}
#[inline(always)]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
- self.try_as_mplace().unwrap()
+ self.as_mplace_or_imm().left().unwrap()
}
}
impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
/// A place is either an mplace or some local.
#[inline]
- pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Prov>, (usize, mir::Local)> {
+ pub fn as_mplace_or_local(&self) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local)> {
match **self {
- Place::Ptr(mplace) => Ok(MPlaceTy { mplace, layout: self.layout, align: self.align }),
- Place::Local { frame, local } => Err((frame, local)),
+ Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout, align: self.align }),
+ Place::Local { frame, local } => Right((frame, local)),
}
}
#[inline(always)]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
- self.try_as_mplace().unwrap()
+ self.as_mplace_or_local().left().unwrap()
}
}
@@ -316,8 +318,7 @@ where
Ok(MPlaceTy { mplace, layout, align })
}
- /// Take an operand, representing a pointer, and dereference it to a place -- that
- /// will always be a MemPlace. Lives in `place.rs` because it creates a place.
+ /// Take an operand, representing a pointer, and dereference it to a place.
#[instrument(skip(self), level = "debug")]
pub fn deref_operand(
&self,
@@ -331,7 +332,7 @@ where
}
let mplace = self.ref_to_mplace(&val)?;
- self.check_mplace_access(mplace, CheckInAllocMsg::DerefTest)?;
+ self.check_mplace(mplace)?;
Ok(mplace)
}
@@ -340,7 +341,7 @@ where
&self,
place: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
- assert!(!place.layout.is_unsized());
+ assert!(place.layout.is_sized());
assert!(!place.meta.has_meta());
let size = place.layout.size;
self.get_ptr_alloc(place.ptr, size, place.align)
@@ -351,24 +352,25 @@ where
&mut self,
place: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
- assert!(!place.layout.is_unsized());
+ assert!(place.layout.is_sized());
assert!(!place.meta.has_meta());
let size = place.layout.size;
self.get_ptr_alloc_mut(place.ptr, size, place.align)
}
/// Check if this mplace is dereferenceable and sufficiently aligned.
- fn check_mplace_access(
- &self,
- mplace: MPlaceTy<'tcx, M::Provenance>,
- msg: CheckInAllocMsg,
- ) -> InterpResult<'tcx> {
+ pub fn check_mplace(&self, mplace: MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
let (size, align) = self
.size_and_align_of_mplace(&mplace)?
.unwrap_or((mplace.layout.size, mplace.layout.align.abi));
assert!(mplace.align <= align, "dynamic alignment less strict than static one?");
let align = M::enforce_alignment(self).then_some(align);
- self.check_ptr_access_align(mplace.ptr, size, align.unwrap_or(Align::ONE), msg)?;
+ self.check_ptr_access_align(
+ mplace.ptr,
+ size,
+ align.unwrap_or(Align::ONE),
+ CheckInAllocMsg::DerefTest,
+ )?;
Ok(())
}
@@ -485,7 +487,7 @@ where
src: Immediate<M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
- assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
+ assert!(dest.layout.is_sized(), "Cannot write unsized data");
trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
// See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
@@ -569,9 +571,9 @@ where
}
pub fn write_uninit(&mut self, dest: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
- let mplace = match dest.try_as_mplace() {
- Ok(mplace) => mplace,
- Err((frame, local)) => {
+ let mplace = match dest.as_mplace_or_local() {
+ Left(mplace) => mplace,
+ Right((frame, local)) => {
match M::access_local_mut(self, frame, local)? {
Operand::Immediate(local) => {
*local = Immediate::Uninit;
@@ -639,7 +641,7 @@ where
// Let us see if the layout is simple so we take a shortcut,
// avoid force_allocation.
let src = match self.read_immediate_raw(src)? {
- Ok(src_val) => {
+ Right(src_val) => {
// FIXME(const_prop): Const-prop can possibly evaluate an
// unsized copy operation when it thinks that the type is
// actually sized, due to a trivially false where-clause
@@ -669,7 +671,7 @@ where
)
};
}
- Err(mplace) => mplace,
+ Left(mplace) => mplace,
};
// Slow path, this does not fit into an immediate. Just memcpy.
trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
@@ -746,7 +748,7 @@ where
layout: TyAndLayout<'tcx>,
kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
- assert!(!layout.is_unsized());
+ assert!(layout.is_sized());
let ptr = self.allocate_ptr(layout.size, layout.align.abi, kind)?;
Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
}
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 6b2e2bb8a..2ffd73eef 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -7,6 +7,8 @@
//! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually
//! implement the logic on OpTy, and MPlaceTy calls that.
+use either::{Left, Right};
+
use rustc_middle::mir;
use rustc_middle::ty;
use rustc_middle::ty::layout::LayoutOf;
@@ -84,13 +86,13 @@ where
base: &OpTy<'tcx, M::Provenance>,
field: usize,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- let base = match base.try_as_mplace() {
- Ok(ref mplace) => {
+ let base = match base.as_mplace_or_imm() {
+ Left(ref mplace) => {
// We can reuse the mplace field computation logic for indirect operands.
let field = self.mplace_field(mplace, field)?;
return Ok(field.into());
}
- Err(value) => value,
+ Right(value) => value,
};
let field_layout = base.layout.field(self, field);
@@ -204,8 +206,8 @@ where
}
}
- // Iterates over all fields of an array. Much more efficient than doing the
- // same by repeatedly calling `operand_index`.
+ /// Iterates over all fields of an array. Much more efficient than doing the
+ /// same by repeatedly calling `operand_index`.
pub fn operand_array_fields<'a>(
&self,
base: &'a OpTy<'tcx, Prov>,
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index c6e04cbfb..81b44a494 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -2,11 +2,13 @@
//!
//! The main entry point is the `step` method.
+use either::Either;
+
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::LayoutOf;
-use super::{InterpCx, Machine};
+use super::{ImmTy, InterpCx, Machine};
/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
/// same type as the result.
@@ -30,11 +32,6 @@ fn binop_right_homogeneous(op: mir::BinOp) -> bool {
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
- pub fn run(&mut self) -> InterpResult<'tcx> {
- while self.step()? {}
- Ok(())
- }
-
/// Returns `true` as long as there are more things to do.
///
/// This is used by [priroda](https://github.com/oli-obk/priroda)
@@ -46,7 +43,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return Ok(false);
}
- let Ok(loc) = self.frame().loc else {
+ let Either::Left(loc) = self.frame().loc else {
// We are unwinding and this fn has no cleanup code.
// Just go on unwinding.
trace!("unwinding: skipping frame");
@@ -61,7 +58,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Make sure we are not updating `statement_index` of the wrong frame.
assert_eq!(old_frames, self.frame_idx());
// Advance the program counter.
- self.frame_mut().loc.as_mut().unwrap().statement_index += 1;
+ self.frame_mut().loc.as_mut().left().unwrap().statement_index += 1;
return Ok(true);
}
@@ -111,7 +108,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Stacked Borrows.
Retag(kind, place) => {
let dest = self.eval_place(**place)?;
- M::retag(self, *kind, &dest)?;
+ M::retag_place_contents(self, *kind, &dest)?;
}
Intrinsic(box ref intrinsic) => self.emulate_nondiverging_intrinsic(intrinsic)?,
@@ -209,7 +206,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Repeat(ref operand, _) => {
let src = self.eval_operand(operand, None)?;
- assert!(!src.layout.is_unsized());
+ assert!(src.layout.is_sized());
let dest = self.force_allocation(&dest)?;
let length = dest.len(self)?;
@@ -250,10 +247,41 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?;
}
- AddressOf(_, place) | Ref(_, _, place) => {
+ Ref(_, borrow_kind, place) => {
let src = self.eval_place(place)?;
let place = self.force_allocation(&src)?;
- self.write_immediate(place.to_ref(self), &dest)?;
+ let val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
+ // A fresh reference was created, make sure it gets retagged.
+ let val = M::retag_ptr_value(
+ self,
+ if borrow_kind.allows_two_phase_borrow() {
+ mir::RetagKind::TwoPhase
+ } else {
+ mir::RetagKind::Default
+ },
+ &val,
+ )?;
+ self.write_immediate(*val, &dest)?;
+ }
+
+ AddressOf(_, place) => {
+ // Figure out whether this is an addr_of of an already raw place.
+ let place_base_raw = if place.has_deref() {
+ let ty = self.frame().body.local_decls[place.local].ty;
+ ty.is_unsafe_ptr()
+ } else {
+ // Not a deref, and thus not raw.
+ false
+ };
+
+ let src = self.eval_place(place)?;
+ let place = self.force_allocation(&src)?;
+ let mut val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
+ if !place_base_raw {
+ // If this was not already raw, it needs retagging.
+ val = M::retag_ptr_value(self, mir::RetagKind::Raw, &val)?;
+ }
+ self.write_immediate(*val, &dest)?;
}
NullaryOp(null_op, ty) => {
@@ -305,7 +333,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.eval_terminator(terminator)?;
if !self.stack().is_empty() {
- if let Ok(loc) = self.frame().loc {
+ if let Either::Left(loc) = self.frame().loc {
info!("// executing {:?}", loc.block);
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs
index cab23b724..fa15d466a 100644
--- a/compiler/rustc_const_eval/src/interpret/traits.rs
+++ b/compiler/rustc_const_eval/src/interpret/traits.rs
@@ -53,7 +53,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> InterpResult<'tcx, (Size, Align)> {
let (ty, _trait_ref) = self.get_ptr_vtable(vtable)?;
let layout = self.layout_of(ty)?;
- assert!(!layout.is_unsized(), "there are no vtables for unsized types");
+ assert!(layout.is_sized(), "there are no vtables for unsized types");
Ok((layout.size, layout.align.abi))
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 8aa56c275..0e85c7d11 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -8,6 +8,8 @@ use std::convert::TryFrom;
use std::fmt::{Display, Write};
use std::num::NonZeroUsize;
+use either::{Left, Right};
+
use rustc_ast::Mutability;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
@@ -783,18 +785,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
}
}
Abi::ScalarPair(a_layout, b_layout) => {
- // There is no `rustc_layout_scalar_valid_range_start` for pairs, so
- // we would validate these things as we descend into the fields,
- // but that can miss bugs in layout computation. Layout computation
- // is subtle due to enums having ScalarPair layout, where one field
- // is the discriminant.
- if cfg!(debug_assertions)
- && !a_layout.is_uninit_valid()
- && !b_layout.is_uninit_valid()
- {
- // We can only proceed if *both* scalars need to be initialized.
- // FIXME: find a way to also check ScalarPair when one side can be uninit but
- // the other must be init.
+ // We can only proceed if *both* scalars need to be initialized.
+ // FIXME: find a way to also check ScalarPair when one side can be uninit but
+ // the other must be init.
+ if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
let (a, b) =
self.read_immediate(op, "initiailized scalar value")?.to_scalar_pair();
self.visit_scalar(a, a_layout)?;
@@ -852,9 +846,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
return Ok(());
}
// Now that we definitely have a non-ZST array, we know it lives in memory.
- let mplace = match op.try_as_mplace() {
- Ok(mplace) => mplace,
- Err(imm) => match *imm {
+ let mplace = match op.as_mplace_or_imm() {
+ Left(mplace) => mplace,
+ Right(imm) => match *imm {
Immediate::Uninit =>
throw_validation_failure!(self.path, { "uninitialized bytes" }),
Immediate::Scalar(..) | Immediate::ScalarPair(..) =>
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index aee1f93b1..1a10851a9 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -324,7 +324,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
macro_rules! make_value_visitor {
($visitor_trait:ident, $value_trait:ident, $($mutability:ident)?) => {
- // How to traverse a value and what to do when we are at the leaves.
+ /// How to traverse a value and what to do when we are at the leaves.
pub trait $visitor_trait<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
type V: $value_trait<'mir, 'tcx, M>;