path: root/compiler/rustc_const_eval/src/interpret
Diffstat (limited to 'compiler/rustc_const_eval/src/interpret')
-rw-r--r--  compiler/rustc_const_eval/src/interpret/cast.rs          |  49
-rw-r--r--  compiler/rustc_const_eval/src/interpret/eval_context.rs  |  47
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs        |  27
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs    | 106
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs        |  63
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operand.rs       |   4
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operator.rs      |  68
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs         |  13
-rw-r--r--  compiler/rustc_const_eval/src/interpret/projection.rs    |   5
-rw-r--r--  compiler/rustc_const_eval/src/interpret/step.rs          |  28
-rw-r--r--  compiler/rustc_const_eval/src/interpret/terminator.rs    |  57
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs      | 217
12 files changed, 352 insertions, 332 deletions
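
Most of the changes below follow a single pattern: free-form `throw_ub_format!`/`err_ub_format!` messages are replaced by `throw_ub_custom!`/`err_ub_custom!` invocations that reference Fluent message IDs (imported as `crate::fluent_generated`) or by dedicated error structs from `crate::errors`, making the diagnostics translatable. As a rough orientation, here is a minimal standalone sketch of that idea, using a hypothetical `UndefinedBehavior` enum in place of rustc's actual diagnostic machinery; none of these names come from the diff itself.

use std::fmt;

// Hypothetical error kinds standing in for the Fluent message IDs used in the diff.
enum UndefinedBehavior {
    InvalidTransmute { src_bytes: u64, dest_bytes: u64, src: String, dest: String },
    UpcastMismatch,
}

impl fmt::Display for UndefinedBehavior {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // In rustc this rendering goes through a Fluent bundle keyed by message ID;
        // here the English text is simply hard-coded.
        match self {
            UndefinedBehavior::InvalidTransmute { src_bytes, dest_bytes, src, dest } => write!(
                f,
                "transmuting from {src_bytes}-byte type to {dest_bytes}-byte type: `{src}` -> `{dest}`"
            ),
            UndefinedBehavior::UpcastMismatch => {
                write!(f, "upcast on a pointer whose vtable does not match its type")
            }
        }
    }
}

fn main() {
    let err = UndefinedBehavior::InvalidTransmute {
        src_bytes: 4,
        dest_bytes: 8,
        src: "u32".into(),
        dest: "u64".into(),
    };
    println!("{err}");
}
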
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 163e3f869..83a072d6f 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -4,7 +4,7 @@ use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::{Float, FloatConvert};
use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
use rustc_middle::mir::CastKind;
-use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{IntegerExt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, FloatTy, Ty, TypeAndMut};
use rustc_target::abi::Integer;
@@ -14,6 +14,8 @@ use super::{
util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy,
};
+use crate::fluent_generated as fluent;
+
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn cast(
&mut self,
@@ -22,51 +24,52 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
cast_ty: Ty<'tcx>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
- use rustc_middle::mir::CastKind::*;
// FIXME: In which cases should we trigger UB when the source is uninit?
match cast_kind {
- Pointer(PointerCast::Unsize) => {
+ CastKind::PointerCoercion(PointerCoercion::Unsize) => {
let cast_ty = self.layout_of(cast_ty)?;
self.unsize_into(src, cast_ty, dest)?;
}
- PointerExposeAddress => {
+ CastKind::PointerExposeAddress => {
let src = self.read_immediate(src)?;
let res = self.pointer_expose_address_cast(&src, cast_ty)?;
self.write_immediate(res, dest)?;
}
- PointerFromExposedAddress => {
+ CastKind::PointerFromExposedAddress => {
let src = self.read_immediate(src)?;
let res = self.pointer_from_exposed_address_cast(&src, cast_ty)?;
self.write_immediate(res, dest)?;
}
- IntToInt | IntToFloat => {
+ CastKind::IntToInt | CastKind::IntToFloat => {
let src = self.read_immediate(src)?;
let res = self.int_to_int_or_float(&src, cast_ty)?;
self.write_immediate(res, dest)?;
}
- FloatToFloat | FloatToInt => {
+ CastKind::FloatToFloat | CastKind::FloatToInt => {
let src = self.read_immediate(src)?;
let res = self.float_to_float_or_int(&src, cast_ty)?;
self.write_immediate(res, dest)?;
}
- FnPtrToPtr | PtrToPtr => {
+ CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
let src = self.read_immediate(&src)?;
let res = self.ptr_to_ptr(&src, cast_ty)?;
self.write_immediate(res, dest)?;
}
- Pointer(PointerCast::MutToConstPointer | PointerCast::ArrayToPointer) => {
+ CastKind::PointerCoercion(
+ PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
+ ) => {
// These are NOPs, but can be wide pointers.
let v = self.read_immediate(src)?;
self.write_immediate(*v, dest)?;
}
- Pointer(PointerCast::ReifyFnPointer) => {
+ CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer) => {
// All reifications must be monomorphic, bail out otherwise.
ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
@@ -88,7 +91,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- Pointer(PointerCast::UnsafeFnPointer) => {
+ CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer) => {
let src = self.read_immediate(src)?;
match cast_ty.kind() {
ty::FnPtr(_) => {
@@ -99,7 +102,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- Pointer(PointerCast::ClosureFnPointer(_)) => {
+ CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_)) => {
// All reifications must be monomorphic, bail out otherwise.
ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
@@ -120,7 +123,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- DynStar => {
+ CastKind::DynStar => {
if let ty::Dynamic(data, _, ty::DynStar) = cast_ty.kind() {
// Initial cast from sized to dyn trait
let vtable = self.get_vtable_ptr(src.layout.ty, data.principal())?;
@@ -134,16 +137,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- Transmute => {
+ CastKind::Transmute => {
assert!(src.layout.is_sized());
assert!(dest.layout.is_sized());
if src.layout.size != dest.layout.size {
- throw_ub_format!(
- "transmuting from {}-byte type to {}-byte type: `{}` -> `{}`",
- src.layout.size.bytes(),
- dest.layout.size.bytes(),
- src.layout.ty,
- dest.layout.ty,
+ let src_bytes = src.layout.size.bytes();
+ let dest_bytes = dest.layout.size.bytes();
+ let src_ty = format!("{}", src.layout.ty);
+ let dest_ty = format!("{}", dest.layout.ty);
+ throw_ub_custom!(
+ fluent::const_eval_invalid_transmute,
+ src_bytes = src_bytes,
+ dest_bytes = dest_bytes,
+ src = src_ty,
+ dest = dest_ty,
);
}
@@ -363,7 +370,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let old_vptr = old_vptr.to_pointer(self)?;
let (ty, old_trait) = self.get_ptr_vtable(old_vptr)?;
if old_trait != data_a.principal() {
- throw_ub_format!("upcast on a pointer whose vtable does not match its type");
+ throw_ub_custom!(fluent::const_eval_upcast_mismatch);
}
let new_vptr = self.get_vtable_ptr(ty, data_b.principal())?;
self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
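
The `CastKind::Transmute` arm above only changes how the error is reported; the rule it enforces stays the same: a MIR transmute is rejected unless the source and destination layouts have exactly the same size. A small standalone illustration of that rule (ordinary Rust, not interpreter code; `check_transmute_sizes` is a made-up helper):

use std::mem::size_of;

// Reject size-changing transmutes, as the interpreter does for `CastKind::Transmute`.
fn check_transmute_sizes<Src, Dst>() -> Result<(), String> {
    let (src, dst) = (size_of::<Src>(), size_of::<Dst>());
    if src != dst {
        // The diff turns the equivalent message into the Fluent id
        // `const_eval_invalid_transmute` with `src_bytes`/`dest_bytes` arguments.
        return Err(format!("transmuting from {src}-byte type to {dst}-byte type"));
    }
    Ok(())
}

fn main() {
    assert!(check_transmute_sizes::<u32, [u8; 4]>().is_ok());
    assert!(check_transmute_sizes::<u32, u64>().is_err());
}
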
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 7e9457800..36606ff69 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -1,13 +1,13 @@
use std::cell::Cell;
-use std::fmt;
-use std::mem;
+use std::{fmt, mem};
use either::{Either, Left, Right};
+use hir::CRATE_HIR_ID;
use rustc_hir::{self as hir, def_id::DefId, definitions::DefPathData};
use rustc_index::IndexVec;
use rustc_middle::mir;
-use rustc_middle::mir::interpret::{ErrorHandled, InterpError, ReportedErrorInfo};
+use rustc_middle::mir::interpret::{ErrorHandled, InterpError, InvalidMetaKind, ReportedErrorInfo};
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{
self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
@@ -24,6 +24,8 @@ use super::{
MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, PointerArithmetic, Provenance,
Scalar, StackPopJump,
};
+use crate::errors::{self, ErroneousConstUsed};
+use crate::fluent_generated as fluent;
use crate::util;
pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
@@ -246,6 +248,7 @@ impl<'mir, 'tcx, Prov: Provenance, Extra> Frame<'mir, 'tcx, Prov, Extra> {
}
}
+// FIXME: only used by miri, should be removed once translatable.
impl<'tcx> fmt::Display for FrameInfo<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ty::tls::with(|tcx| {
@@ -263,6 +266,21 @@ impl<'tcx> fmt::Display for FrameInfo<'tcx> {
}
}
+impl<'tcx> FrameInfo<'tcx> {
+ pub fn as_note(&self, tcx: TyCtxt<'tcx>) -> errors::FrameNote {
+ let span = self.span;
+ if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::ClosureExpr {
+ errors::FrameNote { where_: "closure", span, instance: String::new(), times: 0 }
+ } else {
+ let instance = format!("{}", self.instance);
+ // Note: this triggers a `good_path_bug` state, which means that if we ever get here
+ // we must emit a diagnostic. We should never display a `FrameInfo` unless we
+ // actually want to emit a warning or error to the user.
+ errors::FrameNote { where_: "instance", span, instance, times: 0 }
+ }
+ }
+}
+
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
#[inline]
fn data_layout(&self) -> &TargetDataLayout {
@@ -406,6 +424,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
#[inline(always)]
+ /// Find the first stack frame that is within the current crate, if any, otherwise return the crate's HirId
+ pub fn best_lint_scope(&self) -> hir::HirId {
+ self.stack()
+ .iter()
+ .find_map(|frame| frame.body.source.def_id().as_local())
+ .map_or(CRATE_HIR_ID, |def_id| self.tcx.hir().local_def_id_to_hir_id(def_id))
+ }
+
+ #[inline(always)]
pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>] {
M::stack(self)
}
@@ -497,7 +524,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.try_subst_mir_and_normalize_erasing_regions(
*self.tcx,
self.param_env,
- ty::EarlyBinder(value),
+ ty::EarlyBinder::bind(value),
)
.map_err(|_| err_inval!(TooGeneric))
}
@@ -610,7 +637,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Check if this brought us over the size limit.
if size > self.max_size_of_val() {
- throw_ub!(InvalidMeta("total size is bigger than largest supported object"));
+ throw_ub!(InvalidMeta(InvalidMetaKind::TooBig));
}
Ok(Some((size, align)))
}
@@ -628,7 +655,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let size = elem.size.bytes().saturating_mul(len); // we rely on `max_size_of_val` being smaller than `u64::MAX`.
let size = Size::from_bytes(size);
if size > self.max_size_of_val() {
- throw_ub!(InvalidMeta("slice is bigger than largest supported object"));
+ throw_ub!(InvalidMeta(InvalidMetaKind::SliceTooBig));
}
Ok(Some((size, elem.align.abi)))
}
@@ -736,7 +763,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
mir::UnwindAction::Cleanup(block) => Left(mir::Location { block, statement_index: 0 }),
mir::UnwindAction::Continue => Right(self.frame_mut().body.span),
mir::UnwindAction::Unreachable => {
- throw_ub_format!("unwinding past a stack frame that does not allow unwinding")
+ throw_ub_custom!(fluent::const_eval_unreachable_unwind);
}
mir::UnwindAction::Terminate => {
self.frame_mut().loc = Right(self.frame_mut().body.span);
@@ -775,7 +802,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
);
if unwinding && self.frame_idx() == 0 {
- throw_ub_format!("unwinding past the topmost frame of the stack");
+ throw_ub_custom!(fluent::const_eval_unwind_past_top);
}
// Copy return value. Must of course happen *before* we deallocate the locals.
@@ -863,7 +890,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// StorageLive expects the local to be dead, and marks it live.
let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
if !matches!(old, LocalValue::Dead) {
- throw_ub_format!("StorageLive on a local that was already live");
+ throw_ub_custom!(fluent::const_eval_double_storage_live);
}
Ok(())
}
@@ -906,7 +933,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ErrorHandled::Reported(err) => {
if !err.is_tainted_by_errors() && let Some(span) = span {
// To make it easier to figure out where this error comes from, also add a note at the current location.
- self.tcx.sess.span_note_without_error(span, "erroneous constant used");
+ self.tcx.sess.emit_note(ErroneousConstUsed { span });
}
err_inval!(AlreadyReported(err))
}
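
The two `InvalidMeta` changes above swap string messages for the structured `InvalidMetaKind::{TooBig, SliceTooBig}` variants; the underlying size computation is untouched. A standalone sketch of the slice case, with `i64::MAX` standing in for the interpreter's target-dependent `max_size_of_val` (an assumption made for illustration):

// Total slice size is `elem_size * len`, computed with saturating arithmetic so the
// comparison against the object-size limit stays well-defined.
fn slice_size(elem_size: u64, len: u64) -> Result<u64, &'static str> {
    let max_size_of_val = i64::MAX as u64; // assumed stand-in for the target limit
    let size = elem_size.saturating_mul(len);
    if size > max_size_of_val {
        return Err("slice is bigger than largest supported object");
    }
    Ok(size)
}

fn main() {
    assert_eq!(slice_size(8, 1024), Ok(8 * 1024));
    assert!(slice_size(8, u64::MAX).is_err());
}
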
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index c2b82ba9b..7b11ad330 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -28,6 +28,7 @@ use super::{
ValueVisitor,
};
use crate::const_eval;
+use crate::errors::{DanglingPtrInFinal, UnsupportedUntypedPointer};
pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
'mir,
@@ -320,10 +321,12 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
}
}
+/// How a constant value should be interned.
#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
pub enum InternKind {
/// The `mutability` of the static, ignoring the type which may have interior mutability.
Static(hir::Mutability),
+ /// A `const` item
Constant,
Promoted,
}
@@ -388,8 +391,7 @@ pub fn intern_const_alloc_recursive<
ecx.tcx.sess.delay_span_bug(
ecx.tcx.span,
format!(
- "error during interning should later cause validation failure: {}",
- error
+ "error during interning should later cause validation failure: {error:?}"
),
);
}
@@ -425,14 +427,16 @@ pub fn intern_const_alloc_recursive<
// immutability is so important.
alloc.mutability = Mutability::Not;
}
+ // If it's a constant, we should not have any "leftovers" as everything
+ // is tracked by const-checking.
+ // FIXME: downgrade this to a warning? It rejects some legitimate consts,
+ // such as `const CONST_RAW: *const Vec<i32> = &Vec::new() as *const _;`.
+ //
+    // NOTE: it looks like this code path is only reachable when we try to intern
+ // something that cannot be promoted, which in constants means values that have
+ // drop glue, such as the example above.
InternKind::Constant => {
- // If it's a constant, we should not have any "leftovers" as everything
- // is tracked by const-checking.
- // FIXME: downgrade this to a warning? It rejects some legitimate consts,
- // such as `const CONST_RAW: *const Vec<i32> = &Vec::new() as *const _;`.
- ecx.tcx
- .sess
- .span_err(ecx.tcx.span, "untyped pointers are not allowed in constant");
+ ecx.tcx.sess.emit_err(UnsupportedUntypedPointer { span: ecx.tcx.span });
// For better errors later, mark the allocation as immutable.
alloc.mutability = Mutability::Not;
}
@@ -447,10 +451,7 @@ pub fn intern_const_alloc_recursive<
} else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) {
// Codegen does not like dangling pointers, and generally `tcx` assumes that
// all allocations referenced anywhere actually exist. So, make sure we error here.
- let reported = ecx
- .tcx
- .sess
- .span_err(ecx.tcx.span, "encountered dangling pointer in final constant");
+ let reported = ecx.tcx.sess.emit_err(DanglingPtrInFinal { span: ecx.tcx.span });
return Err(reported);
} else if ecx.tcx.try_get_global_alloc(alloc_id).is_none() {
// We have hit an `AllocId` that is neither in local or global memory and isn't
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index a77c699c2..ed64a7655 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -22,6 +22,8 @@ use super::{
Pointer,
};
+use crate::fluent_generated as fluent;
+
mod caller_location;
fn numeric_intrinsic<Prov>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Prov> {
@@ -70,12 +72,12 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
}
sym::pref_align_of => {
// Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
- let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
+ let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(*e)))?;
ConstValue::from_target_usize(layout.align.pref.bytes(), &tcx)
}
sym::type_id => {
ensure_monomorphic_enough(tcx, tp_ty)?;
- ConstValue::from_u64(tcx.type_id_hash(tp_ty).as_u64())
+ ConstValue::from_u128(tcx.type_id_hash(tp_ty).as_u128())
}
sym::variant_count => match tp_ty.kind() {
// Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
@@ -167,8 +169,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let ty = match intrinsic_name {
sym::pref_align_of | sym::variant_count => self.tcx.types.usize,
sym::needs_drop => self.tcx.types.bool,
- sym::type_id => self.tcx.types.u64,
- sym::type_name => self.tcx.mk_static_str(),
+ sym::type_id => self.tcx.types.u128,
+ sym::type_name => Ty::new_static_str(self.tcx.tcx),
_ => bug!(),
};
let val = self.ctfe_query(None, |tcx| {
@@ -198,15 +200,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ty
),
};
- let (nonzero, intrinsic_name) = match intrinsic_name {
+ let (nonzero, actual_intrinsic_name) = match intrinsic_name {
sym::cttz_nonzero => (true, sym::cttz),
sym::ctlz_nonzero => (true, sym::ctlz),
other => (false, other),
};
if nonzero && bits == 0 {
- throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
+ throw_ub_custom!(
+ fluent::const_eval_call_nonzero_intrinsic,
+ name = intrinsic_name,
+ );
}
- let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
+ let out_val = numeric_intrinsic(actual_intrinsic_name, bits, kind);
self.write_scalar(out_val, dest)?;
}
sym::saturating_add | sym::saturating_sub => {
@@ -229,37 +234,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let r = self.read_immediate(&args[1])?;
self.exact_div(&l, &r, dest)?;
}
- sym::unchecked_shl
- | sym::unchecked_shr
- | sym::unchecked_add
- | sym::unchecked_sub
- | sym::unchecked_mul
- | sym::unchecked_div
- | sym::unchecked_rem => {
- let l = self.read_immediate(&args[0])?;
- let r = self.read_immediate(&args[1])?;
- let bin_op = match intrinsic_name {
- sym::unchecked_shl => BinOp::Shl,
- sym::unchecked_shr => BinOp::Shr,
- sym::unchecked_add => BinOp::Add,
- sym::unchecked_sub => BinOp::Sub,
- sym::unchecked_mul => BinOp::Mul,
- sym::unchecked_div => BinOp::Div,
- sym::unchecked_rem => BinOp::Rem,
- _ => bug!(),
- };
- let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
- if overflowed {
- let layout = self.layout_of(substs.type_at(0))?;
- let r_val = r.to_scalar().to_bits(layout.size)?;
- if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
- throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
- } else {
- throw_ub_format!("overflow executing `{}`", intrinsic_name);
- }
- }
- self.write_scalar(val, dest)?;
- }
sym::rotate_left | sym::rotate_right => {
// rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
// rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
@@ -314,17 +288,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(Err(_), _) | (_, Err(_)) => {
// We managed to find a valid allocation for one pointer, but not the other.
// That means they are definitely not pointing to the same allocation.
- throw_ub_format!(
- "`{}` called on pointers into different allocations",
- intrinsic_name
+ throw_ub_custom!(
+ fluent::const_eval_different_allocations,
+ name = intrinsic_name,
);
}
(Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _))) => {
// Found allocation for both. They must be into the same allocation.
if a_alloc_id != b_alloc_id {
- throw_ub_format!(
- "`{}` called on pointers into different allocations",
- intrinsic_name
+ throw_ub_custom!(
+ fluent::const_eval_different_allocations,
+ name = intrinsic_name,
);
}
// Use these offsets for distance calculation.
@@ -344,11 +318,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if overflowed {
// a < b
if intrinsic_name == sym::ptr_offset_from_unsigned {
- throw_ub_format!(
- "`{}` called when first pointer has smaller offset than second: {} < {}",
- intrinsic_name,
- a_offset,
- b_offset,
+ throw_ub_custom!(
+ fluent::const_eval_unsigned_offset_from_overflow,
+ a_offset = a_offset,
+ b_offset = b_offset,
);
}
// The signed form of the intrinsic allows this. If we interpret the
@@ -356,9 +329,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// seems *positive*, they were more than isize::MAX apart.
let dist = val.to_target_isize(self)?;
if dist >= 0 {
- throw_ub_format!(
- "`{}` called when first pointer is too far before second",
- intrinsic_name
+ throw_ub_custom!(
+ fluent::const_eval_offset_from_underflow,
+ name = intrinsic_name,
);
}
dist
@@ -368,9 +341,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// If converting to isize produced a *negative* result, we had an overflow
// because they were more than isize::MAX apart.
if dist < 0 {
- throw_ub_format!(
- "`{}` called when first pointer is too far ahead of second",
- intrinsic_name
+ throw_ub_custom!(
+ fluent::const_eval_offset_from_overflow,
+ name = intrinsic_name,
);
}
dist
@@ -513,7 +486,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let op = self.eval_operand(op, None)?;
let cond = self.read_scalar(&op)?.to_bool()?;
if !cond {
- throw_ub_format!("`assume` called with `false`");
+ throw_ub_custom!(fluent::const_eval_assume_false);
}
Ok(())
}
@@ -542,7 +515,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
assert!(!overflow); // All overflow is UB, so this should never return on overflow.
if res.assert_bits(a.layout.size) != 0 {
- throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b)
+ throw_ub_custom!(
+ fluent::const_eval_exact_div_has_remainder,
+ a = format!("{a}"),
+ b = format!("{b}")
+ )
}
// `Rem` says this is all right, so we can let `Div` do its job.
self.binop_ignore_overflow(BinOp::Div, &a, &b, dest)
@@ -638,9 +615,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
// but no actual allocation can be big enough for the difference to be noticeable.
let size = size.checked_mul(count, self).ok_or_else(|| {
- err_ub_format!(
- "overflow computing total size of `{}`",
- if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
+ err_ub_custom!(
+ fluent::const_eval_size_overflow,
+ name = if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
)
})?;
@@ -664,10 +641,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
// but no actual allocation can be big enough for the difference to be noticeable.
- let len = layout
- .size
- .checked_mul(count, self)
- .ok_or_else(|| err_ub_format!("overflow computing total size of `write_bytes`"))?;
+ let len = layout.size.checked_mul(count, self).ok_or_else(|| {
+ err_ub_custom!(fluent::const_eval_size_overflow, name = "write_bytes")
+ })?;
let bytes = std::iter::repeat(byte).take(len.bytes_usize());
self.write_bytes_ptr(dst, bytes)
@@ -691,7 +667,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return Ok(&[]);
};
if alloc_ref.has_provenance() {
- throw_ub_format!("`raw_eq` on bytes with provenance");
+ throw_ub_custom!(fluent::const_eval_raw_eq_with_provenance);
}
alloc_ref.get_bytes_strip_provenance()
};
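
The `ptr_offset_from`/`ptr_offset_from_unsigned` hunks above keep the same distance checks and only reword the errors: both pointers must be in the same allocation, the unsigned variant requires the first offset to be at least the second, and the distance must fit in `isize` either way. A standalone sketch of those checks on plain `u64`/`i64` offsets (an assumed simplification of the target-size arithmetic):

fn offset_from(a_offset: u64, b_offset: u64, unsigned: bool) -> Result<i64, &'static str> {
    let (val, overflowed) = a_offset.overflowing_sub(b_offset);
    if overflowed {
        // a_offset < b_offset
        if unsigned {
            return Err("first pointer has a smaller offset than the second");
        }
        // Reinterpret the wrapped result as signed; if it still looks non-negative,
        // the pointers were more than isize::MAX apart.
        let dist = val as i64;
        if dist >= 0 {
            return Err("first pointer is too far before the second");
        }
        Ok(dist)
    } else {
        // A negative signed reinterpretation here means the distance exceeded isize::MAX.
        let dist = val as i64;
        if dist < 0 {
            return Err("pointers are more than isize::MAX apart");
        }
        Ok(dist)
    }
}

fn main() {
    assert_eq!(offset_from(16, 4, true), Ok(12));
    assert!(offset_from(4, 16, true).is_err());
    assert_eq!(offset_from(4, 16, false), Ok(-12));
}
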
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index d5b6a581a..1125d8d1f 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -19,6 +19,7 @@ use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
use rustc_target::abi::{Align, HasDataLayout, Size};
use crate::const_eval::CheckAlignment;
+use crate::fluent_generated as fluent;
use super::{
alloc_range, AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg,
@@ -200,7 +201,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
align: Align,
kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx, Pointer<M::Provenance>> {
- let alloc = Allocation::uninit(size, align, M::PANIC_ON_ALLOC_FAIL)?;
+ let alloc = if M::PANIC_ON_ALLOC_FAIL {
+ Allocation::uninit(size, align)
+ } else {
+ Allocation::try_uninit(size, align)?
+ };
self.allocate_raw_ptr(alloc, kind)
}
@@ -242,9 +247,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> InterpResult<'tcx, Pointer<M::Provenance>> {
let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
if offset.bytes() != 0 {
- throw_ub_format!(
- "reallocating {:?} which does not point to the beginning of an object",
- ptr
+ throw_ub_custom!(
+ fluent::const_eval_realloc_or_alloc_with_offset,
+ ptr = format!("{ptr:?}"),
+ kind = "realloc"
);
}
@@ -280,9 +286,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
trace!("deallocating: {alloc_id:?}");
if offset.bytes() != 0 {
- throw_ub_format!(
- "deallocating {:?} which does not point to the beginning of an object",
- ptr
+ throw_ub_custom!(
+ fluent::const_eval_realloc_or_alloc_with_offset,
+ ptr = format!("{ptr:?}"),
+ kind = "dealloc",
);
}
@@ -290,13 +297,25 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Deallocating global memory -- always an error
return Err(match self.tcx.try_get_global_alloc(alloc_id) {
Some(GlobalAlloc::Function(..)) => {
- err_ub_format!("deallocating {alloc_id:?}, which is a function")
+ err_ub_custom!(
+ fluent::const_eval_invalid_dealloc,
+ alloc_id = alloc_id,
+ kind = "fn",
+ )
}
Some(GlobalAlloc::VTable(..)) => {
- err_ub_format!("deallocating {alloc_id:?}, which is a vtable")
+ err_ub_custom!(
+ fluent::const_eval_invalid_dealloc,
+ alloc_id = alloc_id,
+ kind = "vtable",
+ )
}
Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
- err_ub_format!("deallocating {alloc_id:?}, which is static memory")
+ err_ub_custom!(
+ fluent::const_eval_invalid_dealloc,
+ alloc_id = alloc_id,
+ kind = "static_mem"
+ )
}
None => err_ub!(PointerUseAfterFree(alloc_id)),
}
@@ -304,21 +323,25 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
};
if alloc.mutability.is_not() {
- throw_ub_format!("deallocating immutable allocation {alloc_id:?}");
+ throw_ub_custom!(fluent::const_eval_dealloc_immutable, alloc = alloc_id,);
}
if alloc_kind != kind {
- throw_ub_format!(
- "deallocating {alloc_id:?}, which is {alloc_kind} memory, using {kind} deallocation operation"
+ throw_ub_custom!(
+ fluent::const_eval_dealloc_kind_mismatch,
+ alloc = alloc_id,
+ alloc_kind = format!("{alloc_kind}"),
+ kind = format!("{kind}"),
);
}
if let Some((size, align)) = old_size_and_align {
if size != alloc.size() || align != alloc.align {
- throw_ub_format!(
- "incorrect layout on deallocation: {alloc_id:?} has size {} and alignment {}, but gave size {} and alignment {}",
- alloc.size().bytes(),
- alloc.align.bytes(),
- size.bytes(),
- align.bytes(),
+ throw_ub_custom!(
+ fluent::const_eval_dealloc_incorrect_layout,
+ alloc = alloc_id,
+ size = alloc.size().bytes(),
+ align = alloc.align.bytes(),
+ size_found = size.bytes(),
+ align_found = align.bytes(),
)
}
}
@@ -1166,7 +1189,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if (src_offset <= dest_offset && src_offset + size > dest_offset)
|| (dest_offset <= src_offset && dest_offset + size > src_offset)
{
- throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
+ throw_ub_custom!(fluent::const_eval_copy_nonoverlapping_overlapping);
}
}
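
The last hunk above only changes how the `copy_nonoverlapping` violation is reported; the overlap test itself is unchanged and, once both pointers are known to be in the same allocation, reduces to a comparison of offsets. A standalone sketch of that test:

// Two ranges [src, src+size) and [dest, dest+size) overlap iff either start lies
// strictly before the other range's end.
fn ranges_overlap(src_offset: u64, dest_offset: u64, size: u64) -> bool {
    (src_offset <= dest_offset && src_offset + size > dest_offset)
        || (dest_offset <= src_offset && dest_offset + size > src_offset)
}

fn main() {
    assert!(ranges_overlap(0, 4, 8));   // [0,8) overlaps [4,12)
    assert!(!ranges_overlap(0, 8, 8));  // [0,8) and [8,16) only touch
    assert!(!ranges_overlap(0, 4, 0));  // zero-sized copies never overlap
}
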
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index e30af1655..5f89d652f 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -106,7 +106,7 @@ impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
// Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
// print what it points to, which would fail since it has no access to the local
// memory.
- cx.pretty_print_const_pointer(ptr, ty, true)
+ cx.pretty_print_const_pointer(ptr, ty)
}
}
}
@@ -633,7 +633,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- pub(super) fn const_val_to_op(
+ pub(crate) fn const_val_to_op(
&self,
val_val: ConstValue<'tcx>,
ty: Ty<'tcx>,
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index 7186148da..e04764636 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -3,10 +3,13 @@ use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, FloatTy, Ty};
+use rustc_span::symbol::sym;
use rustc_target::abi::Abi;
use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
+use crate::fluent_generated as fluent;
+
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Applies the binary operation `op` to the two operands and writes a tuple of the result
/// and a boolean signifying the potential overflow to the destination.
@@ -19,7 +22,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> InterpResult<'tcx> {
let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
debug_assert_eq!(
- self.tcx.mk_tup(&[ty, self.tcx.types.bool]),
+ Ty::new_tup(self.tcx.tcx, &[ty, self.tcx.types.bool]),
dest.layout.ty,
"type mismatch for result of {:?}",
op,
@@ -139,8 +142,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
use rustc_middle::mir::BinOp::*;
+ let throw_ub_on_overflow = match bin_op {
+ AddUnchecked => Some(sym::unchecked_add),
+ SubUnchecked => Some(sym::unchecked_sub),
+ MulUnchecked => Some(sym::unchecked_mul),
+ ShlUnchecked => Some(sym::unchecked_shl),
+ ShrUnchecked => Some(sym::unchecked_shr),
+ _ => None,
+ };
+
// Shift ops can have an RHS with a different numeric type.
- if bin_op == Shl || bin_op == Shr {
+ if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) {
let size = u128::from(left_layout.size.bits());
// Even if `r` is signed, we treat it as if it was unsigned (i.e., we use its
// zero-extended form). This matches the codegen backend:
@@ -155,6 +167,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// integers are maximally 128bits wide, so negative shifts *always* overflow and we have
// consistent results for the same value represented at different bit widths.
assert!(size <= 128);
+ let original_r = r;
let overflow = r >= size;
// The shift offset is implicitly masked to the type size, to make sure this operation
// is always defined. This is the one MIR operator that does *not* directly map to a
@@ -166,19 +179,28 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let result = if left_layout.abi.is_signed() {
let l = self.sign_extend(l, left_layout) as i128;
let result = match bin_op {
- Shl => l.checked_shl(r).unwrap(),
- Shr => l.checked_shr(r).unwrap(),
+ Shl | ShlUnchecked => l.checked_shl(r).unwrap(),
+ Shr | ShrUnchecked => l.checked_shr(r).unwrap(),
_ => bug!(),
};
result as u128
} else {
match bin_op {
- Shl => l.checked_shl(r).unwrap(),
- Shr => l.checked_shr(r).unwrap(),
+ Shl | ShlUnchecked => l.checked_shl(r).unwrap(),
+ Shr | ShrUnchecked => l.checked_shr(r).unwrap(),
_ => bug!(),
}
};
let truncated = self.truncate(result, left_layout);
+
+ if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
+ throw_ub_custom!(
+ fluent::const_eval_overflow_shift,
+ val = original_r,
+ name = intrinsic_name
+ );
+ }
+
return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
}
@@ -216,9 +238,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Rem if r == 0 => throw_ub!(RemainderByZero),
Div => Some(i128::overflowing_div),
Rem => Some(i128::overflowing_rem),
- Add => Some(i128::overflowing_add),
- Sub => Some(i128::overflowing_sub),
- Mul => Some(i128::overflowing_mul),
+ Add | AddUnchecked => Some(i128::overflowing_add),
+ Sub | SubUnchecked => Some(i128::overflowing_sub),
+ Mul | MulUnchecked => Some(i128::overflowing_mul),
_ => None,
};
if let Some(op) = op {
@@ -242,11 +264,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// If that truncation loses any information, we have an overflow.
let result = result as u128;
let truncated = self.truncate(result, left_layout);
- return Ok((
- Scalar::from_uint(truncated, size),
- oflo || self.sign_extend(truncated, left_layout) != result,
- left_layout.ty,
- ));
+ let overflow = oflo || self.sign_extend(truncated, left_layout) != result;
+ if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
+ throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
+ }
+ return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
}
}
@@ -263,12 +285,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),
- Add | Sub | Mul | Rem | Div => {
+ Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => {
assert!(!left_layout.abi.is_signed());
let op: fn(u128, u128) -> (u128, bool) = match bin_op {
- Add => u128::overflowing_add,
- Sub => u128::overflowing_sub,
- Mul => u128::overflowing_mul,
+ Add | AddUnchecked => u128::overflowing_add,
+ Sub | SubUnchecked => u128::overflowing_sub,
+ Mul | MulUnchecked => u128::overflowing_mul,
Div if r == 0 => throw_ub!(DivisionByZero),
Rem if r == 0 => throw_ub!(RemainderByZero),
Div => u128::overflowing_div,
@@ -279,11 +301,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Truncate to target type.
// If that truncation loses any information, we have an overflow.
let truncated = self.truncate(result, left_layout);
- return Ok((
- Scalar::from_uint(truncated, size),
- oflo || truncated != result,
- left_layout.ty,
- ));
+ let overflow = oflo || truncated != result;
+ if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
+ throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
+ }
+ return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
}
_ => span_bug!(
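
The operator changes above fold the former `unchecked_*` intrinsics into the new `AddUnchecked`/`SubUnchecked`/`MulUnchecked`/`ShlUnchecked`/`ShrUnchecked` MIR binary operators: the arithmetic is shared with the checked forms, and only when an overflow occurs for an unchecked operator is UB raised via `throw_ub_custom!`. A standalone sketch of the shift case on `u32` (ordinary Rust; `shl_with_overflow` is a made-up helper mirroring the masking and overflow bookkeeping):

fn shl_with_overflow(l: u32, r: u32, unchecked: bool) -> Result<(u32, bool), String> {
    let bits = u32::BITS;      // analogous to `left_layout.size.bits()`
    let overflow = r >= bits;  // recorded before masking, like `original_r` in the diff
    let masked = r % bits;     // the implicit masking MIR's `Shl` performs
    let result = l.checked_shl(masked).unwrap();
    if overflow && unchecked {
        // Mirrors `throw_ub_custom!(const_eval_overflow_shift, ...)` in the diff.
        return Err(format!("overflowing shift by {r} in `unchecked_shl`"));
    }
    Ok((result, overflow))
}

fn main() {
    assert_eq!(shl_with_overflow(1, 33, false), Ok((2, true))); // wraps: 33 % 32 == 1
    assert!(shl_with_overflow(1, 33, true).is_err());           // UB for unchecked_shl
    assert_eq!(shl_with_overflow(1, 4, true), Ok((16, false)));
}
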
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 2a31a59ad..ca1106384 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -9,6 +9,7 @@ use rustc_index::IndexSlice;
use rustc_middle::mir;
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::Ty;
use rustc_target::abi::{self, Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT};
use super::{
@@ -395,7 +396,7 @@ where
// (Transmuting is okay since this is an in-memory place. We also double-check the size
// stays the same.)
let (len, e_ty) = mplace.layout.ty.simd_size_and_type(*self.tcx);
- let array = self.tcx.mk_array(e_ty, len);
+ let array = Ty::new_array(self.tcx.tcx, e_ty, len);
let layout = self.layout_of(array)?;
assert_eq!(layout.size, mplace.layout.size);
Ok((MPlaceTy { layout, ..*mplace }, len))
@@ -699,8 +700,13 @@ where
assert_eq!(src.layout.size, dest.layout.size);
}
+ // Setting `nonoverlapping` here only has an effect when we don't hit the fast-path above,
+ // but that should at least match what LLVM does where `memcpy` is also only used when the
+ // type does not have Scalar/ScalarPair layout.
+ // (Or as the `Assign` docs put it, assignments "not producing primitives" must be
+ // non-overlapping.)
self.mem_copy(
- src.ptr, src.align, dest.ptr, dest.align, dest_size, /*nonoverlapping*/ false,
+ src.ptr, src.align, dest.ptr, dest.align, dest_size, /*nonoverlapping*/ true,
)
}
@@ -775,7 +781,8 @@ where
let meta = Scalar::from_target_usize(u64::try_from(str.len()).unwrap(), self);
let mplace = MemPlace { ptr: ptr.into(), meta: MemPlaceMeta::Meta(meta) };
- let ty = self.tcx.mk_ref(
+ let ty = Ty::new_ref(
+ self.tcx.tcx,
self.tcx.lifetimes.re_static,
ty::TypeAndMut { ty: self.tcx.types.str_, mutbl },
);
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 91da930db..d7d31fe18 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -12,6 +12,7 @@ use either::{Left, Right};
use rustc_middle::mir;
use rustc_middle::ty;
use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::Ty;
use rustc_target::abi::{self, Abi, VariantIdx};
use super::{
@@ -317,7 +318,9 @@ where
let (meta, ty) = match base.layout.ty.kind() {
// It is not nice to match on the type, but that seems to be the only way to
// implement this.
- ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(*inner, inner_len)),
+ ty::Array(inner, _) => {
+ (MemPlaceMeta::None, Ty::new_array(self.tcx.tcx, *inner, inner_len))
+ }
ty::Slice(..) => {
let len = Scalar::from_target_usize(inner_len, self);
(MemPlaceMeta::Meta(len), base.layout.ty)
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 1e60a1e72..619da8abb 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -9,27 +9,7 @@ use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::LayoutOf;
use super::{ImmTy, InterpCx, Machine};
-
-/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
-/// same type as the result.
-#[inline]
-fn binop_left_homogeneous(op: mir::BinOp) -> bool {
- use rustc_middle::mir::BinOp::*;
- match op {
- Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
- Eq | Ne | Lt | Le | Gt | Ge => false,
- }
-}
-/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
-/// same type as the LHS.
-#[inline]
-fn binop_right_homogeneous(op: mir::BinOp) -> bool {
- use rustc_middle::mir::BinOp::*;
- match op {
- Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
- Offset | Shl | Shr => false,
- }
-}
+use crate::util;
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Returns `true` as long as there are more things to do.
@@ -179,9 +159,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
BinaryOp(bin_op, box (ref left, ref right)) => {
- let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
+ let layout = util::binop_left_homogeneous(bin_op).then_some(dest.layout);
let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
- let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
+ let layout = util::binop_right_homogeneous(bin_op).then_some(left.layout);
let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
self.binop_ignore_overflow(bin_op, &left, &right, &dest)?;
}
@@ -189,7 +169,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
CheckedBinaryOp(bin_op, box (ref left, ref right)) => {
// Due to the extra boolean in the result, we can never reuse the `dest.layout`.
let left = self.read_immediate(&self.eval_operand(left, None)?)?;
- let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
+ let layout = util::binop_right_homogeneous(bin_op).then_some(left.layout);
let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
self.binop_with_overflow(bin_op, &left, &right, &dest)?;
}
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 586e8f063..15823a597 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -15,6 +15,7 @@ use super::{
FnVal, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemoryKind, OpTy, Operand,
PlaceTy, Scalar, StackPopCleanup,
};
+use crate::fluent_generated as fluent;
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub(super) fn eval_terminator(
@@ -61,7 +62,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
destination,
target,
unwind,
- from_hir_call: _,
+ call_source: _,
fn_span: _,
} => {
let old_stack = self.frame_idx();
@@ -172,7 +173,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
InlineAsm { template, ref operands, options, destination, .. } => {
M::eval_inline_asm(self, template, operands, options)?;
if options.contains(InlineAsmOptions::NORETURN) {
- throw_ub_format!("returned from noreturn inline assembly");
+ throw_ub_custom!(fluent::const_eval_noreturn_asm_returned);
}
self.go_to_block(
destination
@@ -288,15 +289,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return Ok(());
}
// Find next caller arg.
- let (caller_arg, caller_abi) = caller_args.next().ok_or_else(|| {
- err_ub_format!("calling a function with fewer arguments than it requires")
- })?;
+ let Some((caller_arg, caller_abi)) = caller_args.next() else {
+ throw_ub_custom!(fluent::const_eval_not_enough_caller_args);
+ };
// Now, check
if !Self::check_argument_compat(caller_abi, callee_abi) {
- throw_ub_format!(
- "calling a function with argument of type {:?} passing data of type {:?}",
- callee_arg.layout.ty,
- caller_arg.layout.ty
+ let callee_ty = format!("{}", callee_arg.layout.ty);
+ let caller_ty = format!("{}", caller_arg.layout.ty);
+ throw_ub_custom!(
+ fluent::const_eval_incompatible_types,
+ callee_ty = callee_ty,
+ caller_ty = caller_ty,
)
}
// Special handling for unsized parameters.
@@ -398,10 +401,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if M::enforce_abi(self) {
if caller_fn_abi.conv != callee_fn_abi.conv {
- throw_ub_format!(
- "calling a function with calling convention {:?} using calling convention {:?}",
- callee_fn_abi.conv,
- caller_fn_abi.conv
+ throw_ub_custom!(
+ fluent::const_eval_incompatible_calling_conventions,
+ callee_conv = format!("{:?}", callee_fn_abi.conv),
+ caller_conv = format!("{:?}", caller_fn_abi.conv),
)
}
}
@@ -508,15 +511,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
"mismatch between callee ABI and callee body arguments"
);
if caller_args.next().is_some() {
- throw_ub_format!("calling a function with more arguments than it expected")
+ throw_ub_custom!(fluent::const_eval_too_many_caller_args);
}
// Don't forget to check the return type!
if !Self::check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret) {
- throw_ub_format!(
- "calling a function with return type {:?} passing \
- return place of type {:?}",
- callee_fn_abi.ret.layout.ty,
- caller_fn_abi.ret.layout.ty,
+ let callee_ty = format!("{}", callee_fn_abi.ret.layout.ty);
+ let caller_ty = format!("{}", caller_fn_abi.ret.layout.ty);
+ throw_ub_custom!(
+ fluent::const_eval_incompatible_return_types,
+ callee_ty = callee_ty,
+ caller_ty = caller_ty,
)
}
};
@@ -587,9 +591,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (recv, vptr) = self.unpack_dyn_star(&receiver_place.into())?;
let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
if dyn_trait != data.principal() {
- throw_ub_format!(
- "`dyn*` call on a pointer whose vtable does not match its type"
- );
+ throw_ub_custom!(fluent::const_eval_dyn_star_call_vtable_mismatch);
}
let recv = recv.assert_mem_place(); // we passed an MPlaceTy to `unpack_dyn_star` so we definitely still have one
@@ -609,9 +611,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let vptr = receiver_place.meta.unwrap_meta().to_pointer(self)?;
let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
if dyn_trait != data.principal() {
- throw_ub_format!(
- "`dyn` call on a pointer whose vtable does not match its type"
- );
+ throw_ub_custom!(fluent::const_eval_dyn_call_vtable_mismatch);
}
// It might be surprising that we use a pointer as the receiver even if this
@@ -623,7 +623,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Now determine the actual method to call. We can do that in two different ways and
// compare them to ensure everything fits.
let Some(ty::VtblEntry::Method(fn_inst)) = self.get_vtable_entries(vptr)?.get(idx).copied() else {
- throw_ub_format!("`dyn` call trying to call something that is not a method")
+ // FIXME(fee1-dead) these could be variants of the UB info enum instead of this
+ throw_ub_custom!(fluent::const_eval_dyn_call_not_a_method);
};
trace!("Virtual call dispatches to {fn_inst:#?}");
if cfg!(debug_assertions) {
@@ -649,7 +650,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Adjust receiver argument. Layout can be any (thin) ptr.
args[0] = ImmTy::from_immediate(
Scalar::from_maybe_pointer(adjusted_receiver, self).into(),
- self.layout_of(self.tcx.mk_mut_ptr(dyn_ty))?,
+ self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, dyn_ty))?,
)
.into();
trace!("Patched receiver operand to {:#?}", args[0]);
@@ -702,7 +703,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let arg = ImmTy::from_immediate(
place.to_ref(self),
- self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
+ self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, place.layout.ty))?,
);
let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?);
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 01b772899..21c655988 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -4,7 +4,7 @@
//! That's useful because it means other passes (e.g. promotion) can rely on `const`s
//! to be const-safe.
-use std::fmt::{Display, Write};
+use std::fmt::Write;
use std::num::NonZeroUsize;
use either::{Left, Right};
@@ -12,7 +12,10 @@ use either::{Left, Right};
use rustc_ast::Mutability;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
-use rustc_middle::mir::interpret::InterpError;
+use rustc_middle::mir::interpret::{
+ ExpectedKind, InterpError, InvalidMetaKind, PointerKind, ValidationErrorInfo,
+ ValidationErrorKind, ValidationErrorKind::*,
+};
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_span::symbol::{sym, Symbol};
@@ -30,14 +33,7 @@ use super::{
};
macro_rules! throw_validation_failure {
- ($where:expr, { $( $what_fmt:tt )* } $( expected { $( $expected_fmt:tt )* } )?) => {{
- let mut msg = String::new();
- msg.push_str("encountered ");
- write!(&mut msg, $($what_fmt)*).unwrap();
- $(
- msg.push_str(", but expected ");
- write!(&mut msg, $($expected_fmt)*).unwrap();
- )?
+ ($where:expr, $kind: expr) => {{
let where_ = &$where;
let path = if !where_.is_empty() {
let mut path = String::new();
@@ -46,7 +42,8 @@ macro_rules! throw_validation_failure {
} else {
None
};
- throw_ub!(ValidationFailure { path, msg })
+
+ throw_ub!(Validation(ValidationErrorInfo { path, kind: $kind }))
}};
}
@@ -82,22 +79,22 @@ macro_rules! throw_validation_failure {
///
macro_rules! try_validation {
($e:expr, $where:expr,
- $( $( $p:pat_param )|+ => { $( $what_fmt:tt )* } $( expected { $( $expected_fmt:tt )* } )? ),+ $(,)?
+ $( $( $p:pat_param )|+ => $kind: expr ),+ $(,)?
) => {{
match $e {
Ok(x) => x,
// We catch the error and turn it into a validation failure. We are okay with
// allocation here as this can only slow down builds that fail anyway.
- Err(e) => match e.kind() {
+ Err(e) => match e.into_parts() {
$(
- InterpError::UndefinedBehavior($($p)|+) =>
+ (InterpError::UndefinedBehavior($($p)|+), _) =>
throw_validation_failure!(
$where,
- { $( $what_fmt )* } $( expected { $( $expected_fmt )* } )?
+ $kind
)
),+,
#[allow(unreachable_patterns)]
- _ => Err::<!, _>(e)?,
+ (e, rest) => Err::<!, _>($crate::interpret::InterpErrorInfo::from_parts(e, rest))?,
}
}
}};
@@ -160,6 +157,7 @@ impl<T: Copy + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH>
}
}
+// FIXME make this translatable as well?
/// Format a path
fn write_path(out: &mut String, path: &[PathElem]) {
use self::PathElem::*;
@@ -185,26 +183,6 @@ fn write_path(out: &mut String, path: &[PathElem]) {
}
}
-// Formats such that a sentence like "expected something {}" to mean
-// "expected something <in the given range>" makes sense.
-fn wrapping_range_format(r: WrappingRange, max_hi: u128) -> String {
- let WrappingRange { start: lo, end: hi } = r;
- assert!(hi <= max_hi);
- if lo > hi {
- format!("less or equal to {}, or greater or equal to {}", hi, lo)
- } else if lo == hi {
- format!("equal to {}", lo)
- } else if lo == 0 {
- assert!(hi < max_hi, "should not be printing if the range covers everything");
- format!("less or equal to {}", hi)
- } else if hi == max_hi {
- assert!(lo > 0, "should not be printing if the range covers everything");
- format!("greater or equal to {}", lo)
- } else {
- format!("in the range {:?}", r)
- }
-}
-
struct ValidityVisitor<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
/// The `path` may be pushed to, but the part that is present when a function
/// starts must not be changed! `visit_fields` and `visit_array` rely on
@@ -311,19 +289,19 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
fn read_immediate(
&self,
op: &OpTy<'tcx, M::Provenance>,
- expected: impl Display,
+ expected: ExpectedKind,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
Ok(try_validation!(
self.ecx.read_immediate(op),
self.path,
- InvalidUninitBytes(None) => { "uninitialized memory" } expected { "{expected}" }
+ InvalidUninitBytes(None) => Uninit { expected }
))
}
fn read_scalar(
&self,
op: &OpTy<'tcx, M::Provenance>,
- expected: impl Display,
+ expected: ExpectedKind,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
Ok(self.read_immediate(op, expected)?.to_scalar())
}
@@ -342,8 +320,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
self.ecx.get_ptr_vtable(vtable),
self.path,
DanglingIntPointer(..) |
- InvalidVTablePointer(..) =>
- { "{vtable}" } expected { "a vtable pointer" },
+ InvalidVTablePointer(..) => InvalidVTablePtr { value: format!("{vtable}") }
);
// FIXME: check if the type/trait match what ty::Dynamic says?
}
@@ -366,10 +343,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
fn check_safe_pointer(
&mut self,
value: &OpTy<'tcx, M::Provenance>,
- kind: &str,
+ ptr_kind: PointerKind,
) -> InterpResult<'tcx> {
- let place =
- self.ecx.ref_to_mplace(&self.read_immediate(value, format_args!("a {kind}"))?)?;
+ let place = self.ecx.ref_to_mplace(&self.read_immediate(value, ptr_kind.into())?)?;
// Handle wide pointers.
// Check metadata early, for better diagnostics
if place.layout.is_unsized() {
@@ -379,7 +355,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let size_and_align = try_validation!(
self.ecx.size_and_align_of_mplace(&place),
self.path,
- InvalidMeta(msg) => { "invalid {} metadata: {}", kind, msg },
+ InvalidMeta(msg) => match msg {
+ InvalidMetaKind::SliceTooBig => InvalidMetaSliceTooLarge { ptr_kind },
+ InvalidMetaKind::TooBig => InvalidMetaTooLarge { ptr_kind },
+ }
);
let (size, align) = size_and_align
// for the purpose of validity, consider foreign types to have
@@ -395,31 +374,30 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
),
self.path,
- AlignmentCheckFailed { required, has } =>
- {
- "an unaligned {kind} (required {} byte alignment but found {})",
- required.bytes(),
- has.bytes(),
- },
- DanglingIntPointer(0, _) =>
- { "a null {kind}" },
- DanglingIntPointer(i, _) =>
- {
- "a dangling {kind} ({pointer} has no provenance)",
- pointer = Pointer::<Option<AllocId>>::from_addr_invalid(*i),
- },
- PointerOutOfBounds { .. } =>
- { "a dangling {kind} (going beyond the bounds of its allocation)" },
+ AlignmentCheckFailed { required, has } => UnalignedPtr {
+ ptr_kind,
+ required_bytes: required.bytes(),
+ found_bytes: has.bytes()
+ },
+ DanglingIntPointer(0, _) => NullPtr { ptr_kind },
+ DanglingIntPointer(i, _) => DanglingPtrNoProvenance {
+ ptr_kind,
+                    // FIXME this says "null pointer" when null, but we need to translate that
+ pointer: format!("{}", Pointer::<Option<AllocId>>::from_addr_invalid(i))
+ },
+ PointerOutOfBounds { .. } => DanglingPtrOutOfBounds {
+ ptr_kind
+ },
// This cannot happen during const-eval (because interning already detects
// dangling pointers), but it can happen in Miri.
- PointerUseAfterFree(..) =>
- { "a dangling {kind} (use-after-free)" },
+ PointerUseAfterFree(..) => DanglingPtrUseAfterFree {
+ ptr_kind,
+ },
);
// Do not allow pointers to uninhabited types.
if place.layout.abi.is_uninhabited() {
- throw_validation_failure!(self.path,
- { "a {kind} pointing to uninhabited type {}", place.layout.ty }
- )
+ let ty = place.layout.ty;
+ throw_validation_failure!(self.path, PtrToUninhabited { ptr_kind, ty })
}
// Recursive checking
if let Some(ref_tracking) = self.ref_tracking.as_deref_mut() {
@@ -441,9 +419,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// this check is so important.
// This check is reachable when the const just referenced the static,
// but never read it (so we never entered `before_access_global`).
- throw_validation_failure!(self.path,
- { "a {} pointing to a static variable in a constant", kind }
- );
+ throw_validation_failure!(self.path, PtrToStatic { ptr_kind });
}
// We skip recursively checking other statics. These statics must be sound by
// themselves, and the only way to get broken statics here is by using
@@ -464,9 +440,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// This should be unreachable, but if someone manages to copy a pointer
// out of a `static`, then that pointer might point to mutable memory,
// and we would catch that here.
- throw_validation_failure!(self.path,
- { "a {} pointing to mutable memory in a constant", kind }
- );
+ throw_validation_failure!(self.path, PtrToMut { ptr_kind });
}
}
// Nothing to check for these.
@@ -496,22 +470,24 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let ty = value.layout.ty;
match ty.kind() {
ty::Bool => {
- let value = self.read_scalar(value, "a boolean")?;
+ let value = self.read_scalar(value, ExpectedKind::Bool)?;
try_validation!(
value.to_bool(),
self.path,
- InvalidBool(..) =>
- { "{:x}", value } expected { "a boolean" },
+ InvalidBool(..) => ValidationErrorKind::InvalidBool {
+ value: format!("{value:x}"),
+ }
);
Ok(true)
}
ty::Char => {
- let value = self.read_scalar(value, "a unicode scalar value")?;
+ let value = self.read_scalar(value, ExpectedKind::Char)?;
try_validation!(
value.to_char(),
self.path,
- InvalidChar(..) =>
- { "{:x}", value } expected { "a valid unicode scalar value (in `0..=0x10FFFF` but not in `0xD800..=0xDFFF`)" },
+ InvalidChar(..) => ValidationErrorKind::InvalidChar {
+ value: format!("{value:x}"),
+ }
);
Ok(true)
}
@@ -521,16 +497,17 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let value = self.read_scalar(
value,
if matches!(ty.kind(), ty::Float(..)) {
- "a floating point number"
+ ExpectedKind::Float
} else {
- "an integer"
+ ExpectedKind::Int
},
)?;
// As a special exception we *do* match on a `Scalar` here, since we truly want
// to know its underlying representation (and *not* cast it to an integer).
if matches!(value, Scalar::Ptr(..)) {
- throw_validation_failure!(self.path,
- { "{:x}", value } expected { "plain (non-pointer) bytes" }
+ throw_validation_failure!(
+ self.path,
+ ExpectedNonPtr { value: format!("{value:x}") }
)
}
Ok(true)
@@ -540,7 +517,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// actually enforce the strict rules for raw pointers (mostly because
// that lets us re-use `ref_to_mplace`).
let place =
- self.ecx.ref_to_mplace(&self.read_immediate(value, "a raw pointer")?)?;
+ self.ecx.ref_to_mplace(&self.read_immediate(value, ExpectedKind::RawPtr)?)?;
if place.layout.is_unsized() {
self.check_wide_ptr_meta(place.meta, place.layout)?;
}
@@ -554,14 +531,14 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// a ZST).
let layout = self.ecx.layout_of(*ty)?;
if !layout.is_zst() {
- throw_validation_failure!(self.path, { "mutable reference in a `const`" });
+ throw_validation_failure!(self.path, MutableRefInConst);
}
}
- self.check_safe_pointer(value, "reference")?;
+ self.check_safe_pointer(value, PointerKind::Ref)?;
Ok(true)
}
ty::FnPtr(_sig) => {
- let value = self.read_scalar(value, "a function pointer")?;
+ let value = self.read_scalar(value, ExpectedKind::FnPtr)?;
// If we check references recursively, also check that this points to a function.
if let Some(_) = self.ref_tracking {
@@ -570,19 +547,20 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
self.ecx.get_ptr_fn(ptr),
self.path,
DanglingIntPointer(..) |
- InvalidFunctionPointer(..) =>
- { "{ptr}" } expected { "a function pointer" },
+ InvalidFunctionPointer(..) => InvalidFnPtr {
+ value: format!("{ptr}"),
+ },
);
// FIXME: Check if the signature matches
} else {
// Otherwise (for standalone Miri), we have to still check it to be non-null.
if self.ecx.scalar_may_be_null(value)? {
- throw_validation_failure!(self.path, { "a null function pointer" });
+ throw_validation_failure!(self.path, NullFnPtr);
}
}
Ok(true)
}
- ty::Never => throw_validation_failure!(self.path, { "a value of the never type `!`" }),
+ ty::Never => throw_validation_failure!(self.path, NeverVal),
ty::Foreign(..) | ty::FnDef(..) => {
// Nothing to check.
Ok(true)
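
The function-pointer arm now distinguishes two failure modes: under CTFE the pointer must resolve to an actual function (`DanglingIntPointer`/`InvalidFunctionPointer` become `InvalidFnPtr { value }`), while standalone Miri only rejects null (`NullFnPtr`); the `Never` arm likewise becomes the dataless `NeverVal`. A toy sketch of that branching, with made-up types and a fake resolution check standing in for `get_ptr_fn`:

    // Toy stand-ins only; this is not the interpreter's real logic.
    #[derive(Debug)]
    enum ValidationErrorKind {
        InvalidFnPtr { value: String },
        NullFnPtr,
    }

    fn check_fn_ptr(addr: usize, check_target: bool) -> Result<(), ValidationErrorKind> {
        if check_target {
            // CTFE path: the pointer must resolve to an actual function.
            let points_to_function = addr == 0x1000; // stand-in for `get_ptr_fn`
            if !points_to_function {
                return Err(ValidationErrorKind::InvalidFnPtr { value: format!("{addr:#x}") });
            }
        } else if addr == 0 {
            // Miri path: only require the pointer to be non-null.
            return Err(ValidationErrorKind::NullFnPtr);
        }
        Ok(())
    }

    fn main() {
        assert!(check_fn_ptr(0x1000, true).is_ok());
        assert!(matches!(check_fn_ptr(0, false), Err(ValidationErrorKind::NullFnPtr)));
        println!("{:?}", check_fn_ptr(0x2000, true));
    }
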
@@ -629,12 +607,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
if start == 1 && end == max_value {
// Only null is the niche. So make sure the ptr is NOT null.
if self.ecx.scalar_may_be_null(scalar)? {
- throw_validation_failure!(self.path,
- { "a potentially null pointer" }
- expected {
- "something that cannot possibly fail to be {}",
- wrapping_range_format(valid_range, max_value)
- }
+ throw_validation_failure!(
+ self.path,
+ NullablePtrOutOfRange { range: valid_range, max_value }
)
} else {
return Ok(());
@@ -645,12 +620,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
} else {
// Conservatively, we reject, because the pointer *could* have a bad
// value.
- throw_validation_failure!(self.path,
- { "a pointer" }
- expected {
- "something that cannot possibly fail to be {}",
- wrapping_range_format(valid_range, max_value)
- }
+ throw_validation_failure!(
+ self.path,
+ PtrOutOfRange { range: valid_range, max_value }
)
}
}
@@ -659,9 +631,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
if valid_range.contains(bits) {
Ok(())
} else {
- throw_validation_failure!(self.path,
- { "{}", bits }
- expected { "something {}", wrapping_range_format(valid_range, max_value) }
+ throw_validation_failure!(
+ self.path,
+ OutOfRange { value: format!("{bits}"), range: valid_range, max_value }
)
}
}
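
The three range hunks stop calling `wrapping_range_format` at the throw site and instead ship the raw `valid_range` and `max_value` inside the error (`NullablePtrOutOfRange`, `PtrOutOfRange`, `OutOfRange`). The sketch below uses a simplified `WrappingRange` and an invented `check_in_range` to show the idea: membership wraps around the maximum value, and the error carries the data needed to describe the range later.

    // Minimal stand-in for the layout `WrappingRange` used above: the range is
    // inclusive and may wrap, so `start > end` encodes an interval such as
    // "everything except the values strictly between end and start".
    #[derive(Debug, Clone, Copy)]
    struct WrappingRange {
        start: u128,
        end: u128,
    }

    impl WrappingRange {
        fn contains(&self, bits: u128) -> bool {
            if self.start <= self.end {
                self.start <= bits && bits <= self.end
            } else {
                bits >= self.start || bits <= self.end
            }
        }
    }

    #[derive(Debug)]
    enum ValidationErrorKind {
        OutOfRange { value: String, range: WrappingRange, max_value: u128 },
    }

    fn check_in_range(bits: u128, range: WrappingRange, max_value: u128) -> Result<(), ValidationErrorKind> {
        if range.contains(bits) {
            Ok(())
        } else {
            // The raw range and max value travel with the error; rendering happens later.
            Err(ValidationErrorKind::OutOfRange { value: format!("{bits}"), range, max_value })
        }
    }

    fn main() {
        // `start == 1 && end == max_value`: only 0 is invalid, the "null niche"
        // situation the first hunk special-cases.
        let non_null = WrappingRange { start: 1, end: 255 };
        assert!(check_in_range(7, non_null, 255).is_ok());
        match check_in_range(0, non_null, 255) {
            Err(ValidationErrorKind::OutOfRange { value, range, max_value }) => {
                println!("{value} not in {}..={} (max {max_value})", range.start, range.end);
            }
            Ok(()) => unreachable!(),
        }
    }
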
@@ -685,10 +657,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
Ok(try_validation!(
this.ecx.read_discriminant(op),
this.path,
- InvalidTag(val) =>
- { "{:x}", val } expected { "a valid enum tag" },
- InvalidUninitBytes(None) =>
- { "uninitialized bytes" } expected { "a valid enum tag" },
+ InvalidTag(val) => InvalidEnumTag {
+ value: format!("{val:x}"),
+ },
+
+ InvalidUninitBytes(None) => UninitEnumTag,
)
.1)
})
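
Reading an enum discriminant can fail in two distinct ways, and the rewritten `try_validation!` arms keep them apart: a present-but-bogus tag becomes `InvalidEnumTag { value }` (hex-formatted), uninitialized bytes become `UninitEnumTag`. A self-contained sketch of the same two-way mapping; `read_discriminant` and `validate_discriminant` here are toy functions, not the interpreter's:

    #[derive(Debug)]
    enum ReadError {
        InvalidTag(u128),
        UninitBytes,
    }

    #[derive(Debug)]
    enum ValidationErrorKind {
        InvalidEnumTag { value: String },
        UninitEnumTag,
    }

    fn read_discriminant(raw: Option<u128>, valid_tags: &[u128]) -> Result<u128, ReadError> {
        let tag = raw.ok_or(ReadError::UninitBytes)?;
        if valid_tags.contains(&tag) { Ok(tag) } else { Err(ReadError::InvalidTag(tag)) }
    }

    // Each low-level failure is translated into its structured counterpart.
    fn validate_discriminant(raw: Option<u128>, valid_tags: &[u128]) -> Result<u128, ValidationErrorKind> {
        read_discriminant(raw, valid_tags).map_err(|err| match err {
            ReadError::InvalidTag(val) => {
                ValidationErrorKind::InvalidEnumTag { value: format!("{val:x}") }
            }
            ReadError::UninitBytes => ValidationErrorKind::UninitEnumTag,
        })
    }

    fn main() {
        assert_eq!(validate_discriminant(Some(1), &[0, 1]).unwrap(), 1);
        println!("{:?}", validate_discriminant(Some(7), &[0, 1])); // Err(InvalidEnumTag { value: "7" })
        println!("{:?}", validate_discriminant(None, &[0, 1]));    // Err(UninitEnumTag)
    }
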
@@ -730,7 +703,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// Special check preventing `UnsafeCell` inside unions in the inner part of constants.
if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. })) {
if !op.layout.ty.is_freeze(*self.ecx.tcx, self.ecx.param_env) {
- throw_validation_failure!(self.path, { "`UnsafeCell` in a `const`" });
+ throw_validation_failure!(self.path, UnsafeCell);
}
}
Ok(())
@@ -738,7 +711,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
#[inline]
fn visit_box(&mut self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
- self.check_safe_pointer(op, "box")?;
+ self.check_safe_pointer(op, PointerKind::Box)?;
Ok(())
}
@@ -756,7 +729,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. }))
&& def.is_unsafe_cell()
{
- throw_validation_failure!(self.path, { "`UnsafeCell` in a `const`" });
+ throw_validation_failure!(self.path, UnsafeCell);
}
}
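
Both `UnsafeCell` hunks guard on the same condition: the check only fires for the inner part of a `const` (`CtfeValidationMode::Const { inner: true, .. }`) and only when the type can actually contain interior mutability. A simplified sketch of that gate, with a cut-down `CtfeValidationMode` (the real mode carries more fields, note the `..` in the patterns above) and a plain `bool` standing in for the `is_freeze`/`is_unsafe_cell` queries:

    #[derive(Debug, Clone, Copy)]
    enum CtfeValidationMode {
        Regular,
        Const { inner: bool },
    }

    #[derive(Debug)]
    enum ValidationErrorKind {
        UnsafeCell,
    }

    fn check_interior_mutability(
        mode: Option<CtfeValidationMode>,
        ty_is_freeze: bool,
    ) -> Result<(), ValidationErrorKind> {
        // Only the inner part of a `const` rejects interior mutability here.
        if matches!(mode, Some(CtfeValidationMode::Const { inner: true })) && !ty_is_freeze {
            return Err(ValidationErrorKind::UnsafeCell);
        }
        Ok(())
    }

    fn main() {
        let inner_const = Some(CtfeValidationMode::Const { inner: true });
        assert!(check_interior_mutability(inner_const, true).is_ok());
        assert!(check_interior_mutability(inner_const, false).is_err());
        // Outside of a const's inner part, interior mutability is not an error.
        assert!(check_interior_mutability(Some(CtfeValidationMode::Regular), false).is_ok());
    }
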
@@ -775,14 +748,13 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// MyNewtype and then the scalar in there).
match op.layout.abi {
Abi::Uninhabited => {
- throw_validation_failure!(self.path,
- { "a value of uninhabited type {:?}", op.layout.ty }
- );
+ let ty = op.layout.ty;
+ throw_validation_failure!(self.path, UninhabitedVal { ty });
}
Abi::Scalar(scalar_layout) => {
if !scalar_layout.is_uninit_valid() {
// There is something to check here.
- let scalar = self.read_scalar(op, "initialized scalar value")?;
+ let scalar = self.read_scalar(op, ExpectedKind::InitScalar)?;
self.visit_scalar(scalar, scalar_layout)?;
}
}
@@ -792,7 +764,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// the other must be init.
if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
let (a, b) =
- self.read_immediate(op, "initialized scalar value")?.to_scalar_pair();
+ self.read_immediate(op, ExpectedKind::InitScalar)?.to_scalar_pair();
self.visit_scalar(a, a_layout)?;
self.visit_scalar(b, b_layout)?;
}
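
The ABI-based check above dispatches on the layout: an `Abi::Uninhabited` layout is immediately an error (`UninhabitedVal { ty }`), while `Scalar` and `ScalarPair` layouts are only read and range-checked when an uninitialized value would be invalid for them. A very reduced sketch of that dispatch; `Abi`, the `uninit_valid` flags, and `check_abi` below are simplified stand-ins for the real layout types:

    // Reduced stand-ins: the real code reads actual scalars
    // (ExpectedKind::InitScalar) and runs `visit_scalar` on them.
    #[derive(Debug, Clone, Copy)]
    enum Abi {
        Uninhabited,
        Scalar { uninit_valid: bool },
        ScalarPair { a_uninit_valid: bool, b_uninit_valid: bool },
    }

    #[derive(Debug)]
    enum ValidationErrorKind {
        UninhabitedVal { ty: String },
    }

    fn check_abi(ty: &str, abi: Abi) -> Result<(), ValidationErrorKind> {
        match abi {
            // No value of an uninhabited type can be valid.
            Abi::Uninhabited => Err(ValidationErrorKind::UninhabitedVal { ty: ty.to_string() }),
            Abi::Scalar { uninit_valid } => {
                if !uninit_valid {
                    // Real code: read the scalar and range-check it.
                }
                Ok(())
            }
            Abi::ScalarPair { a_uninit_valid, b_uninit_valid } => {
                if !a_uninit_valid && !b_uninit_valid {
                    // Real code: read both halves and check each one.
                }
                Ok(())
            }
        }
    }

    fn main() {
        assert!(check_abi("u32", Abi::Scalar { uninit_valid: false }).is_ok());
        assert!(check_abi("(u8, bool)", Abi::ScalarPair { a_uninit_valid: false, b_uninit_valid: false }).is_ok());
        println!("{:?}", check_abi("!", Abi::Uninhabited));
    }
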
@@ -822,7 +794,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
try_validation!(
self.ecx.read_bytes_ptr_strip_provenance(mplace.ptr, Size::from_bytes(len)),
self.path,
- InvalidUninitBytes(..) => { "uninitialized data in `str`" },
+ InvalidUninitBytes(..) => { UninitStr },
);
}
ty::Array(tys, ..) | ty::Slice(tys)
@@ -852,7 +824,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
Left(mplace) => mplace,
Right(imm) => match *imm {
Immediate::Uninit =>
- throw_validation_failure!(self.path, { "uninitialized bytes" }),
+ throw_validation_failure!(self.path, UninitVal),
Immediate::Scalar(..) | Immediate::ScalarPair(..) =>
bug!("arrays/slices can never have Scalar/ScalarPair layout"),
}
@@ -888,7 +860,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
.unwrap();
self.path.push(PathElem::ArrayElem(i));
- throw_validation_failure!(self.path, { "uninitialized bytes" })
+ throw_validation_failure!(self.path, UninitVal)
}
// Propagate upwards (that will also check for unexpected errors).
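
When the optimized array/slice scan finds an uninitialized chunk, the code pushes `PathElem::ArrayElem(i)` before throwing `UninitVal`, so the eventual diagnostic points at the exact element rather than at the whole array. A small sketch of that path-building idea with invented `PathElem`/`ValidationError` types:

    #[derive(Debug)]
    enum PathElem {
        Field(&'static str),
        ArrayElem(usize),
    }

    #[derive(Debug)]
    struct ValidationError {
        path: Vec<PathElem>,
        kind: &'static str, // e.g. "UninitVal"
    }

    // The index of the bad element is appended to the path before the error
    // is raised, so the report names the precise location inside the value.
    fn check_array(data: &[Option<u8>], path: &mut Vec<PathElem>) -> Result<(), ValidationError> {
        for (i, byte) in data.iter().enumerate() {
            if byte.is_none() {
                path.push(PathElem::ArrayElem(i));
                return Err(ValidationError { path: std::mem::take(path), kind: "UninitVal" });
            }
        }
        Ok(())
    }

    fn main() {
        let mut path = vec![PathElem::Field("data")];
        let err = check_array(&[Some(1), None], &mut path).unwrap_err();
        println!("{:?}: {}", err.path, err.kind); // [Field("data"), ArrayElem(1)]: UninitVal
    }
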
@@ -929,12 +901,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
match visitor.visit_value(&op) {
Ok(()) => Ok(()),
// Pass through validation failures.
- Err(err) if matches!(err.kind(), err_ub!(ValidationFailure { .. })) => Err(err),
+ Err(err) if matches!(err.kind(), err_ub!(Validation { .. })) => Err(err),
// Complain about any other kind of UB error -- those are bad because we'd like to
// report them in a way that shows *where* in the value the issue lies.
Err(err) if matches!(err.kind(), InterpError::UndefinedBehavior(_)) => {
- err.print_backtrace();
- bug!("Unexpected Undefined Behavior error during validation: {}", err);
+ let (err, backtrace) = err.into_parts();
+ backtrace.print_backtrace();
+ bug!("Unexpected Undefined Behavior error during validation: {err:?}");
}
// Pass through everything else.
Err(err) => Err(err),
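
The last hunk changes how an unexpected UB error is escalated: instead of printing the backtrace through the error and formatting it with `Display`, the error is first split with `into_parts()` into its kind and the captured backtrace, the backtrace is printed, and the kind goes into `bug!` via `Debug`. The sketch below imitates that split with an invented `InterpErrorInfo` built on `std::backtrace::Backtrace`; it is not the real `InterpErrorInfo` API, just the shape of the pattern:

    use std::backtrace::Backtrace;

    #[derive(Debug)]
    enum InterpErrorKind {
        UndefinedBehavior(String),
    }

    struct InterpErrorInfo {
        kind: InterpErrorKind,
        backtrace: Backtrace,
    }

    impl InterpErrorInfo {
        fn new(kind: InterpErrorKind) -> Self {
            Self { kind, backtrace: Backtrace::force_capture() }
        }

        fn kind(&self) -> &InterpErrorKind {
            &self.kind
        }

        // Splitting the error lets the caller print the backtrace separately
        // from formatting the kind, as the hunk above does.
        fn into_parts(self) -> (InterpErrorKind, Backtrace) {
            (self.kind, self.backtrace)
        }
    }

    fn report_unexpected(err: InterpErrorInfo) -> ! {
        let (err, backtrace) = err.into_parts();
        eprintln!("{backtrace}");
        panic!("Unexpected Undefined Behavior error during validation: {err:?}");
    }

    fn main() {
        let err = InterpErrorInfo::new(InterpErrorKind::UndefinedBehavior("example".into()));
        // Only UB errors get escalated here; anything else would just be returned.
        if matches!(err.kind(), InterpErrorKind::UndefinedBehavior(_)) {
            report_unexpected(err);
        }
    }

Running the sketch deliberately aborts after printing the captured backtrace, which is the `bug!` behaviour the hunk preserves.
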