summaryrefslogtreecommitdiffstats
path: root/compiler/rustc_const_eval
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-06-07 05:48:42 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-06-07 05:48:42 +0000
commitcec1877e180393eba0f6ddb0cf97bf3a791631c7 (patch)
tree47b4dac2a9dd9a40c30c251b4d4a72d7ccf77e9f /compiler/rustc_const_eval
parentAdding debian version 1.74.1+dfsg1-1. (diff)
downloadrustc-cec1877e180393eba0f6ddb0cf97bf3a791631c7.tar.xz
rustc-cec1877e180393eba0f6ddb0cf97bf3a791631c7.zip
Merging upstream version 1.75.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_const_eval')
-rw-r--r--compiler/rustc_const_eval/Cargo.toml10
-rw-r--r--compiler/rustc_const_eval/messages.ftl9
-rw-r--r--compiler/rustc_const_eval/src/const_eval/eval_queries.rs159
-rw-r--r--compiler/rustc_const_eval/src/const_eval/fn_queries.rs9
-rw-r--r--compiler/rustc_const_eval/src/const_eval/machine.rs80
-rw-r--r--compiler/rustc_const_eval/src/const_eval/mod.rs22
-rw-r--r--compiler/rustc_const_eval/src/const_eval/valtrees.rs51
-rw-r--r--compiler/rustc_const_eval/src/errors.rs18
-rw-r--r--compiler/rustc_const_eval/src/interpret/cast.rs41
-rw-r--r--compiler/rustc_const_eval/src/interpret/discriminant.rs19
-rw-r--r--compiler/rustc_const_eval/src/interpret/eval_context.rs92
-rw-r--r--compiler/rustc_const_eval/src/interpret/intern.rs42
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics.rs47
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs128
-rw-r--r--compiler/rustc_const_eval/src/interpret/machine.rs25
-rw-r--r--compiler/rustc_const_eval/src/interpret/memory.rs182
-rw-r--r--compiler/rustc_const_eval/src/interpret/mod.rs6
-rw-r--r--compiler/rustc_const_eval/src/interpret/operand.rs123
-rw-r--r--compiler/rustc_const_eval/src/interpret/operator.rs32
-rw-r--r--compiler/rustc_const_eval/src/interpret/place.rs279
-rw-r--r--compiler/rustc_const_eval/src/interpret/projection.rs36
-rw-r--r--compiler/rustc_const_eval/src/interpret/step.rs15
-rw-r--r--compiler/rustc_const_eval/src/interpret/terminator.rs21
-rw-r--r--compiler/rustc_const_eval/src/interpret/util.rs6
-rw-r--r--compiler/rustc_const_eval/src/interpret/validity.rs62
-rw-r--r--compiler/rustc_const_eval/src/lib.rs9
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/check.rs80
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/ops.rs18
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs2
-rw-r--r--compiler/rustc_const_eval/src/transform/promote_consts.rs2
-rw-r--r--compiler/rustc_const_eval/src/transform/validate.rs53
-rw-r--r--compiler/rustc_const_eval/src/util/alignment.rs10
-rw-r--r--compiler/rustc_const_eval/src/util/caller_location.rs66
-rw-r--r--compiler/rustc_const_eval/src/util/mod.rs1
-rw-r--r--compiler/rustc_const_eval/src/util/type_name.rs92
35 files changed, 958 insertions, 889 deletions
diff --git a/compiler/rustc_const_eval/Cargo.toml b/compiler/rustc_const_eval/Cargo.toml
index 4e47fed86..c4f8841d7 100644
--- a/compiler/rustc_const_eval/Cargo.toml
+++ b/compiler/rustc_const_eval/Cargo.toml
@@ -3,25 +3,25 @@ name = "rustc_const_eval"
version = "0.0.0"
edition = "2021"
-[lib]
-
[dependencies]
-tracing = "0.1"
+# tidy-alphabetical-start
either = "1"
rustc_apfloat = "0.2.0"
rustc_ast = { path = "../rustc_ast" }
rustc_attr = { path = "../rustc_attr" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
-rustc_hir = { path = "../rustc_hir" }
rustc_fluent_macro = { path = "../rustc_fluent_macro" }
+rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
rustc_infer = { path = "../rustc_infer" }
rustc_macros = { path = "../rustc_macros" }
rustc_middle = { path = "../rustc_middle" }
rustc_mir_dataflow = { path = "../rustc_mir_dataflow" }
rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
rustc_target = { path = "../rustc_target" }
rustc_trait_selection = { path = "../rustc_trait_selection" }
-rustc_span = { path = "../rustc_span" }
rustc_type_ir = { path = "../rustc_type_ir" }
+tracing = "0.1"
+# tidy-alphabetical-end
diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl
index d23e2a9f3..f926da464 100644
--- a/compiler/rustc_const_eval/messages.ftl
+++ b/compiler/rustc_const_eval/messages.ftl
@@ -1,11 +1,15 @@
const_eval_address_space_full =
there are no more free addresses in the address space
-const_eval_align_check_failed = accessing memory with alignment {$has}, but alignment {$required} is required
+
const_eval_align_offset_invalid_align =
`align_offset` called with non-power-of-two align: {$target_align}
const_eval_alignment_check_failed =
- accessing memory with alignment {$has}, but alignment {$required} is required
+ {$msg ->
+ [AccessedPtr] accessing memory
+ *[other] accessing memory based on pointer
+ } with alignment {$has}, but alignment {$required} is required
+
const_eval_already_reported =
an error has already been reported elsewhere (this should not usually be printed)
const_eval_assume_false =
@@ -61,7 +65,6 @@ const_eval_deref_coercion_non_const =
.target_note = deref defined here
const_eval_deref_function_pointer =
accessing {$allocation} which contains a function
-const_eval_deref_test = dereferencing pointer failed
const_eval_deref_vtable_pointer =
accessing {$allocation} which contains a vtable
const_eval_different_allocations =
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 3d758cd01..13937a941 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -1,21 +1,22 @@
-use crate::const_eval::CheckAlignment;
-use crate::errors::ConstEvalError;
+use std::mem;
use either::{Left, Right};
use rustc_hir::def::DefKind;
-use rustc_middle::mir::interpret::{ErrorHandled, InterpErrorInfo};
+use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo};
use rustc_middle::mir::pretty::write_allocation_bytes;
use rustc_middle::mir::{self, ConstAlloc, ConstValue};
use rustc_middle::traits::Reveal;
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{self, TyCtxt};
-use rustc_span::source_map::Span;
+use rustc_span::Span;
use rustc_target::abi::{self, Abi};
use super::{CanAccessStatics, CompileTimeEvalContext, CompileTimeInterpreter};
+use crate::const_eval::CheckAlignment;
use crate::errors;
+use crate::errors::ConstEvalError;
use crate::interpret::eval_nullary_intrinsic;
use crate::interpret::{
intern_const_alloc_recursive, CtfeValidationMode, GlobalId, Immediate, InternKind, InterpCx,
@@ -74,9 +75,9 @@ fn eval_body_using_ecx<'mir, 'tcx>(
None => InternKind::Constant,
}
};
- ecx.machine.check_alignment = CheckAlignment::No; // interning doesn't need to respect alignment
+ let check_alignment = mem::replace(&mut ecx.machine.check_alignment, CheckAlignment::No); // interning doesn't need to respect alignment
intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
- // we leave alignment checks off, since this `ecx` will not be used for further evaluation anyway
+ ecx.machine.check_alignment = check_alignment;
debug!("eval_body_using_ecx done: {:?}", ret);
Ok(ret)
@@ -89,7 +90,7 @@ fn eval_body_using_ecx<'mir, 'tcx>(
/// that inform us about the generic bounds of the constant. E.g., using an associated constant
/// of a function's generic parameter will require knowledge about the bounds on the generic
/// parameter. These bounds are passed to `mk_eval_cx` via the `ParamEnv` argument.
-pub(super) fn mk_eval_cx<'mir, 'tcx>(
+pub(crate) fn mk_eval_cx<'mir, 'tcx>(
tcx: TyCtxt<'tcx>,
root_span: Span,
param_env: ty::ParamEnv<'tcx>,
@@ -105,10 +106,16 @@ pub(super) fn mk_eval_cx<'mir, 'tcx>(
}
/// This function converts an interpreter value into a MIR constant.
+///
+/// The `for_diagnostics` flag turns the usual rules for returning `ConstValue::Scalar` into a
+/// best-effort attempt. This is not okay for use in const-eval since it breaks invariants rustc
+/// relies on, but it is okay for diagnostics which will just give up gracefully when they
+/// encounter an `Indirect` they cannot handle.
#[instrument(skip(ecx), level = "debug")]
pub(super) fn op_to_const<'tcx>(
ecx: &CompileTimeEvalContext<'_, 'tcx>,
op: &OpTy<'tcx>,
+ for_diagnostics: bool,
) -> ConstValue<'tcx> {
// Handle ZST consistently and early.
if op.layout.is_zst() {
@@ -132,7 +139,13 @@ pub(super) fn op_to_const<'tcx>(
_ => false,
};
let immediate = if force_as_immediate {
- Right(ecx.read_immediate(op).expect("normalization works on validated constants"))
+ match ecx.read_immediate(op) {
+ Ok(imm) => Right(imm),
+ Err(err) if !for_diagnostics => {
+ panic!("normalization works on validated constants: {err:?}")
+ }
+ _ => op.as_mplace_or_imm(),
+ }
} else {
op.as_mplace_or_imm()
};
@@ -204,7 +217,7 @@ pub(crate) fn turn_into_const_value<'tcx>(
);
// Turn this into a proper constant.
- op_to_const(&ecx, &mplace.into())
+ op_to_const(&ecx, &mplace.into(), /* for diagnostics */ false)
}
#[instrument(skip(tcx), level = "debug")]
@@ -284,22 +297,22 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
let def = cid.instance.def.def_id();
let is_static = tcx.is_static(def);
- let mut ecx = InterpCx::new(
+ let ecx = InterpCx::new(
tcx,
tcx.def_span(def),
key.param_env,
// Statics (and promoteds inside statics) may access other statics, because unlike consts
// they do not have to behave "as if" they were evaluated at runtime.
- CompileTimeInterpreter::new(
- CanAccessStatics::from(is_static),
- if tcx.sess.opts.unstable_opts.extra_const_ub_checks {
- CheckAlignment::Error
- } else {
- CheckAlignment::FutureIncompat
- },
- ),
+ CompileTimeInterpreter::new(CanAccessStatics::from(is_static), CheckAlignment::Error),
);
+ eval_in_interpreter(ecx, cid, is_static)
+}
+pub fn eval_in_interpreter<'mir, 'tcx>(
+ mut ecx: InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
+ cid: GlobalId<'tcx>,
+ is_static: bool,
+) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> {
let res = ecx.load_mir(cid.instance.def, cid.promoted);
match res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, &body)) {
Err(error) => {
@@ -312,7 +325,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
// If the current item has generics, we'd like to enrich the message with the
// instance and its args: to show the actual compile-time values, in addition to
// the expression, leading to the const eval error.
- let instance = &key.value.instance;
+ let instance = &cid.instance;
if !instance.args.is_empty() {
let instance = with_no_trimmed_paths!(instance.to_string());
("const_with_path", instance)
@@ -337,56 +350,14 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
Ok(mplace) => {
// Since evaluation had no errors, validate the resulting constant.
// This is a separate `try` block to provide more targeted error reporting.
- let validation: Result<_, InterpErrorInfo<'_>> = try {
- let mut ref_tracking = RefTracking::new(mplace.clone());
- let mut inner = false;
- while let Some((mplace, path)) = ref_tracking.todo.pop() {
- let mode = match tcx.static_mutability(cid.instance.def_id()) {
- Some(_) if cid.promoted.is_some() => {
- // Promoteds in statics are allowed to point to statics.
- CtfeValidationMode::Const { inner, allow_static_ptrs: true }
- }
- Some(_) => CtfeValidationMode::Regular, // a `static`
- None => CtfeValidationMode::Const { inner, allow_static_ptrs: false },
- };
- ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)?;
- inner = true;
- }
- };
+ let validation =
+ const_validate_mplace(&ecx, &mplace, is_static, cid.promoted.is_some());
+
let alloc_id = mplace.ptr().provenance.unwrap();
// Validation failed, report an error.
if let Err(error) = validation {
- let (error, backtrace) = error.into_parts();
- backtrace.print_backtrace();
-
- let ub_note = matches!(error, InterpError::UndefinedBehavior(_)).then(|| {});
-
- let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory().inner();
- let mut bytes = String::new();
- if alloc.size() != abi::Size::ZERO {
- bytes = "\n".into();
- // FIXME(translation) there might be pieces that are translatable.
- write_allocation_bytes(*ecx.tcx, alloc, &mut bytes, " ").unwrap();
- }
- let raw_bytes = errors::RawBytesNote {
- size: alloc.size().bytes(),
- align: alloc.align.bytes(),
- bytes,
- };
-
- Err(super::report(
- *ecx.tcx,
- error,
- None,
- || super::get_span_and_frames(&ecx),
- move |span, frames| errors::UndefinedBehavior {
- span,
- ub_note,
- frames,
- raw_bytes,
- },
- ))
+ Err(const_report_error(&ecx, error, alloc_id))
} else {
// Convert to raw constant
Ok(ConstAlloc { alloc_id, ty: mplace.layout.ty })
@@ -394,3 +365,61 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
}
}
}
+
+#[inline(always)]
+pub fn const_validate_mplace<'mir, 'tcx>(
+ ecx: &InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
+ mplace: &MPlaceTy<'tcx>,
+ is_static: bool,
+ is_promoted: bool,
+) -> InterpResult<'tcx> {
+ let mut ref_tracking = RefTracking::new(mplace.clone());
+ let mut inner = false;
+ while let Some((mplace, path)) = ref_tracking.todo.pop() {
+ let mode = if is_static {
+ if is_promoted {
+ // Promoteds in statics are allowed to point to statics.
+ CtfeValidationMode::Const { inner, allow_static_ptrs: true }
+ } else {
+ // a `static`
+ CtfeValidationMode::Regular
+ }
+ } else {
+ CtfeValidationMode::Const { inner, allow_static_ptrs: false }
+ };
+ ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)?;
+ inner = true;
+ }
+
+ Ok(())
+}
+
+#[inline(always)]
+pub fn const_report_error<'mir, 'tcx>(
+ ecx: &InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
+ error: InterpErrorInfo<'tcx>,
+ alloc_id: AllocId,
+) -> ErrorHandled {
+ let (error, backtrace) = error.into_parts();
+ backtrace.print_backtrace();
+
+ let ub_note = matches!(error, InterpError::UndefinedBehavior(_)).then(|| {});
+
+ let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory().inner();
+ let mut bytes = String::new();
+ if alloc.size() != abi::Size::ZERO {
+ bytes = "\n".into();
+ // FIXME(translation) there might be pieces that are translatable.
+ write_allocation_bytes(*ecx.tcx, alloc, &mut bytes, " ").unwrap();
+ }
+ let raw_bytes =
+ errors::RawBytesNote { size: alloc.size().bytes(), align: alloc.align.bytes(), bytes };
+
+ crate::const_eval::report(
+ *ecx.tcx,
+ error,
+ None,
+ || crate::const_eval::get_span_and_frames(ecx),
+ move |span, frames| errors::UndefinedBehavior { span, ub_note, frames, raw_bytes },
+ )
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
index 4ee4ebbb9..9e992637f 100644
--- a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -39,8 +39,13 @@ fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness {
hir::Node::Ctor(_)
| hir::Node::AnonConst(_)
| hir::Node::ConstBlock(_)
- | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. }) => hir::Constness::Const,
- hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(_), .. }) => tcx.generics_of(def_id).host_effect_index.map_or(hir::Constness::NotConst, |_| hir::Constness::Const),
+ | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. }) => {
+ hir::Constness::Const
+ }
+ hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(_), .. }) => tcx
+ .generics_of(def_id)
+ .host_effect_index
+ .map_or(hir::Constness::NotConst, |_| hir::Constness::Const),
hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) => {
// Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
// foreign items cannot be evaluated at compile-time.
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index 14b9894aa..4b447229c 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -1,10 +1,10 @@
use rustc_hir::def::DefKind;
-use rustc_hir::{LangItem, CRATE_HIR_ID};
+use rustc_hir::LangItem;
use rustc_middle::mir;
use rustc_middle::mir::interpret::PointerArithmetic;
use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
use rustc_middle::ty::{self, TyCtxt};
-use rustc_session::lint::builtin::INVALID_ALIGNMENT;
+use rustc_span::Span;
use std::borrow::Borrow;
use std::hash::Hash;
use std::ops::ControlFlow;
@@ -21,11 +21,11 @@ use rustc_target::abi::{Align, Size};
use rustc_target::spec::abi::Abi as CallAbi;
use crate::errors::{LongRunning, LongRunningWarn};
+use crate::fluent_generated as fluent;
use crate::interpret::{
self, compile_time_machine, AllocId, ConstAllocation, FnArg, FnVal, Frame, ImmTy, InterpCx,
InterpResult, OpTy, PlaceTy, Pointer, Scalar,
};
-use crate::{errors, fluent_generated as fluent};
use super::error::*;
@@ -65,22 +65,11 @@ pub struct CompileTimeInterpreter<'mir, 'tcx> {
#[derive(Copy, Clone)]
pub enum CheckAlignment {
- /// Ignore alignment when following relocations.
+ /// Ignore all alignment requirements.
/// This is mainly used in interning.
No,
/// Hard error when dereferencing a misaligned pointer.
Error,
- /// Emit a future incompat lint when dereferencing a misaligned pointer.
- FutureIncompat,
-}
-
-impl CheckAlignment {
- pub fn should_check(&self) -> bool {
- match self {
- CheckAlignment::No => false,
- CheckAlignment::Error | CheckAlignment::FutureIncompat => true,
- }
- }
}
#[derive(Copy, Clone, PartialEq)]
@@ -193,6 +182,24 @@ impl interpret::MayLeak for ! {
}
impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
+ fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) {
+ let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+ let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
+
+ use rustc_session::{config::RemapPathScopeComponents, RemapFileNameExt};
+ (
+ Symbol::intern(
+ &caller
+ .file
+ .name
+ .for_scope(&self.tcx.sess, RemapPathScopeComponents::DIAGNOSTICS)
+ .to_string_lossy(),
+ ),
+ u32::try_from(caller.line).unwrap(),
+ u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
+ )
+ }
+
/// "Intercept" a function call, because we have something special to do for it.
/// All `#[rustc_do_not_const_check]` functions should be hooked here.
/// If this returns `Some` function, which may be `instance` or a different function with
@@ -207,7 +214,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
let def_id = instance.def_id();
- if Some(def_id) == self.tcx.lang_items().panic_display()
+ if self.tcx.has_attr(def_id, sym::rustc_const_panic_str)
|| Some(def_id) == self.tcx.lang_items().begin_panic_fn()
{
let args = self.copy_fn_args(args)?;
@@ -358,8 +365,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error
#[inline(always)]
- fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment {
- ecx.machine.check_alignment
+ fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+ matches!(ecx.machine.check_alignment, CheckAlignment::Error)
}
#[inline(always)]
@@ -367,39 +374,6 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks || layout.abi.is_uninhabited()
}
- fn alignment_check_failed(
- ecx: &InterpCx<'mir, 'tcx, Self>,
- has: Align,
- required: Align,
- check: CheckAlignment,
- ) -> InterpResult<'tcx, ()> {
- let err = err_ub!(AlignmentCheckFailed { has, required }).into();
- match check {
- CheckAlignment::Error => Err(err),
- CheckAlignment::No => span_bug!(
- ecx.cur_span(),
- "`alignment_check_failed` called when no alignment check requested"
- ),
- CheckAlignment::FutureIncompat => {
- let (_, backtrace) = err.into_parts();
- backtrace.print_backtrace();
- let (span, frames) = super::get_span_and_frames(&ecx);
-
- ecx.tcx.emit_spanned_lint(
- INVALID_ALIGNMENT,
- ecx.stack().iter().find_map(|frame| frame.lint_root()).unwrap_or(CRATE_HIR_ID),
- span,
- errors::AlignmentCheckFailed {
- has: has.bytes(),
- required: required.bytes(),
- frames,
- },
- );
- Ok(())
- }
- }
- }
-
fn load_mir(
ecx: &InterpCx<'mir, 'tcx, Self>,
instance: ty::InstanceDef<'tcx>,
@@ -579,8 +553,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
OverflowNeg(op) => OverflowNeg(eval_to_int(op)?),
DivisionByZero(op) => DivisionByZero(eval_to_int(op)?),
RemainderByZero(op) => RemainderByZero(eval_to_int(op)?),
- ResumedAfterReturn(generator_kind) => ResumedAfterReturn(*generator_kind),
- ResumedAfterPanic(generator_kind) => ResumedAfterPanic(*generator_kind),
+ ResumedAfterReturn(coroutine_kind) => ResumedAfterReturn(*coroutine_kind),
+ ResumedAfterPanic(coroutine_kind) => ResumedAfterPanic(*coroutine_kind),
MisalignedPointerDereference { ref required, ref found } => {
MisalignedPointerDereference {
required: eval_to_int(required)?,
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index bcbe996be..f6942366c 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -1,12 +1,12 @@
// Not in interpret to make sure we do not use private implementation details
use crate::errors::MaxNumNodesInConstErr;
-use crate::interpret::{intern_const_alloc_recursive, InternKind, InterpCx, Scalar};
+use crate::interpret::InterpCx;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_span::{source_map::DUMMY_SP, symbol::Symbol};
+use rustc_span::DUMMY_SP;
mod error;
mod eval_queries;
@@ -20,20 +20,6 @@ pub use fn_queries::*;
pub use machine::*;
pub(crate) use valtrees::{const_to_valtree_inner, valtree_to_const_value};
-pub(crate) fn const_caller_location(
- tcx: TyCtxt<'_>,
- (file, line, col): (Symbol, u32, u32),
-) -> mir::ConstValue<'_> {
- trace!("const_caller_location: {}:{}:{}", file, line, col);
- let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), CanAccessStatics::No);
-
- let loc_place = ecx.alloc_caller_location(file, line, col);
- if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
- bug!("intern_const_alloc_recursive should not error in this case")
- }
- mir::ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr(), &tcx))
-}
-
// We forbid type-level constants that contain more than `VALTREE_MAX_NODES` nodes.
const VALTREE_MAX_NODES: usize = 100000;
@@ -86,7 +72,7 @@ pub(crate) fn eval_to_valtree<'tcx>(
}
#[instrument(skip(tcx), level = "debug")]
-pub(crate) fn try_destructure_mir_constant_for_diagnostics<'tcx>(
+pub(crate) fn try_destructure_mir_constant_for_user_output<'tcx>(
tcx: TyCtxtAt<'tcx>,
val: mir::ConstValue<'tcx>,
ty: Ty<'tcx>,
@@ -113,7 +99,7 @@ pub(crate) fn try_destructure_mir_constant_for_diagnostics<'tcx>(
let fields_iter = (0..field_count)
.map(|i| {
let field_op = ecx.project_field(&down, i).ok()?;
- let val = op_to_const(&ecx, &field_op);
+ let val = op_to_const(&ecx, &field_op, /* for diagnostics */ true);
Some((val, field_op.layout.ty))
})
.collect::<Option<Vec<_>>>()?;
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index 7436ea6ae..ed2d81727 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -10,7 +10,7 @@ use crate::interpret::{
use rustc_middle::mir;
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
-use rustc_span::source_map::DUMMY_SP;
+use rustc_span::DUMMY_SP;
use rustc_target::abi::VariantIdx;
#[instrument(skip(ecx), level = "debug")]
@@ -97,11 +97,27 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
Ok(ty::ValTree::Leaf(val.assert_int()))
}
- // Raw pointers are not allowed in type level constants, as we cannot properly test them for
- // equality at compile-time (see `ptr_guaranteed_cmp`).
+ ty::RawPtr(_) => {
+ // Not all raw pointers are allowed, as we cannot properly test them for
+ // equality at compile-time (see `ptr_guaranteed_cmp`).
+ // However we allow those that are just integers in disguise.
+ // (We could allow wide raw pointers where both sides are integers in the future,
+ // but for now we reject them.)
+ let Ok(val) = ecx.read_scalar(place) else {
+ return Err(ValTreeCreationError::Other);
+ };
+ // We are in the CTFE machine, so ptr-to-int casts will fail.
+ // This can only be `Ok` if `val` already is an integer.
+ let Ok(val) = val.try_to_int() else {
+ return Err(ValTreeCreationError::Other);
+ };
+ // It's just a ScalarInt!
+ Ok(ty::ValTree::Leaf(val))
+ }
+
// Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to
// agree with runtime equality tests.
- ty::FnPtr(_) | ty::RawPtr(_) => Err(ValTreeCreationError::NonSupportedType),
+ ty::FnPtr(_) => Err(ValTreeCreationError::NonSupportedType),
ty::Ref(_, _, _) => {
let Ok(derefd_place)= ecx.deref_pointer(place) else {
@@ -151,8 +167,8 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
| ty::Infer(_)
// FIXME(oli-obk): we can probably encode closures just like structs
| ty::Closure(..)
- | ty::Generator(..)
- | ty::GeneratorWitness(..) => Err(ValTreeCreationError::NonSupportedType),
+ | ty::Coroutine(..)
+ | ty::CoroutineWitness(..) => Err(ValTreeCreationError::NonSupportedType),
}
}
@@ -222,17 +238,19 @@ pub fn valtree_to_const_value<'tcx>(
assert!(valtree.unwrap_branch().is_empty());
mir::ConstValue::ZeroSized
}
- ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => match valtree {
- ty::ValTree::Leaf(scalar_int) => mir::ConstValue::Scalar(Scalar::Int(scalar_int)),
- ty::ValTree::Branch(_) => bug!(
- "ValTrees for Bool, Int, Uint, Float or Char should have the form ValTree::Leaf"
- ),
- },
+ ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char | ty::RawPtr(_) => {
+ match valtree {
+ ty::ValTree::Leaf(scalar_int) => mir::ConstValue::Scalar(Scalar::Int(scalar_int)),
+ ty::ValTree::Branch(_) => bug!(
+ "ValTrees for Bool, Int, Uint, Float, Char or RawPtr should have the form ValTree::Leaf"
+ ),
+ }
+ }
ty::Ref(_, inner_ty, _) => {
let mut ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, CanAccessStatics::No);
let imm = valtree_to_ref(&mut ecx, valtree, *inner_ty);
let imm = ImmTy::from_immediate(imm, tcx.layout_of(param_env_ty).unwrap());
- op_to_const(&ecx, &imm.into())
+ op_to_const(&ecx, &imm.into(), /* for diagnostics */ false)
}
ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
let layout = tcx.layout_of(param_env_ty).unwrap();
@@ -265,7 +283,7 @@ pub fn valtree_to_const_value<'tcx>(
dump_place(&ecx, &place);
intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();
- op_to_const(&ecx, &place.into())
+ op_to_const(&ecx, &place.into(), /* for diagnostics */ false)
}
ty::Never
| ty::Error(_)
@@ -278,10 +296,9 @@ pub fn valtree_to_const_value<'tcx>(
| ty::Placeholder(..)
| ty::Infer(_)
| ty::Closure(..)
- | ty::Generator(..)
- | ty::GeneratorWitness(..)
+ | ty::Coroutine(..)
+ | ty::CoroutineWitness(..)
| ty::FnPtr(_)
- | ty::RawPtr(_)
| ty::Str
| ty::Slice(_)
| ty::Dynamic(..) => bug!("no ValTree should have been created for type {:?}", ty.kind()),
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index b1599dd68..cc8f33872 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -5,8 +5,9 @@ use rustc_errors::{
use rustc_hir::ConstContext;
use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic};
use rustc_middle::mir::interpret::{
- CheckInAllocMsg, ExpectedKind, InterpError, InvalidMetaKind, InvalidProgramInfo, PointerKind,
- ResourceExhaustionInfo, UndefinedBehaviorInfo, UnsupportedOpInfo, ValidationErrorInfo,
+ CheckInAllocMsg, ExpectedKind, InterpError, InvalidMetaKind, InvalidProgramInfo, Misalignment,
+ PointerKind, ResourceExhaustionInfo, UndefinedBehaviorInfo, UnsupportedOpInfo,
+ ValidationErrorInfo,
};
use rustc_middle::ty::{self, Ty};
use rustc_span::Span;
@@ -389,15 +390,6 @@ pub struct LiveDrop<'tcx> {
pub dropped_at: Option<Span>,
}
-#[derive(LintDiagnostic)]
-#[diag(const_eval_align_check_failed)]
-pub struct AlignmentCheckFailed {
- pub has: u64,
- pub required: u64,
- #[subdiagnostic]
- pub frames: Vec<FrameNote>,
-}
-
#[derive(Diagnostic)]
#[diag(const_eval_error, code = "E0080")]
pub struct ConstEvalError {
@@ -459,7 +451,6 @@ fn bad_pointer_message(msg: CheckInAllocMsg, handler: &Handler) -> String {
use crate::fluent_generated::*;
let msg = match msg {
- CheckInAllocMsg::DerefTest => const_eval_deref_test,
CheckInAllocMsg::MemoryAccessTest => const_eval_memory_access_test,
CheckInAllocMsg::PointerArithmeticTest => const_eval_pointer_arithmetic_test,
CheckInAllocMsg::OffsetFromTest => const_eval_offset_from_test,
@@ -568,9 +559,10 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
builder.set_arg("bad_pointer_message", bad_pointer_message(msg, handler));
}
- AlignmentCheckFailed { required, has } => {
+ AlignmentCheckFailed(Misalignment { required, has }, msg) => {
builder.set_arg("required", required.bytes());
builder.set_arg("has", has.bytes());
+ builder.set_arg("msg", format!("{msg:?}"));
}
WriteToReadOnly(alloc) | DerefFunctionPointer(alloc) | DerefVTablePointer(alloc) => {
builder.set_arg("allocation", alloc);
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index b9f88cf63..f4cb12c8d 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -8,7 +8,7 @@ use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{IntegerExt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, FloatTy, Ty, TypeAndMut};
use rustc_target::abi::Integer;
-use rustc_type_ir::sty::TyKind::*;
+use rustc_type_ir::TyKind::*;
use super::{
util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy,
@@ -145,16 +145,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert!(dest.layout.is_sized());
assert_eq!(cast_ty, dest.layout.ty); // we otherwise ignore `cast_ty` entirely...
if src.layout.size != dest.layout.size {
- let src_bytes = src.layout.size.bytes();
- let dest_bytes = dest.layout.size.bytes();
- let src_ty = format!("{}", src.layout.ty);
- let dest_ty = format!("{}", dest.layout.ty);
throw_ub_custom!(
fluent::const_eval_invalid_transmute,
- src_bytes = src_bytes,
- dest_bytes = dest_bytes,
- src = src_ty,
- dest = dest_ty,
+ src_bytes = src.layout.size.bytes(),
+ dest_bytes = dest.layout.size.bytes(),
+ src = src.layout.ty,
+ dest = dest.layout.ty,
);
}
@@ -185,7 +181,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
src: &ImmTy<'tcx, M::Provenance>,
cast_to: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
- use rustc_type_ir::sty::TyKind::*;
+ use rustc_type_ir::TyKind::*;
let val = match src.layout.ty.kind() {
// Floating point
@@ -310,7 +306,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
where
F: Float + Into<Scalar<M::Provenance>> + FloatConvert<Single> + FloatConvert<Double>,
{
- use rustc_type_ir::sty::TyKind::*;
+ use rustc_type_ir::TyKind::*;
+
+ fn adjust_nan<
+ 'mir,
+ 'tcx: 'mir,
+ M: Machine<'mir, 'tcx>,
+ F1: rustc_apfloat::Float + FloatConvert<F2>,
+ F2: rustc_apfloat::Float,
+ >(
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ f1: F1,
+ f2: F2,
+ ) -> F2 {
+ if f2.is_nan() { M::generate_nan(ecx, &[f1]) } else { f2 }
+ }
+
match *dest_ty.kind() {
// float -> uint
Uint(t) => {
@@ -330,9 +341,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Scalar::from_int(v, size)
}
// float -> f32
- Float(FloatTy::F32) => Scalar::from_f32(f.convert(&mut false).value),
+ Float(FloatTy::F32) => {
+ Scalar::from_f32(adjust_nan(self, f, f.convert(&mut false).value))
+ }
// float -> f64
- Float(FloatTy::F64) => Scalar::from_f64(f.convert(&mut false).value),
+ Float(FloatTy::F64) => {
+ Scalar::from_f64(adjust_nan(self, f, f.convert(&mut false).value))
+ }
// That's it.
_ => span_bug!(self.cur_span(), "invalid float to {} cast", dest_ty),
}
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index 49e01728f..fd1736703 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -1,7 +1,8 @@
-//! Functions for reading and writing discriminants of multi-variant layouts (enums and generators).
+//! Functions for reading and writing discriminants of multi-variant layouts (enums and coroutines).
-use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
-use rustc_middle::{mir, ty};
+use rustc_middle::mir;
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
+use rustc_middle::ty::{self, Ty};
use rustc_target::abi::{self, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
@@ -170,11 +171,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ty::Adt(adt, _) => {
adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
}
- ty::Generator(def_id, args, _) => {
- let args = args.as_generator();
+ ty::Coroutine(def_id, args, _) => {
+ let args = args.as_coroutine();
args.discriminants(def_id, *self.tcx).find(|(_, var)| var.val == discr_bits)
}
- _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
+ _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-coroutine"),
}
.ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
// Return the cast value, and the index.
@@ -244,11 +245,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn discriminant_for_variant(
&self,
- layout: TyAndLayout<'tcx>,
+ ty: Ty<'tcx>,
variant: VariantIdx,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
- let discr_layout = self.layout_of(layout.ty.discriminant_ty(*self.tcx))?;
- let discr_value = match layout.ty.discriminant_for_variant(*self.tcx, variant) {
+ let discr_layout = self.layout_of(ty.discriminant_ty(*self.tcx))?;
+ let discr_value = match ty.discriminant_for_variant(*self.tcx, variant) {
Some(discr) => {
// This type actually has discriminants.
assert_eq!(discr.ty, discr_layout.ty);
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index af7dfbef2..07cab5e34 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -595,6 +595,50 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
+ /// Walks up the callstack from the intrinsic's callsite, searching for the first callsite in a
+ /// frame which is not `#[track_caller]`. This is the fancy version of `cur_span`.
+ pub(crate) fn find_closest_untracked_caller_location(&self) -> Span {
+ for frame in self.stack().iter().rev() {
+ debug!("find_closest_untracked_caller_location: checking frame {:?}", frame.instance);
+
+ // Assert that the frame we look at is actually executing code currently
+ // (`loc` is `Right` when we are unwinding and the frame does not require cleanup).
+ let loc = frame.loc.left().unwrap();
+
+ // This could be a non-`Call` terminator (such as `Drop`), or not a terminator at all
+ // (such as `box`). Use the normal span by default.
+ let mut source_info = *frame.body.source_info(loc);
+
+ // If this is a `Call` terminator, use the `fn_span` instead.
+ let block = &frame.body.basic_blocks[loc.block];
+ if loc.statement_index == block.statements.len() {
+ debug!(
+ "find_closest_untracked_caller_location: got terminator {:?} ({:?})",
+ block.terminator(),
+ block.terminator().kind,
+ );
+ if let mir::TerminatorKind::Call { fn_span, .. } = block.terminator().kind {
+ source_info.span = fn_span;
+ }
+ }
+
+ let caller_location = if frame.instance.def.requires_caller_location(*self.tcx) {
+ // We use `Err(())` as indication that we should continue up the call stack since
+ // this is a `#[track_caller]` function.
+ Some(Err(()))
+ } else {
+ None
+ };
+ if let Ok(span) =
+ frame.body.caller_location_span(source_info, caller_location, *self.tcx, Ok)
+ {
+ return span;
+ }
+ }
+
+ span_bug!(self.cur_span(), "no non-`#[track_caller]` frame found")
+ }
+
#[inline(always)]
pub fn layout_of_local(
&self,
@@ -750,12 +794,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
if M::POST_MONO_CHECKS {
- // `ctfe_query` does some error message decoration that we want to be in effect here.
- self.ctfe_query(None, |tcx| {
- body.post_mono_checks(*tcx, self.param_env, |c| {
- self.subst_from_current_frame_and_normalize_erasing_regions(c)
- })
- })?;
+ for &const_ in &body.required_consts {
+ let c =
+ self.subst_from_current_frame_and_normalize_erasing_regions(const_.const_)?;
+ c.eval(*self.tcx, self.param_env, Some(const_.span)).map_err(|err| {
+ err.emit_note(*self.tcx);
+ err
+ })?;
+ }
}
// done
@@ -961,8 +1007,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
| ty::RawPtr(..)
| ty::Char
| ty::Ref(..)
- | ty::Generator(..)
- | ty::GeneratorWitness(..)
+ | ty::Coroutine(..)
+ | ty::CoroutineWitness(..)
| ty::Array(..)
| ty::Closure(..)
| ty::Never
@@ -1008,7 +1054,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Just make this an efficient immediate.
// Note that not calling `layout_of` here does have one real consequence:
// if the type is too big, we'll only notice this when the local is actually initialized,
- // which is a bit too late -- we should ideally notice this alreayd here, when the memory
+ // which is a bit too late -- we should ideally notice this already here, when the memory
// is conceptually allocated. But given how rare that error is and that this is a hot function,
// we accept this downside for now.
Operand::Immediate(Immediate::Uninit)
@@ -1054,14 +1100,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(())
}
- /// Call a query that can return `ErrorHandled`. If `span` is `Some`, point to that span when an error occurs.
+ /// Call a query that can return `ErrorHandled`. Should be used for statics and other globals.
+ /// (`mir::Const`/`ty::Const` have `eval` methods that can be used directly instead.)
pub fn ctfe_query<T>(
&self,
- span: Option<Span>,
query: impl FnOnce(TyCtxtAt<'tcx>) -> Result<T, ErrorHandled>,
) -> Result<T, ErrorHandled> {
// Use a precise span for better cycle errors.
- query(self.tcx.at(span.unwrap_or_else(|| self.cur_span()))).map_err(|err| {
+ query(self.tcx.at(self.cur_span())).map_err(|err| {
err.emit_note(*self.tcx);
err
})
@@ -1072,17 +1118,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
instance: ty::Instance<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let gid = GlobalId { instance, promoted: None };
- // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
- // and thus don't care about the parameter environment. While we could just use
- // `self.param_env`, that would mean we invoke the query to evaluate the static
- // with different parameter environments, thus causing the static to be evaluated
- // multiple times.
- let param_env = if self.tcx.is_static(gid.instance.def_id()) {
- ty::ParamEnv::reveal_all()
+ let val = if self.tcx.is_static(gid.instance.def_id()) {
+ let alloc_id = self.tcx.reserve_and_set_static_alloc(gid.instance.def_id());
+
+ let ty = instance.ty(self.tcx.tcx, self.param_env);
+ mir::ConstAlloc { alloc_id, ty }
} else {
- self.param_env
+ self.ctfe_query(|tcx| tcx.eval_to_allocation_raw(self.param_env.and(gid)))?
};
- let val = self.ctfe_query(None, |tcx| tcx.eval_to_allocation_raw(param_env.and(gid)))?;
self.raw_const_to_mplace(val)
}
@@ -1092,7 +1135,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
span: Option<Span>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- let const_val = self.ctfe_query(span, |tcx| val.eval(*tcx, self.param_env, span))?;
+ let const_val = val.eval(*self.tcx, self.param_env, span).map_err(|err| {
+ // FIXME: somehow this is reachable even when POST_MONO_CHECKS is on.
+ // Are we not always populating `required_consts`?
+ err.emit_note(*self.tcx);
+ err
+ })?;
self.const_val_to_op(const_val, val.ty(), layout)
}
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 8c0009cfd..3d90e95c0 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -161,7 +161,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
#[inline(always)]
fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
- &self.ecx
+ self.ecx
}
fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
@@ -259,7 +259,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
// to avoid could be expensive: on the potentially larger types, arrays and slices,
// rather than on all aggregates unconditionally.
if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
- let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
+ let Some((size, _align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
// We do the walk if we can't determine the size of the mplace: we may be
// dealing with extern types here in the future.
return Ok(true);
@@ -267,7 +267,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
// If there is no provenance in this allocation, it does not contain references
// that point to another allocation, and we can avoid the interning walk.
- if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr(), size, align)? {
+ if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr(), size)? {
if !alloc.has_provenance() {
return Ok(false);
}
@@ -450,6 +450,42 @@ pub fn intern_const_alloc_recursive<
Ok(())
}
+/// Intern `ret`. This function assumes that `ret` references no other allocation.
+#[instrument(level = "debug", skip(ecx))]
+pub fn intern_const_alloc_for_constprop<
+ 'mir,
+ 'tcx: 'mir,
+ T,
+ M: CompileTimeMachine<'mir, 'tcx, T>,
+>(
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ alloc_id: AllocId,
+) -> InterpResult<'tcx, ()> {
+ // Move allocation to `tcx`.
+ let Some((_, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) else {
+ // Pointer not found in local memory map. It is either a pointer to the global
+ // map, or dangling.
+ if ecx.tcx.try_get_global_alloc(alloc_id).is_none() {
+ throw_ub!(DeadLocal)
+ }
+ // The constant is already in global memory. Do nothing.
+ return Ok(());
+ };
+
+ alloc.mutability = Mutability::Not;
+
+ // We are not doing recursive interning, so we don't currently support provenance.
+ // (If this assertion ever triggers, we should just implement a
+ // proper recursive interning loop.)
+ assert!(alloc.provenance().ptrs().is_empty());
+
+ // Link the alloc id to the actual allocation
+ let alloc = ecx.tcx.mk_const_alloc(alloc);
+ ecx.tcx.set_alloc_id_memory(alloc_id, alloc);
+
+ Ok(())
+}
+
impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
InterpCx<'mir, 'tcx, M>
{
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 2c0ba9b26..b23cafc19 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -13,7 +13,7 @@ use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
-use rustc_target::abi::{Abi, Align, Primitive, Size};
+use rustc_target::abi::{Abi, Primitive, Size};
use super::{
util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
@@ -22,8 +22,6 @@ use super::{
use crate::fluent_generated as fluent;
-mod caller_location;
-
fn numeric_intrinsic<Prov>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Prov> {
let size = match kind {
Primitive::Int(integer, _) => integer.size(),
@@ -99,8 +97,8 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
| ty::FnPtr(_)
| ty::Dynamic(_, _, _)
| ty::Closure(_, _)
- | ty::Generator(_, _, _)
- | ty::GeneratorWitness(..)
+ | ty::Coroutine(_, _, _)
+ | ty::CoroutineWitness(..)
| ty::Never
| ty::Tuple(_)
| ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
@@ -130,8 +128,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
match intrinsic_name {
sym::caller_location => {
let span = self.find_closest_untracked_caller_location();
- let location = self.alloc_caller_location_for_span(span);
- self.write_immediate(location.to_ref(self), dest)?;
+ let val = self.tcx.span_as_caller_location(span);
+ let val =
+ self.const_val_to_op(val, self.tcx.caller_location_ty(), Some(dest.layout))?;
+ self.copy_op(&val, dest, /* allow_transmute */ false)?;
}
sym::min_align_of_val | sym::size_of_val => {
@@ -164,7 +164,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::type_name => Ty::new_static_str(self.tcx.tcx),
_ => bug!(),
};
- let val = self.ctfe_query(None, |tcx| {
+ let val = self.ctfe_query(|tcx| {
tcx.const_eval_global_id(self.param_env, gid, Some(tcx.span))
})?;
let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
@@ -218,7 +218,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::discriminant_value => {
let place = self.deref_pointer(&args[0])?;
let variant = self.read_discriminant(&place)?;
- let discr = self.discriminant_for_variant(place.layout, variant)?;
+ let discr = self.discriminant_for_variant(place.layout.ty, variant)?;
self.write_immediate(*discr, dest)?;
}
sym::exact_div => {
@@ -349,10 +349,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Check that the range between them is dereferenceable ("in-bounds or one past the
// end of the same allocation"). This is like the check in ptr_offset_inbounds.
let min_ptr = if dist >= 0 { b } else { a };
- self.check_ptr_access_align(
+ self.check_ptr_access(
min_ptr,
Size::from_bytes(dist.unsigned_abs()),
- Align::ONE,
CheckInAllocMsg::OffsetFromTest,
)?;
@@ -500,6 +499,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
b: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
+ assert_eq!(a.layout.ty, b.layout.ty);
+ assert!(matches!(a.layout.ty.kind(), ty::Int(..) | ty::Uint(..)));
+
// Performs an exact division, resulting in undefined behavior where
// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
// First, check x % y != 0 (or if that computation overflows).
@@ -522,7 +524,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
l: &ImmTy<'tcx, M::Provenance>,
r: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ assert_eq!(l.layout.ty, r.layout.ty);
+ assert!(matches!(l.layout.ty.kind(), ty::Int(..) | ty::Uint(..)));
assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
+
let (val, overflowed) = self.overflowing_binary_op(mir_op, l, r)?;
Ok(if overflowed {
let size = l.layout.size;
@@ -565,16 +570,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn ptr_offset_inbounds(
&self,
ptr: Pointer<Option<M::Provenance>>,
- pointee_ty: Ty<'tcx>,
- offset_count: i64,
+ offset_bytes: i64,
) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
- // We cannot overflow i64 as a type's size must be <= isize::MAX.
- let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
- // The computed offset, in bytes, must not overflow an isize.
- // `checked_mul` enforces a too small bound, but no actual allocation can be big enough for
- // the difference to be noticeable.
- let offset_bytes =
- offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
// The offset being in bounds cannot rely on "wrapping around" the address space.
// So, first rule out overflows in the pointer arithmetic.
let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
@@ -583,10 +580,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// pointers to be properly aligned (unlike a read/write operation).
let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
// This call handles checking for integer/null pointers.
- self.check_ptr_access_align(
+ self.check_ptr_access(
min_ptr,
Size::from_bytes(offset_bytes.unsigned_abs()),
- Align::ONE,
CheckInAllocMsg::PointerArithmeticTest,
)?;
Ok(offset_ptr)
@@ -615,7 +611,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let src = self.read_pointer(src)?;
let dst = self.read_pointer(dst)?;
- self.mem_copy(src, align, dst, align, size, nonoverlapping)
+ self.check_ptr_align(src, align)?;
+ self.check_ptr_align(dst, align)?;
+
+ self.mem_copy(src, dst, size, nonoverlapping)
}
pub(crate) fn write_bytes_intrinsic(
@@ -671,7 +670,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
size|
-> InterpResult<'tcx, &[u8]> {
let ptr = this.read_pointer(op)?;
- let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
+ let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
// zero-sized access
return Ok(&[]);
};
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
deleted file mode 100644
index 948bec746..000000000
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
+++ /dev/null
@@ -1,128 +0,0 @@
-use rustc_ast::Mutability;
-use rustc_hir::lang_items::LangItem;
-use rustc_middle::mir::TerminatorKind;
-use rustc_middle::ty::layout::LayoutOf;
-use rustc_span::{Span, Symbol};
-
-use crate::interpret::{
- intrinsics::{InterpCx, Machine},
- MPlaceTy, MemoryKind, Scalar,
-};
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
- /// Walks up the callstack from the intrinsic's callsite, searching for the first callsite in a
- /// frame which is not `#[track_caller]`.
- pub(crate) fn find_closest_untracked_caller_location(&self) -> Span {
- for frame in self.stack().iter().rev() {
- debug!("find_closest_untracked_caller_location: checking frame {:?}", frame.instance);
-
- // Assert that the frame we look at is actually executing code currently
- // (`loc` is `Right` when we are unwinding and the frame does not require cleanup).
- let loc = frame.loc.left().unwrap();
-
- // This could be a non-`Call` terminator (such as `Drop`), or not a terminator at all
- // (such as `box`). Use the normal span by default.
- let mut source_info = *frame.body.source_info(loc);
-
- // If this is a `Call` terminator, use the `fn_span` instead.
- let block = &frame.body.basic_blocks[loc.block];
- if loc.statement_index == block.statements.len() {
- debug!(
- "find_closest_untracked_caller_location: got terminator {:?} ({:?})",
- block.terminator(),
- block.terminator().kind
- );
- if let TerminatorKind::Call { fn_span, .. } = block.terminator().kind {
- source_info.span = fn_span;
- }
- }
-
- // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
- // If so, the starting `source_info.span` is in the innermost inlined
- // function, and will be replaced with outer callsite spans as long
- // as the inlined functions were `#[track_caller]`.
- loop {
- let scope_data = &frame.body.source_scopes[source_info.scope];
-
- if let Some((callee, callsite_span)) = scope_data.inlined {
- // Stop inside the most nested non-`#[track_caller]` function,
- // before ever reaching its caller (which is irrelevant).
- if !callee.def.requires_caller_location(*self.tcx) {
- return source_info.span;
- }
- source_info.span = callsite_span;
- }
-
- // Skip past all of the parents with `inlined: None`.
- match scope_data.inlined_parent_scope {
- Some(parent) => source_info.scope = parent,
- None => break,
- }
- }
-
- // Stop inside the most nested non-`#[track_caller]` function,
- // before ever reaching its caller (which is irrelevant).
- if !frame.instance.def.requires_caller_location(*self.tcx) {
- return source_info.span;
- }
- }
-
- span_bug!(self.cur_span(), "no non-`#[track_caller]` frame found")
- }
-
- /// Allocate a `const core::panic::Location` with the provided filename and line/column numbers.
- pub(crate) fn alloc_caller_location(
- &mut self,
- filename: Symbol,
- line: u32,
- col: u32,
- ) -> MPlaceTy<'tcx, M::Provenance> {
- let loc_details = self.tcx.sess.opts.unstable_opts.location_detail;
- // This can fail if rustc runs out of memory right here. Trying to emit an error would be
- // pointless, since that would require allocating more memory than these short strings.
- let file = if loc_details.file {
- self.allocate_str(filename.as_str(), MemoryKind::CallerLocation, Mutability::Not)
- .unwrap()
- } else {
- // FIXME: This creates a new allocation each time. It might be preferable to
- // perform this allocation only once, and re-use the `MPlaceTy`.
- // See https://github.com/rust-lang/rust/pull/89920#discussion_r730012398
- self.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not).unwrap()
- };
- let line = if loc_details.line { Scalar::from_u32(line) } else { Scalar::from_u32(0) };
- let col = if loc_details.column { Scalar::from_u32(col) } else { Scalar::from_u32(0) };
-
- // Allocate memory for `CallerLocation` struct.
- let loc_ty = self
- .tcx
- .type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None))
- .instantiate(*self.tcx, self.tcx.mk_args(&[self.tcx.lifetimes.re_erased.into()]));
- let loc_layout = self.layout_of(loc_ty).unwrap();
- let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
-
- // Initialize fields.
- self.write_immediate(file.to_ref(self), &self.project_field(&location, 0).unwrap())
- .expect("writing to memory we just allocated cannot fail");
- self.write_scalar(line, &self.project_field(&location, 1).unwrap())
- .expect("writing to memory we just allocated cannot fail");
- self.write_scalar(col, &self.project_field(&location, 2).unwrap())
- .expect("writing to memory we just allocated cannot fail");
-
- location
- }
-
- pub(crate) fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) {
- let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
- let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
- (
- Symbol::intern(&caller.file.name.prefer_remapped().to_string_lossy()),
- u32::try_from(caller.line).unwrap(),
- u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
- )
- }
-
- pub fn alloc_caller_location_for_span(&mut self, span: Span) -> MPlaceTy<'tcx, M::Provenance> {
- let (file, line, column) = self.location_triple_for_span(span);
- self.alloc_caller_location(file, line, column)
- }
-}
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index aaa674a59..61fe9151d 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -6,16 +6,15 @@ use std::borrow::{Borrow, Cow};
use std::fmt::Debug;
use std::hash::Hash;
+use rustc_apfloat::{Float, FloatConvert};
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, TyCtxt};
use rustc_span::def_id::DefId;
-use rustc_target::abi::{Align, Size};
+use rustc_target::abi::Size;
use rustc_target::spec::abi::Abi as CallAbi;
-use crate::const_eval::CheckAlignment;
-
use super::{
AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, FnArg, Frame, ImmTy, InterpCx,
InterpResult, MPlaceTy, MemoryKind, OpTy, PlaceTy, Pointer, Provenance,
@@ -134,7 +133,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
const POST_MONO_CHECKS: bool = true;
/// Whether memory accesses should be alignment-checked.
- fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment;
+ fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
/// Whether, when checking alignment, we should look at the actual address and thus support
/// custom alignment logic based on whatever the integer address happens to be.
@@ -142,13 +141,6 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
/// If this returns true, Provenance::OFFSET_IS_ADDR must be true.
fn use_addr_for_alignment_check(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
- fn alignment_check_failed(
- ecx: &InterpCx<'mir, 'tcx, Self>,
- has: Align,
- required: Align,
- check: CheckAlignment,
- ) -> InterpResult<'tcx, ()>;
-
/// Whether to enforce the validity invariant for a specific layout.
fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool;
@@ -240,6 +232,16 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
right: &ImmTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)>;
+ /// Generate the NaN returned by a float operation, given the list of inputs.
+ /// (This is all inputs, not just NaN inputs!)
+ fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(
+ _ecx: &InterpCx<'mir, 'tcx, Self>,
+ _inputs: &[F1],
+ ) -> F2 {
+ // By default we always return the preferred NaN.
+ F2::NAN
+ }
+
/// Called before writing the specified `local` of the `frame`.
/// Since writing a ZST is not actually accessing memory or locals, this is never invoked
/// for ZST reads.
@@ -434,6 +436,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
place: &PlaceTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx> {
// Without an aliasing model, all we can do is put `Uninit` into the place.
+ // Conveniently this also ensures that the place actually points to suitable memory.
ecx.write_uninit(place)
}
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 436c4d521..16905e93b 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -18,13 +18,12 @@ use rustc_middle::mir::display_allocation;
use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
use rustc_target::abi::{Align, HasDataLayout, Size};
-use crate::const_eval::CheckAlignment;
use crate::fluent_generated as fluent;
use super::{
- alloc_range, AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg,
- GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance,
- Scalar,
+ alloc_range, AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckAlignMsg,
+ CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak, Misalignment, Pointer,
+ PointerArithmetic, Provenance, Scalar,
};
#[derive(Debug, PartialEq, Copy, Clone)]
@@ -259,14 +258,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
None => self.get_alloc_raw(alloc_id)?.size(),
};
// This will also call the access hooks.
- self.mem_copy(
- ptr,
- Align::ONE,
- new_ptr.into(),
- Align::ONE,
- old_size.min(new_size),
- /*nonoverlapping*/ true,
- )?;
+ self.mem_copy(ptr, new_ptr.into(), old_size.min(new_size), /*nonoverlapping*/ true)?;
self.deallocate_ptr(ptr, old_size_and_align, kind)?;
Ok(new_ptr)
@@ -368,13 +360,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
- align: Align,
) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
self.check_and_deref_ptr(
ptr,
size,
- align,
- M::enforce_alignment(self),
CheckInAllocMsg::MemoryAccessTest,
|alloc_id, offset, prov| {
let (size, align) = self
@@ -384,43 +373,31 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)
}
- /// Check if the given pointer points to live memory of given `size` and `align`
- /// (ignoring `M::enforce_alignment`). The caller can control the error message for the
- /// out-of-bounds case.
+ /// Check if the given pointer points to live memory of the given `size`.
+ /// The caller can control the error message for the out-of-bounds case.
#[inline(always)]
- pub fn check_ptr_access_align(
+ pub fn check_ptr_access(
&self,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
- align: Align,
msg: CheckInAllocMsg,
) -> InterpResult<'tcx> {
- self.check_and_deref_ptr(
- ptr,
- size,
- align,
- CheckAlignment::Error,
- msg,
- |alloc_id, _, _| {
- let (size, align) = self.get_live_alloc_size_and_align(alloc_id, msg)?;
- Ok((size, align, ()))
- },
- )?;
+ self.check_and_deref_ptr(ptr, size, msg, |alloc_id, _, _| {
+ let (size, align) = self.get_live_alloc_size_and_align(alloc_id, msg)?;
+ Ok((size, align, ()))
+ })?;
Ok(())
}
/// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
/// to the allocation it points to. Supports both shared and mutable references, as the actual
- /// checking is offloaded to a helper closure. `align` defines whether and which alignment check
- /// is done.
+ /// checking is offloaded to a helper closure.
///
/// If this returns `None`, the size is 0; it can however return `Some` even for size 0.
fn check_and_deref_ptr<T>(
&self,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
- align: Align,
- check: CheckAlignment,
msg: CheckInAllocMsg,
alloc_size: impl FnOnce(
AllocId,
@@ -435,14 +412,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if size.bytes() > 0 || addr == 0 {
throw_ub!(DanglingIntPointer(addr, msg));
}
- // Must be aligned.
- if check.should_check() {
- self.check_offset_align(addr, align, check)?;
- }
None
}
Ok((alloc_id, offset, prov)) => {
- let (alloc_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
+ let (alloc_size, _alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
// Test bounds. This also ensures non-null.
// It is sufficient to check this for the end pointer. Also check for overflow!
if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
@@ -458,20 +431,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if M::Provenance::OFFSET_IS_ADDR {
assert_ne!(ptr.addr(), Size::ZERO);
}
- // Test align. Check this last; if both bounds and alignment are violated
- // we want the error to be about the bounds.
- if check.should_check() {
- if M::use_addr_for_alignment_check(self) {
- // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
- self.check_offset_align(ptr.addr().bytes(), align, check)?;
- } else {
- // Check allocation alignment and offset alignment.
- if alloc_align.bytes() < align.bytes() {
- M::alignment_check_failed(self, alloc_align, align, check)?;
- }
- self.check_offset_align(offset.bytes(), align, check)?;
- }
- }
// We can still be zero-sized in this branch, in which case we have to
// return `None`.
@@ -480,19 +439,65 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
})
}
- fn check_offset_align(
+ pub(super) fn check_misalign(
&self,
- offset: u64,
- align: Align,
- check: CheckAlignment,
+ misaligned: Option<Misalignment>,
+ msg: CheckAlignMsg,
) -> InterpResult<'tcx> {
- if offset % align.bytes() == 0 {
- Ok(())
- } else {
- // The biggest power of two through which `offset` is divisible.
- let offset_pow2 = 1 << offset.trailing_zeros();
- M::alignment_check_failed(self, Align::from_bytes(offset_pow2).unwrap(), align, check)
+ if let Some(misaligned) = misaligned {
+ throw_ub!(AlignmentCheckFailed(misaligned, msg))
}
+ Ok(())
+ }
+
+ pub(super) fn is_ptr_misaligned(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ align: Align,
+ ) -> Option<Misalignment> {
+ if !M::enforce_alignment(self) || align.bytes() == 1 {
+ return None;
+ }
+
+ #[inline]
+ fn offset_misalignment(offset: u64, align: Align) -> Option<Misalignment> {
+ if offset % align.bytes() == 0 {
+ None
+ } else {
+ // The biggest power of two through which `offset` is divisible.
+ let offset_pow2 = 1 << offset.trailing_zeros();
+ Some(Misalignment { has: Align::from_bytes(offset_pow2).unwrap(), required: align })
+ }
+ }
+
+ match self.ptr_try_get_alloc_id(ptr) {
+ Err(addr) => offset_misalignment(addr, align),
+ Ok((alloc_id, offset, _prov)) => {
+ let (_size, alloc_align, _kind) = self.get_alloc_info(alloc_id);
+ if M::use_addr_for_alignment_check(self) {
+ // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
+ offset_misalignment(ptr.addr().bytes(), align)
+ } else {
+ // Check allocation alignment and offset alignment.
+ if alloc_align.bytes() < align.bytes() {
+ Some(Misalignment { has: alloc_align, required: align })
+ } else {
+ offset_misalignment(offset.bytes(), align)
+ }
+ }
+ }
+ }
+ }
+
+ /// Checks a pointer for misalignment.
+ ///
+ /// The error assumes this is checking the pointer used directly for an access.
+ pub fn check_ptr_align(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ align: Align,
+ ) -> InterpResult<'tcx> {
+ self.check_misalign(self.is_ptr_misaligned(ptr, align), CheckAlignMsg::AccessedPtr)
}
}
@@ -536,7 +541,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
// We don't give a span -- statics don't need that, they cannot be generic or associated.
- let val = self.ctfe_query(None, |tcx| tcx.eval_static_initializer(def_id))?;
+ let val = self.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
(val, Some(def_id))
}
};
@@ -550,17 +555,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)
}
- /// Get the base address for the bytes in an `Allocation` specified by the
- /// `AllocID` passed in; error if no such allocation exists.
- ///
- /// It is up to the caller to take sufficient care when using this address:
- /// there could be provenance or uninit memory in there, and other memory
- /// accesses could invalidate the exposed pointer.
- pub fn alloc_base_addr(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
- let alloc = self.get_alloc_raw(id)?;
- Ok(alloc.base_addr())
- }
-
/// Gives raw access to the `Allocation`, without bounds or alignment checks.
/// The caller is responsible for calling the access hooks!
///
@@ -598,19 +592,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- /// "Safe" (bounds and align-checked) allocation access.
+ /// Bounds-checked *but not align-checked* allocation access.
pub fn get_ptr_alloc<'a>(
&'a self,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
- align: Align,
) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
{
let ptr_and_alloc = self.check_and_deref_ptr(
ptr,
size,
- align,
- M::enforce_alignment(self),
CheckInAllocMsg::MemoryAccessTest,
|alloc_id, offset, prov| {
let alloc = self.get_alloc_raw(alloc_id)?;
@@ -671,15 +662,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok((alloc, &mut self.machine))
}
- /// "Safe" (bounds and align-checked) allocation access.
+ /// Bounds-checked *but not align-checked* allocation access.
pub fn get_ptr_alloc_mut<'a>(
&'a mut self,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
- align: Align,
) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
{
- let parts = self.get_ptr_access(ptr, size, align)?;
+ let parts = self.get_ptr_access(ptr, size)?;
if let Some((alloc_id, offset, prov)) = parts {
let tcx = *self.tcx;
// FIXME: can we somehow avoid looking up the allocation twice here?
@@ -1021,7 +1011,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Pr
}
/// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
- pub(crate) fn has_provenance(&self) -> bool {
+ pub fn has_provenance(&self) -> bool {
!self.alloc.provenance().range_empty(self.range, &self.tcx)
}
}
@@ -1036,7 +1026,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ptr: Pointer<Option<M::Provenance>>,
size: Size,
) -> InterpResult<'tcx, &[u8]> {
- let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
+ let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
// zero-sized access
return Ok(&[]);
};
@@ -1062,7 +1052,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_eq!(lower, len, "can only write iterators with a precise length");
let size = Size::from_bytes(len);
- let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size, Align::ONE)? else {
+ let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size)? else {
// zero-sized access
assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
return Ok(());
@@ -1087,29 +1077,25 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn mem_copy(
&mut self,
src: Pointer<Option<M::Provenance>>,
- src_align: Align,
dest: Pointer<Option<M::Provenance>>,
- dest_align: Align,
size: Size,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
- self.mem_copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
+ self.mem_copy_repeatedly(src, dest, size, 1, nonoverlapping)
}
pub fn mem_copy_repeatedly(
&mut self,
src: Pointer<Option<M::Provenance>>,
- src_align: Align,
dest: Pointer<Option<M::Provenance>>,
- dest_align: Align,
size: Size,
num_copies: u64,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
let tcx = self.tcx;
// We need to do our own bounds-checks.
- let src_parts = self.get_ptr_access(src, size, src_align)?;
- let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
+ let src_parts = self.get_ptr_access(src, size)?;
+ let dest_parts = self.get_ptr_access(dest, size * num_copies)?; // `Size` multiplication
// FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
// and once below to get the underlying `&[mut] Allocation`.
@@ -1249,6 +1235,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Turning a "maybe pointer" into a proper pointer (and some information
/// about where it points), or an absolute address.
+ ///
+ /// The result must be used immediately; it is not allowed to convert
+ /// the returned data back into a `Pointer` and store that in machine state.
+ /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
+ /// we don't have an operation to turn it back into `M::Provenance`.)
pub fn ptr_try_get_alloc_id(
&self,
ptr: Pointer<Option<M::Provenance>>,
@@ -1267,6 +1258,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
/// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
+ ///
+ /// The result must be used immediately; it is not allowed to convert
+ /// the returned data back into a `Pointer` and store that in machine state.
+ /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
+ /// we don't have an operation to turn it back into `M::Provenance`.)
#[inline(always)]
pub fn ptr_get_alloc_id(
&self,
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index 69eb22028..7d286d103 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -21,12 +21,14 @@ mod visitor;
pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
pub use self::eval_context::{Frame, FrameInfo, InterpCx, StackPopCleanup};
-pub use self::intern::{intern_const_alloc_recursive, InternKind};
+pub use self::intern::{
+ intern_const_alloc_for_constprop, intern_const_alloc_recursive, InternKind,
+};
pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
pub use self::operand::{ImmTy, Immediate, OpTy, Readable};
pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
-pub use self::projection::Projectable;
+pub use self::projection::{OffsetMode, Projectable};
pub use self::terminator::FnArg;
pub use self::validity::{CtfeValidationMode, RefTracking};
pub use self::visitor::ValueVisitor;
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index a32ea204f..255dd1eba 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -10,11 +10,12 @@ use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
use rustc_middle::ty::{ConstInt, Ty, TyCtxt};
use rustc_middle::{mir, ty};
-use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
+use rustc_target::abi::{self, Abi, HasDataLayout, Size};
use super::{
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, Frame, InterpCx, InterpResult,
- MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer, Projectable, Provenance, Scalar,
+ MPlaceTy, Machine, MemPlace, MemPlaceMeta, OffsetMode, PlaceTy, Pointer, Projectable,
+ Provenance, Scalar,
};
/// An `Immediate` represents a single immediate self-contained Rust value.
@@ -43,12 +44,16 @@ impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
}
impl<Prov: Provenance> Immediate<Prov> {
- pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
- Immediate::Scalar(Scalar::from_pointer(ptr, cx))
- }
-
- pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
- Immediate::Scalar(Scalar::from_maybe_pointer(ptr, cx))
+ pub fn new_pointer_with_meta(
+ ptr: Pointer<Option<Prov>>,
+ meta: MemPlaceMeta<Prov>,
+ cx: &impl HasDataLayout,
+ ) -> Self {
+ let ptr = Scalar::from_maybe_pointer(ptr, cx);
+ match meta {
+ MemPlaceMeta::None => Immediate::from(ptr),
+ MemPlaceMeta::Meta(meta) => Immediate::ScalarPair(ptr, meta),
+ }
}
pub fn new_slice(ptr: Pointer<Option<Prov>>, len: u64, cx: &impl HasDataLayout) -> Self {
@@ -102,10 +107,10 @@ impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
/// Helper function for printing a scalar to a FmtPrinter
fn p<'a, 'tcx, Prov: Provenance>(
- cx: FmtPrinter<'a, 'tcx>,
+ cx: &mut FmtPrinter<'a, 'tcx>,
s: Scalar<Prov>,
ty: Ty<'tcx>,
- ) -> Result<FmtPrinter<'a, 'tcx>, std::fmt::Error> {
+ ) -> Result<(), std::fmt::Error> {
match s {
Scalar::Int(int) => cx.pretty_print_const_scalar_int(int, ty, true),
Scalar::Ptr(ptr, _sz) => {
@@ -120,8 +125,9 @@ impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
match self.imm {
Immediate::Scalar(s) => {
if let Some(ty) = tcx.lift(self.layout.ty) {
- let cx = FmtPrinter::new(tcx, Namespace::ValueNS);
- f.write_str(&p(cx, s, ty)?.into_buffer())?;
+ let s =
+ FmtPrinter::print_string(tcx, Namespace::ValueNS, |cx| p(cx, s, ty))?;
+ f.write_str(&s)?;
return Ok(());
}
write!(f, "{:x}: {}", s, self.layout.ty)
@@ -163,6 +169,16 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
ImmTy { imm: val.into(), layout }
}
+ #[inline]
+ pub fn from_scalar_pair(a: Scalar<Prov>, b: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+ debug_assert!(
+ matches!(layout.abi, Abi::ScalarPair(..)),
+ "`ImmTy::from_scalar_pair` on non-scalar-pair layout"
+ );
+ let imm = Immediate::ScalarPair(a, b);
+ ImmTy { imm, layout }
+ }
+
#[inline(always)]
pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(
@@ -219,6 +235,17 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
/// given layout.
// Not called `offset` to avoid confusion with the trait method.
fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
+ debug_assert!(layout.is_sized(), "unsized immediates are not a thing");
+ // `ImmTy` have already been checked to be in-bounds, so we can just check directly if this
+ // remains in-bounds. This cannot actually be violated since projections are type-checked
+ // and bounds-checked.
+ assert!(
+ offset + layout.size <= self.layout.size,
+ "attempting to project to field at offset {} with size {} into immediate with layout {:#?}",
+ offset.bytes(),
+ layout.size.bytes(),
+ self.layout,
+ );
// This makes several assumptions about what layouts we will encounter; we match what
// codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
let inner_val: Immediate<_> = match (**self, self.layout.abi) {
@@ -286,6 +313,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
+ _mode: OffsetMode,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
ecx: &InterpCx<'mir, 'tcx, M>,
@@ -315,14 +343,6 @@ pub(super) enum Operand<Prov: Provenance = AllocId> {
pub struct OpTy<'tcx, Prov: Provenance = AllocId> {
op: Operand<Prov>, // Keep this private; it helps enforce invariants.
pub layout: TyAndLayout<'tcx>,
- /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
- /// it needs to have a different alignment than the field type would usually have.
- /// So we represent this here with a separate field that "overwrites" `layout.align`.
- /// This means `layout.align` should never be used for an `OpTy`!
- /// `None` means "alignment does not matter since this is a by-value operand"
- /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
- /// Also CTFE ignores alignment anyway, so this is for Miri only.
- pub align: Option<Align>,
}
impl<Prov: Provenance> std::fmt::Debug for OpTy<'_, Prov> {
@@ -338,18 +358,14 @@ impl<Prov: Provenance> std::fmt::Debug for OpTy<'_, Prov> {
impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
#[inline(always)]
fn from(val: ImmTy<'tcx, Prov>) -> Self {
- OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
+ OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
}
}
impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
#[inline(always)]
fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
- OpTy {
- op: Operand::Indirect(*mplace.mplace()),
- layout: mplace.layout,
- align: Some(mplace.align),
- }
+ OpTy { op: Operand::Indirect(*mplace.mplace()), layout: mplace.layout }
}
}
@@ -380,14 +396,14 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
+ mode: OffsetMode,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
match self.as_mplace_or_imm() {
- Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, ecx)?.into()),
+ Left(mplace) => Ok(mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into()),
Right(imm) => {
- debug_assert!(layout.is_sized(), "unsized immediates are not a thing");
assert_matches!(meta, MemPlaceMeta::None); // no place to store metadata here
// Every part of an uninit is uninit.
Ok(imm.offset_(offset, layout, ecx).into())
@@ -622,7 +638,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
throw_inval!(ConstPropNonsense);
}
}
- Ok(OpTy { op, layout, align: Some(layout.align.abi) })
+ Ok(OpTy { op, layout })
}
/// Every place can be read from, so we can turn them into an operand.
@@ -637,16 +653,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Right((frame, local, offset)) => {
debug_assert!(place.layout.is_sized()); // only sized locals can ever be `Place::Local`.
let base = self.local_to_op(&self.stack()[frame], local, None)?;
- let mut field = match offset {
+ Ok(match offset {
Some(offset) => base.offset(offset, place.layout, self)?,
None => {
// In the common case this hasn't been projected.
debug_assert_eq!(place.layout, base.layout);
base
}
- };
- field.align = Some(place.align);
- Ok(field)
+ })
}
}
}
@@ -670,19 +684,24 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
trace!("eval_place_to_op: got {:?}", op);
// Sanity-check the type we ended up with.
- debug_assert!(
- mir_assign_valid_types(
+ if cfg!(debug_assertions) {
+ let normalized_place_ty = self.subst_from_current_frame_and_normalize_erasing_regions(
+ mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
+ )?;
+ if !mir_assign_valid_types(
*self.tcx,
self.param_env,
- self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
- mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
- )?)?,
+ self.layout_of(normalized_place_ty)?,
op.layout,
- ),
- "eval_place of a MIR place with type {:?} produced an interpreter operand with type {}",
- mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
- op.layout.ty,
- );
+ ) {
+ span_bug!(
+ self.cur_span(),
+ "eval_place of a MIR place with type {} produced an interpreter operand with type {}",
+ normalized_place_ty,
+ op.layout.ty,
+ )
+ }
+ }
Ok(op)
}
@@ -729,27 +748,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
})
};
let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
- let op = match val_val {
+ let imm = match val_val {
mir::ConstValue::Indirect { alloc_id, offset } => {
// We rely on mutability being set correctly in that allocation to prevent writes
// where none should happen.
let ptr = self.global_base_pointer(Pointer::new(alloc_id, offset))?;
- Operand::Indirect(MemPlace::from_ptr(ptr.into()))
+ return Ok(self.ptr_to_mplace(ptr.into(), layout).into());
}
- mir::ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
- mir::ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
+ mir::ConstValue::Scalar(x) => adjust_scalar(x)?.into(),
+ mir::ConstValue::ZeroSized => Immediate::Uninit,
mir::ConstValue::Slice { data, meta } => {
// We rely on mutability being set correctly in `data` to prevent writes
// where none should happen.
let ptr = Pointer::new(self.tcx.reserve_and_set_memory_alloc(data), Size::ZERO);
- Operand::Immediate(Immediate::new_slice(
- self.global_base_pointer(ptr)?.into(),
- meta,
- self,
- ))
+ Immediate::new_slice(self.global_base_pointer(ptr)?.into(), meta, self)
}
};
- Ok(OpTy { op, layout, align: Some(layout.align.abi) })
+ Ok(OpTy { op: Operand::Immediate(imm), layout })
}
}
@@ -762,6 +777,6 @@ mod size_asserts {
static_assert_size!(Immediate, 48);
static_assert_size!(ImmTy<'_>, 64);
static_assert_size!(Operand, 56);
- static_assert_size!(OpTy<'_>, 80);
+ static_assert_size!(OpTy<'_>, 72);
// tidy-alphabetical-end
}
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index b084864f3..a3ba9530f 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -1,7 +1,7 @@
-use rustc_apfloat::Float;
+use rustc_apfloat::{Float, FloatConvert};
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
-use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, FloatTy, Ty};
use rustc_span::symbol::sym;
use rustc_target::abi::Abi;
@@ -104,7 +104,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(ImmTy::from_bool(res, *self.tcx), false)
}
- fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>(
+ fn binary_float_op<F: Float + FloatConvert<F> + Into<Scalar<M::Provenance>>>(
&self,
bin_op: mir::BinOp,
layout: TyAndLayout<'tcx>,
@@ -113,6 +113,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
+ // Performs appropriate non-deterministic adjustments of NaN results.
+ let adjust_nan = |f: F| -> F {
+ if f.is_nan() { M::generate_nan(self, &[l, r]) } else { f }
+ };
+
let val = match bin_op {
Eq => ImmTy::from_bool(l == r, *self.tcx),
Ne => ImmTy::from_bool(l != r, *self.tcx),
@@ -120,11 +125,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Le => ImmTy::from_bool(l <= r, *self.tcx),
Gt => ImmTy::from_bool(l > r, *self.tcx),
Ge => ImmTy::from_bool(l >= r, *self.tcx),
- Add => ImmTy::from_scalar((l + r).value.into(), layout),
- Sub => ImmTy::from_scalar((l - r).value.into(), layout),
- Mul => ImmTy::from_scalar((l * r).value.into(), layout),
- Div => ImmTy::from_scalar((l / r).value.into(), layout),
- Rem => ImmTy::from_scalar((l % r).value.into(), layout),
+ Add => ImmTy::from_scalar(adjust_nan((l + r).value).into(), layout),
+ Sub => ImmTy::from_scalar(adjust_nan((l - r).value).into(), layout),
+ Mul => ImmTy::from_scalar(adjust_nan((l * r).value).into(), layout),
+ Div => ImmTy::from_scalar(adjust_nan((l / r).value).into(), layout),
+ Rem => ImmTy::from_scalar(adjust_nan((l % r).value).into(), layout),
_ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
};
(val, false)
@@ -332,7 +337,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let offset_count = right.to_scalar().to_target_isize(self)?;
let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;
- let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
+ // We cannot overflow i64 as a type's size must be <= isize::MAX.
+ let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
+ // The computed offset, in bytes, must not overflow an isize.
+ // `checked_mul` enforces a too small bound, but no actual allocation can be big enough for
+ // the difference to be noticeable.
+ let offset_bytes =
+ offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
+
+ let offset_ptr = self.ptr_offset_inbounds(ptr, offset_bytes)?;
Ok((
ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout),
false,
@@ -456,6 +469,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok((ImmTy::from_bool(res, *self.tcx), false))
}
ty::Float(fty) => {
+ // No NaN adjustment here, `-` is a bitwise operation!
let res = match (un_op, fty) {
(Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
(Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 503004cbb..09ffdec7d 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -15,9 +15,9 @@ use rustc_middle::ty::Ty;
use rustc_target::abi::{Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT};
use super::{
- alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg, ImmTy,
- Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand, Pointer,
- PointerArithmetic, Projectable, Provenance, Readable, Scalar,
+ alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckAlignMsg, ImmTy,
+ Immediate, InterpCx, InterpResult, Machine, MemoryKind, Misalignment, OffsetMode, OpTy,
+ Operand, Pointer, PointerArithmetic, Projectable, Provenance, Readable, Scalar,
};
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -57,19 +57,11 @@ pub(super) struct MemPlace<Prov: Provenance = AllocId> {
/// Must not be present for sized types, but can be missing for unsized types
/// (e.g., `extern type`).
pub meta: MemPlaceMeta<Prov>,
+ /// Stores whether this place was created based on a sufficiently aligned pointer.
+ misaligned: Option<Misalignment>,
}
impl<Prov: Provenance> MemPlace<Prov> {
- #[inline(always)]
- pub fn from_ptr(ptr: Pointer<Option<Prov>>) -> Self {
- MemPlace { ptr, meta: MemPlaceMeta::None }
- }
-
- #[inline(always)]
- pub fn from_ptr_with_meta(ptr: Pointer<Option<Prov>>, meta: MemPlaceMeta<Prov>) -> Self {
- MemPlace { ptr, meta }
- }
-
/// Adjust the provenance of the main pointer (metadata is unaffected).
pub fn map_provenance(self, f: impl FnOnce(Option<Prov>) -> Option<Prov>) -> Self {
MemPlace { ptr: self.ptr.map_provenance(f), ..self }
@@ -78,27 +70,32 @@ impl<Prov: Provenance> MemPlace<Prov> {
/// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
#[inline]
pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
- match self.meta {
- MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
- MemPlaceMeta::Meta(meta) => {
- Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx), meta)
- }
- }
+ Immediate::new_pointer_with_meta(self.ptr, self.meta, cx)
}
#[inline]
// Not called `offset_with_meta` to avoid confusion with the trait method.
- fn offset_with_meta_<'tcx>(
+ fn offset_with_meta_<'mir, 'tcx, M: Machine<'mir, 'tcx, Provenance = Prov>>(
self,
offset: Size,
+ mode: OffsetMode,
meta: MemPlaceMeta<Prov>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
debug_assert!(
!meta.has_meta() || self.meta.has_meta(),
"cannot use `offset_with_meta` to add metadata to a place"
);
- Ok(MemPlace { ptr: self.ptr.offset(offset, cx)?, meta })
+ if offset > ecx.data_layout().max_size_of_val() {
+ throw_ub!(PointerArithOverflow);
+ }
+ let ptr = match mode {
+ OffsetMode::Inbounds => {
+ ecx.ptr_offset_inbounds(self.ptr, offset.bytes().try_into().unwrap())?
+ }
+ OffsetMode::Wrapping => self.ptr.wrapping_offset(offset, ecx),
+ };
+ Ok(MemPlace { ptr, meta, misaligned: self.misaligned })
}
}
@@ -107,11 +104,6 @@ impl<Prov: Provenance> MemPlace<Prov> {
pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
mplace: MemPlace<Prov>,
pub layout: TyAndLayout<'tcx>,
- /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
- /// it needs to have a different alignment than the field type would usually have.
- /// So we represent this here with a separate field that "overwrites" `layout.align`.
- /// This means `layout.align` should never be used for a `MPlaceTy`!
- pub align: Align,
}
impl<Prov: Provenance> std::fmt::Debug for MPlaceTy<'_, Prov> {
@@ -133,25 +125,7 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
assert!(layout.is_zst());
let align = layout.align.abi;
let ptr = Pointer::from_addr_invalid(align.bytes()); // no provenance, absolute address
- MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align }
- }
-
- #[inline]
- pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
- MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
- }
-
- #[inline]
- pub fn from_aligned_ptr_with_meta(
- ptr: Pointer<Option<Prov>>,
- layout: TyAndLayout<'tcx>,
- meta: MemPlaceMeta<Prov>,
- ) -> Self {
- MPlaceTy {
- mplace: MemPlace::from_ptr_with_meta(ptr, meta),
- layout,
- align: layout.align.abi,
- }
+ MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None, misaligned: None }, layout }
}
/// Adjust the provenance of the main pointer (metadata is unaffected).
@@ -189,15 +163,12 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
+ mode: OffsetMode,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
- Ok(MPlaceTy {
- mplace: self.mplace.offset_with_meta_(offset, meta, ecx)?,
- align: self.align.restrict_for_offset(offset),
- layout,
- })
+ Ok(MPlaceTy { mplace: self.mplace.offset_with_meta_(offset, mode, meta, ecx)?, layout })
}
fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
@@ -228,11 +199,6 @@ pub(super) enum Place<Prov: Provenance = AllocId> {
pub struct PlaceTy<'tcx, Prov: Provenance = AllocId> {
place: Place<Prov>, // Keep this private; it helps enforce invariants.
pub layout: TyAndLayout<'tcx>,
- /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
- /// it needs to have a different alignment than the field type would usually have.
- /// So we represent this here with a separate field that "overwrites" `layout.align`.
- /// This means `layout.align` should never be used for a `PlaceTy`!
- pub align: Align,
}
impl<Prov: Provenance> std::fmt::Debug for PlaceTy<'_, Prov> {
@@ -248,7 +214,7 @@ impl<Prov: Provenance> std::fmt::Debug for PlaceTy<'_, Prov> {
impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
#[inline(always)]
fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
- PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout, align: mplace.align }
+ PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout }
}
}
@@ -264,7 +230,7 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
&self,
) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>)> {
match self.place {
- Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout, align: self.align }),
+ Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout }),
Place::Local { frame, local, offset } => Right((frame, local, offset)),
}
}
@@ -301,27 +267,27 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
+ mode: OffsetMode,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
Ok(match self.as_mplace_or_local() {
- Left(mplace) => mplace.offset_with_meta(offset, meta, layout, ecx)?.into(),
+ Left(mplace) => mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into(),
Right((frame, local, old_offset)) => {
debug_assert!(layout.is_sized(), "unsized locals should live in memory");
assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway...
- let new_offset = ecx
- .data_layout()
- .offset(old_offset.unwrap_or(Size::ZERO).bytes(), offset.bytes())?;
- PlaceTy {
- place: Place::Local {
- frame,
- local,
- offset: Some(Size::from_bytes(new_offset)),
- },
- align: self.align.restrict_for_offset(offset),
- layout,
- }
+ // `Place::Local` are always in-bounds of their surrounding local, so we can just
+ // check directly if this remains in-bounds. This cannot actually be violated since
+ // projections are type-checked and bounds-checked.
+ assert!(offset + layout.size <= self.layout.size);
+
+ let new_offset = Size::from_bytes(
+ ecx.data_layout()
+ .offset(old_offset.unwrap_or(Size::ZERO).bytes(), offset.bytes())?,
+ );
+
+ PlaceTy { place: Place::Local { frame, local, offset: Some(new_offset) }, layout }
}
})
}
@@ -339,9 +305,7 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
#[inline(always)]
pub fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
match self.op() {
- Operand::Indirect(mplace) => {
- Left(MPlaceTy { mplace: *mplace, layout: self.layout, align: self.align.unwrap() })
- }
+ Operand::Indirect(mplace) => Left(MPlaceTy { mplace: *mplace, layout: self.layout }),
Operand::Immediate(imm) => Right(ImmTy::from_immediate(*imm, self.layout)),
}
}
@@ -362,7 +326,7 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
fn as_mplace_or_local(
&self,
- ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>;
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, TyAndLayout<'tcx>)>;
fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
@@ -374,10 +338,9 @@ impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
#[inline(always)]
fn as_mplace_or_local(
&self,
- ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>
- {
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, TyAndLayout<'tcx>)> {
self.as_mplace_or_local()
- .map_right(|(frame, local, offset)| (frame, local, offset, self.align, self.layout))
+ .map_right(|(frame, local, offset)| (frame, local, offset, self.layout))
}
#[inline(always)]
@@ -393,8 +356,7 @@ impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
#[inline(always)]
fn as_mplace_or_local(
&self,
- ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>
- {
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, TyAndLayout<'tcx>)> {
Left(self.clone())
}
@@ -413,6 +375,25 @@ where
Prov: Provenance,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
+ pub fn ptr_with_meta_to_mplace(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ meta: MemPlaceMeta<M::Provenance>,
+ layout: TyAndLayout<'tcx>,
+ ) -> MPlaceTy<'tcx, M::Provenance> {
+ let misaligned = self.is_ptr_misaligned(ptr, layout.align.abi);
+ MPlaceTy { mplace: MemPlace { ptr, meta, misaligned }, layout }
+ }
+
+ pub fn ptr_to_mplace(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ layout: TyAndLayout<'tcx>,
+ ) -> MPlaceTy<'tcx, M::Provenance> {
+ assert!(layout.is_sized());
+ self.ptr_with_meta_to_mplace(ptr, MemPlaceMeta::None, layout)
+ }
+
/// Take a value, which represents a (thin or wide) reference, and make it a place.
/// Alignment is just based on the type. This is the inverse of `mplace_to_ref()`.
///
@@ -434,7 +415,8 @@ where
// `ref_to_mplace` is called on raw pointers even if they don't actually get dereferenced;
// we hence can't call `size_and_align_of` since that asserts more validity than we want.
- Ok(MPlaceTy::from_aligned_ptr_with_meta(ptr.to_pointer(self)?, layout, meta))
+ let ptr = ptr.to_pointer(self)?;
+ Ok(self.ptr_with_meta_to_mplace(ptr, meta, layout))
}
/// Turn a mplace into a (thin or wide) mutable raw pointer, pointing to the same space.
@@ -464,7 +446,6 @@ where
}
let mplace = self.ref_to_mplace(&val)?;
- self.check_mplace(&mplace)?;
Ok(mplace)
}
@@ -477,8 +458,11 @@ where
let (size, _align) = self
.size_and_align_of_mplace(&mplace)?
.unwrap_or((mplace.layout.size, mplace.layout.align.abi));
- // Due to packed places, only `mplace.align` matters.
- self.get_ptr_alloc(mplace.ptr(), size, mplace.align)
+ // We check alignment separately, and *after* checking everything else.
+ // If an access is both OOB and misaligned, we want to see the bounds error.
+ let a = self.get_ptr_alloc(mplace.ptr(), size)?;
+ self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn)?;
+ Ok(a)
}
#[inline]
@@ -490,20 +474,13 @@ where
let (size, _align) = self
.size_and_align_of_mplace(&mplace)?
.unwrap_or((mplace.layout.size, mplace.layout.align.abi));
- // Due to packed places, only `mplace.align` matters.
- self.get_ptr_alloc_mut(mplace.ptr(), size, mplace.align)
- }
-
- /// Check if this mplace is dereferenceable and sufficiently aligned.
- pub fn check_mplace(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
- let (size, _align) = self
- .size_and_align_of_mplace(&mplace)?
- .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
- // Due to packed places, only `mplace.align` matters.
- let align =
- if M::enforce_alignment(self).should_check() { mplace.align } else { Align::ONE };
- self.check_ptr_access_align(mplace.ptr(), size, align, CheckInAllocMsg::DerefTest)?;
- Ok(())
+ // We check alignment separately, and raise that error *after* checking everything else.
+ // If an access is both OOB and misaligned, we want to see the bounds error.
+ // However we have to call `check_misalign` first to make the borrow checker happy.
+ let misalign_err = self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn);
+ let a = self.get_ptr_alloc_mut(mplace.ptr(), size)?;
+ misalign_err?;
+ Ok(a)
}
/// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
@@ -518,8 +495,8 @@ where
let (len, e_ty) = mplace.layout.ty.simd_size_and_type(*self.tcx);
let array = Ty::new_array(self.tcx.tcx, e_ty, len);
let layout = self.layout_of(array)?;
- assert_eq!(layout.size, mplace.layout.size);
- Ok((MPlaceTy { layout, ..*mplace }, len))
+ let mplace = mplace.transmute(layout, self)?;
+ Ok((mplace, len))
}
/// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
@@ -555,7 +532,7 @@ where
Operand::Indirect(mplace) => Place::Ptr(*mplace),
}
};
- Ok(PlaceTy { place, layout, align: layout.align.abi })
+ Ok(PlaceTy { place, layout })
}
/// Computes a place. You should only use this if you intend to write into this
@@ -573,19 +550,24 @@ where
trace!("{:?}", self.dump_place(&place));
// Sanity-check the type we ended up with.
- debug_assert!(
- mir_assign_valid_types(
+ if cfg!(debug_assertions) {
+ let normalized_place_ty = self.subst_from_current_frame_and_normalize_erasing_regions(
+ mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
+ )?;
+ if !mir_assign_valid_types(
*self.tcx,
self.param_env,
- self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
- mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
- )?)?,
+ self.layout_of(normalized_place_ty)?,
place.layout,
- ),
- "eval_place of a MIR place with type {:?} produced an interpreter place with type {}",
- mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
- place.layout.ty,
- );
+ ) {
+ span_bug!(
+ self.cur_span(),
+ "eval_place of a MIR place with type {} produced an interpreter place with type {}",
+ normalized_place_ty,
+ place.layout.ty,
+ )
+ }
+ }
Ok(place)
}
@@ -640,7 +622,7 @@ where
// See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
// but not factored as a separate function.
let mplace = match dest.as_mplace_or_local() {
- Right((frame, local, offset, align, layout)) => {
+ Right((frame, local, offset, layout)) => {
if offset.is_some() {
// This has been projected to a part of this local. We could have complicated
// logic to still keep this local as an `Operand`... but it's much easier to
@@ -681,7 +663,7 @@ where
}
Operand::Indirect(mplace) => {
// The local is in memory, go on below.
- MPlaceTy { mplace: *mplace, align, layout }
+ MPlaceTy { mplace: *mplace, layout }
}
}
}
@@ -690,7 +672,7 @@ where
};
// This is already in memory, write there.
- self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.align, mplace.mplace)
+ self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.mplace)
}
/// Write an immediate to memory.
@@ -700,7 +682,6 @@ where
&mut self,
value: Immediate<M::Provenance>,
layout: TyAndLayout<'tcx>,
- align: Align,
dest: MemPlace<M::Provenance>,
) -> InterpResult<'tcx> {
// Note that it is really important that the type here is the right one, and matches the
@@ -709,9 +690,7 @@ where
// wrong type.
let tcx = *self.tcx;
- let Some(mut alloc) =
- self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout, align })?
- else {
+ let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout })? else {
// zero-sized access
return Ok(());
};
@@ -729,9 +708,6 @@ where
alloc.write_scalar(alloc_range(Size::ZERO, size), scalar)
}
Immediate::ScalarPair(a_val, b_val) => {
- // We checked `ptr_align` above, so all fields will have the alignment they need.
- // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
- // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
let Abi::ScalarPair(a, b) = layout.abi else {
span_bug!(
self.cur_span(),
@@ -760,7 +736,7 @@ where
) -> InterpResult<'tcx> {
let mplace = match dest.as_mplace_or_local() {
Left(mplace) => mplace,
- Right((frame, local, offset, align, layout)) => {
+ Right((frame, local, offset, layout)) => {
if offset.is_some() {
// This has been projected to a part of this local. We could have complicated
// logic to still keep this local as an `Operand`... but it's much easier to
@@ -776,7 +752,7 @@ where
}
Operand::Indirect(mplace) => {
// The local is in memory, go on below.
- MPlaceTy { mplace: *mplace, layout, align }
+ MPlaceTy { mplace: *mplace, layout }
}
}
}
@@ -869,7 +845,6 @@ where
self.write_immediate_to_mplace_no_validate(
*src_val,
src.layout(),
- dest_mem.align,
dest_mem.mplace,
)
};
@@ -896,14 +871,12 @@ where
// type does not have Scalar/ScalarPair layout.
// (Or as the `Assign` docs put it, assignments "not producing primitives" must be
// non-overlapping.)
- self.mem_copy(
- src.ptr(),
- src.align,
- dest.ptr(),
- dest.align,
- dest_size,
- /*nonoverlapping*/ true,
- )
+ // We check alignment separately, and *after* checking everything else.
+ // If an access is both OOB and misaligned, we want to see the bounds error.
+ self.mem_copy(src.ptr(), dest.ptr(), dest_size, /*nonoverlapping*/ true)?;
+ self.check_misalign(src.mplace.misaligned, CheckAlignMsg::BasedOn)?;
+ self.check_misalign(dest.mplace.misaligned, CheckAlignMsg::BasedOn)?;
+ Ok(())
}
/// Ensures that a place is in memory, and returns where it is.
@@ -937,7 +910,6 @@ where
self.write_immediate_to_mplace_no_validate(
local_val,
local_layout,
- local_layout.align.abi,
mplace.mplace,
)?;
}
@@ -952,7 +924,13 @@ where
&mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
};
if let Some(offset) = offset {
- whole_local.offset_with_meta_(offset, MemPlaceMeta::None, self)?
+ // This offset is always inbounds, no need to check it again.
+ whole_local.offset_with_meta_(
+ offset,
+ OffsetMode::Wrapping,
+ MemPlaceMeta::None,
+ self,
+ )?
} else {
// Preserve wide place metadata, do not call `offset`.
whole_local
@@ -961,7 +939,7 @@ where
Place::Ptr(mplace) => mplace,
};
// Return with the original layout and align, so that the caller can go on
- Ok(MPlaceTy { mplace, layout: place.layout, align: place.align })
+ Ok(MPlaceTy { mplace, layout: place.layout })
}
pub fn allocate_dyn(
@@ -974,7 +952,7 @@ where
span_bug!(self.cur_span(), "cannot allocate space for `extern` type, size is not known")
};
let ptr = self.allocate_ptr(size, align, kind)?;
- Ok(MPlaceTy::from_aligned_ptr_with_meta(ptr.into(), layout, meta))
+ Ok(self.ptr_with_meta_to_mplace(ptr.into(), meta, layout))
}
pub fn allocate(
@@ -986,7 +964,7 @@ where
self.allocate_dyn(layout, kind, MemPlaceMeta::None)
}
- /// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation.
+ /// Returns a wide MPlace of type `str` to a new 1-aligned allocation.
pub fn allocate_str(
&mut self,
str: &str,
@@ -995,15 +973,8 @@ where
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl)?;
let meta = Scalar::from_target_usize(u64::try_from(str.len()).unwrap(), self);
- let mplace = MemPlace { ptr: ptr.into(), meta: MemPlaceMeta::Meta(meta) };
-
- let ty = Ty::new_ref(
- self.tcx.tcx,
- self.tcx.lifetimes.re_static,
- ty::TypeAndMut { ty: self.tcx.types.str_, mutbl },
- );
- let layout = self.layout_of(ty).unwrap();
- Ok(MPlaceTy { mplace, layout, align: layout.align.abi })
+ let layout = self.layout_of(self.tcx.types.str_).unwrap();
+ Ok(self.ptr_with_meta_to_mplace(ptr.into(), MemPlaceMeta::Meta(meta), layout))
}
/// Writes the aggregate to the destination.
@@ -1042,7 +1013,7 @@ where
let _ = self.tcx.global_alloc(raw.alloc_id);
let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
let layout = self.layout_of(raw.ty)?;
- Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
+ Ok(self.ptr_to_mplace(ptr.into(), layout))
}
/// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
@@ -1058,12 +1029,10 @@ where
let vtable = mplace.meta().unwrap_meta().to_pointer(self)?;
let (ty, _) = self.get_ptr_vtable(vtable)?;
let layout = self.layout_of(ty)?;
-
- let mplace = MPlaceTy {
- mplace: MemPlace { meta: MemPlaceMeta::None, ..mplace.mplace },
- layout,
- align: layout.align.abi,
- };
+ // This is a kind of transmute, from a place with unsized type and metadata to
+ // a place with sized type and no metadata.
+ let mplace =
+ MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..mplace.mplace }, layout };
Ok((mplace, vtable))
}
@@ -1095,10 +1064,10 @@ mod size_asserts {
use super::*;
use rustc_data_structures::static_assert_size;
// tidy-alphabetical-start
- static_assert_size!(MemPlace, 40);
+ static_assert_size!(MemPlace, 48);
static_assert_size!(MemPlaceMeta, 24);
static_assert_size!(MPlaceTy<'_>, 64);
- static_assert_size!(Place, 40);
+ static_assert_size!(Place, 48);
static_assert_size!(PlaceTy<'_>, 64);
// tidy-alphabetical-end
}
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 70df3d8fd..6694c43c9 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -19,6 +19,15 @@ use rustc_target::abi::{self, VariantIdx};
use super::{InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Provenance, Scalar};
+/// Describes the constraints placed on offset-projections.
+#[derive(Copy, Clone, Debug)]
+pub enum OffsetMode {
+ /// The offset has to be inbounds, like `ptr::offset`.
+ Inbounds,
+ /// No constraints, just wrap around the edge of the address space.
+ Wrapping,
+}
+
/// A thing that we can project into, and that has a layout.
pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
/// Get the layout.
@@ -53,12 +62,12 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
+ mode: OffsetMode,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self>;
- #[inline]
fn offset<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
@@ -66,10 +75,9 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
assert!(layout.is_sized());
- self.offset_with_meta(offset, MemPlaceMeta::None, layout, ecx)
+ self.offset_with_meta(offset, OffsetMode::Inbounds, MemPlaceMeta::None, layout, ecx)
}
- #[inline]
fn transmute<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
layout: TyAndLayout<'tcx>,
@@ -77,7 +85,7 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
) -> InterpResult<'tcx, Self> {
assert!(self.layout().is_sized() && layout.is_sized());
assert_eq!(self.layout().size, layout.size);
- self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, ecx)
+ self.offset_with_meta(Size::ZERO, OffsetMode::Wrapping, MemPlaceMeta::None, layout, ecx)
}
/// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for
@@ -104,7 +112,17 @@ impl<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>> ArrayIterator<'tcx,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Option<(u64, P)>> {
let Some(idx) = self.range.next() else { return Ok(None) };
- Ok(Some((idx, self.base.offset(self.stride * idx, self.field_layout, ecx)?)))
+ // We use `Wrapping` here since the offset has already been checked when the iterator was created.
+ Ok(Some((
+ idx,
+ self.base.offset_with_meta(
+ self.stride * idx,
+ OffsetMode::Wrapping,
+ MemPlaceMeta::None,
+ self.field_layout,
+ ecx,
+ )?,
+ )))
}
}
@@ -159,7 +177,7 @@ where
(MemPlaceMeta::None, offset)
};
- base.offset_with_meta(offset, meta, field_layout, self)
+ base.offset_with_meta(offset, OffsetMode::Inbounds, meta, field_layout, self)
}
/// Downcasting to an enum variant.
@@ -248,6 +266,10 @@ where
};
let len = base.len(self)?;
let field_layout = base.layout().field(self, 0);
+ // Ensure that all the offsets are in-bounds once, up-front.
+ debug!("project_array_fields: {base:?} {len}");
+ base.offset(len * stride, self.layout_of(self.tcx.types.unit).unwrap(), self)?;
+ // Create the iterator.
Ok(ArrayIterator { base, range: 0..len, stride, field_layout, _phantom: PhantomData })
}
@@ -305,7 +327,7 @@ where
};
let layout = self.layout_of(ty)?;
- base.offset_with_meta(from_offset, meta, layout, self)
+ base.offset_with_meta(from_offset, OffsetMode::Inbounds, meta, layout, self)
}
/// Applying a general projection
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 284e13407..b6993d939 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -206,15 +206,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let elem_size = first.layout.size;
let first_ptr = first.ptr();
let rest_ptr = first_ptr.offset(elem_size, self)?;
- // For the alignment of `rest_ptr`, we crucially do *not* use `first.align` as
- // that place might be more aligned than its type mandates (a `u8` array could
- // be 4-aligned if it sits at the right spot in a struct). We have to also factor
- // in element size.
+ // No alignment requirement since `copy_op` above already checked it.
self.mem_copy_repeatedly(
first_ptr,
- dest.align,
rest_ptr,
- dest.align.restrict_for_offset(elem_size),
elem_size,
length - 1,
/*nonoverlapping:*/ true,
@@ -268,7 +263,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
NullaryOp(ref null_op, ty) => {
let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty)?;
let layout = self.layout_of(ty)?;
- if let mir::NullOp::SizeOf | mir::NullOp::AlignOf = null_op && layout.is_unsized() {
+ if let mir::NullOp::SizeOf | mir::NullOp::AlignOf = null_op
+ && layout.is_unsized()
+ {
span_bug!(
self.frame().current_span(),
"{null_op:?} MIR operator called for unsized type {ty}",
@@ -278,7 +275,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
mir::NullOp::SizeOf => layout.size.bytes(),
mir::NullOp::AlignOf => layout.align.abi.bytes(),
mir::NullOp::OffsetOf(fields) => {
- layout.offset_of_subfield(self, fields.iter().map(|f| f.index())).bytes()
+ layout.offset_of_subfield(self, fields.iter()).bytes()
}
};
self.write_scalar(Scalar::from_target_usize(val, self), &dest)?;
@@ -300,7 +297,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Discriminant(place) => {
let op = self.eval_place_to_op(place, None)?;
let variant = self.read_discriminant(&op)?;
- let discr = self.discriminant_for_variant(op.layout, variant)?;
+ let discr = self.discriminant_for_variant(op.layout.ty, variant)?;
self.write_immediate(*discr, &dest)?;
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 578dd6622..b54c66814 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -1,6 +1,5 @@
use std::borrow::Cow;
-use either::Either;
use rustc_ast::ast::InlineAsmOptions;
use rustc_middle::{
mir,
@@ -219,7 +218,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Unreachable => throw_ub!(Unreachable),
// These should never occur for MIR we actually run.
- FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | GeneratorDrop => span_bug!(
+ FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | CoroutineDrop => span_bug!(
terminator.source_info.span,
"{:#?} should have been eliminated by MIR pass",
terminator.kind
@@ -729,13 +728,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
callee_ty: callee_fn_abi.ret.layout.ty
});
}
- // Ensure the return place is aligned and dereferenceable, and protect it for
- // in-place return value passing.
- if let Either::Left(mplace) = destination.as_mplace_or_local() {
- self.check_mplace(&mplace)?;
- } else {
- // Nothing to do for locals, they are always properly allocated and aligned.
- }
+ // Protect return place for in-place return value passing.
M::protect_in_place_function_argument(self, destination)?;
// Don't forget to mark "initially live" locals as live.
@@ -890,11 +883,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
fn check_fn_target_features(&self, instance: ty::Instance<'tcx>) -> InterpResult<'tcx, ()> {
+ // Calling functions with `#[target_feature]` is not unsafe on WASM, see #84988
let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
- if attrs
- .target_features
- .iter()
- .any(|feature| !self.tcx.sess.target_features.contains(feature))
+ if !self.tcx.sess.target.is_like_wasm
+ && attrs
+ .target_features
+ .iter()
+ .any(|feature| !self.tcx.sess.target_features.contains(feature))
{
throw_ub_custom!(
fluent::const_eval_unavailable_target_features_for_fn,
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index eb639ded7..416443f5f 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -34,7 +34,7 @@ where
match *ty.kind() {
ty::Param(_) => ControlFlow::Break(FoundParam),
ty::Closure(def_id, args)
- | ty::Generator(def_id, args, ..)
+ | ty::Coroutine(def_id, args, ..)
| ty::FnDef(def_id, args) => {
let instance = ty::InstanceDef::Item(def_id);
let unused_params = self.tcx.unused_generic_params(instance);
@@ -42,10 +42,10 @@ where
let index = index
.try_into()
.expect("more generic parameters than can fit into a `u32`");
- // Only recurse when generic parameters in fns, closures and generators
+ // Only recurse when generic parameters in fns, closures and coroutines
// are used and have to be instantiated.
//
- // Just in case there are closures or generators within this subst,
+ // Just in case there are closures or coroutines within this subst,
// recurse.
if unused_params.is_used(index) && subst.has_param() {
return subst.visit_with(self);
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 3e023a896..d21fef58f 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -13,7 +13,7 @@ use rustc_ast::Mutability;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
use rustc_middle::mir::interpret::{
- ExpectedKind, InterpError, InvalidMetaKind, PointerKind, ValidationErrorInfo,
+ ExpectedKind, InterpError, InvalidMetaKind, Misalignment, PointerKind, ValidationErrorInfo,
ValidationErrorKind, ValidationErrorKind::*,
};
use rustc_middle::ty;
@@ -112,13 +112,13 @@ macro_rules! try_validation {
pub enum PathElem {
Field(Symbol),
Variant(Symbol),
- GeneratorState(VariantIdx),
+ CoroutineState(VariantIdx),
CapturedVar(Symbol),
ArrayElem(usize),
TupleElem(usize),
Deref,
EnumTag,
- GeneratorTag,
+ CoroutineTag,
DynDowncast,
}
@@ -171,8 +171,8 @@ fn write_path(out: &mut String, path: &[PathElem]) {
Field(name) => write!(out, ".{name}"),
EnumTag => write!(out, ".<enum-tag>"),
Variant(name) => write!(out, ".<enum-variant({name})>"),
- GeneratorTag => write!(out, ".<generator-tag>"),
- GeneratorState(idx) => write!(out, ".<generator-state({})>", idx.index()),
+ CoroutineTag => write!(out, ".<coroutine-tag>"),
+ CoroutineState(idx) => write!(out, ".<coroutine-state({})>", idx.index()),
CapturedVar(name) => write!(out, ".<captured-var({name})>"),
TupleElem(idx) => write!(out, ".{idx}"),
ArrayElem(idx) => write!(out, "[{idx}]"),
@@ -206,7 +206,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
if tag_field == field {
return match layout.ty.kind() {
ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag,
- ty::Generator(..) => PathElem::GeneratorTag,
+ ty::Coroutine(..) => PathElem::CoroutineTag,
_ => bug!("non-variant type {:?}", layout.ty),
};
}
@@ -216,8 +216,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// Now we know we are projecting to a field, so figure out which one.
match layout.ty.kind() {
- // generators and closures.
- ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
+ // coroutines and closures.
+ ty::Closure(def_id, _) | ty::Coroutine(def_id, _, _) => {
let mut name = None;
// FIXME this should be more descriptive i.e. CapturePlace instead of CapturedVar
// https://github.com/rust-lang/project-rfc-2229/issues/46
@@ -225,7 +225,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let captures = self.ecx.tcx.closure_captures(local_def_id);
if let Some(captured_place) = captures.get(field) {
// Sometimes the index is beyond the number of upvars (seen
- // for a generator).
+ // for a coroutine).
let var_hir_id = captured_place.get_root_variable();
let node = self.ecx.tcx.hir().get(var_hir_id);
if let hir::Node::Pat(pat) = node {
@@ -355,7 +355,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
value: &OpTy<'tcx, M::Provenance>,
ptr_kind: PointerKind,
) -> InterpResult<'tcx> {
- // Not using `deref_pointer` since we do the dereferenceable check ourselves below.
+ // Not using `deref_pointer` since we want to use our `read_immediate` wrapper.
let place = self.ecx.ref_to_mplace(&self.read_immediate(value, ptr_kind.into())?)?;
// Handle wide pointers.
// Check metadata early, for better diagnostics
@@ -378,18 +378,12 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
.unwrap_or_else(|| (place.layout.size, place.layout.align.abi));
// Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
try_validation!(
- self.ecx.check_ptr_access_align(
+ self.ecx.check_ptr_access(
place.ptr(),
size,
- align,
CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
),
self.path,
- Ub(AlignmentCheckFailed { required, has }) => UnalignedPtr {
- ptr_kind,
- required_bytes: required.bytes(),
- found_bytes: has.bytes()
- },
Ub(DanglingIntPointer(0, _)) => NullPtr { ptr_kind },
Ub(DanglingIntPointer(i, _)) => DanglingPtrNoProvenance {
ptr_kind,
@@ -405,6 +399,18 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
ptr_kind,
},
);
+ try_validation!(
+ self.ecx.check_ptr_align(
+ place.ptr(),
+ align,
+ ),
+ self.path,
+ Ub(AlignmentCheckFailed(Misalignment { required, has }, _msg)) => UnalignedPtr {
+ ptr_kind,
+ required_bytes: required.bytes(),
+ found_bytes: has.bytes()
+ },
+ );
// Do not allow pointers to uninhabited types.
if place.layout.abi.is_uninhabited() {
let ty = place.layout.ty;
@@ -574,7 +580,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
| ty::Str
| ty::Dynamic(..)
| ty::Closure(..)
- | ty::Generator(..) => Ok(false),
+ | ty::Coroutine(..) => Ok(false),
// Some types only occur during typechecking, they have no layout.
// We should not see them here and we could not check them anyway.
ty::Error(_)
@@ -583,7 +589,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
| ty::Bound(..)
| ty::Param(..)
| ty::Alias(..)
- | ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty),
+ | ty::CoroutineWitness(..) => bug!("Encountered invalid type {:?}", ty),
}
}
@@ -645,7 +651,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
#[inline(always)]
fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
- &self.ecx
+ self.ecx
}
fn read_discriminant(
@@ -686,8 +692,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
) -> InterpResult<'tcx> {
let name = match old_op.layout.ty.kind() {
ty::Adt(adt, _) => PathElem::Variant(adt.variant(variant_id).name),
- // Generators also have variants
- ty::Generator(..) => PathElem::GeneratorState(variant_id),
+ // Coroutines also have variants
+ ty::Coroutine(..) => PathElem::CoroutineState(variant_id),
_ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty),
};
self.with_elem(name, move |this| this.visit_value(new_op))
@@ -781,14 +787,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// Optimization: we just check the entire range at once.
// NOTE: Keep this in sync with the handling of integer and float
// types above, in `visit_primitive`.
- // In run-time mode, we accept pointers in here. This is actually more
- // permissive than a per-element check would be, e.g., we accept
- // a &[u8] that contains a pointer even though bytewise checking would
- // reject it. However, that's good: We don't inherently want
- // to reject those pointers, we just do not have the machinery to
- // talk about parts of a pointer.
- // We also accept uninit, for consistency with the slow path.
- let alloc = self.ecx.get_ptr_alloc(mplace.ptr(), size, mplace.align)?.expect("we already excluded size 0");
+ // No need for an alignment check here, this is not an actual memory access.
+ let alloc = self.ecx.get_ptr_alloc(mplace.ptr(), size)?.expect("we already excluded size 0");
match alloc.get_bytes_strip_provenance() {
// In the happy case, we needn't check anything else.
@@ -929,7 +929,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// - no pointers to statics.
/// - no `UnsafeCell` or non-ZST `&mut`.
#[inline(always)]
- pub fn const_validate_operand(
+ pub(crate) fn const_validate_operand(
&self,
op: &OpTy<'tcx, M::Provenance>,
path: Vec<PathElem>,
diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs
index 8bb409cea..1e21c4940 100644
--- a/compiler/rustc_const_eval/src/lib.rs
+++ b/compiler/rustc_const_eval/src/lib.rs
@@ -4,6 +4,9 @@ Rust MIR: a lowered representation of Rust.
*/
+#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![cfg_attr(not(bootstrap), feature(rustdoc_internals))]
+#![cfg_attr(not(bootstrap), doc(rust_logo))]
#![deny(rustc::untranslatable_diagnostic)]
#![feature(assert_matches)]
#![feature(box_patterns)]
@@ -46,13 +49,13 @@ pub fn provide(providers: &mut Providers) {
const_eval::provide(providers);
providers.eval_to_const_value_raw = const_eval::eval_to_const_value_raw_provider;
providers.eval_to_allocation_raw = const_eval::eval_to_allocation_raw_provider;
- providers.const_caller_location = const_eval::const_caller_location;
+ providers.hooks.const_caller_location = util::caller_location::const_caller_location_provider;
providers.eval_to_valtree = |tcx, param_env_and_value| {
let (param_env, raw) = param_env_and_value.into_parts();
const_eval::eval_to_valtree(tcx, param_env, raw)
};
- providers.hooks.try_destructure_mir_constant_for_diagnostics =
- const_eval::try_destructure_mir_constant_for_diagnostics;
+ providers.hooks.try_destructure_mir_constant_for_user_output =
+ const_eval::try_destructure_mir_constant_for_user_output;
providers.valtree_to_const_val = |tcx, (ty, valtree)| {
const_eval::valtree_to_const_value(tcx, ty::ParamEnv::empty().and(ty), valtree)
};
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
index 8c2346c4e..76116e339 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -9,16 +9,17 @@ use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::traits::BuiltinImplSource;
+use rustc_middle::ty::GenericArgs;
use rustc_middle::ty::{self, adjustment::PointerCoercion, Instance, InstanceDef, Ty, TyCtxt};
-use rustc_middle::ty::{GenericArgKind, GenericArgs};
use rustc_middle::ty::{TraitRef, TypeVisitableExt};
use rustc_mir_dataflow::{self, Analysis};
use rustc_span::{sym, Span, Symbol};
use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
use rustc_trait_selection::traits::{self, ObligationCauseCode, ObligationCtxt, SelectionContext};
+use rustc_type_ir::visit::{TypeSuperVisitable, TypeVisitor};
use std::mem;
-use std::ops::Deref;
+use std::ops::{ControlFlow, Deref};
use super::ops::{self, NonConstOp, Status};
use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop};
@@ -188,6 +189,24 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
}
}
+struct LocalReturnTyVisitor<'ck, 'mir, 'tcx> {
+ kind: LocalKind,
+ checker: &'ck mut Checker<'mir, 'tcx>,
+}
+
+impl<'ck, 'mir, 'tcx> TypeVisitor<TyCtxt<'tcx>> for LocalReturnTyVisitor<'ck, 'mir, 'tcx> {
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match t.kind() {
+ ty::FnPtr(_) => ControlFlow::Continue(()),
+ ty::Ref(_, _, hir::Mutability::Mut) => {
+ self.checker.check_op(ops::ty::MutRef(self.kind));
+ t.super_visit_with(self)
+ }
+ _ => t.super_visit_with(self),
+ }
+ }
+}
+
pub struct Checker<'mir, 'tcx> {
ccx: &'mir ConstCx<'mir, 'tcx>,
qualifs: Qualifs<'mir, 'tcx>,
@@ -228,7 +247,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
// `async` functions cannot be `const fn`. This is checked during AST lowering, so there's
// no need to emit duplicate errors here.
- if self.ccx.is_async() || body.generator.is_some() {
+ if self.ccx.is_async() || body.coroutine.is_some() {
tcx.sess.delay_span_bug(body.span, "`async` functions cannot be `const fn`");
return;
}
@@ -237,7 +256,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
if self.const_kind() == hir::ConstContext::ConstFn {
for (idx, local) in body.local_decls.iter_enumerated() {
// Handle the return place below.
- if idx == RETURN_PLACE || local.internal {
+ if idx == RETURN_PLACE {
continue;
}
@@ -304,7 +323,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
let gate = match op.status_in_item(self.ccx) {
Status::Allowed => return,
- Status::Unstable(gate) if self.tcx.features().enabled(gate) => {
+ Status::Unstable(gate) if self.tcx.features().active(gate) => {
let unstable_in_stable = self.ccx.is_const_stable_const_fn()
&& !super::rustc_allow_const_fn_unstable(self.tcx, self.def_id(), gate);
if unstable_in_stable {
@@ -346,20 +365,9 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
fn check_local_or_return_ty(&mut self, ty: Ty<'tcx>, local: Local) {
let kind = self.body.local_kind(local);
- for ty in ty.walk() {
- let ty = match ty.unpack() {
- GenericArgKind::Type(ty) => ty,
-
- // No constraints on lifetimes or constants, except potentially
- // constants' types, but `walk` will get to them as well.
- GenericArgKind::Lifetime(_) | GenericArgKind::Const(_) => continue,
- };
+ let mut visitor = LocalReturnTyVisitor { kind, checker: self };
- match *ty.kind() {
- ty::Ref(_, _, hir::Mutability::Mut) => self.check_op(ops::ty::MutRef(kind)),
- _ => {}
- }
- }
+ visitor.visit_ty(ty);
}
fn check_mut_borrow(&mut self, local: Local, kind: hir::BorrowKind) {
@@ -455,10 +463,11 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
| Rvalue::Len(_) => {}
Rvalue::Aggregate(kind, ..) => {
- if let AggregateKind::Generator(def_id, ..) = kind.as_ref()
- && let Some(generator_kind @ hir::GeneratorKind::Async(..)) = self.tcx.generator_kind(def_id)
+ if let AggregateKind::Coroutine(def_id, ..) = kind.as_ref()
+ && let Some(coroutine_kind @ hir::CoroutineKind::Async(..)) =
+ self.tcx.coroutine_kind(def_id)
{
- self.check_op(ops::Generator(generator_kind));
+ self.check_op(ops::Coroutine(coroutine_kind));
}
}
@@ -571,8 +580,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
}
}
- Rvalue::BinaryOp(op, box (lhs, rhs))
- | Rvalue::CheckedBinaryOp(op, box (lhs, rhs)) => {
+ Rvalue::BinaryOp(op, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(op, box (lhs, rhs)) => {
let lhs_ty = lhs.ty(self.body, self.tcx);
let rhs_ty = rhs.ty(self.body, self.tcx);
@@ -580,18 +588,16 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
// Int, bool, and char operations are fine.
} else if lhs_ty.is_fn_ptr() || lhs_ty.is_unsafe_ptr() {
assert_eq!(lhs_ty, rhs_ty);
- assert!(
- matches!(
- op,
- BinOp::Eq
+ assert!(matches!(
+ op,
+ BinOp::Eq
| BinOp::Ne
| BinOp::Le
| BinOp::Lt
| BinOp::Ge
| BinOp::Gt
| BinOp::Offset
- )
- );
+ ));
self.check_op(ops::RawPtrComparison);
} else if lhs_ty.is_floating_point() || rhs_ty.is_floating_point() {
@@ -743,7 +749,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
let errors = ocx.select_all_or_error();
if !errors.is_empty() {
- infcx.err_ctxt().report_fulfillment_errors(&errors);
+ infcx.err_ctxt().report_fulfillment_errors(errors);
}
// Attempting to call a trait method?
@@ -887,7 +893,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
// At this point, we are calling a function, `callee`, whose `DefId` is known...
- // `begin_panic` and `panic_display` are generic functions that accept
+ // `begin_panic` and `#[rustc_const_panic_str]` functions accept generic
// types other than str. Check to enforce that only str can be used in
// const-eval.
@@ -899,8 +905,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
}
}
- // const-eval of the `panic_display` fn assumes the argument is `&&str`
- if Some(callee) == tcx.lang_items().panic_display() {
+ // const-eval of `#[rustc_const_panic_str]` functions assumes the argument is `&&str`
+ if tcx.has_attr(callee, sym::rustc_const_panic_str) {
match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
ty::Ref(_, ty, _) if matches!(ty.kind(), ty::Ref(_, ty, _) if ty.is_str()) =>
{
@@ -939,7 +945,9 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
if self.span.allows_unstable(gate) {
return;
}
- if let Some(implied_by_gate) = implied_by && self.span.allows_unstable(implied_by_gate) {
+ if let Some(implied_by_gate) = implied_by
+ && self.span.allows_unstable(implied_by_gate)
+ {
return;
}
@@ -1034,8 +1042,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
TerminatorKind::InlineAsm { .. } => self.check_op(ops::InlineAsm),
- TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => {
- self.check_op(ops::Generator(hir::GeneratorKind::Gen))
+ TerminatorKind::CoroutineDrop | TerminatorKind::Yield { .. } => {
+ self.check_op(ops::Coroutine(hir::CoroutineKind::Coroutine))
}
TerminatorKind::UnwindTerminate(_) => {
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
index 1f3cda35c..40183bacc 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
@@ -311,10 +311,10 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
ccx.const_kind(),
));
- if let Some(feature) = feature && ccx.tcx.sess.is_nightly_build() {
- err.help(format!(
- "add `#![feature({feature})]` to the crate attributes to enable",
- ));
+ if let Some(feature) = feature
+ && ccx.tcx.sess.is_nightly_build()
+ {
+ err.help(format!("add `#![feature({feature})]` to the crate attributes to enable",));
}
if let ConstContext::Static(_) = ccx.const_kind() {
@@ -357,10 +357,10 @@ impl<'tcx> NonConstOp<'tcx> for FnCallUnstable {
}
#[derive(Debug)]
-pub struct Generator(pub hir::GeneratorKind);
-impl<'tcx> NonConstOp<'tcx> for Generator {
+pub struct Coroutine(pub hir::CoroutineKind);
+impl<'tcx> NonConstOp<'tcx> for Coroutine {
fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
- if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
+ if let hir::CoroutineKind::Async(hir::CoroutineSource::Block) = self.0 {
Status::Unstable(sym::const_async_blocks)
} else {
Status::Forbidden
@@ -372,8 +372,8 @@ impl<'tcx> NonConstOp<'tcx> for Generator {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let msg = format!("{}s are not allowed in {}s", self.0.descr(), ccx.const_kind());
- if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
+ let msg = format!("{:#}s are not allowed in {}s", self.0, ccx.const_kind());
+ if let hir::CoroutineKind::Async(hir::CoroutineSource::Block) = self.0 {
ccx.tcx.sess.create_feature_err(
errors::UnallowedOpInConstContext { span, msg },
sym::const_async_blocks,
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
index fd6bc2ee9..aff256b3e 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
@@ -111,7 +111,7 @@ impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
| mir::TerminatorKind::Assert { .. }
| mir::TerminatorKind::FalseEdge { .. }
| mir::TerminatorKind::FalseUnwind { .. }
- | mir::TerminatorKind::GeneratorDrop
+ | mir::TerminatorKind::CoroutineDrop
| mir::TerminatorKind::Goto { .. }
| mir::TerminatorKind::InlineAsm { .. }
| mir::TerminatorKind::UnwindResume
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
index 5d8b1956a..32af537e2 100644
--- a/compiler/rustc_const_eval/src/transform/promote_consts.rs
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -970,7 +970,7 @@ pub fn promote_candidates<'tcx>(
0,
vec![],
body.span,
- body.generator_kind(),
+ body.coroutine_kind(),
body.tainted_by_errors,
);
promoted.phase = MirPhase::Analysis(AnalysisPhase::Initial);
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index ec1bc20ed..5922922d4 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -67,7 +67,7 @@ impl<'tcx> MirPass<'tcx> for Validator {
let body_abi = match body_ty.kind() {
ty::FnDef(..) => body_ty.fn_sig(tcx).abi(),
ty::Closure(..) => Abi::RustCall,
- ty::Generator(..) => Abi::Rust,
+ ty::Coroutine(..) => Abi::Rust,
_ => {
span_bug!(body.span, "unexpected body ty: {:?} phase {:?}", body_ty, mir_phase)
}
@@ -472,11 +472,11 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
self.check_unwind_edge(location, *unwind);
}
TerminatorKind::Yield { resume, drop, .. } => {
- if self.body.generator.is_none() {
- self.fail(location, "`Yield` cannot appear outside generator bodies");
+ if self.body.coroutine.is_none() {
+ self.fail(location, "`Yield` cannot appear outside coroutine bodies");
}
if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
- self.fail(location, "`Yield` should have been replaced by generator lowering");
+ self.fail(location, "`Yield` should have been replaced by coroutine lowering");
}
self.check_edge(location, *resume, EdgeKind::Normal);
if let Some(drop) = drop {
@@ -509,14 +509,14 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
}
self.check_unwind_edge(location, *unwind);
}
- TerminatorKind::GeneratorDrop => {
- if self.body.generator.is_none() {
- self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
+ TerminatorKind::CoroutineDrop => {
+ if self.body.coroutine.is_none() {
+ self.fail(location, "`CoroutineDrop` cannot appear outside coroutine bodies");
}
if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(
location,
- "`GeneratorDrop` should have been replaced by generator lowering",
+ "`CoroutineDrop` should have been replaced by coroutine lowering",
);
}
}
@@ -716,7 +716,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
};
check_equal(self, location, f_ty);
}
- &ty::Generator(def_id, args, _) => {
+ &ty::Coroutine(def_id, args, _) => {
let f_ty = if let Some(var) = parent_ty.variant_index {
let gen_body = if def_id == self.body.source.def_id() {
self.body
@@ -724,10 +724,10 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
self.tcx.optimized_mir(def_id)
};
- let Some(layout) = gen_body.generator_layout() else {
+ let Some(layout) = gen_body.coroutine_layout() else {
self.fail(
location,
- format!("No generator layout for {parent_ty:?}"),
+ format!("No coroutine layout for {parent_ty:?}"),
);
return;
};
@@ -747,7 +747,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
ty::EarlyBinder::bind(f_ty.ty).instantiate(self.tcx, args)
} else {
- let Some(&f_ty) = args.as_generator().prefix_tys().get(f.index())
+ let Some(&f_ty) = args.as_coroutine().prefix_tys().get(f.index())
else {
fail_out_of_bounds(self, location);
return;
@@ -1056,16 +1056,23 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
}
- Rvalue::NullaryOp(NullOp::OffsetOf(fields), container) => {
+ Rvalue::NullaryOp(NullOp::OffsetOf(indices), container) => {
let fail_out_of_bounds = |this: &mut Self, location, field, ty| {
this.fail(location, format!("Out of bounds field {field:?} for {ty:?}"));
};
let mut current_ty = *container;
- for field in fields.iter() {
+ for (variant, field) in indices.iter() {
match current_ty.kind() {
ty::Tuple(fields) => {
+ if variant != FIRST_VARIANT {
+ self.fail(
+ location,
+ format!("tried to get variant {variant:?} of tuple"),
+ );
+ return;
+ }
let Some(&f_ty) = fields.get(field.as_usize()) else {
fail_out_of_bounds(self, location, field, current_ty);
return;
@@ -1074,15 +1081,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
current_ty = self.tcx.normalize_erasing_regions(self.param_env, f_ty);
}
ty::Adt(adt_def, args) => {
- if adt_def.is_enum() {
- self.fail(
- location,
- format!("Cannot get field offset from enum {current_ty:?}"),
- );
- return;
- }
-
- let Some(field) = adt_def.non_enum_variant().fields.get(field) else {
+ let Some(field) = adt_def.variant(variant).fields.get(field) else {
fail_out_of_bounds(self, location, field, current_ty);
return;
};
@@ -1093,7 +1092,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
_ => {
self.fail(
location,
- format!("Cannot get field offset from non-adt type {current_ty:?}"),
+ format!("Cannot get offset ({variant:?}, {field:?}) from type {current_ty:?}"),
);
return;
}
@@ -1211,11 +1210,11 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
}
let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind();
- if !matches!(pty, ty::Adt(..) | ty::Generator(..) | ty::Alias(ty::Opaque, ..)) {
+ if !matches!(pty, ty::Adt(..) | ty::Coroutine(..) | ty::Alias(ty::Opaque, ..)) {
self.fail(
location,
format!(
- "`SetDiscriminant` is only allowed on ADTs and generators, not {pty:?}"
+ "`SetDiscriminant` is only allowed on ADTs and coroutines, not {pty:?}"
),
);
}
@@ -1295,7 +1294,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. }
| TerminatorKind::InlineAsm { .. }
- | TerminatorKind::GeneratorDrop
+ | TerminatorKind::CoroutineDrop
| TerminatorKind::UnwindResume
| TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
diff --git a/compiler/rustc_const_eval/src/util/alignment.rs b/compiler/rustc_const_eval/src/util/alignment.rs
index 2e0643afb..8642dfccd 100644
--- a/compiler/rustc_const_eval/src/util/alignment.rs
+++ b/compiler/rustc_const_eval/src/util/alignment.rs
@@ -21,10 +21,18 @@ where
};
let ty = place.ty(local_decls, tcx).ty;
+ let unsized_tail = || tcx.struct_tail_with_normalize(ty, |ty| ty, || {});
match tcx.layout_of(param_env.and(ty)) {
- Ok(layout) if layout.align.abi <= pack => {
+ Ok(layout)
+ if layout.align.abi <= pack
+ && (layout.is_sized()
+ || matches!(unsized_tail().kind(), ty::Slice(..) | ty::Str)) =>
+ {
// If the packed alignment is greater or equal to the field alignment, the type won't be
// further disaligned.
+ // However we need to ensure the field is sized; for unsized fields, `layout.align` is
+ // just an approximation -- except when the unsized tail is a slice, where the alignment
+ // is fully determined by the type.
debug!(
"is_disaligned({:?}) - align = {}, packed = {}; not disaligned",
place,
diff --git a/compiler/rustc_const_eval/src/util/caller_location.rs b/compiler/rustc_const_eval/src/util/caller_location.rs
new file mode 100644
index 000000000..4a3cfd50b
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/caller_location.rs
@@ -0,0 +1,66 @@
+use rustc_hir::LangItem;
+use rustc_middle::mir;
+use rustc_middle::query::TyCtxtAt;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_span::symbol::Symbol;
+use rustc_type_ir::Mutability;
+
+use crate::const_eval::{mk_eval_cx, CanAccessStatics, CompileTimeEvalContext};
+use crate::interpret::*;
+
+/// Allocate a `const core::panic::Location` with the provided filename and line/column numbers.
+fn alloc_caller_location<'mir, 'tcx>(
+ ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
+ filename: Symbol,
+ line: u32,
+ col: u32,
+) -> MPlaceTy<'tcx> {
+ let loc_details = ecx.tcx.sess.opts.unstable_opts.location_detail;
+ // This can fail if rustc runs out of memory right here. Trying to emit an error would be
+ // pointless, since that would require allocating more memory than these short strings.
+ let file = if loc_details.file {
+ ecx.allocate_str(filename.as_str(), MemoryKind::CallerLocation, Mutability::Not).unwrap()
+ } else {
+ // FIXME: This creates a new allocation each time. It might be preferable to
+ // perform this allocation only once, and re-use the `MPlaceTy`.
+ // See https://github.com/rust-lang/rust/pull/89920#discussion_r730012398
+ ecx.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not).unwrap()
+ };
+ let line = if loc_details.line { Scalar::from_u32(line) } else { Scalar::from_u32(0) };
+ let col = if loc_details.column { Scalar::from_u32(col) } else { Scalar::from_u32(0) };
+
+ // Allocate memory for `CallerLocation` struct.
+ let loc_ty = ecx
+ .tcx
+ .type_of(ecx.tcx.require_lang_item(LangItem::PanicLocation, None))
+ .instantiate(*ecx.tcx, ecx.tcx.mk_args(&[ecx.tcx.lifetimes.re_erased.into()]));
+ let loc_layout = ecx.layout_of(loc_ty).unwrap();
+ let location = ecx.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
+
+ // Initialize fields.
+ ecx.write_immediate(file.to_ref(ecx), &ecx.project_field(&location, 0).unwrap())
+ .expect("writing to memory we just allocated cannot fail");
+ ecx.write_scalar(line, &ecx.project_field(&location, 1).unwrap())
+ .expect("writing to memory we just allocated cannot fail");
+ ecx.write_scalar(col, &ecx.project_field(&location, 2).unwrap())
+ .expect("writing to memory we just allocated cannot fail");
+
+ location
+}
+
+pub(crate) fn const_caller_location_provider(
+ tcx: TyCtxtAt<'_>,
+ file: Symbol,
+ line: u32,
+ col: u32,
+) -> mir::ConstValue<'_> {
+ trace!("const_caller_location: {}:{}:{}", file, line, col);
+ let mut ecx = mk_eval_cx(tcx.tcx, tcx.span, ty::ParamEnv::reveal_all(), CanAccessStatics::No);
+
+ let loc_place = alloc_caller_location(&mut ecx, file, line, col);
+ if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
+ bug!("intern_const_alloc_recursive should not error in this case")
+ }
+ mir::ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr(), &tcx))
+}
diff --git a/compiler/rustc_const_eval/src/util/mod.rs b/compiler/rustc_const_eval/src/util/mod.rs
index 040b3071e..1e58bd645 100644
--- a/compiler/rustc_const_eval/src/util/mod.rs
+++ b/compiler/rustc_const_eval/src/util/mod.rs
@@ -1,6 +1,7 @@
use rustc_middle::mir;
mod alignment;
+pub(crate) mod caller_location;
mod check_validity_requirement;
mod compare_types;
mod type_name;
diff --git a/compiler/rustc_const_eval/src/util/type_name.rs b/compiler/rustc_const_eval/src/util/type_name.rs
index a924afda6..a82b65b19 100644
--- a/compiler/rustc_const_eval/src/util/type_name.rs
+++ b/compiler/rustc_const_eval/src/util/type_name.rs
@@ -3,7 +3,7 @@ use rustc_hir::def_id::CrateNum;
use rustc_hir::definitions::DisambiguatedDefPathData;
use rustc_middle::ty::{
self,
- print::{PrettyPrinter, Print, Printer},
+ print::{PrettyPrinter, Print, PrintError, Printer},
GenericArg, GenericArgKind, Ty, TyCtxt,
};
use std::fmt::Write;
@@ -14,23 +14,15 @@ struct AbsolutePathPrinter<'tcx> {
}
impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
- type Error = std::fmt::Error;
-
- type Path = Self;
- type Region = Self;
- type Type = Self;
- type DynExistential = Self;
- type Const = Self;
-
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
- fn print_region(self, _region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
- Ok(self)
+ fn print_region(&mut self, _region: ty::Region<'_>) -> Result<(), PrintError> {
+ Ok(())
}
- fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+ fn print_type(&mut self, ty: Ty<'tcx>) -> Result<(), PrintError> {
match *ty.kind() {
// Types without identity.
ty::Bool
@@ -51,7 +43,7 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
// Placeholders (all printed as `_` to uniformize them).
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error(_) => {
write!(self, "_")?;
- Ok(self)
+ Ok(())
}
// Types with identity (print the module path).
@@ -59,53 +51,53 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
| ty::FnDef(def_id, args)
| ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, args, .. })
| ty::Closure(def_id, args)
- | ty::Generator(def_id, args, _) => self.print_def_path(def_id, args),
+ | ty::Coroutine(def_id, args, _) => self.print_def_path(def_id, args),
ty::Foreign(def_id) => self.print_def_path(def_id, &[]),
ty::Alias(ty::Weak, _) => bug!("type_name: unexpected weak projection"),
ty::Alias(ty::Inherent, _) => bug!("type_name: unexpected inherent projection"),
- ty::GeneratorWitness(..) => bug!("type_name: unexpected `GeneratorWitness`"),
+ ty::CoroutineWitness(..) => bug!("type_name: unexpected `CoroutineWitness`"),
}
}
- fn print_const(self, ct: ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
+ fn print_const(&mut self, ct: ty::Const<'tcx>) -> Result<(), PrintError> {
self.pretty_print_const(ct, false)
}
fn print_dyn_existential(
- self,
+ &mut self,
predicates: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
- ) -> Result<Self::DynExistential, Self::Error> {
+ ) -> Result<(), PrintError> {
self.pretty_print_dyn_existential(predicates)
}
- fn path_crate(mut self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
+ fn path_crate(&mut self, cnum: CrateNum) -> Result<(), PrintError> {
self.path.push_str(self.tcx.crate_name(cnum).as_str());
- Ok(self)
+ Ok(())
}
fn path_qualified(
- self,
+ &mut self,
self_ty: Ty<'tcx>,
trait_ref: Option<ty::TraitRef<'tcx>>,
- ) -> Result<Self::Path, Self::Error> {
+ ) -> Result<(), PrintError> {
self.pretty_path_qualified(self_ty, trait_ref)
}
fn path_append_impl(
- self,
- print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ &mut self,
+ print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>,
_disambiguated_data: &DisambiguatedDefPathData,
self_ty: Ty<'tcx>,
trait_ref: Option<ty::TraitRef<'tcx>>,
- ) -> Result<Self::Path, Self::Error> {
+ ) -> Result<(), PrintError> {
self.pretty_path_append_impl(
- |mut cx| {
- cx = print_prefix(cx)?;
+ |cx| {
+ print_prefix(cx)?;
cx.path.push_str("::");
- Ok(cx)
+ Ok(())
},
self_ty,
trait_ref,
@@ -113,29 +105,29 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
}
fn path_append(
- mut self,
- print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ &mut self,
+ print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>,
disambiguated_data: &DisambiguatedDefPathData,
- ) -> Result<Self::Path, Self::Error> {
- self = print_prefix(self)?;
+ ) -> Result<(), PrintError> {
+ print_prefix(self)?;
write!(self.path, "::{}", disambiguated_data.data).unwrap();
- Ok(self)
+ Ok(())
}
fn path_generic_args(
- mut self,
- print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ &mut self,
+ print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>,
args: &[GenericArg<'tcx>],
- ) -> Result<Self::Path, Self::Error> {
- self = print_prefix(self)?;
+ ) -> Result<(), PrintError> {
+ print_prefix(self)?;
let args =
args.iter().cloned().filter(|arg| !matches!(arg.unpack(), GenericArgKind::Lifetime(_)));
if args.clone().next().is_some() {
self.generic_delimiters(|cx| cx.comma_sep(args))
} else {
- Ok(self)
+ Ok(())
}
}
}
@@ -144,31 +136,31 @@ impl<'tcx> PrettyPrinter<'tcx> for AbsolutePathPrinter<'tcx> {
fn should_print_region(&self, _region: ty::Region<'_>) -> bool {
false
}
- fn comma_sep<T>(mut self, mut elems: impl Iterator<Item = T>) -> Result<Self, Self::Error>
+ fn comma_sep<T>(&mut self, mut elems: impl Iterator<Item = T>) -> Result<(), PrintError>
where
- T: Print<'tcx, Self, Output = Self, Error = Self::Error>,
+ T: Print<'tcx, Self>,
{
if let Some(first) = elems.next() {
- self = first.print(self)?;
+ first.print(self)?;
for elem in elems {
self.path.push_str(", ");
- self = elem.print(self)?;
+ elem.print(self)?;
}
}
- Ok(self)
+ Ok(())
}
fn generic_delimiters(
- mut self,
- f: impl FnOnce(Self) -> Result<Self, Self::Error>,
- ) -> Result<Self, Self::Error> {
+ &mut self,
+ f: impl FnOnce(&mut Self) -> Result<(), PrintError>,
+ ) -> Result<(), PrintError> {
write!(self, "<")?;
- self = f(self)?;
+ f(self)?;
write!(self, ">")?;
- Ok(self)
+ Ok(())
}
fn should_print_verbose(&self) -> bool {
@@ -185,5 +177,7 @@ impl Write for AbsolutePathPrinter<'_> {
}
pub fn type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> String {
- AbsolutePathPrinter { tcx, path: String::new() }.print_type(ty).unwrap().path
+ let mut printer = AbsolutePathPrinter { tcx, path: String::new() };
+ printer.print_type(ty).unwrap();
+ printer.path
}