summary refs log tree commit diff stats
path: root/compiler/rustc_const_eval
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/rustc_const_eval')
-rw-r--r--compiler/rustc_const_eval/Cargo.toml27
-rw-r--r--compiler/rustc_const_eval/src/const_eval/error.rs252
-rw-r--r--compiler/rustc_const_eval/src/const_eval/eval_queries.rs395
-rw-r--r--compiler/rustc_const_eval/src/const_eval/fn_queries.rs82
-rw-r--r--compiler/rustc_const_eval/src/const_eval/machine.rs527
-rw-r--r--compiler/rustc_const_eval/src/const_eval/mod.rs163
-rw-r--r--compiler/rustc_const_eval/src/const_eval/valtrees.rs475
-rw-r--r--compiler/rustc_const_eval/src/errors.rs89
-rw-r--r--compiler/rustc_const_eval/src/interpret/cast.rs365
-rw-r--r--compiler/rustc_const_eval/src/interpret/eval_context.rs1019
-rw-r--r--compiler/rustc_const_eval/src/interpret/intern.rs486
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics.rs696
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs130
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs196
-rw-r--r--compiler/rustc_const_eval/src/interpret/machine.rs525
-rw-r--r--compiler/rustc_const_eval/src/interpret/memory.rs1224
-rw-r--r--compiler/rustc_const_eval/src/interpret/mod.rs34
-rw-r--r--compiler/rustc_const_eval/src/interpret/operand.rs831
-rw-r--r--compiler/rustc_const_eval/src/interpret/operator.rs463
-rw-r--r--compiler/rustc_const_eval/src/interpret/place.rs900
-rw-r--r--compiler/rustc_const_eval/src/interpret/projection.rs391
-rw-r--r--compiler/rustc_const_eval/src/interpret/step.rs320
-rw-r--r--compiler/rustc_const_eval/src/interpret/terminator.rs671
-rw-r--r--compiler/rustc_const_eval/src/interpret/traits.rs59
-rw-r--r--compiler/rustc_const_eval/src/interpret/util.rs73
-rw-r--r--compiler/rustc_const_eval/src/interpret/validity.rs986
-rw-r--r--compiler/rustc_const_eval/src/interpret/visitor.rs534
-rw-r--r--compiler/rustc_const_eval/src/lib.rs68
-rw-r--r--compiler/rustc_const_eval/src/might_permit_raw_init.rs40
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/check.rs1032
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/mod.rs132
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/ops.rs771
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs123
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs384
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/resolver.rs384
-rw-r--r--compiler/rustc_const_eval/src/transform/mod.rs3
-rw-r--r--compiler/rustc_const_eval/src/transform/promote_consts.rs1066
-rw-r--r--compiler/rustc_const_eval/src/transform/validate.rs913
-rw-r--r--compiler/rustc_const_eval/src/util/aggregate.rs77
-rw-r--r--compiler/rustc_const_eval/src/util/alignment.rs63
-rw-r--r--compiler/rustc_const_eval/src/util/call_kind.rs146
-rw-r--r--compiler/rustc_const_eval/src/util/collect_writes.rs36
-rw-r--r--compiler/rustc_const_eval/src/util/find_self_call.rs36
-rw-r--r--compiler/rustc_const_eval/src/util/mod.rs10
44 files changed, 17197 insertions, 0 deletions
diff --git a/compiler/rustc_const_eval/Cargo.toml b/compiler/rustc_const_eval/Cargo.toml
new file mode 100644
index 000000000..32e8233a0
--- /dev/null
+++ b/compiler/rustc_const_eval/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "rustc_const_eval"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+tracing = "0.1"
+rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_infer = { path = "../rustc_infer" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_mir_dataflow = { path = "../rustc_mir_dataflow" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_session = { path = "../rustc_session" }
+rustc_target = { path = "../rustc_target" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
+rustc_span = { path = "../rustc_span" }
+rustc_type_ir = { path = "../rustc_type_ir" }
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
new file mode 100644
index 000000000..322bfd5ce
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -0,0 +1,252 @@
+use std::error::Error;
+use std::fmt;
+
+use rustc_errors::Diagnostic;
+use rustc_hir as hir;
+use rustc_middle::mir::AssertKind;
+use rustc_middle::ty::{layout::LayoutError, query::TyCtxtAt, ConstInt};
+use rustc_span::{Span, Symbol};
+
+use super::InterpCx;
+use crate::interpret::{
+ struct_error, ErrorHandled, FrameInfo, InterpError, InterpErrorInfo, Machine, MachineStopType, UnsupportedOpInfo,
+};
+
+/// The CTFE machine has some custom error kinds.
+#[derive(Clone, Debug)]
+pub enum ConstEvalErrKind {
+ NeedsRfc(String),
+ ConstAccessesStatic,
+ ModifiedGlobal,
+ AssertFailure(AssertKind<ConstInt>),
+ Panic { msg: Symbol, line: u32, col: u32, file: Symbol },
+ Abort(String),
+}
+
+impl MachineStopType for ConstEvalErrKind {
+ fn is_hard_err(&self) -> bool {
+ matches!(self, Self::Panic { .. })
+ }
+}
+
+// The errors become `MachineStop` with plain strings when being raised.
+// `ConstEvalErr` (in `librustc_middle/mir/interpret/error.rs`) knows to
+// handle these.
+impl<'tcx> Into<InterpErrorInfo<'tcx>> for ConstEvalErrKind {
+ fn into(self) -> InterpErrorInfo<'tcx> {
+ err_machine_stop!(self).into()
+ }
+}
+
+impl fmt::Display for ConstEvalErrKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use self::ConstEvalErrKind::*;
+ match *self {
+ NeedsRfc(ref msg) => {
+ write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
+ }
+ ConstAccessesStatic => write!(f, "constant accesses static"),
+ ModifiedGlobal => {
+ write!(f, "modifying a static's initial value from another static's initializer")
+ }
+ AssertFailure(ref msg) => write!(f, "{:?}", msg),
+ Panic { msg, line, col, file } => {
+ write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col)
+ }
+ Abort(ref msg) => write!(f, "{}", msg),
+ }
+ }
+}
+
+impl Error for ConstEvalErrKind {}
+
+/// When const-evaluation errors, this type is constructed with the resulting information,
+/// and then used to emit the error as a lint or hard error.
+#[derive(Debug)]
+pub struct ConstEvalErr<'tcx> {
+ pub span: Span,
+ pub error: InterpError<'tcx>,
+ pub stacktrace: Vec<FrameInfo<'tcx>>,
+}
+
+impl<'tcx> ConstEvalErr<'tcx> {
+ /// Turn an interpreter error into something to report to the user.
+ /// As a side-effect, if RUSTC_CTFE_BACKTRACE is set, this prints the backtrace.
+ /// Should be called only if the error is actually going to to be reported!
+ pub fn new<'mir, M: Machine<'mir, 'tcx>>(
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ error: InterpErrorInfo<'tcx>,
+ span: Option<Span>,
+ ) -> ConstEvalErr<'tcx>
+ where
+ 'tcx: 'mir,
+ {
+ error.print_backtrace();
+ let mut stacktrace = ecx.generate_stacktrace();
+ // Filter out `requires_caller_location` frames.
+ stacktrace.retain(|frame| !frame.instance.def.requires_caller_location(*ecx.tcx));
+ // If `span` is missing, use topmost remaining frame, or else the "root" span from `ecx.tcx`.
+ let span = span.or_else(|| stacktrace.first().map(|f| f.span)).unwrap_or(ecx.tcx.span);
+ ConstEvalErr { error: error.into_kind(), stacktrace, span }
+ }
+
+ pub fn struct_error(
+ &self,
+ tcx: TyCtxtAt<'tcx>,
+ message: &str,
+ decorate: impl FnOnce(&mut Diagnostic),
+ ) -> ErrorHandled {
+ self.struct_generic(tcx, message, decorate, None)
+ }
+
+ pub fn report_as_error(&self, tcx: TyCtxtAt<'tcx>, message: &str) -> ErrorHandled {
+ self.struct_error(tcx, message, |_| {})
+ }
+
+ pub fn report_as_lint(
+ &self,
+ tcx: TyCtxtAt<'tcx>,
+ message: &str,
+ lint_root: hir::HirId,
+ span: Option<Span>,
+ ) -> ErrorHandled {
+ self.struct_generic(
+ tcx,
+ message,
+ |lint: &mut Diagnostic| {
+ // Apply the span.
+ if let Some(span) = span {
+ let primary_spans = lint.span.primary_spans().to_vec();
+ // point at the actual error as the primary span
+ lint.replace_span_with(span);
+ // point to the `const` statement as a secondary span
+ // they don't have any label
+ for sp in primary_spans {
+ if sp != span {
+ lint.span_label(sp, "");
+ }
+ }
+ }
+ },
+ Some(lint_root),
+ )
+ }
+
+ /// Create a diagnostic for this const eval error.
+ ///
+ /// Sets the message passed in via `message` and adds span labels with detailed error
+ /// information before handing control back to `decorate` to do any final annotations,
+ /// after which the diagnostic is emitted.
+ ///
+ /// If `lint_root.is_some()` report it as a lint, else report it as a hard error.
+ /// (Except that for some errors, we ignore all that -- see `must_error` below.)
+ #[instrument(skip(self, tcx, decorate, lint_root), level = "debug")]
+ fn struct_generic(
+ &self,
+ tcx: TyCtxtAt<'tcx>,
+ message: &str,
+ decorate: impl FnOnce(&mut Diagnostic),
+ lint_root: Option<hir::HirId>,
+ ) -> ErrorHandled {
+ let finish = |err: &mut Diagnostic, span_msg: Option<String>| {
+ trace!("reporting const eval failure at {:?}", self.span);
+ if let Some(span_msg) = span_msg {
+ err.span_label(self.span, span_msg);
+ }
+ // Add some more context for select error types.
+ match self.error {
+ InterpError::Unsupported(
+ UnsupportedOpInfo::ReadPointerAsBytes
+ | UnsupportedOpInfo::PartialPointerOverwrite(_)
+ ) => {
+ err.help("this code performed an operation that depends on the underlying bytes representing a pointer");
+ err.help("the absolute address of a pointer is not known at compile-time, so such operations are not supported");
+ }
+ _ => {}
+ }
+ // Add spans for the stacktrace. Don't print a single-line backtrace though.
+ if self.stacktrace.len() > 1 {
+ // Helper closure to print duplicated lines.
+ let mut flush_last_line = |last_frame, times| {
+ if let Some((line, span)) = last_frame {
+ err.span_label(span, &line);
+ // Don't print [... additional calls ...] if the number of lines is small
+ if times < 3 {
+ for _ in 0..times {
+ err.span_label(span, &line);
+ }
+ } else {
+ err.span_label(
+ span,
+ format!("[... {} additional calls {} ...]", times, &line),
+ );
+ }
+ }
+ };
+
+ let mut last_frame = None;
+ let mut times = 0;
+ for frame_info in &self.stacktrace {
+ let frame = (frame_info.to_string(), frame_info.span);
+ if last_frame.as_ref() == Some(&frame) {
+ times += 1;
+ } else {
+ flush_last_line(last_frame, times);
+ last_frame = Some(frame);
+ times = 0;
+ }
+ }
+ flush_last_line(last_frame, times);
+ }
+ // Let the caller attach any additional information it wants.
+ decorate(err);
+ };
+
+ debug!("self.error: {:?}", self.error);
+ // Special handling for certain errors
+ match &self.error {
+ // Don't emit a new diagnostic for these errors
+ err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
+ return ErrorHandled::TooGeneric;
+ }
+ err_inval!(AlreadyReported(error_reported)) => {
+ return ErrorHandled::Reported(*error_reported);
+ }
+ err_inval!(Layout(LayoutError::SizeOverflow(_))) => {
+ // We must *always* hard error on these, even if the caller wants just a lint.
+ // The `message` makes little sense here, this is a more serious error than the
+ // caller thinks anyway.
+ // See <https://github.com/rust-lang/rust/pull/63152>.
+ let mut err = struct_error(tcx, &self.error.to_string());
+ finish(&mut err, None);
+ return ErrorHandled::Reported(err.emit());
+ }
+ _ => {}
+ };
+
+ let err_msg = self.error.to_string();
+
+ // Regular case - emit a lint.
+ if let Some(lint_root) = lint_root {
+ // Report as lint.
+ let hir_id =
+ self.stacktrace.iter().rev().find_map(|frame| frame.lint_root).unwrap_or(lint_root);
+ tcx.struct_span_lint_hir(
+ rustc_session::lint::builtin::CONST_ERR,
+ hir_id,
+ tcx.span,
+ |lint| {
+ let mut lint = lint.build(message);
+ finish(&mut lint, Some(err_msg));
+ lint.emit();
+ },
+ );
+ ErrorHandled::Linted
+ } else {
+ // Report as hard error.
+ let mut err = struct_error(tcx, message);
+ finish(&mut err, Some(err_msg));
+ ErrorHandled::Reported(err.emit())
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
new file mode 100644
index 000000000..975fb4b22
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -0,0 +1,395 @@
+use super::{CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr};
+use crate::interpret::eval_nullary_intrinsic;
+use crate::interpret::{
+ intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId,
+ Immediate, InternKind, InterpCx, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking,
+ ScalarMaybeUninit, StackPopCleanup, InterpError,
+};
+
+use rustc_hir::def::DefKind;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::mir::pretty::display_allocation;
+use rustc_middle::traits::Reveal;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, subst::Subst, TyCtxt};
+use rustc_span::source_map::Span;
+use rustc_target::abi::{self, Abi};
+use std::borrow::Cow;
+use std::convert::TryInto;
+
+const NOTE_ON_UNDEFINED_BEHAVIOR_ERROR: &str = "The rules on what exactly is undefined behavior aren't clear, \
+ so this check might be overzealous. Please open an issue on the rustc \
+ repository if you believe it should not be considered undefined behavior.";
+
+// Returns a pointer to where the result lives
+fn eval_body_using_ecx<'mir, 'tcx>(
+ ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
+ cid: GlobalId<'tcx>,
+ body: &'mir mir::Body<'tcx>,
+) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
+ debug!("eval_body_using_ecx: {:?}, {:?}", cid, ecx.param_env);
+ let tcx = *ecx.tcx;
+ assert!(
+ cid.promoted.is_some()
+ || matches!(
+ ecx.tcx.def_kind(cid.instance.def_id()),
+ DefKind::Const
+ | DefKind::Static(_)
+ | DefKind::ConstParam
+ | DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::AssocConst
+ ),
+ "Unexpected DefKind: {:?}",
+ ecx.tcx.def_kind(cid.instance.def_id())
+ );
+ let layout = ecx.layout_of(body.bound_return_ty().subst(tcx, cid.instance.substs))?;
+ assert!(!layout.is_unsized());
+ let ret = ecx.allocate(layout, MemoryKind::Stack)?;
+
+ trace!(
+ "eval_body_using_ecx: pushing stack frame for global: {}{}",
+ with_no_trimmed_paths!(ty::tls::with(|tcx| tcx.def_path_str(cid.instance.def_id()))),
+ cid.promoted.map_or_else(String::new, |p| format!("::promoted[{:?}]", p))
+ );
+
+ ecx.push_stack_frame(
+ cid.instance,
+ body,
+ &ret.into(),
+ StackPopCleanup::Root { cleanup: false },
+ )?;
+
+ // The main interpreter loop.
+ ecx.run()?;
+
+ // Intern the result
+ let intern_kind = if cid.promoted.is_some() {
+ InternKind::Promoted
+ } else {
+ match tcx.static_mutability(cid.instance.def_id()) {
+ Some(m) => InternKind::Static(m),
+ None => InternKind::Constant,
+ }
+ };
+ intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
+
+ debug!("eval_body_using_ecx done: {:?}", *ret);
+ Ok(ret)
+}
+
+/// The `InterpCx` is only meant to be used to do field and index projections into constants for
+/// `simd_shuffle` and const patterns in match arms.
+///
+/// The function containing the `match` that is currently being analyzed may have generic bounds
+/// that inform us about the generic bounds of the constant. E.g., using an associated constant
+/// of a function's generic parameter will require knowledge about the bounds on the generic
+/// parameter. These bounds are passed to `mk_eval_cx` via the `ParamEnv` argument.
+pub(super) fn mk_eval_cx<'mir, 'tcx>(
+ tcx: TyCtxt<'tcx>,
+ root_span: Span,
+ param_env: ty::ParamEnv<'tcx>,
+ can_access_statics: bool,
+) -> CompileTimeEvalContext<'mir, 'tcx> {
+ debug!("mk_eval_cx: {:?}", param_env);
+ InterpCx::new(
+ tcx,
+ root_span,
+ param_env,
+ CompileTimeInterpreter::new(tcx.const_eval_limit(), can_access_statics),
+ )
+}
+
+/// This function converts an interpreter value into a constant that is meant for use in the
+/// type system.
+#[instrument(skip(ecx), level = "debug")]
+pub(super) fn op_to_const<'tcx>(
+ ecx: &CompileTimeEvalContext<'_, 'tcx>,
+ op: &OpTy<'tcx>,
+) -> ConstValue<'tcx> {
+ // We do not have value optimizations for everything.
+ // Only scalars and slices, since they are very common.
+ // Note that further down we turn scalars of uninitialized bits back to `ByRef`. These can result
+ // from scalar unions that are initialized with one of their zero sized variants. We could
+ // instead allow `ConstValue::Scalar` to store `ScalarMaybeUninit`, but that would affect all
+ // the usual cases of extracting e.g. a `usize`, without there being a real use case for the
+ // `Undef` situation.
+ let try_as_immediate = match op.layout.abi {
+ Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
+ Abi::ScalarPair(..) => match op.layout.ty.kind() {
+ ty::Ref(_, inner, _) => match *inner.kind() {
+ ty::Slice(elem) => elem == ecx.tcx.types.u8,
+ ty::Str => true,
+ _ => false,
+ },
+ _ => false,
+ },
+ _ => false,
+ };
+ let immediate = if try_as_immediate {
+ Err(ecx.read_immediate(op).expect("normalization works on validated constants"))
+ } else {
+ // It is guaranteed that any non-slice scalar pair is actually ByRef here.
+ // When we come back from raw const eval, we are always by-ref. The only way our op here is
+ // by-val is if we are in destructure_mir_constant, i.e., if this is (a field of) something that we
+ // "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
+ // structs containing such.
+ op.try_as_mplace()
+ };
+
+ debug!(?immediate);
+
+ // We know `offset` is relative to the allocation, so we can use `into_parts`.
+ let to_const_value = |mplace: &MPlaceTy<'_>| {
+ debug!("to_const_value(mplace: {:?})", mplace);
+ match mplace.ptr.into_parts() {
+ (Some(alloc_id), offset) => {
+ let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
+ ConstValue::ByRef { alloc, offset }
+ }
+ (None, offset) => {
+ assert!(mplace.layout.is_zst());
+ assert_eq!(
+ offset.bytes() % mplace.layout.align.abi.bytes(),
+ 0,
+ "this MPlaceTy must come from a validated constant, thus we can assume the \
+ alignment is correct",
+ );
+ ConstValue::ZeroSized
+ }
+ }
+ };
+ match immediate {
+ Ok(ref mplace) => to_const_value(mplace),
+ // see comment on `let try_as_immediate` above
+ Err(imm) => match *imm {
+ _ if imm.layout.is_zst() => ConstValue::ZeroSized,
+ Immediate::Scalar(x) => match x {
+ ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s),
+ ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()),
+ },
+ Immediate::ScalarPair(a, b) => {
+ debug!("ScalarPair(a: {:?}, b: {:?})", a, b);
+ // We know `offset` is relative to the allocation, so we can use `into_parts`.
+ let (data, start) = match a.to_pointer(ecx).unwrap().into_parts() {
+ (Some(alloc_id), offset) => {
+ (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
+ }
+ (None, _offset) => (
+ ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
+ b"" as &[u8],
+ )),
+ 0,
+ ),
+ };
+ let len = b.to_machine_usize(ecx).unwrap();
+ let start = start.try_into().unwrap();
+ let len: usize = len.try_into().unwrap();
+ ConstValue::Slice { data, start, end: start + len }
+ }
+ Immediate::Uninit => to_const_value(&op.assert_mem_place()),
+ },
+ }
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub(crate) fn turn_into_const_value<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ constant: ConstAlloc<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ConstValue<'tcx> {
+ let cid = key.value;
+ let def_id = cid.instance.def.def_id();
+ let is_static = tcx.is_static(def_id);
+ let ecx = mk_eval_cx(tcx, tcx.def_span(key.value.instance.def_id()), key.param_env, is_static);
+
+ let mplace = ecx.raw_const_to_mplace(constant).expect(
+ "can only fail if layout computation failed, \
+ which should have given a good error before ever invoking this function",
+ );
+ assert!(
+ !is_static || cid.promoted.is_some(),
+ "the `eval_to_const_value_raw` query should not be used for statics, use `eval_to_allocation` instead"
+ );
+
+ // Turn this into a proper constant.
+ let const_val = op_to_const(&ecx, &mplace.into());
+ debug!(?const_val);
+
+ const_val
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub fn eval_to_const_value_raw_provider<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ::rustc_middle::mir::interpret::EvalToConstValueResult<'tcx> {
+ assert!(key.param_env.is_const());
+ // see comment in eval_to_allocation_raw_provider for what we're doing here
+ if key.param_env.reveal() == Reveal::All {
+ let mut key = key;
+ key.param_env = key.param_env.with_user_facing();
+ match tcx.eval_to_const_value_raw(key) {
+ // try again with reveal all as requested
+ Err(ErrorHandled::TooGeneric) => {}
+ // deduplicate calls
+ other => return other,
+ }
+ }
+
+ // We call `const_eval` for zero arg intrinsics, too, in order to cache their value.
+ // Catch such calls and evaluate them instead of trying to load a constant's MIR.
+ if let ty::InstanceDef::Intrinsic(def_id) = key.value.instance.def {
+ let ty = key.value.instance.ty(tcx, key.param_env);
+ let ty::FnDef(_, substs) = ty.kind() else {
+ bug!("intrinsic with type {:?}", ty);
+ };
+ return eval_nullary_intrinsic(tcx, key.param_env, def_id, substs).map_err(|error| {
+ let span = tcx.def_span(def_id);
+ let error = ConstEvalErr { error: error.into_kind(), stacktrace: vec![], span };
+ error.report_as_error(tcx.at(span), "could not evaluate nullary intrinsic")
+ });
+ }
+
+ tcx.eval_to_allocation_raw(key).map(|val| turn_into_const_value(tcx, val, key))
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub fn eval_to_allocation_raw_provider<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> {
+ assert!(key.param_env.is_const());
+ // Because the constant is computed twice (once per value of `Reveal`), we are at risk of
+ // reporting the same error twice here. To resolve this, we check whether we can evaluate the
+ // constant in the more restrictive `Reveal::UserFacing`, which most likely already was
+ // computed. For a large percentage of constants that will already have succeeded. Only
+ // associated constants of generic functions will fail due to not enough monomorphization
+ // information being available.
+
+ // In case we fail in the `UserFacing` variant, we just do the real computation.
+ if key.param_env.reveal() == Reveal::All {
+ let mut key = key;
+ key.param_env = key.param_env.with_user_facing();
+ match tcx.eval_to_allocation_raw(key) {
+ // try again with reveal all as requested
+ Err(ErrorHandled::TooGeneric) => {}
+ // deduplicate calls
+ other => return other,
+ }
+ }
+ if cfg!(debug_assertions) {
+ // Make sure we format the instance even if we do not print it.
+ // This serves as a regression test against an ICE on printing.
+ // The next two lines concatenated contain some discussion:
+ // https://rust-lang.zulipchat.com/#narrow/stream/146212-t-compiler.2Fconst-eval/
+ // subject/anon_const_instance_printing/near/135980032
+ let instance = with_no_trimmed_paths!(key.value.instance.to_string());
+ trace!("const eval: {:?} ({})", key, instance);
+ }
+
+ let cid = key.value;
+ let def = cid.instance.def.with_opt_param();
+ let is_static = tcx.is_static(def.did);
+
+ let mut ecx = InterpCx::new(
+ tcx,
+ tcx.def_span(def.did),
+ key.param_env,
+ // Statics (and promoteds inside statics) may access other statics, because unlike consts
+ // they do not have to behave "as if" they were evaluated at runtime.
+ CompileTimeInterpreter::new(tcx.const_eval_limit(), /*can_access_statics:*/ is_static),
+ );
+
+ let res = ecx.load_mir(cid.instance.def, cid.promoted);
+ match res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, &body)) {
+ Err(error) => {
+ let err = ConstEvalErr::new(&ecx, error, None);
+ // Some CTFE errors raise just a lint, not a hard error; see
+ // <https://github.com/rust-lang/rust/issues/71800>.
+ let is_hard_err = if let Some(def) = def.as_local() {
+ // (Associated) consts only emit a lint, since they might be unused.
+ !matches!(tcx.def_kind(def.did.to_def_id()), DefKind::Const | DefKind::AssocConst)
+ // check if the inner InterpError is hard
+ || err.error.is_hard_err()
+ } else {
+ // use of broken constant from other crate: always an error
+ true
+ };
+
+ if is_hard_err {
+ let msg = if is_static {
+ Cow::from("could not evaluate static initializer")
+ } else {
+ // If the current item has generics, we'd like to enrich the message with the
+ // instance and its substs: to show the actual compile-time values, in addition to
+ // the expression, leading to the const eval error.
+ let instance = &key.value.instance;
+ if !instance.substs.is_empty() {
+ let instance = with_no_trimmed_paths!(instance.to_string());
+ let msg = format!("evaluation of `{}` failed", instance);
+ Cow::from(msg)
+ } else {
+ Cow::from("evaluation of constant value failed")
+ }
+ };
+
+ Err(err.report_as_error(ecx.tcx.at(err.span), &msg))
+ } else {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def.as_local().unwrap().did);
+ Err(err.report_as_lint(
+ tcx.at(tcx.def_span(def.did)),
+ "any use of this value will cause an error",
+ hir_id,
+ Some(err.span),
+ ))
+ }
+ }
+ Ok(mplace) => {
+ // Since evaluation had no errors, validate the resulting constant.
+ // This is a separate `try` block to provide more targeted error reporting.
+ let validation = try {
+ let mut ref_tracking = RefTracking::new(mplace);
+ let mut inner = false;
+ while let Some((mplace, path)) = ref_tracking.todo.pop() {
+ let mode = match tcx.static_mutability(cid.instance.def_id()) {
+ Some(_) if cid.promoted.is_some() => {
+ // Promoteds in statics are allowed to point to statics.
+ CtfeValidationMode::Const { inner, allow_static_ptrs: true }
+ }
+ Some(_) => CtfeValidationMode::Regular, // a `static`
+ None => CtfeValidationMode::Const { inner, allow_static_ptrs: false },
+ };
+ ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)?;
+ inner = true;
+ }
+ };
+ let alloc_id = mplace.ptr.provenance.unwrap();
+ if let Err(error) = validation {
+ // Validation failed, report an error. This is always a hard error.
+ let err = ConstEvalErr::new(&ecx, error, None);
+ Err(err.struct_error(
+ ecx.tcx,
+ "it is undefined behavior to use this value",
+ |diag| {
+ if matches!(err.error, InterpError::UndefinedBehavior(_)) {
+ diag.note(NOTE_ON_UNDEFINED_BEHAVIOR_ERROR);
+ }
+ diag.note(&format!(
+ "the raw bytes of the constant ({}",
+ display_allocation(
+ *ecx.tcx,
+ ecx.tcx.global_alloc(alloc_id).unwrap_memory().inner()
+ )
+ ));
+ },
+ ))
+ } else {
+ // Convert to raw constant
+ Ok(ConstAlloc { alloc_id, ty: mplace.layout.ty })
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
new file mode 100644
index 000000000..f1674d04f
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -0,0 +1,82 @@
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{DefIdTree, TyCtxt};
+use rustc_span::symbol::Symbol;
+
+/// Whether the `def_id` is an unstable const fn and what feature gate is necessary to enable it
+pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> {
+ if tcx.is_const_fn_raw(def_id) {
+ let const_stab = tcx.lookup_const_stability(def_id)?;
+ if const_stab.is_const_unstable() { Some(const_stab.feature) } else { None }
+ } else {
+ None
+ }
+}
+
+pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+ let parent_id = tcx.local_parent(def_id);
+ tcx.def_kind(parent_id) == DefKind::Impl && tcx.constness(parent_id) == hir::Constness::Const
+}
+
+/// Checks whether an item is considered to be `const`. If it is a constructor, it is const. If
+/// it is a trait impl/function, return if it has a `const` modifier. If it is an intrinsic,
+/// report whether said intrinsic has a `rustc_const_{un,}stable` attribute. Otherwise, return
+/// `Constness::NotConst`.
+fn constness(tcx: TyCtxt<'_>, def_id: DefId) -> hir::Constness {
+ let def_id = def_id.expect_local();
+ let node = tcx.hir().get_by_def_id(def_id);
+
+ match node {
+ hir::Node::Ctor(_) => hir::Constness::Const,
+ hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(impl_), .. }) => impl_.constness,
+ hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) => {
+ // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
+ // foreign items cannot be evaluated at compile-time.
+ let is_const = if tcx.is_intrinsic(def_id) {
+ tcx.lookup_const_stability(def_id).is_some()
+ } else {
+ false
+ };
+ if is_const { hir::Constness::Const } else { hir::Constness::NotConst }
+ }
+ _ => {
+ if let Some(fn_kind) = node.fn_kind() {
+ if fn_kind.constness() == hir::Constness::Const {
+ return hir::Constness::Const;
+ }
+
+ // If the function itself is not annotated with `const`, it may still be a `const fn`
+ // if it resides in a const trait impl.
+ let is_const = is_parent_const_impl_raw(tcx, def_id);
+ if is_const { hir::Constness::Const } else { hir::Constness::NotConst }
+ } else {
+ hir::Constness::NotConst
+ }
+ }
+ }
+}
+
+fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ tcx.is_const_fn(def_id)
+ && match tcx.lookup_const_stability(def_id) {
+ Some(stab) => {
+ if cfg!(debug_assertions) && stab.promotable {
+ let sig = tcx.fn_sig(def_id);
+ assert_eq!(
+ sig.unsafety(),
+ hir::Unsafety::Normal,
+ "don't mark const unsafe fns as promotable",
+ // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682
+ );
+ }
+ stab.promotable
+ }
+ None => false,
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { constness, is_promotable_const_fn, ..*providers };
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
new file mode 100644
index 000000000..fc2e6652a
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -0,0 +1,527 @@
+use rustc_hir::def::DefKind;
+use rustc_middle::mir;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use std::borrow::Borrow;
+use std::collections::hash_map::Entry;
+use std::hash::Hash;
+
+use rustc_data_structures::fx::FxHashMap;
+use std::fmt;
+
+use rustc_ast::Mutability;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::AssertMessage;
+use rustc_session::Limit;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_target::abi::{Align, Size};
+use rustc_target::spec::abi::Abi as CallAbi;
+
+use crate::interpret::{
+ self, compile_time_machine, AllocId, ConstAllocation, Frame, ImmTy, InterpCx, InterpResult,
+ OpTy, PlaceTy, Pointer, Scalar, StackPopUnwind,
+};
+
+use super::error::*;
+
+impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> {
+    /// "Intercept" a function call to a panic-related function
+    /// because we have something special to do for it.
+    /// If this returns successfully (`Ok`), the function should just be evaluated normally.
+    ///
+    /// Returns `Ok(Some(instance))` when the call should be redirected to a
+    /// different (const-evaluable) instance, `Ok(None)` when no special
+    /// handling applies, and `Err` to abort evaluation (e.g. for panics).
+    fn hook_special_const_fn(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx>],
+    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
+        // All `#[rustc_do_not_const_check]` functions should be hooked here.
+        let def_id = instance.def_id();
+
+        if Some(def_id) == self.tcx.lang_items().const_eval_select() {
+            // redirect to const_eval_select_ct
+            if let Some(const_eval_select) = self.tcx.lang_items().const_eval_select_ct() {
+                return Ok(Some(
+                    ty::Instance::resolve(
+                        *self.tcx,
+                        ty::ParamEnv::reveal_all(),
+                        const_eval_select,
+                        instance.substs,
+                    )
+                    .unwrap()
+                    .unwrap(),
+                ));
+            }
+        } else if Some(def_id) == self.tcx.lang_items().panic_display()
+            || Some(def_id) == self.tcx.lang_items().begin_panic_fn()
+        {
+            // &str or &&str
+            assert!(args.len() == 1);
+
+            // Peel references until we reach the actual `str` payload.
+            let mut msg_place = self.deref_operand(&args[0])?;
+            while msg_place.layout.ty.is_ref() {
+                msg_place = self.deref_operand(&msg_place.into())?;
+            }
+
+            // Turn the panic into a structured const-eval error carrying the
+            // message and the (untracked) caller location.
+            let msg = Symbol::intern(self.read_str(&msg_place)?);
+            let span = self.find_closest_untracked_caller_location();
+            let (file, line, col) = self.location_triple_for_span(span);
+            return Err(ConstEvalErrKind::Panic { msg, file, line, col }.into());
+        } else if Some(def_id) == self.tcx.lang_items().panic_fmt() {
+            // For panic_fmt, call const_panic_fmt instead.
+            if let Some(const_panic_fmt) = self.tcx.lang_items().const_panic_fmt() {
+                return Ok(Some(
+                    ty::Instance::resolve(
+                        *self.tcx,
+                        ty::ParamEnv::reveal_all(),
+                        const_panic_fmt,
+                        self.tcx.intern_substs(&[]),
+                    )
+                    .unwrap()
+                    .unwrap(),
+                ));
+            }
+        }
+        Ok(None)
+    }
+}
+
+/// Extra machine state for CTFE, and the Machine instance
+pub struct CompileTimeInterpreter<'mir, 'tcx> {
+    /// For now, the number of terminators that can be evaluated before we throw a resource
+    /// exhaustion error.
+    ///
+    /// Setting this to `0` disables the limit and allows the interpreter to run forever.
+    pub steps_remaining: usize,
+
+    /// The virtual call stack.
+    pub(crate) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
+
+    /// We need to make sure consts never point to anything mutable, even recursively. That is
+    /// relied on for pattern matching on consts with references.
+    /// To achieve this, two pieces have to work together:
+    /// * Interning makes everything outside of statics immutable.
+    /// * Pointers to allocations inside of statics can never leak outside, to a non-static global.
+    /// This boolean here controls the second part.
+    pub(super) can_access_statics: bool,
+}
+
+impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> {
+    /// Creates a fresh CTFE machine with the given step limit and static-access
+    /// policy (statics are readable for `static` initializers, but not `const`s).
+    pub(crate) fn new(const_eval_limit: Limit, can_access_statics: bool) -> Self {
+        CompileTimeInterpreter {
+            steps_remaining: const_eval_limit.0,
+            stack: Vec::new(),
+            can_access_statics,
+        }
+    }
+}
+
+/// The CTFE machine stores its allocations in a plain hash map; this adapter
+/// implements the interpreter's `AllocMap` interface on top of `FxHashMap`.
+impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxHashMap<K, V> {
+    #[inline(always)]
+    fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
+    where
+        K: Borrow<Q>,
+    {
+        FxHashMap::contains_key(self, k)
+    }
+
+    #[inline(always)]
+    fn insert(&mut self, k: K, v: V) -> Option<V> {
+        FxHashMap::insert(self, k, v)
+    }
+
+    #[inline(always)]
+    fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
+    where
+        K: Borrow<Q>,
+    {
+        FxHashMap::remove(self, k)
+    }
+
+    #[inline(always)]
+    fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
+        self.iter().filter_map(move |(k, v)| f(k, &*v)).collect()
+    }
+
+    #[inline(always)]
+    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
+        match self.get(&k) {
+            Some(v) => Ok(v),
+            None => {
+                // In CTFE, reads never lazily create allocations: `vacant` may
+                // produce an error, but a successful insert is a machine bug.
+                vacant()?;
+                bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading")
+            }
+        }
+    }
+
+    #[inline(always)]
+    fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
+        match self.entry(k) {
+            Entry::Occupied(e) => Ok(e.into_mut()),
+            Entry::Vacant(e) => {
+                let v = vacant()?;
+                Ok(e.insert(v))
+            }
+        }
+    }
+}
+
+/// Shorthand for an interpreter context driven by the CTFE machine.
+pub(crate) type CompileTimeEvalContext<'mir, 'tcx> =
+    InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>;
+
+/// Machine-specific memory kinds for CTFE.
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub enum MemoryKind {
+    /// Memory allocated via the `const_allocate` intrinsic.
+    Heap,
+}
+
+/// Human-readable names for CTFE memory kinds, used in diagnostics.
+impl fmt::Display for MemoryKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            MemoryKind::Heap => write!(f, "heap allocation"),
+        }
+    }
+}
+
+/// CTFE heap allocations must be deallocated (or interned) before evaluation
+/// ends; leaking them is an error.
+impl interpret::MayLeak for MemoryKind {
+    #[inline(always)]
+    fn may_leak(self) -> bool {
+        match self {
+            MemoryKind::Heap => false,
+        }
+    }
+}
+
+/// `!` is used where no extra function-pointer kinds exist; this impl can
+/// never actually be called.
+impl interpret::MayLeak for ! {
+    #[inline(always)]
+    fn may_leak(self) -> bool {
+        // `self` is uninhabited
+        self
+    }
+}
+
+impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
+    /// Whether two scalars are *guaranteed* equal at runtime. Must be
+    /// conservative: returning `true` is only sound if runtime comparison
+    /// would definitely agree.
+    fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
+        Ok(match (a, b) {
+            // Comparisons between integers are always known.
+            (Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
+            // Equality with integers can never be known for sure.
+            (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => false,
+            // FIXME: return `true` for when both sides are the same pointer, *except* that
+            // some things (like functions and vtables) do not have stable addresses
+            // so we need to be careful around them (see e.g. #73722).
+            (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
+        })
+    }
+
+    /// Whether two scalars are *guaranteed* unequal at runtime; same
+    /// conservativeness requirement as `guaranteed_eq`.
+    fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
+        Ok(match (a, b) {
+            // Comparisons between integers are always known.
+            (Scalar::Int(_), Scalar::Int(_)) => a != b,
+            // Comparisons of abstract pointers with null pointers are known if the pointer
+            // is in bounds, because if they are in bounds, the pointer can't be null.
+            // Inequality with integers other than null can never be known for sure.
+            (Scalar::Int(int), ptr @ Scalar::Ptr(..))
+            | (ptr @ Scalar::Ptr(..), Scalar::Int(int)) => {
+                int.is_null() && !self.scalar_may_be_null(ptr)?
+            }
+            // FIXME: return `true` for at least some comparisons where we can reliably
+            // determine the result of runtime inequality tests at compile-time.
+            // Examples include comparison of addresses in different static items.
+            (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
+        })
+    }
+}
+
+/// The `Machine` hooks that specialize the generic interpreter for CTFE:
+/// which functions may be called, which intrinsics exist, how panics and
+/// resource limits are handled, and how global memory may be accessed.
+impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, 'tcx> {
+    compile_time_machine!(<'mir, 'tcx>);
+
+    type MemoryKind = MemoryKind;
+
+    const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error
+
+    /// Loads the MIR body to evaluate for `instance`, using the CTFE-specific
+    /// (unoptimized-for-const) MIR where available.
+    fn load_mir(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        instance: ty::InstanceDef<'tcx>,
+    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+        match instance {
+            ty::InstanceDef::Item(def) => {
+                if ecx.tcx.is_ctfe_mir_available(def.did) {
+                    Ok(ecx.tcx.mir_for_ctfe_opt_const_arg(def))
+                } else if ecx.tcx.def_kind(def.did) == DefKind::AssocConst {
+                    // Missing MIR for an assoc const indicates an earlier error;
+                    // delay the ICE so the real diagnostic is emitted first.
+                    let guar = ecx.tcx.sess.delay_span_bug(
+                        rustc_span::DUMMY_SP,
+                        "This is likely a const item that is missing from its impl",
+                    );
+                    throw_inval!(AlreadyReported(guar));
+                } else {
+                    let path = ecx.tcx.def_path_str(def.did);
+                    Err(ConstEvalErrKind::NeedsRfc(format!("calling extern function `{}`", path))
+                        .into())
+                }
+            }
+            // Shims (drop glue etc.) use the generic instance MIR.
+            _ => Ok(ecx.tcx.instance_mir(instance)),
+        }
+    }
+
+    /// Resolves a function call: rejects non-const callees, applies the
+    /// panic/const_eval_select hooks, and otherwise returns the MIR to run.
+    fn find_mir_or_eval_fn(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        _abi: CallAbi,
+        args: &[OpTy<'tcx>],
+        _dest: &PlaceTy<'tcx>,
+        _ret: Option<mir::BasicBlock>,
+        _unwind: StackPopUnwind, // unwinding is not supported in consts
+    ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>> {
+        debug!("find_mir_or_eval_fn: {:?}", instance);
+
+        // Only check non-glue functions
+        if let ty::InstanceDef::Item(def) = instance.def {
+            // Execution might have wandered off into other crates, so we cannot do a stability-
+            // sensitive check here. But we can at least rule out functions that are not const
+            // at all.
+            if !ecx.tcx.is_const_fn_raw(def.did) {
+                // allow calling functions inside a trait marked with #[const_trait].
+                if !ecx.tcx.is_const_default_method(def.did) {
+                    // We certainly do *not* want to actually call the fn
+                    // though, so be sure we return here.
+                    throw_unsup_format!("calling non-const function `{}`", instance)
+                }
+            }
+
+            if let Some(new_instance) = ecx.hook_special_const_fn(instance, args)? {
+                // We call another const fn instead.
+                // However, we return the *original* instance to make backtraces work out
+                // (and we hope this does not confuse the FnAbi checks too much).
+                return Ok(Self::find_mir_or_eval_fn(
+                    ecx,
+                    new_instance,
+                    _abi,
+                    args,
+                    _dest,
+                    _ret,
+                    _unwind,
+                )?
+                .map(|(body, _instance)| (body, instance)));
+            }
+        }
+        // This is a const fn. Call it.
+        Ok(Some((ecx.load_mir(instance.def, None)?, instance)))
+    }
+
+    /// Handles intrinsics: shared ones first, then the CTFE-only set
+    /// (pointer guaranteed-comparison, const heap allocate/deallocate).
+    fn call_intrinsic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx>],
+        dest: &PlaceTy<'tcx, Self::Provenance>,
+        target: Option<mir::BasicBlock>,
+        _unwind: StackPopUnwind,
+    ) -> InterpResult<'tcx> {
+        // Shared intrinsics.
+        if ecx.emulate_intrinsic(instance, args, dest, target)? {
+            return Ok(());
+        }
+        let intrinsic_name = ecx.tcx.item_name(instance.def_id());
+
+        // CTFE-specific intrinsics.
+        let Some(ret) = target else {
+            // Diverging intrinsic: not supported in const eval.
+            return Err(ConstEvalErrKind::NeedsRfc(format!(
+                "calling intrinsic `{}`",
+                intrinsic_name
+            ))
+            .into());
+        };
+        match intrinsic_name {
+            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
+                let a = ecx.read_immediate(&args[0])?.to_scalar()?;
+                let b = ecx.read_immediate(&args[1])?.to_scalar()?;
+                let cmp = if intrinsic_name == sym::ptr_guaranteed_eq {
+                    ecx.guaranteed_eq(a, b)?
+                } else {
+                    ecx.guaranteed_ne(a, b)?
+                };
+                ecx.write_scalar(Scalar::from_bool(cmp), dest)?;
+            }
+            sym::const_allocate => {
+                let size = ecx.read_scalar(&args[0])?.to_machine_usize(ecx)?;
+                let align = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;
+
+                let align = match Align::from_bytes(align) {
+                    Ok(a) => a,
+                    Err(err) => throw_ub_format!("align has to be a power of 2, {}", err),
+                };
+
+                let ptr = ecx.allocate_ptr(
+                    Size::from_bytes(size as u64),
+                    align,
+                    interpret::MemoryKind::Machine(MemoryKind::Heap),
+                )?;
+                ecx.write_pointer(ptr, dest)?;
+            }
+            sym::const_deallocate => {
+                let ptr = ecx.read_pointer(&args[0])?;
+                let size = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;
+                let align = ecx.read_scalar(&args[2])?.to_machine_usize(ecx)?;
+
+                let size = Size::from_bytes(size);
+                let align = match Align::from_bytes(align) {
+                    Ok(a) => a,
+                    Err(err) => throw_ub_format!("align has to be a power of 2, {}", err),
+                };
+
+                // If an allocation is created in an another const,
+                // we don't deallocate it.
+                let (alloc_id, _, _) = ecx.ptr_get_alloc_id(ptr)?;
+                let is_allocated_in_another_const = matches!(
+                    ecx.tcx.try_get_global_alloc(alloc_id),
+                    Some(interpret::GlobalAlloc::Memory(_))
+                );
+
+                if !is_allocated_in_another_const {
+                    ecx.deallocate_ptr(
+                        ptr,
+                        Some((size, align)),
+                        interpret::MemoryKind::Machine(MemoryKind::Heap),
+                    )?;
+                }
+            }
+            _ => {
+                return Err(ConstEvalErrKind::NeedsRfc(format!(
+                    "calling intrinsic `{}`",
+                    intrinsic_name
+                ))
+                .into());
+            }
+        }
+
+        // Intrinsic handled: continue at the return block.
+        ecx.go_to_block(ret);
+        Ok(())
+    }
+
+    /// Turns a failed MIR assertion into a structured const-eval error,
+    /// evaluating the operands so the message can show concrete values.
+    fn assert_panic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        msg: &AssertMessage<'tcx>,
+        _unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx> {
+        use rustc_middle::mir::AssertKind::*;
+        // Convert `AssertKind<Operand>` to `AssertKind<Scalar>`.
+        let eval_to_int =
+            |op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
+        let err = match msg {
+            BoundsCheck { ref len, ref index } => {
+                let len = eval_to_int(len)?;
+                let index = eval_to_int(index)?;
+                BoundsCheck { len, index }
+            }
+            Overflow(op, l, r) => Overflow(*op, eval_to_int(l)?, eval_to_int(r)?),
+            OverflowNeg(op) => OverflowNeg(eval_to_int(op)?),
+            DivisionByZero(op) => DivisionByZero(eval_to_int(op)?),
+            RemainderByZero(op) => RemainderByZero(eval_to_int(op)?),
+            ResumedAfterReturn(generator_kind) => ResumedAfterReturn(*generator_kind),
+            ResumedAfterPanic(generator_kind) => ResumedAfterPanic(*generator_kind),
+        };
+        Err(ConstEvalErrKind::AssertFailure(err).into())
+    }
+
+    fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: String) -> InterpResult<'tcx, !> {
+        Err(ConstEvalErrKind::Abort(msg).into())
+    }
+
+    /// Pointer arithmetic/comparison on abstract pointers is not allowed in
+    /// const contexts.
+    fn binary_ptr_op(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        _bin_op: mir::BinOp,
+        _left: &ImmTy<'tcx>,
+        _right: &ImmTy<'tcx>,
+    ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
+        Err(ConstEvalErrKind::NeedsRfc("pointer arithmetic or comparison".to_string()).into())
+    }
+
+    /// Enforces the step (terminator) limit; counting down to 0 raises a
+    /// resource-exhaustion error, and an already-0 counter means "no limit".
+    fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        // The step limit has already been hit in a previous call to `before_terminator`.
+        if ecx.machine.steps_remaining == 0 {
+            return Ok(());
+        }
+
+        ecx.machine.steps_remaining -= 1;
+        if ecx.machine.steps_remaining == 0 {
+            throw_exhaust!(StepLimitReached)
+        }
+
+        Ok(())
+    }
+
+    #[inline(always)]
+    fn expose_ptr(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _ptr: Pointer<AllocId>,
+    ) -> InterpResult<'tcx> {
+        // Pointer-to-int exposure has no sound const-eval semantics.
+        Err(ConstEvalErrKind::NeedsRfc("exposing pointers".to_string()).into())
+    }
+
+    #[inline(always)]
+    fn init_frame_extra(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        frame: Frame<'mir, 'tcx>,
+    ) -> InterpResult<'tcx, Frame<'mir, 'tcx>> {
+        // Enforce stack size limit. Add 1 because this is run before the new frame is pushed.
+        if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) {
+            throw_exhaust!(StackFrameLimitReached)
+        } else {
+            Ok(frame)
+        }
+    }
+
+    #[inline(always)]
+    fn stack<'a>(
+        ecx: &'a InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] {
+        &ecx.machine.stack
+    }
+
+    #[inline(always)]
+    fn stack_mut<'a>(
+        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> {
+        &mut ecx.machine.stack
+    }
+
+    /// Access control for global allocations: writes are always rejected, and
+    /// reads of statics are only permitted when the machine was configured
+    /// with `can_access_statics` (see the field's docs for why).
+    fn before_access_global(
+        _tcx: TyCtxt<'tcx>,
+        machine: &Self,
+        alloc_id: AllocId,
+        alloc: ConstAllocation<'tcx>,
+        static_def_id: Option<DefId>,
+        is_write: bool,
+    ) -> InterpResult<'tcx> {
+        let alloc = alloc.inner();
+        if is_write {
+            // Write access. These are never allowed, but we give a targeted error message.
+            if alloc.mutability == Mutability::Not {
+                Err(err_ub!(WriteToReadOnly(alloc_id)).into())
+            } else {
+                Err(ConstEvalErrKind::ModifiedGlobal.into())
+            }
+        } else {
+            // Read access. These are usually allowed, with some exceptions.
+            if machine.can_access_statics {
+                // Machine configuration allows us read from anything (e.g., `static` initializer).
+                Ok(())
+            } else if static_def_id.is_some() {
+                // Machine configuration does not allow us to read statics
+                // (e.g., `const` initializer).
+                // See const_eval::machine::MemoryExtra::can_access_statics for why
+                // this check is so important: if we could read statics, we could read pointers
+                // to mutable allocations *inside* statics. These allocations are not themselves
+                // statics, so pointers to them can get around the check in `validity.rs`.
+                Err(ConstEvalErrKind::ConstAccessesStatic.into())
+            } else {
+                // Immutable global, this read is fine.
+                // But make sure we never accept a read from something mutable, that would be
+                // unsound. The reason is that as the content of this allocation may be different
+                // now and at run-time, so if we permit reading now we might return the wrong value.
+                assert_eq!(alloc.mutability, Mutability::Not);
+                Ok(())
+            }
+        }
+    }
+}
+
+// Please do not add any code below the above `Machine` trait impl. I (oli-obk) plan more cleanups
+// so we can end up having a file with just that impl, but for now, let's keep the impl discoverable
+// at the bottom of this file.
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
new file mode 100644
index 000000000..948c33494
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -0,0 +1,163 @@
+// Not in interpret to make sure we do not use private implementation details
+
+use rustc_hir::Mutability;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::{source_map::DUMMY_SP, symbol::Symbol};
+
+use crate::interpret::{
+ intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, InterpResult, MemPlaceMeta,
+ Scalar,
+};
+
+mod error;
+mod eval_queries;
+mod fn_queries;
+mod machine;
+mod valtrees;
+
+pub use error::*;
+pub use eval_queries::*;
+pub use fn_queries::*;
+pub use machine::*;
+pub(crate) use valtrees::{const_to_valtree_inner, valtree_to_const_value};
+
+/// Query implementation: builds the `core::panic::Location` value for the
+/// given (file, line, column) triple and returns it as an interned constant.
+pub(crate) fn const_caller_location(
+    tcx: TyCtxt<'_>,
+    (file, line, col): (Symbol, u32, u32),
+) -> ConstValue<'_> {
+    trace!("const_caller_location: {}:{}:{}", file, line, col);
+    // `can_access_statics` is irrelevant here; the location contains no statics.
+    let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), false);
+
+    let loc_place = ecx.alloc_caller_location(file, line, col);
+    if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
+        bug!("intern_const_alloc_recursive should not error in this case")
+    }
+    ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr, &tcx))
+}
+
+// We forbid type-level constants that contain more than `VALTREE_MAX_NODES` nodes.
+const VALTREE_MAX_NODES: usize = 100000;
+
+/// Why converting a constant into a `ValTree` failed.
+pub(crate) enum ValTreeCreationError {
+    /// The constant exceeded `VALTREE_MAX_NODES`.
+    NodesOverflow,
+    /// The constant's type cannot be represented as a valtree (raw pointers,
+    /// trait objects, unions, ...).
+    NonSupportedType,
+    /// Some other evaluation failure (e.g. reading the memory failed).
+    Other,
+}
+pub(crate) type ValTreeCreationResult<'tcx> = Result<ty::ValTree<'tcx>, ValTreeCreationError>;
+
+/// Evaluates a constant and turns it into a type-level constant value.
+/// Evaluates a constant and turns it into a type-level constant value.
+///
+/// Returns `Ok(None)` when the constant cannot be represented as a valtree;
+/// exceeding the node limit additionally emits an error diagnostic.
+pub(crate) fn eval_to_valtree<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    cid: GlobalId<'tcx>,
+) -> EvalToValTreeResult<'tcx> {
+    let const_alloc = tcx.eval_to_allocation_raw(param_env.and(cid))?;
+
+    // FIXME Need to provide a span to `eval_to_valtree`
+    let ecx = mk_eval_cx(
+        tcx, DUMMY_SP, param_env,
+        // It is absolutely crucial for soundness that
+        // we do not read from static items or other mutable memory.
+        false,
+    );
+    let place = ecx.raw_const_to_mplace(const_alloc).unwrap();
+    debug!(?place);
+
+    let mut num_nodes = 0;
+    let valtree_result = const_to_valtree_inner(&ecx, &place, &mut num_nodes);
+
+    match valtree_result {
+        Ok(valtree) => Ok(Some(valtree)),
+        Err(err) => {
+            let did = cid.instance.def_id();
+            let s = cid.display(tcx);
+            match err {
+                ValTreeCreationError::NodesOverflow => {
+                    // Report at the item's span when it is local, otherwise as
+                    // a plain (span-less) error.
+                    let msg = format!("maximum number of nodes exceeded in constant {}", &s);
+                    let mut diag = match tcx.hir().span_if_local(did) {
+                        Some(span) => tcx.sess.struct_span_err(span, &msg),
+                        None => tcx.sess.struct_err(&msg),
+                    };
+                    diag.emit();
+
+                    Ok(None)
+                }
+                // Unsupported types are not an error; callers fall back to
+                // other constant representations.
+                ValTreeCreationError::NonSupportedType | ValTreeCreationError::Other => Ok(None),
+            }
+        }
+    }
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub(crate) fn try_destructure_mir_constant<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ val: mir::ConstantKind<'tcx>,
+) -> InterpResult<'tcx, mir::DestructuredMirConstant<'tcx>> {
+ trace!("destructure_mir_constant: {:?}", val);
+ let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
+ let op = ecx.mir_const_to_op(&val, None)?;
+
+ // We go to `usize` as we cannot allocate anything bigger anyway.
+ let (field_count, variant, down) = match val.ty().kind() {
+ ty::Array(_, len) => (len.eval_usize(tcx, param_env) as usize, None, op),
+ ty::Adt(def, _) if def.variants().is_empty() => {
+ throw_ub!(Unreachable)
+ }
+ ty::Adt(def, _) => {
+ let variant = ecx.read_discriminant(&op)?.1;
+ let down = ecx.operand_downcast(&op, variant)?;
+ (def.variants()[variant].fields.len(), Some(variant), down)
+ }
+ ty::Tuple(substs) => (substs.len(), None, op),
+ _ => bug!("cannot destructure mir constant {:?}", val),
+ };
+
+ let fields_iter = (0..field_count)
+ .map(|i| {
+ let field_op = ecx.operand_field(&down, i)?;
+ let val = op_to_const(&ecx, &field_op);
+ Ok(mir::ConstantKind::Val(val, field_op.layout.ty))
+ })
+ .collect::<InterpResult<'tcx, Vec<_>>>()?;
+ let fields = tcx.arena.alloc_from_iter(fields_iter);
+
+ Ok(mir::DestructuredMirConstant { variant, fields })
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub(crate) fn deref_mir_constant<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ val: mir::ConstantKind<'tcx>,
+) -> mir::ConstantKind<'tcx> {
+ let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
+ let op = ecx.mir_const_to_op(&val, None).unwrap();
+ let mplace = ecx.deref_operand(&op).unwrap();
+ if let Some(alloc_id) = mplace.ptr.provenance {
+ assert_eq!(
+ tcx.global_alloc(alloc_id).unwrap_memory().0.0.mutability,
+ Mutability::Not,
+ "deref_mir_constant cannot be used with mutable allocations as \
+ that could allow pattern matching to observe mutable statics",
+ );
+ }
+
+ let ty = match mplace.meta {
+ MemPlaceMeta::None => mplace.layout.ty,
+ // In case of unsized types, figure out the real type behind.
+ MemPlaceMeta::Meta(scalar) => match mplace.layout.ty.kind() {
+ ty::Str => bug!("there's no sized equivalent of a `str`"),
+ ty::Slice(elem_ty) => tcx.mk_array(*elem_ty, scalar.to_machine_usize(&tcx).unwrap()),
+ _ => bug!(
+ "type {} should not have metadata, but had {:?}",
+ mplace.layout.ty,
+ mplace.meta
+ ),
+ },
+ };
+
+ mir::ConstantKind::Val(op_to_const(&ecx, &mplace.into()), ty)
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
new file mode 100644
index 000000000..8fff4571d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -0,0 +1,475 @@
+use super::eval_queries::{mk_eval_cx, op_to_const};
+use super::machine::CompileTimeEvalContext;
+use super::{ValTreeCreationError, ValTreeCreationResult, VALTREE_MAX_NODES};
+use crate::interpret::{
+ intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta,
+ MemoryKind, PlaceTy, Scalar, ScalarMaybeUninit,
+};
+use crate::interpret::{MPlaceTy, Value};
+use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
+use rustc_span::source_map::DUMMY_SP;
+use rustc_target::abi::{Align, VariantIdx};
+
+#[instrument(skip(ecx), level = "debug")]
+/// Builds a `ValTree::Branch` from the `n` fields of `place`. For enums,
+/// `variant` selects the active variant, whose index is prepended to the
+/// fields so the variant can be recovered from the valtree alone.
+#[instrument(skip(ecx), level = "debug")]
+fn branches<'tcx>(
+    ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+    place: &MPlaceTy<'tcx>,
+    n: usize,
+    variant: Option<VariantIdx>,
+    num_nodes: &mut usize,
+) -> ValTreeCreationResult<'tcx> {
+    // Project into the active variant first (structs/tuples pass `None`).
+    let place = match variant {
+        Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(),
+        None => *place,
+    };
+    let variant = variant.map(|variant| ty::ValTree::Leaf(ScalarInt::from(variant.as_u32())));
+    debug!(?place, ?variant);
+
+    // For enums, we prepend their variant index before the variant's fields so we can figure out
+    // the variant again when just seeing a valtree. Errors in any field abort the
+    // whole construction via `?`, so no placeholder `Option` layer is needed.
+    let mut branches = Vec::with_capacity(n + variant.is_some() as usize);
+    branches.extend(variant);
+    for i in 0..n {
+        let field = ecx.mplace_field(&place, i).unwrap();
+        let valtree = const_to_valtree_inner(ecx, &field, num_nodes)?;
+        branches.push(valtree);
+    }
+
+    // Have to account for ZSTs here
+    if branches.len() == 0 {
+        *num_nodes += 1;
+    }
+
+    Ok(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(branches)))
+}
+
+#[instrument(skip(ecx), level = "debug")]
+/// Builds a `ValTree::Branch` from the elements of an array/slice/str place.
+#[instrument(skip(ecx), level = "debug")]
+fn slice_branches<'tcx>(
+    ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+    place: &MPlaceTy<'tcx>,
+    num_nodes: &mut usize,
+) -> ValTreeCreationResult<'tcx> {
+    // The element count comes from the place's length metadata (or array type).
+    let n = place
+        .len(&ecx.tcx.tcx)
+        .unwrap_or_else(|_| panic!("expected to use len of place {:?}", place));
+
+    let mut elems = Vec::with_capacity(n as usize);
+    for i in 0..n {
+        let place_elem = ecx.mplace_index(place, i).unwrap();
+        let valtree = const_to_valtree_inner(ecx, &place_elem, num_nodes)?;
+        elems.push(valtree);
+    }
+
+    Ok(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(elems)))
+}
+
+#[instrument(skip(ecx), level = "debug")]
+/// Recursively converts the constant value at `place` into a `ValTree`,
+/// tracking the node count against `VALTREE_MAX_NODES`.
+#[instrument(skip(ecx), level = "debug")]
+pub(crate) fn const_to_valtree_inner<'tcx>(
+    ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+    place: &MPlaceTy<'tcx>,
+    num_nodes: &mut usize,
+) -> ValTreeCreationResult<'tcx> {
+    let ty = place.layout.ty;
+    debug!("ty kind: {:?}", ty.kind());
+
+    if *num_nodes >= VALTREE_MAX_NODES {
+        return Err(ValTreeCreationError::NodesOverflow);
+    }
+
+    match ty.kind() {
+        // `FnDef` is a ZST: the function is fully identified by the type.
+        ty::FnDef(..) => {
+            *num_nodes += 1;
+            Ok(ty::ValTree::zst())
+        }
+        ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
+            let Ok(val) = ecx.read_immediate(&place.into()) else {
+                return Err(ValTreeCreationError::Other);
+            };
+            let val = val.to_scalar().unwrap();
+            *num_nodes += 1;
+
+            Ok(ty::ValTree::Leaf(val.assert_int()))
+        }
+
+        // Raw pointers are not allowed in type level constants, as we cannot properly test them for
+        // equality at compile-time (see `ptr_guaranteed_eq`/`_ne`).
+        // Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to
+        // agree with runtime equality tests.
+        ty::FnPtr(_) | ty::RawPtr(_) => Err(ValTreeCreationError::NonSupportedType),
+
+        // References are followed: the valtree stores the pointee's value.
+        ty::Ref(_, _, _) => {
+            let Ok(derefd_place)= ecx.deref_operand(&place.into()) else {
+                return Err(ValTreeCreationError::Other);
+            };
+            debug!(?derefd_place);
+
+            const_to_valtree_inner(ecx, &derefd_place, num_nodes)
+        }
+
+        ty::Str | ty::Slice(_) | ty::Array(_, _) => {
+            slice_branches(ecx, place, num_nodes)
+        }
+        // Trait objects are not allowed in type level constants, as we have no concept for
+        // resolving their backing type, even if we can do that at const eval time. We may
+        // hypothetically be able to allow `dyn StructuralEq` trait objects in the future,
+        // but it is unclear if this is useful.
+        ty::Dynamic(..) => Err(ValTreeCreationError::NonSupportedType),
+
+        ty::Tuple(elem_tys) => {
+            branches(ecx, place, elem_tys.len(), None, num_nodes)
+        }
+
+        ty::Adt(def, _) => {
+            if def.is_union() {
+                // Unions have no well-defined active field to read.
+                return Err(ValTreeCreationError::NonSupportedType);
+            } else if def.variants().is_empty() {
+                bug!("uninhabited types should have errored and never gotten converted to valtree")
+            }
+
+            let Ok((_, variant)) = ecx.read_discriminant(&place.into()) else {
+                return Err(ValTreeCreationError::Other);
+            };
+            branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant), num_nodes)
+        }
+
+        ty::Never
+        | ty::Error(_)
+        | ty::Foreign(..)
+        | ty::Infer(ty::FreshIntTy(_))
+        | ty::Infer(ty::FreshFloatTy(_))
+        | ty::Projection(..)
+        | ty::Param(_)
+        | ty::Bound(..)
+        | ty::Placeholder(..)
+        // FIXME(oli-obk): we could look behind opaque types
+        | ty::Opaque(..)
+        | ty::Infer(_)
+        // FIXME(oli-obk): we can probably encode closures just like structs
+        | ty::Closure(..)
+        | ty::Generator(..)
+        | ty::GeneratorWitness(..) => Err(ValTreeCreationError::NonSupportedType),
+    }
+}
+
+#[instrument(skip(ecx), level = "debug")]
+/// Allocates a fresh (stack-kind) place sized for the layout of `ty`.
+/// Only valid for sized types; unsized pointees go through `create_pointee_place`.
+#[instrument(skip(ecx), level = "debug")]
+fn create_mplace_from_layout<'tcx>(
+    ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+    ty: Ty<'tcx>,
+) -> MPlaceTy<'tcx> {
+    let tcx = ecx.tcx;
+    let param_env = ecx.param_env;
+    let layout = tcx.layout_of(param_env.and(ty)).unwrap();
+    debug!(?layout);
+
+    ecx.allocate(layout, MemoryKind::Stack).unwrap()
+}
+
+// Walks custom DSTs and gets the type of the unsized field and the number of elements
+// in the unsized field.
+// Walks custom DSTs and gets the type of the unsized field and the number of elements
+// in the unsized field.
+fn get_info_on_unsized_field<'tcx>(
+    ty: Ty<'tcx>,
+    valtree: ty::ValTree<'tcx>,
+    tcx: TyCtxt<'tcx>,
+) -> (Ty<'tcx>, usize) {
+    // Follow the valtree in lockstep with the struct-tail walk: each nesting
+    // level's unsized tail corresponds to the last branch of the valtree.
+    let mut last_valtree = valtree;
+    let tail = tcx.struct_tail_with_normalize(
+        ty,
+        |ty| ty,
+        || {
+            let branches = last_valtree.unwrap_branch();
+            last_valtree = branches[branches.len() - 1];
+            debug!(?branches, ?last_valtree);
+        },
+    );
+    let unsized_inner_ty = match tail.kind() {
+        ty::Slice(t) => *t,
+        ty::Str => tail,
+        _ => bug!("expected Slice or Str"),
+    };
+
+    // Have to adjust type for ty::Str: its elements are `u8` bytes.
+    let unsized_inner_ty = match unsized_inner_ty.kind() {
+        ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)),
+        _ => unsized_inner_ty,
+    };
+
+    // Get the number of elements in the unsized field
+    let num_elems = last_valtree.unwrap_branch().len();
+
+    (unsized_inner_ty, num_elems)
+}
+
+#[instrument(skip(ecx), level = "debug")]
+/// Creates a place for the pointee of a reference type `ty`, allocating
+/// backing memory sized from `valtree` when the pointee is unsized.
+#[instrument(skip(ecx), level = "debug")]
+fn create_pointee_place<'tcx>(
+    ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+    ty: Ty<'tcx>,
+    valtree: ty::ValTree<'tcx>,
+) -> MPlaceTy<'tcx> {
+    let tcx = ecx.tcx.tcx;
+
+    if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty()) {
+        // We need to create `Allocation`s for custom DSTs
+
+        // `get_info_on_unsized_field` already maps `str` tails to their `u8`
+        // element type, so no further adjustment is needed here.
+        let (unsized_inner_ty, num_elems) = get_info_on_unsized_field(ty, valtree, tcx);
+        let unsized_inner_ty_size =
+            tcx.layout_of(ty::ParamEnv::empty().and(unsized_inner_ty)).unwrap().layout.size();
+        debug!(?unsized_inner_ty, ?unsized_inner_ty_size, ?num_elems);
+
+        // for custom DSTs only the last field/element is unsized, but we need to also allocate
+        // space for the other fields/elements
+        let layout = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap();
+        let size_of_sized_part = layout.layout.size();
+
+        // Get the size of the memory behind the DST
+        let dst_size = unsized_inner_ty_size.checked_mul(num_elems as u64, &tcx).unwrap();
+
+        let size = size_of_sized_part.checked_add(dst_size, &tcx).unwrap();
+        // NOTE(review): aligning to the total size's next power of two
+        // over-aligns the allocation; `layout.align` would be the minimal
+        // correct choice — confirm before tightening.
+        let align = Align::from_bytes(size.bytes().next_power_of_two()).unwrap();
+        let ptr = ecx.allocate_ptr(size, align, MemoryKind::Stack).unwrap();
+        debug!(?ptr);
+
+        // Attach the element count as wide-pointer metadata.
+        let place = MPlaceTy::from_aligned_ptr_with_meta(
+            ptr.into(),
+            layout,
+            MemPlaceMeta::Meta(Scalar::from_machine_usize(num_elems as u64, &tcx)),
+        );
+        debug!(?place);
+
+        place
+    } else {
+        create_mplace_from_layout(ecx, ty)
+    }
+}
+
+/// Converts a `ValTree` to a `ConstValue`, which is needed after mir
+/// construction has finished.
+// FIXME Merge `valtree_to_const_value` and `valtree_into_mplace` into one function
+#[instrument(skip(tcx), level = "debug")]
+pub fn valtree_to_const_value<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+    valtree: ty::ValTree<'tcx>,
+) -> ConstValue<'tcx> {
+    // Basic idea: We directly construct `Scalar` values from trivial `ValTree`s
+    // (those for constants with type bool, int, uint, float or char).
+    // For all other types we create an `MPlace` and fill that by walking
+    // the `ValTree` and using `place_projection` and `place_field` to
+    // create inner `MPlace`s which are filled recursively.
+    // FIXME Does this need an example?
+
+    let (param_env, ty) = param_env_ty.into_parts();
+    let mut ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
+
+    match ty.kind() {
+        // `FnDef` carries no data; the valtree must be an empty branch.
+        ty::FnDef(..) => {
+            assert!(valtree.unwrap_branch().is_empty());
+            ConstValue::ZeroSized
+        }
+        ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => match valtree {
+            ty::ValTree::Leaf(scalar_int) => ConstValue::Scalar(Scalar::Int(scalar_int)),
+            ty::ValTree::Branch(_) => bug!(
+                "ValTrees for Bool, Int, Uint, Float or Char should have the form ValTree::Leaf"
+            ),
+        },
+        ty::Ref(_, _, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
+            let mut place = match ty.kind() {
+                ty::Ref(_, inner_ty, _) => {
+                    // Need to create a place for the pointee to fill for Refs
+                    create_pointee_place(&mut ecx, *inner_ty, valtree)
+                }
+                _ => create_mplace_from_layout(&mut ecx, ty),
+            };
+            debug!(?place);
+
+            // Fill the allocation from the valtree, then intern it so the
+            // resulting `ConstValue` can refer to immutable global memory.
+            valtree_into_mplace(&mut ecx, &mut place, valtree);
+            dump_place(&ecx, place.into());
+            intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();
+
+            let const_val = match ty.kind() {
+                ty::Ref(_, _, _) => {
+                    // For references the constant is the (possibly wide)
+                    // pointer value, not the pointee memory itself.
+                    let ref_place = place.to_ref(&tcx);
+                    let imm =
+                        ImmTy::from_immediate(ref_place, tcx.layout_of(param_env_ty).unwrap());
+
+                    op_to_const(&ecx, &imm.into())
+                }
+                _ => op_to_const(&ecx, &place.into()),
+            };
+            debug!(?const_val);
+
+            const_val
+        }
+        ty::Never
+        | ty::Error(_)
+        | ty::Foreign(..)
+        | ty::Infer(ty::FreshIntTy(_))
+        | ty::Infer(ty::FreshFloatTy(_))
+        | ty::Projection(..)
+        | ty::Param(_)
+        | ty::Bound(..)
+        | ty::Placeholder(..)
+        | ty::Opaque(..)
+        | ty::Infer(_)
+        | ty::Closure(..)
+        | ty::Generator(..)
+        | ty::GeneratorWitness(..)
+        | ty::FnPtr(_)
+        | ty::RawPtr(_)
+        | ty::Str
+        | ty::Slice(_)
+        | ty::Dynamic(..) => bug!("no ValTree should have been created for type {:?}", ty.kind()),
+    }
+}
+
+#[instrument(skip(ecx), level = "debug")]
+fn valtree_into_mplace<'tcx>(
+ ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+ place: &mut MPlaceTy<'tcx>,
+ valtree: ty::ValTree<'tcx>,
+) {
+ // This will match on valtree and write the value(s) corresponding to the ValTree
+ // inside the place recursively.
+
+ let tcx = ecx.tcx.tcx;
+ let ty = place.layout.ty;
+
+ match ty.kind() {
+ ty::FnDef(_, _) => {
+ ecx.write_immediate(Immediate::Uninit, &place.into()).unwrap();
+ }
+ ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
+ let scalar_int = valtree.unwrap_leaf();
+ debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place);
+ ecx.write_immediate(
+ Immediate::Scalar(ScalarMaybeUninit::Scalar(scalar_int.into())),
+ &place.into(),
+ )
+ .unwrap();
+ }
+ ty::Ref(_, inner_ty, _) => {
+ let mut pointee_place = create_pointee_place(ecx, *inner_ty, valtree);
+ debug!(?pointee_place);
+
+ valtree_into_mplace(ecx, &mut pointee_place, valtree);
+ dump_place(ecx, pointee_place.into());
+ intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place).unwrap();
+
+ let imm = match inner_ty.kind() {
+ ty::Slice(_) | ty::Str => {
+ let len = valtree.unwrap_branch().len();
+ let len_scalar =
+ ScalarMaybeUninit::Scalar(Scalar::from_machine_usize(len as u64, &tcx));
+
+ Immediate::ScalarPair(
+ ScalarMaybeUninit::from_maybe_pointer((*pointee_place).ptr, &tcx),
+ len_scalar,
+ )
+ }
+ _ => pointee_place.to_ref(&tcx),
+ };
+ debug!(?imm);
+
+ ecx.write_immediate(imm, &place.into()).unwrap();
+ }
+ ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str | ty::Slice(_) => {
+ let branches = valtree.unwrap_branch();
+
+ // Need to downcast place for enums
+ let (place_adjusted, branches, variant_idx) = match ty.kind() {
+ ty::Adt(def, _) if def.is_enum() => {
+ // First element of valtree corresponds to variant
+ let scalar_int = branches[0].unwrap_leaf();
+ let variant_idx = VariantIdx::from_u32(scalar_int.try_to_u32().unwrap());
+ let variant = def.variant(variant_idx);
+ debug!(?variant);
+
+ (
+ place.project_downcast(ecx, variant_idx).unwrap(),
+ &branches[1..],
+ Some(variant_idx),
+ )
+ }
+ _ => (*place, branches, None),
+ };
+ debug!(?place_adjusted, ?branches);
+
+ // Create the places (by indexing into `place`) for the fields and fill
+ // them recursively
+ for (i, inner_valtree) in branches.iter().enumerate() {
+ debug!(?i, ?inner_valtree);
+
+ let mut place_inner = match ty.kind() {
+ ty::Str | ty::Slice(_) => ecx.mplace_index(&place, i as u64).unwrap(),
+ _ if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty())
+ && i == branches.len() - 1 =>
+ {
+ // Note: For custom DSTs we need to manually process the last unsized field.
+ // We created a `Pointer` for the `Allocation` of the complete sized version of
+ // the Adt in `create_pointee_place` and now we fill that `Allocation` with the
+ // values in the ValTree. For the unsized field we have to additionally add the meta
+ // data.
+
+ let (unsized_inner_ty, num_elems) =
+ get_info_on_unsized_field(ty, valtree, tcx);
+ debug!(?unsized_inner_ty);
+
+ let inner_ty = match ty.kind() {
+ ty::Adt(def, substs) => {
+ def.variant(VariantIdx::from_u32(0)).fields[i].ty(tcx, substs)
+ }
+ ty::Tuple(inner_tys) => inner_tys[i],
+ _ => bug!("unexpected unsized type {:?}", ty),
+ };
+
+ let inner_layout =
+ tcx.layout_of(ty::ParamEnv::empty().and(inner_ty)).unwrap();
+ debug!(?inner_layout);
+
+ let offset = place_adjusted.layout.fields.offset(i);
+ place
+ .offset_with_meta(
+ offset,
+ MemPlaceMeta::Meta(Scalar::from_machine_usize(
+ num_elems as u64,
+ &tcx,
+ )),
+ inner_layout,
+ &tcx,
+ )
+ .unwrap()
+ }
+ _ => ecx.mplace_field(&place_adjusted, i).unwrap(),
+ };
+
+ debug!(?place_inner);
+ valtree_into_mplace(ecx, &mut place_inner, *inner_valtree);
+ dump_place(&ecx, place_inner.into());
+ }
+
+ debug!("dump of place_adjusted:");
+ dump_place(ecx, place_adjusted.into());
+
+ if let Some(variant_idx) = variant_idx {
+ // don't forget filling the place with the discriminant of the enum
+ ecx.write_discriminant(variant_idx, &place.into()).unwrap();
+ }
+
+ debug!("dump of place after writing discriminant:");
+ dump_place(ecx, place.into());
+ }
+ _ => bug!("shouldn't have created a ValTree for {:?}", ty),
+ }
+}
+
+fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: PlaceTy<'tcx>) {
+ trace!("{:?}", ecx.dump_place(*place));
+}
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
new file mode 100644
index 000000000..a463fe7b9
--- /dev/null
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -0,0 +1,89 @@
+use rustc_hir::ConstContext;
+use rustc_macros::SessionDiagnostic;
+use rustc_span::Span;
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::unstable_in_stable)]
+pub(crate) struct UnstableInStable {
+ pub gate: String,
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(
+ const_eval::unstable_sugg,
+ code = "#[rustc_const_unstable(feature = \"...\", issue = \"...\")]\n",
+ applicability = "has-placeholders"
+ )]
+ #[suggestion(
+ const_eval::bypass_sugg,
+ code = "#[rustc_allow_const_fn_unstable({gate})]\n",
+ applicability = "has-placeholders"
+ )]
+ pub attr_span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::thread_local_access, code = "E0625")]
+pub(crate) struct NonConstOpErr {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::static_access, code = "E0013")]
+#[help]
+pub(crate) struct StaticAccessErr {
+ #[primary_span]
+ pub span: Span,
+ pub kind: ConstContext,
+ #[note(const_eval::teach_note)]
+ #[help(const_eval::teach_help)]
+ pub teach: Option<()>,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::raw_ptr_to_int)]
+#[note]
+#[note(const_eval::note2)]
+pub(crate) struct RawPtrToIntErr {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::raw_ptr_comparison)]
+#[note]
+pub(crate) struct RawPtrComparisonErr {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::panic_non_str)]
+pub(crate) struct PanicNonStrErr {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::mut_deref, code = "E0658")]
+pub(crate) struct MutDerefErr {
+ #[primary_span]
+ pub span: Span,
+ pub kind: ConstContext,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::transient_mut_borrow, code = "E0658")]
+pub(crate) struct TransientMutBorrowErr {
+ #[primary_span]
+ pub span: Span,
+ pub kind: ConstContext,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::transient_mut_borrow_raw, code = "E0658")]
+pub(crate) struct TransientMutBorrowErrRaw {
+ #[primary_span]
+ pub span: Span,
+ pub kind: ConstContext,
+}
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
new file mode 100644
index 000000000..c97c31eb9
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -0,0 +1,365 @@
+use std::assert_matches::assert_matches;
+use std::convert::TryFrom;
+
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_apfloat::{Float, FloatConvert};
+use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
+use rustc_middle::mir::CastKind;
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::layout::{IntegerExt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, FloatTy, Ty, TypeAndMut};
+use rustc_target::abi::Integer;
+use rustc_type_ir::sty::TyKind::*;
+
+use super::{
+ util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy,
+};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ pub fn cast(
+ &mut self,
+ src: &OpTy<'tcx, M::Provenance>,
+ cast_kind: CastKind,
+ cast_ty: Ty<'tcx>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ use rustc_middle::mir::CastKind::*;
+ // FIXME: In which cases should we trigger UB when the source is uninit?
+ match cast_kind {
+ Pointer(PointerCast::Unsize) => {
+ let cast_ty = self.layout_of(cast_ty)?;
+ self.unsize_into(src, cast_ty, dest)?;
+ }
+
+ PointerExposeAddress => {
+ let src = self.read_immediate(src)?;
+ let res = self.pointer_expose_address_cast(&src, cast_ty)?;
+ self.write_immediate(res, dest)?;
+ }
+
+ PointerFromExposedAddress => {
+ let src = self.read_immediate(src)?;
+ let res = self.pointer_from_exposed_address_cast(&src, cast_ty)?;
+ self.write_immediate(res, dest)?;
+ }
+
+ Misc => {
+ let src = self.read_immediate(src)?;
+ let res = self.misc_cast(&src, cast_ty)?;
+ self.write_immediate(res, dest)?;
+ }
+
+ Pointer(PointerCast::MutToConstPointer | PointerCast::ArrayToPointer) => {
+ // These are NOPs, but can be wide pointers.
+ let v = self.read_immediate(src)?;
+ self.write_immediate(*v, dest)?;
+ }
+
+ Pointer(PointerCast::ReifyFnPointer) => {
+ // The src operand does not matter, just its type
+ match *src.layout.ty.kind() {
+ ty::FnDef(def_id, substs) => {
+ // All reifications must be monomorphic, bail out otherwise.
+ ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+
+ let instance = ty::Instance::resolve_for_fn_ptr(
+ *self.tcx,
+ self.param_env,
+ def_id,
+ substs,
+ )
+ .ok_or_else(|| err_inval!(TooGeneric))?;
+
+ let fn_ptr = self.create_fn_alloc_ptr(FnVal::Instance(instance));
+ self.write_pointer(fn_ptr, dest)?;
+ }
+ _ => span_bug!(self.cur_span(), "reify fn pointer on {:?}", src.layout.ty),
+ }
+ }
+
+ Pointer(PointerCast::UnsafeFnPointer) => {
+ let src = self.read_immediate(src)?;
+ match cast_ty.kind() {
+ ty::FnPtr(_) => {
+ // No change to value
+ self.write_immediate(*src, dest)?;
+ }
+ _ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {:?}", cast_ty),
+ }
+ }
+
+ Pointer(PointerCast::ClosureFnPointer(_)) => {
+ // The src operand does not matter, just its type
+ match *src.layout.ty.kind() {
+ ty::Closure(def_id, substs) => {
+ // All reifications must be monomorphic, bail out otherwise.
+ ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+
+ let instance = ty::Instance::resolve_closure(
+ *self.tcx,
+ def_id,
+ substs,
+ ty::ClosureKind::FnOnce,
+ )
+ .ok_or_else(|| err_inval!(TooGeneric))?;
+ let fn_ptr = self.create_fn_alloc_ptr(FnVal::Instance(instance));
+ self.write_pointer(fn_ptr, dest)?;
+ }
+ _ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
+ }
+ }
+ }
+ Ok(())
+ }
+
+ pub fn misc_cast(
+ &mut self,
+ src: &ImmTy<'tcx, M::Provenance>,
+ cast_ty: Ty<'tcx>,
+ ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ use rustc_type_ir::sty::TyKind::*;
+ trace!("Casting {:?}: {:?} to {:?}", *src, src.layout.ty, cast_ty);
+
+ match src.layout.ty.kind() {
+ // Floating point
+ Float(FloatTy::F32) => {
+ return Ok(self.cast_from_float(src.to_scalar()?.to_f32()?, cast_ty).into());
+ }
+ Float(FloatTy::F64) => {
+ return Ok(self.cast_from_float(src.to_scalar()?.to_f64()?, cast_ty).into());
+ }
+ // The rest is integer/pointer-"like", including fn ptr casts
+ _ => assert!(
+ src.layout.ty.is_bool()
+ || src.layout.ty.is_char()
+ || src.layout.ty.is_integral()
+ || src.layout.ty.is_any_ptr(),
+ "Unexpected cast from type {:?}",
+ src.layout.ty
+ ),
+ }
+
+ // # First handle non-scalar source values.
+
+ // Handle casting any ptr to raw ptr (might be a fat ptr).
+ if src.layout.ty.is_any_ptr() && cast_ty.is_unsafe_ptr() {
+ let dest_layout = self.layout_of(cast_ty)?;
+ if dest_layout.size == src.layout.size {
+                // Thin or fat pointer that just has the ptr kind of the target type changed.
+ return Ok(**src);
+ } else {
+ // Casting the metadata away from a fat ptr.
+ assert_eq!(src.layout.size, 2 * self.pointer_size());
+ assert_eq!(dest_layout.size, self.pointer_size());
+ assert!(src.layout.ty.is_unsafe_ptr());
+ return match **src {
+ Immediate::ScalarPair(data, _) => Ok(data.check_init()?.into()),
+ Immediate::Scalar(..) => span_bug!(
+ self.cur_span(),
+ "{:?} input to a fat-to-thin cast ({:?} -> {:?})",
+ *src,
+ src.layout.ty,
+ cast_ty
+ ),
+ Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
+ };
+ }
+ }
+
+ // # The remaining source values are scalar and "int-like".
+ let scalar = src.to_scalar()?;
+ Ok(self.cast_from_int_like(scalar, src.layout, cast_ty)?.into())
+ }
+
+ pub fn pointer_expose_address_cast(
+ &mut self,
+ src: &ImmTy<'tcx, M::Provenance>,
+ cast_ty: Ty<'tcx>,
+ ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ assert_matches!(src.layout.ty.kind(), ty::RawPtr(_) | ty::FnPtr(_));
+ assert!(cast_ty.is_integral());
+
+ let scalar = src.to_scalar()?;
+ let ptr = scalar.to_pointer(self)?;
+ match ptr.into_pointer_or_addr() {
+ Ok(ptr) => M::expose_ptr(self, ptr)?,
+ Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP.
+ };
+ Ok(self.cast_from_int_like(scalar, src.layout, cast_ty)?.into())
+ }
+
+ pub fn pointer_from_exposed_address_cast(
+ &mut self,
+ src: &ImmTy<'tcx, M::Provenance>,
+ cast_ty: Ty<'tcx>,
+ ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ assert!(src.layout.ty.is_integral());
+ assert_matches!(cast_ty.kind(), ty::RawPtr(_));
+
+ // First cast to usize.
+ let scalar = src.to_scalar()?;
+ let addr = self.cast_from_int_like(scalar, src.layout, self.tcx.types.usize)?;
+ let addr = addr.to_machine_usize(self)?;
+
+ // Then turn address into pointer.
+ let ptr = M::ptr_from_addr_cast(&self, addr)?;
+ Ok(Scalar::from_maybe_pointer(ptr, self).into())
+ }
+
+ pub fn cast_from_int_like(
+ &self,
+ scalar: Scalar<M::Provenance>, // input value (there is no ScalarTy so we separate data+layout)
+ src_layout: TyAndLayout<'tcx>,
+ cast_ty: Ty<'tcx>,
+ ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ // Let's make sure v is sign-extended *if* it has a signed type.
+ let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
+
+ let v = scalar.to_bits(src_layout.size)?;
+ let v = if signed { self.sign_extend(v, src_layout) } else { v };
+ trace!("cast_from_scalar: {}, {} -> {}", v, src_layout.ty, cast_ty);
+
+ Ok(match *cast_ty.kind() {
+ Int(_) | Uint(_) => {
+ let size = match *cast_ty.kind() {
+ Int(t) => Integer::from_int_ty(self, t).size(),
+ Uint(t) => Integer::from_uint_ty(self, t).size(),
+ _ => bug!(),
+ };
+ let v = size.truncate(v);
+ Scalar::from_uint(v, size)
+ }
+
+ Float(FloatTy::F32) if signed => Scalar::from_f32(Single::from_i128(v as i128).value),
+ Float(FloatTy::F64) if signed => Scalar::from_f64(Double::from_i128(v as i128).value),
+ Float(FloatTy::F32) => Scalar::from_f32(Single::from_u128(v).value),
+ Float(FloatTy::F64) => Scalar::from_f64(Double::from_u128(v).value),
+
+ Char => {
+ // `u8` to `char` cast
+ Scalar::from_u32(u8::try_from(v).unwrap().into())
+ }
+
+ // Casts to bool are not permitted by rustc, no need to handle them here.
+ _ => span_bug!(self.cur_span(), "invalid int to {:?} cast", cast_ty),
+ })
+ }
+
+ fn cast_from_float<F>(&self, f: F, dest_ty: Ty<'tcx>) -> Scalar<M::Provenance>
+ where
+ F: Float + Into<Scalar<M::Provenance>> + FloatConvert<Single> + FloatConvert<Double>,
+ {
+ use rustc_type_ir::sty::TyKind::*;
+ match *dest_ty.kind() {
+ // float -> uint
+ Uint(t) => {
+ let size = Integer::from_uint_ty(self, t).size();
+ // `to_u128` is a saturating cast, which is what we need
+ // (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_i128_r).
+ let v = f.to_u128(size.bits_usize()).value;
+ // This should already fit the bit width
+ Scalar::from_uint(v, size)
+ }
+ // float -> int
+ Int(t) => {
+ let size = Integer::from_int_ty(self, t).size();
+ // `to_i128` is a saturating cast, which is what we need
+ // (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_i128_r).
+ let v = f.to_i128(size.bits_usize()).value;
+ Scalar::from_int(v, size)
+ }
+ // float -> f32
+ Float(FloatTy::F32) => Scalar::from_f32(f.convert(&mut false).value),
+ // float -> f64
+ Float(FloatTy::F64) => Scalar::from_f64(f.convert(&mut false).value),
+ // That's it.
+ _ => span_bug!(self.cur_span(), "invalid float to {:?} cast", dest_ty),
+ }
+ }
+
+ fn unsize_into_ptr(
+ &mut self,
+ src: &OpTy<'tcx, M::Provenance>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ // The pointee types
+ source_ty: Ty<'tcx>,
+ cast_ty: Ty<'tcx>,
+ ) -> InterpResult<'tcx> {
+ // A<Struct> -> A<Trait> conversion
+ let (src_pointee_ty, dest_pointee_ty) =
+ self.tcx.struct_lockstep_tails_erasing_lifetimes(source_ty, cast_ty, self.param_env);
+
+ match (&src_pointee_ty.kind(), &dest_pointee_ty.kind()) {
+ (&ty::Array(_, length), &ty::Slice(_)) => {
+ let ptr = self.read_immediate(src)?.to_scalar()?;
+ // u64 cast is from usize to u64, which is always good
+ let val =
+ Immediate::new_slice(ptr, length.eval_usize(*self.tcx, self.param_env), self);
+ self.write_immediate(val, dest)
+ }
+ (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+ let (old_data, old_vptr) = self.read_immediate(src)?.to_scalar_pair()?;
+ let old_vptr = old_vptr.to_pointer(self)?;
+ let (ty, old_trait) = self.get_ptr_vtable(old_vptr)?;
+ if old_trait != data_a.principal() {
+ throw_ub_format!("upcast on a pointer whose vtable does not match its type");
+ }
+ let new_vptr = self.get_vtable_ptr(ty, data_b.principal())?;
+ self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
+ }
+ (_, &ty::Dynamic(ref data, _)) => {
+ // Initial cast from sized to dyn trait
+ let vtable = self.get_vtable_ptr(src_pointee_ty, data.principal())?;
+ let ptr = self.read_immediate(src)?.to_scalar()?;
+ let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
+ self.write_immediate(val, dest)
+ }
+
+ _ => {
+ span_bug!(self.cur_span(), "invalid unsizing {:?} -> {:?}", src.layout.ty, cast_ty)
+ }
+ }
+ }
+
+ fn unsize_into(
+ &mut self,
+ src: &OpTy<'tcx, M::Provenance>,
+ cast_ty: TyAndLayout<'tcx>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout.ty, cast_ty.ty);
+ match (&src.layout.ty.kind(), &cast_ty.ty.kind()) {
+ (&ty::Ref(_, s, _), &ty::Ref(_, c, _) | &ty::RawPtr(TypeAndMut { ty: c, .. }))
+ | (&ty::RawPtr(TypeAndMut { ty: s, .. }), &ty::RawPtr(TypeAndMut { ty: c, .. })) => {
+ self.unsize_into_ptr(src, dest, *s, *c)
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+
+ // unsizing of generic struct with pointer fields
+ // Example: `Arc<T>` -> `Arc<Trait>`
+ // here we need to increase the size of every &T thin ptr field to a fat ptr
+ for i in 0..src.layout.fields.count() {
+ let cast_ty_field = cast_ty.field(self, i);
+ if cast_ty_field.is_zst() {
+ continue;
+ }
+ let src_field = self.operand_field(src, i)?;
+ let dst_field = self.place_field(dest, i)?;
+ if src_field.layout.ty == cast_ty_field.ty {
+ self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
+ } else {
+ self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
+ }
+ }
+ Ok(())
+ }
+ _ => span_bug!(
+ self.cur_span(),
+ "unsize_into: invalid conversion: {:?} -> {:?}",
+ src.layout,
+ dest.layout
+ ),
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
new file mode 100644
index 000000000..150d6589b
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -0,0 +1,1019 @@
+use std::cell::Cell;
+use std::fmt;
+use std::mem;
+
+use rustc_hir::{self as hir, def_id::DefId, definitions::DefPathData};
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{InterpError, InvalidProgramInfo};
+use rustc_middle::ty::layout::{
+ self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
+ TyAndLayout,
+};
+use rustc_middle::ty::{
+ self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable,
+};
+use rustc_mir_dataflow::storage::always_storage_live_locals;
+use rustc_session::Limit;
+use rustc_span::{Pos, Span};
+use rustc_target::abi::{call::FnAbi, Align, HasDataLayout, Size, TargetDataLayout};
+
+use super::{
+ AllocId, GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace,
+ MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, PointerArithmetic, Provenance,
+ Scalar, ScalarMaybeUninit, StackPopJump,
+};
+use crate::transform::validate::equal_up_to_regions;
+
+pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+ /// Stores the `Machine` instance.
+ ///
+ /// Note: the stack is provided by the machine.
+ pub machine: M,
+
+ /// The results of the type checker, from rustc.
+ /// The span in this is the "root" of the evaluation, i.e., the const
+ /// we are evaluating (if this is CTFE).
+ pub tcx: TyCtxtAt<'tcx>,
+
+ /// Bounds in scope for polymorphic evaluations.
+ pub(crate) param_env: ty::ParamEnv<'tcx>,
+
+ /// The virtual memory system.
+ pub memory: Memory<'mir, 'tcx, M>,
+
+ /// The recursion limit (cached from `tcx.recursion_limit(())`)
+ pub recursion_limit: Limit,
+}
+
+// The Phantomdata exists to prevent this type from being `Send`. If it were sent across a thread
+// boundary and dropped in the other thread, it would exit the span in the other thread.
+struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>);
+
+impl SpanGuard {
+ /// By default a `SpanGuard` does nothing.
+ fn new() -> Self {
+ Self(tracing::Span::none(), std::marker::PhantomData)
+ }
+
+ /// If a span is entered, we exit the previous span (if any, normally none) and enter the
+ /// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of
+    /// `Frame` by creating a dummy span to begin with and then entering it once the frame has
+ /// been pushed.
+ fn enter(&mut self, span: tracing::Span) {
+ // This executes the destructor on the previous instance of `SpanGuard`, ensuring that
+ // we never enter or exit more spans than vice versa. Unless you `mem::leak`, then we
+ // can't protect the tracing stack, but that'll just lead to weird logging, no actual
+ // problems.
+ *self = Self(span, std::marker::PhantomData);
+ self.0.with_subscriber(|(id, dispatch)| {
+ dispatch.enter(id);
+ });
+ }
+}
+
+impl Drop for SpanGuard {
+ fn drop(&mut self) {
+ self.0.with_subscriber(|(id, dispatch)| {
+ dispatch.exit(id);
+ });
+ }
+}
+
+/// A stack frame.
+pub struct Frame<'mir, 'tcx, Prov: Provenance = AllocId, Extra = ()> {
+ ////////////////////////////////////////////////////////////////////////////////
+ // Function and callsite information
+ ////////////////////////////////////////////////////////////////////////////////
+ /// The MIR for the function called on this frame.
+ pub body: &'mir mir::Body<'tcx>,
+
+ /// The def_id and substs of the current function.
+ pub instance: ty::Instance<'tcx>,
+
+ /// Extra data for the machine.
+ pub extra: Extra,
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Return place and locals
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Work to perform when returning from this function.
+ pub return_to_block: StackPopCleanup,
+
+ /// The location where the result of the current stack frame should be written to,
+ /// and its layout in the caller.
+ pub return_place: PlaceTy<'tcx, Prov>,
+
+ /// The list of locals for this stack frame, stored in order as
+ /// `[return_ptr, arguments..., variables..., temporaries...]`.
+ /// The locals are stored as `Option<Value>`s.
+ /// `None` represents a local that is currently dead, while a live local
+ /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
+ ///
+ /// Do *not* access this directly; always go through the machine hook!
+ pub locals: IndexVec<mir::Local, LocalState<'tcx, Prov>>,
+
+ /// The span of the `tracing` crate is stored here.
+ /// When the guard is dropped, the span is exited. This gives us
+ /// a full stack trace on all tracing statements.
+ tracing_span: SpanGuard,
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Current position within the function
+ ////////////////////////////////////////////////////////////////////////////////
+ /// If this is `Err`, we are not currently executing any particular statement in
+ /// this frame (can happen e.g. during frame initialization, and during unwinding on
+ /// frames without cleanup code).
+ /// We basically abuse `Result` as `Either`.
+ ///
+ /// Needs to be public because ConstProp does unspeakable things to it.
+ pub loc: Result<mir::Location, Span>,
+}
+
+/// What we store about a frame in an interpreter backtrace.
+#[derive(Debug)]
+pub struct FrameInfo<'tcx> {
+ pub instance: ty::Instance<'tcx>,
+ pub span: Span,
+ pub lint_root: Option<hir::HirId>,
+}
+
+/// Unwind information.
+#[derive(Clone, Copy, Eq, PartialEq, Debug)]
+pub enum StackPopUnwind {
+ /// The cleanup block.
+ Cleanup(mir::BasicBlock),
+ /// No cleanup needs to be done.
+ Skip,
+ /// Unwinding is not allowed (UB).
+ NotAllowed,
+}
+
+#[derive(Clone, Copy, Eq, PartialEq, Debug)] // Miri debug-prints these
+pub enum StackPopCleanup {
+ /// Jump to the next block in the caller, or cause UB if None (that's a function
+ /// that may never return). Also store layout of return place so
+ /// we can validate it at that layout.
+ /// `ret` stores the block we jump to on a normal return, while `unwind`
+ /// stores the block used for cleanup during unwinding.
+ Goto { ret: Option<mir::BasicBlock>, unwind: StackPopUnwind },
+ /// The root frame of the stack: nowhere else to jump to.
+ /// `cleanup` says whether locals are deallocated. Static computation
+ /// wants them leaked to intern what they need (and just throw away
+ /// the entire `ecx` when it is done).
+ Root { cleanup: bool },
+}
+
+/// State of a local variable including a memoized layout
+#[derive(Clone, Debug)]
+pub struct LocalState<'tcx, Prov: Provenance = AllocId> {
+ pub value: LocalValue<Prov>,
+ /// Don't modify if `Some`, this is only used to prevent computing the layout twice
+ pub layout: Cell<Option<TyAndLayout<'tcx>>>,
+}
+
+/// Current value of a local variable
+#[derive(Copy, Clone, Debug)] // Miri debug-prints these
+pub enum LocalValue<Prov: Provenance = AllocId> {
+ /// This local is not currently alive, and cannot be used at all.
+ Dead,
+ /// A normal, live local.
+ /// Mostly for convenience, we re-use the `Operand` type here.
+ /// This is an optimization over just always having a pointer here;
+ /// we can thus avoid doing an allocation when the local just stores
+ /// immediate values *and* never has its address taken.
+ Live(Operand<Prov>),
+}
+
+impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
+ /// Read the local's value or error if the local is not yet live or not live anymore.
+ ///
+ /// Note: This may only be invoked from the `Machine::access_local` hook and not from
+ /// anywhere else. You may be invalidating machine invariants if you do!
+ #[inline]
+ pub fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
+ match &self.value {
+ LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
+ LocalValue::Live(val) => Ok(val),
+ }
+ }
+
+ /// Overwrite the local. If the local can be overwritten in place, return a reference
+ /// to do so; otherwise return the `MemPlace` to consult instead.
+ ///
+ /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
+ /// anywhere else. You may be invalidating machine invariants if you do!
+ #[inline]
+ pub fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
+ match &mut self.value {
+ LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
+ LocalValue::Live(val) => Ok(val),
+ }
+ }
+}
+
+impl<'mir, 'tcx, Prov: Provenance> Frame<'mir, 'tcx, Prov> {
+ pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Prov, Extra> {
+ Frame {
+ body: self.body,
+ instance: self.instance,
+ return_to_block: self.return_to_block,
+ return_place: self.return_place,
+ locals: self.locals,
+ loc: self.loc,
+ extra,
+ tracing_span: self.tracing_span,
+ }
+ }
+}
+
+impl<'mir, 'tcx, Prov: Provenance, Extra> Frame<'mir, 'tcx, Prov, Extra> {
+ /// Get the current location within the Frame.
+ ///
+ /// If this is `Err`, we are not currently executing any particular statement in
+ /// this frame (can happen e.g. during frame initialization, and during unwinding on
+ /// frames without cleanup code).
+ /// We basically abuse `Result` as `Either`.
+ ///
+ /// Used by priroda.
+ pub fn current_loc(&self) -> Result<mir::Location, Span> {
+ self.loc
+ }
+
+ /// Return the `SourceInfo` of the current instruction.
+ pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
+ self.loc.ok().map(|loc| self.body.source_info(loc))
+ }
+
+ pub fn current_span(&self) -> Span {
+ match self.loc {
+ Ok(loc) => self.body.source_info(loc).span,
+ Err(span) => span,
+ }
+ }
+}
+
+impl<'tcx> fmt::Display for FrameInfo<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ty::tls::with(|tcx| {
+ if tcx.def_key(self.instance.def_id()).disambiguated_data.data
+ == DefPathData::ClosureExpr
+ {
+ write!(f, "inside closure")?;
+ } else {
+ write!(f, "inside `{}`", self.instance)?;
+ }
+ if !self.span.is_dummy() {
+ let sm = tcx.sess.source_map();
+ let lo = sm.lookup_char_pos(self.span.lo());
+ write!(
+ f,
+ " at {}:{}:{}",
+ sm.filename_for_diagnostics(&lo.file.name),
+ lo.line,
+ lo.col.to_usize() + 1
+ )?;
+ }
+ Ok(())
+ })
+ }
+}
+
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M>
+where
+ M: Machine<'mir, 'tcx>,
+{
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ *self.tcx
+ }
+}
+
+impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M>
+where
+ M: Machine<'mir, 'tcx>,
+{
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'mir, 'tcx, M> {
+ type LayoutOfResult = InterpResult<'tcx, TyAndLayout<'tcx>>;
+
+ #[inline]
+ fn layout_tcx_at_span(&self) -> Span {
+ // Using the cheap root span for performance.
+ self.tcx.span
+ }
+
+ #[inline]
+ fn handle_layout_err(
+ &self,
+ err: LayoutError<'tcx>,
+ _: Span,
+ _: Ty<'tcx>,
+ ) -> InterpErrorInfo<'tcx> {
+ err_inval!(Layout(err)).into()
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> FnAbiOfHelpers<'tcx> for InterpCx<'mir, 'tcx, M> {
+ type FnAbiOfResult = InterpResult<'tcx, &'tcx FnAbi<'tcx, Ty<'tcx>>>;
+
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ _span: Span,
+ _fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> InterpErrorInfo<'tcx> {
+ match err {
+ FnAbiError::Layout(err) => err_inval!(Layout(err)).into(),
+ FnAbiError::AdjustForForeignAbi(err) => {
+ err_inval!(FnAbiAdjustForForeignAbi(err)).into()
+ }
+ }
+ }
+}
+
+/// Test if it is valid for a MIR assignment to assign `src`-typed place to `dest`-typed value.
+/// This test should be symmetric, as it is primarily about layout compatibility.
+pub(super) fn mir_assign_valid_types<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    src: TyAndLayout<'tcx>,
+    dest: TyAndLayout<'tcx>,
+) -> bool {
+    // Type-changing assignments can happen when subtyping is used. While
+    // all normal lifetimes are erased, higher-ranked types with their
+    // late-bound lifetimes are still around and can lead to type
+    // differences. So we compare ignoring lifetimes; if even that fails,
+    // the assignment is definitely invalid.
+    if !equal_up_to_regions(tcx, param_env, src.ty, dest.ty) {
+        return false;
+    }
+    // Make sure the layout is equal, too -- just to be safe. Miri really
+    // needs layout equality. For performance reason we skip this check when
+    // the types are equal. Equal types *can* have different layouts when
+    // enum downcast is involved (as enum variants carry the type of the
+    // enum), but those should never occur in assignments.
+    if cfg!(debug_assertions) || src.ty != dest.ty {
+        assert_eq!(src.layout, dest.layout);
+    }
+    true
+}
+
+/// Use the already known layout if given (but sanity check in debug mode),
+/// or compute the layout.
+#[cfg_attr(not(debug_assertions), inline(always))]
+pub(super) fn from_known_layout<'tcx>(
+    tcx: TyCtxtAt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    known_layout: Option<TyAndLayout<'tcx>>,
+    compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
+) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+    // Without a caller-provided layout we have no choice but to compute it.
+    let Some(known_layout) = known_layout else {
+        return compute();
+    };
+    // In debug builds, recompute the layout anyway and cross-check it against
+    // what the caller handed us (compatibility is judged like a MIR assignment).
+    if cfg!(debug_assertions) {
+        let check_layout = compute()?;
+        if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) {
+            span_bug!(
+                tcx.span,
+                "expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
+                known_layout.ty,
+                check_layout.ty,
+            );
+        }
+    }
+    Ok(known_layout)
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Create a fresh interpreter context with empty memory and no stack frames.
+    pub fn new(
+        tcx: TyCtxt<'tcx>,
+        root_span: Span,
+        param_env: ty::ParamEnv<'tcx>,
+        machine: M,
+    ) -> Self {
+        InterpCx {
+            machine,
+            tcx: tcx.at(root_span),
+            param_env,
+            memory: Memory::new(),
+            recursion_limit: tcx.recursion_limit(),
+        }
+    }
+
+    /// The span of the currently executing statement; falls back to the root span
+    /// when the stack is empty.
+    #[inline(always)]
+    pub fn cur_span(&self) -> Span {
+        // This deliberately does *not* honor `requires_caller_location` since it is used for much
+        // more than just panics.
+        self.stack().last().map_or(self.tcx.span, |f| f.current_span())
+    }
+
+    /// Read-only view of the call stack (stored by the machine, not by `InterpCx` itself).
+    #[inline(always)]
+    pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>] {
+        M::stack(self)
+    }
+
+    /// Mutable access to the call stack.
+    #[inline(always)]
+    pub(crate) fn stack_mut(
+        &mut self,
+    ) -> &mut Vec<Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>> {
+        M::stack_mut(self)
+    }
+
+    /// Index of the topmost frame. Panics if the stack is empty.
+    #[inline(always)]
+    pub fn frame_idx(&self) -> usize {
+        let stack = self.stack();
+        assert!(!stack.is_empty());
+        stack.len() - 1
+    }
+
+    /// The currently executing (topmost) frame. Panics if the stack is empty.
+    #[inline(always)]
+    pub fn frame(&self) -> &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra> {
+        self.stack().last().expect("no call frames exist")
+    }
+
+    /// Mutable access to the currently executing frame. Panics if the stack is empty.
+    #[inline(always)]
+    pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::Provenance, M::FrameExtra> {
+        self.stack_mut().last_mut().expect("no call frames exist")
+    }
+
+    /// The MIR body of the currently executing frame.
+    #[inline(always)]
+    pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
+        self.frame().body
+    }
+
+    /// Sign-extend `value` from the width of `ty` to the full 128 bits.
+    /// Panics if `ty` is not a signed integer type.
+    #[inline(always)]
+    pub fn sign_extend(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
+        assert!(ty.abi.is_signed());
+        ty.size.sign_extend(value)
+    }
+
+    /// Truncate `value` to the width of `ty`.
+    #[inline(always)]
+    pub fn truncate(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
+        ty.size.truncate(value)
+    }
+
+    /// Whether `ty` is `Freeze` (i.e., has no `UnsafeCell`) in our param env.
+    #[inline]
+    pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
+        ty.is_freeze(self.tcx, self.param_env)
+    }
+
+    /// Load the MIR for the given instance (or one of its promoteds), refusing to
+    /// proceed if typeck already reported errors for that body.
+    pub fn load_mir(
+        &self,
+        instance: ty::InstanceDef<'tcx>,
+        promoted: Option<mir::Promoted>,
+    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+        let def = instance.with_opt_param();
+        trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
+        let body = if let Some(promoted) = promoted {
+            &self.tcx.promoted_mir_opt_const_arg(def)[promoted]
+        } else {
+            M::load_mir(self, instance)?
+        };
+        // do not continue if typeck errors occurred (can only occur in local crate)
+        if let Some(err) = body.tainted_by_errors {
+            throw_inval!(AlreadyReported(err));
+        }
+        Ok(body)
+    }
+
+    /// Call this on things you got out of the MIR (so it is as generic as the current
+    /// stack frame), to bring it into the proper environment for this interpreter.
+    pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
+        &self,
+        value: T,
+    ) -> Result<T, InterpError<'tcx>> {
+        self.subst_from_frame_and_normalize_erasing_regions(self.frame(), value)
+    }
+
+    /// Call this on things you got out of the MIR (so it is as generic as the provided
+    /// stack frame), to bring it into the proper environment for this interpreter.
+    pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
+        &self,
+        frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
+        value: T,
+    ) -> Result<T, InterpError<'tcx>> {
+        frame
+            .instance
+            .try_subst_mir_and_normalize_erasing_regions(*self.tcx, self.param_env, value)
+            .map_err(|e| {
+                // Normalization failing here is a compiler bug; record it but keep going
+                // with a `TooGeneric` error so the interpreter can bail out gracefully.
+                self.tcx.sess.delay_span_bug(
+                    self.cur_span(),
+                    format!("failed to normalize {}", e.get_type_for_failure()).as_str(),
+                );
+
+                InterpError::InvalidProgram(InvalidProgramInfo::TooGeneric)
+            })
+    }
+
+    /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
+    pub(super) fn resolve(
+        &self,
+        def: ty::WithOptConstParam<DefId>,
+        substs: SubstsRef<'tcx>,
+    ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
+        trace!("resolve: {:?}, {:#?}", def, substs);
+        trace!("param_env: {:#?}", self.param_env);
+        trace!("substs: {:#?}", substs);
+        match ty::Instance::resolve_opt_const_arg(*self.tcx, self.param_env, def, substs) {
+            Ok(Some(instance)) => Ok(instance),
+            Ok(None) => throw_inval!(TooGeneric),
+
+            // FIXME(eddyb) this could be a bit more specific than `AlreadyReported`.
+            Err(error_reported) => throw_inval!(AlreadyReported(error_reported)),
+        }
+    }
+
+    /// Compute (and cache) the layout of a local in the given frame, optionally
+    /// double-checking against a caller-provided `layout`.
+    #[inline(always)]
+    pub fn layout_of_local(
+        &self,
+        frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
+        local: mir::Local,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+        // `const_prop` runs into this with an invalid (empty) frame, so we
+        // have to support that case (mostly by skipping all caching).
+        match frame.locals.get(local).and_then(|state| state.layout.get()) {
+            None => {
+                let layout = from_known_layout(self.tcx, self.param_env, layout, || {
+                    let local_ty = frame.body.local_decls[local].ty;
+                    let local_ty =
+                        self.subst_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
+                    self.layout_of(local_ty)
+                })?;
+                if let Some(state) = frame.locals.get(local) {
+                    // Layouts of locals are requested a lot, so we cache them.
+                    state.layout.set(Some(layout));
+                }
+                Ok(layout)
+            }
+            Some(layout) => Ok(layout),
+        }
+    }
+
+    /// Returns the actual dynamic size and alignment of the place at the given type.
+    /// Only the "meta" (metadata) part of the place matters.
+    /// This can fail to provide an answer for extern types.
+    pub(super) fn size_and_align_of(
+        &self,
+        metadata: &MemPlaceMeta<M::Provenance>,
+        layout: &TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
+        if !layout.is_unsized() {
+            return Ok(Some((layout.size, layout.align.abi)));
+        }
+        match layout.ty.kind() {
+            ty::Adt(..) | ty::Tuple(..) => {
+                // First get the size of all statically known fields.
+                // Don't use type_of::sizing_type_of because that expects t to be sized,
+                // and it also rounds up to alignment, which we want to avoid,
+                // as the unsized field's alignment could be smaller.
+                assert!(!layout.ty.is_simd());
+                assert!(layout.fields.count() > 0);
+                trace!("DST layout: {:?}", layout);
+
+                let sized_size = layout.fields.offset(layout.fields.count() - 1);
+                let sized_align = layout.align.abi;
+                trace!(
+                    "DST {} statically sized prefix size: {:?} align: {:?}",
+                    layout.ty,
+                    sized_size,
+                    sized_align
+                );
+
+                // Recurse to get the size of the dynamically sized field (must be
+                // the last field). Can't have foreign types here, how would we
+                // adjust alignment and size for them?
+                let field = layout.field(self, layout.fields.count() - 1);
+                let Some((unsized_size, unsized_align)) = self.size_and_align_of(metadata, &field)? else {
+                    // A field with an extern type. We don't know the actual dynamic size
+                    // or the alignment.
+                    return Ok(None);
+                };
+
+                // FIXME (#26403, #27023): We should be adding padding
+                // to `sized_size` (to accommodate the `unsized_align`
+                // required of the unsized field that follows) before
+                // summing it with `sized_size`. (Note that since #26403
+                // is unfixed, we do not yet add the necessary padding
+                // here. But this is where the add would go.)
+
+                // Return the sum of sizes and max of aligns.
+                let size = sized_size + unsized_size; // `Size` addition
+
+                // Choose max of two known alignments (combined value must
+                // be aligned according to more restrictive of the two).
+                let align = sized_align.max(unsized_align);
+
+                // Issue #27023: must add any necessary padding to `size`
+                // (to make it a multiple of `align`) before returning it.
+                let size = size.align_to(align);
+
+                // Check if this brought us over the size limit.
+                if size > self.max_size_of_val() {
+                    throw_ub!(InvalidMeta("total size is bigger than largest supported object"));
+                }
+                Ok(Some((size, align)))
+            }
+            ty::Dynamic(..) => {
+                let vtable = metadata.unwrap_meta().to_pointer(self)?;
+                // Read size and align from vtable (already checks size).
+                Ok(Some(self.get_vtable_size_and_align(vtable)?))
+            }
+
+            ty::Slice(_) | ty::Str => {
+                let len = metadata.unwrap_meta().to_machine_usize(self)?;
+                let elem = layout.field(self, 0);
+
+                // Make sure the slice is not too big.
+                let size = elem.size.bytes().saturating_mul(len); // we rely on `max_size_of_val` being smaller than `u64::MAX`.
+                let size = Size::from_bytes(size);
+                if size > self.max_size_of_val() {
+                    throw_ub!(InvalidMeta("slice is bigger than largest supported object"));
+                }
+                Ok(Some((size, elem.align.abi)))
+            }
+
+            ty::Foreign(_) => Ok(None),
+
+            _ => span_bug!(self.cur_span(), "size_and_align_of::<{:?}> not supported", layout.ty),
+        }
+    }
+    /// Convenience wrapper for `size_and_align_of` taking the metadata and layout
+    /// from the given `mplace`.
+    #[inline]
+    pub fn size_and_align_of_mplace(
+        &self,
+        mplace: &MPlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
+        self.size_and_align_of(&mplace.meta, &mplace.layout)
+    }
+
+    /// Push a new stack frame for `instance`, evaluating its required consts and
+    /// initializing its locals (always-live locals start as uninitialized, the
+    /// rest start dead).
+    #[instrument(skip(self, body, return_place, return_to_block), level = "debug")]
+    pub fn push_stack_frame(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        body: &'mir mir::Body<'tcx>,
+        return_place: &PlaceTy<'tcx, M::Provenance>,
+        return_to_block: StackPopCleanup,
+    ) -> InterpResult<'tcx> {
+        trace!("body: {:#?}", body);
+        // first push a stack frame so we have access to the local substs
+        let pre_frame = Frame {
+            body,
+            loc: Err(body.span), // Span used for errors caused during preamble.
+            return_to_block,
+            return_place: return_place.clone(),
+            // empty local array, we fill it in below, after we are inside the stack frame and
+            // all methods actually know about the frame
+            locals: IndexVec::new(),
+            instance,
+            tracing_span: SpanGuard::new(),
+            extra: (),
+        };
+        let frame = M::init_frame_extra(self, pre_frame)?;
+        self.stack_mut().push(frame);
+
+        // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
+        for const_ in &body.required_consts {
+            let span = const_.span;
+            let const_ =
+                self.subst_from_current_frame_and_normalize_erasing_regions(const_.literal)?;
+            self.mir_const_to_op(&const_, None).map_err(|err| {
+                // If there was an error, set the span of the current frame to this constant.
+                // Avoiding doing this when evaluation succeeds.
+                self.frame_mut().loc = Err(span);
+                err
+            })?;
+        }
+
+        // Most locals are initially dead.
+        let dummy = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
+        let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
+
+        // Now mark those locals as live that have no `Storage*` annotations.
+        let always_live = always_storage_live_locals(self.body());
+        for local in locals.indices() {
+            if always_live.contains(local) {
+                locals[local].value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+            }
+        }
+        // done
+        self.frame_mut().locals = locals;
+        M::after_stack_push(self)?;
+        self.frame_mut().loc = Ok(mir::Location::START);
+
+        let span = info_span!("frame", "{}", instance);
+        self.frame_mut().tracing_span.enter(span);
+
+        Ok(())
+    }
+
+    /// Jump to the given block.
+    #[inline]
+    pub fn go_to_block(&mut self, target: mir::BasicBlock) {
+        self.frame_mut().loc = Ok(mir::Location { block: target, statement_index: 0 });
+    }
+
+    /// *Return* to the given `target` basic block.
+    /// Do *not* use for unwinding! Use `unwind_to_block` instead.
+    ///
+    /// If `target` is `None`, that indicates the function cannot return, so we raise UB.
+    pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
+        if let Some(target) = target {
+            self.go_to_block(target);
+            Ok(())
+        } else {
+            throw_ub!(Unreachable)
+        }
+    }
+
+    /// *Unwind* to the given `target` basic block.
+    /// Do *not* use for returning! Use `return_to_block` instead.
+    ///
+    /// If `target` is `StackPopUnwind::Skip`, that indicates the function does not need cleanup
+    /// during unwinding, and we will just keep propagating that upwards.
+    ///
+    /// If `target` is `StackPopUnwind::NotAllowed`, that indicates the function does not allow
+    /// unwinding, and doing so is UB.
+    pub fn unwind_to_block(&mut self, target: StackPopUnwind) -> InterpResult<'tcx> {
+        self.frame_mut().loc = match target {
+            StackPopUnwind::Cleanup(block) => Ok(mir::Location { block, statement_index: 0 }),
+            StackPopUnwind::Skip => Err(self.frame_mut().body.span),
+            StackPopUnwind::NotAllowed => {
+                throw_ub_format!("unwinding past a stack frame that does not allow unwinding")
+            }
+        };
+        Ok(())
+    }
+
+    /// Pops the current frame from the stack, deallocating the
+    /// memory for allocated locals.
+    ///
+    /// If `unwinding` is `false`, then we are performing a normal return
+    /// from a function. In this case, we jump back into the frame of the caller,
+    /// and continue execution as normal.
+    ///
+    /// If `unwinding` is `true`, then we are in the middle of a panic,
+    /// and need to unwind this frame. In this case, we jump to the
+    /// `cleanup` block for the function, which is responsible for running
+    /// `Drop` impls for any locals that have been initialized at this point.
+    /// The cleanup block ends with a special `Resume` terminator, which will
+    /// cause us to continue unwinding.
+    #[instrument(skip(self), level = "debug")]
+    pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
+        info!(
+            "popping stack frame ({})",
+            if unwinding { "during unwinding" } else { "returning from function" }
+        );
+
+        // Check `unwinding`.
+        assert_eq!(
+            unwinding,
+            match self.frame().loc {
+                Ok(loc) => self.body().basic_blocks()[loc.block].is_cleanup,
+                Err(_) => true,
+            }
+        );
+        if unwinding && self.frame_idx() == 0 {
+            throw_ub_format!("unwinding past the topmost frame of the stack");
+        }
+
+        // Copy return value. Must of course happen *before* we deallocate the locals.
+        let copy_ret_result = if !unwinding {
+            let op = self
+                .local_to_op(self.frame(), mir::RETURN_PLACE, None)
+                .expect("return place should always be live");
+            let dest = self.frame().return_place.clone();
+            let err = self.copy_op(&op, &dest, /*allow_transmute*/ true);
+            trace!("return value: {:?}", self.dump_place(*dest));
+            // We delay actually short-circuiting on this error until *after* the stack frame is
+            // popped, since we want this error to be attributed to the caller, whose type defines
+            // this transmute.
+            err
+        } else {
+            Ok(())
+        };
+
+        // Cleanup: deallocate locals.
+        // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
+        // We do this while the frame is still on the stack, so errors point to the callee.
+        let return_to_block = self.frame().return_to_block;
+        let cleanup = match return_to_block {
+            StackPopCleanup::Goto { .. } => true,
+            StackPopCleanup::Root { cleanup, .. } => cleanup,
+        };
+        if cleanup {
+            // We need to take the locals out, since we need to mutate while iterating.
+            let locals = mem::take(&mut self.frame_mut().locals);
+            for local in &locals {
+                self.deallocate_local(local.value)?;
+            }
+        }
+
+        // All right, now it is time to actually pop the frame.
+        // Note that its locals are gone already, but that's fine.
+        let frame =
+            self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
+        // Report error from return value copy, if any.
+        copy_ret_result?;
+
+        // If we are not doing cleanup, also skip everything else.
+        if !cleanup {
+            assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
+            assert!(!unwinding, "tried to skip cleanup during unwinding");
+            // Skip machine hook.
+            return Ok(());
+        }
+        if M::after_stack_pop(self, frame, unwinding)? == StackPopJump::NoJump {
+            // The hook already did everything.
+            return Ok(());
+        }
+
+        // Normal return, figure out where to jump.
+        if unwinding {
+            // Follow the unwind edge.
+            let unwind = match return_to_block {
+                StackPopCleanup::Goto { unwind, .. } => unwind,
+                StackPopCleanup::Root { .. } => {
+                    panic!("encountered StackPopCleanup::Root when unwinding!")
+                }
+            };
+            self.unwind_to_block(unwind)
+        } else {
+            // Follow the normal return edge.
+            match return_to_block {
+                StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret),
+                StackPopCleanup::Root { .. } => {
+                    assert!(
+                        self.stack().is_empty(),
+                        "only the topmost frame can have StackPopCleanup::Root"
+                    );
+                    Ok(())
+                }
+            }
+        }
+    }
+
+    /// Mark a storage as live, killing the previous content.
+    pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+        assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
+        trace!("{:?} is now live", local);
+
+        let local_val = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+        // StorageLive expects the local to be dead, and marks it live.
+        let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
+        if !matches!(old, LocalValue::Dead) {
+            throw_ub_format!("StorageLive on a local that was already live");
+        }
+        Ok(())
+    }
+
+    /// Mark a storage as dead, deallocating its backing memory (if any).
+    pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+        assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
+        trace!("{:?} is now dead", local);
+
+        // It is entirely okay for this local to be already dead (at least that's how we currently generate MIR)
+        let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
+        self.deallocate_local(old)?;
+        Ok(())
+    }
+
+    /// Deallocate the backing allocation of a local, if it had one (i.e., if it was
+    /// stored "indirectly" in memory rather than as an immediate).
+    #[instrument(skip(self), level = "debug")]
+    fn deallocate_local(&mut self, local: LocalValue<M::Provenance>) -> InterpResult<'tcx> {
+        if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
+            // All locals have a backing allocation, even if the allocation is empty
+            // due to the local having ZST type. Hence we can `unwrap`.
+            trace!(
+                "deallocating local {:?}: {:?}",
+                local,
+                // Locals always have a `alloc_id` (they are never the result of a int2ptr).
+                self.dump_alloc(ptr.provenance.unwrap().get_alloc_id().unwrap())
+            );
+            self.deallocate_ptr(ptr, None, MemoryKind::Stack)?;
+        };
+        Ok(())
+    }
+
+    /// Evaluate the given global (constant or static) to a memory allocation,
+    /// going through the query system so results are cached.
+    pub fn eval_to_allocation(
+        &self,
+        gid: GlobalId<'tcx>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
+        // and thus don't care about the parameter environment. While we could just use
+        // `self.param_env`, that would mean we invoke the query to evaluate the static
+        // with different parameter environments, thus causing the static to be evaluated
+        // multiple times.
+        let param_env = if self.tcx.is_static(gid.instance.def_id()) {
+            ty::ParamEnv::reveal_all()
+        } else {
+            self.param_env
+        };
+        let param_env = param_env.with_const();
+        // Use a precise span for better cycle errors.
+        let val = self.tcx.at(self.cur_span()).eval_to_allocation_raw(param_env.and(gid))?;
+        self.raw_const_to_mplace(val)
+    }
+
+    /// Debug helper: wrap a place in a `Debug`-printable struct (see `PlacePrinter`).
+    #[must_use]
+    pub fn dump_place(&self, place: Place<M::Provenance>) -> PlacePrinter<'_, 'mir, 'tcx, M> {
+        PlacePrinter { ecx: self, place }
+    }
+
+    /// Collect per-frame span/instance/lint-root info for the whole stack,
+    /// innermost frame first.
+    #[must_use]
+    pub fn generate_stacktrace(&self) -> Vec<FrameInfo<'tcx>> {
+        let mut frames = Vec::new();
+        // This deliberately does *not* honor `requires_caller_location` since it is used for much
+        // more than just panics.
+        for frame in self.stack().iter().rev() {
+            let lint_root = frame.current_source_info().and_then(|source_info| {
+                match &frame.body.source_scopes[source_info.scope].local_data {
+                    mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
+                    mir::ClearCrossCrate::Clear => None,
+                }
+            });
+            let span = frame.current_span();
+
+            frames.push(FrameInfo { span, instance: frame.instance, lint_root });
+        }
+        trace!("generate stacktrace: {:#?}", frames);
+        frames
+    }
+}
+
+#[doc(hidden)]
+/// Helper struct for the `dump_place` function.
+pub struct PlacePrinter<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    // The interpreter context the place lives in (needed to resolve locals and allocations).
+    ecx: &'a InterpCx<'mir, 'tcx, M>,
+    // The place to pretty-print via the `Debug` impl below.
+    place: Place<M::Provenance>,
+}
+
+impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
+    for PlacePrinter<'a, 'mir, 'tcx, M>
+{
+    // Dump the place: either a local in some frame (with its current value and any
+    // allocations it references), or a direct pointer into memory.
+    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self.place {
+            Place::Local { frame, local } => {
+                // Collect the alloc ids referenced by the local's value so we can
+                // dump their contents at the end.
+                let mut allocs = Vec::new();
+                write!(fmt, "{:?}", local)?;
+                if frame != self.ecx.frame_idx() {
+                    write!(fmt, " ({} frames up)", self.ecx.frame_idx() - frame)?;
+                }
+                write!(fmt, ":")?;
+
+                match self.ecx.stack()[frame].locals[local].value {
+                    LocalValue::Dead => write!(fmt, " is dead")?,
+                    LocalValue::Live(Operand::Immediate(Immediate::Uninit)) => {
+                        write!(fmt, " is uninitialized")?
+                    }
+                    LocalValue::Live(Operand::Indirect(mplace)) => {
+                        write!(
+                            fmt,
+                            " by {} ref {:?}:",
+                            match mplace.meta {
+                                MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
+                                MemPlaceMeta::None => String::new(),
+                            },
+                            mplace.ptr,
+                        )?;
+                        allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
+                    }
+                    LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
+                        write!(fmt, " {:?}", val)?;
+                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val {
+                            allocs.push(ptr.provenance.get_alloc_id());
+                        }
+                    }
+                    LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
+                        write!(fmt, " ({:?}, {:?})", val1, val2)?;
+                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val1 {
+                            allocs.push(ptr.provenance.get_alloc_id());
+                        }
+                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val2 {
+                            allocs.push(ptr.provenance.get_alloc_id());
+                        }
+                    }
+                }
+
+                write!(fmt, ": {:?}", self.ecx.dump_allocs(allocs.into_iter().flatten().collect()))
+            }
+            // A place that is directly an address in memory.
+            Place::Ptr(mplace) => match mplace.ptr.provenance.and_then(Provenance::get_alloc_id) {
+                Some(alloc_id) => {
+                    write!(fmt, "by ref {:?}: {:?}", mplace.ptr, self.ecx.dump_alloc(alloc_id))
+                }
+                ptr => write!(fmt, " integral by ref: {:?}", ptr),
+            },
+        }
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
new file mode 100644
index 000000000..376b8872c
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -0,0 +1,486 @@
+//! This module specifies the type based interner for constants.
+//!
+//! After a const evaluation has computed a value, before we destroy the const evaluator's session
+//! memory, we need to extract all memory allocations to the global memory pool so they stay around.
+//!
+//! In principle, this is not very complicated: we recursively walk the final value, follow all the
+//! pointers, and move all reachable allocations to the global `tcx` memory. The only complication
+//! is picking the right mutability for the allocations in a `static` initializer: we want to make
+//! as many allocations as possible immutable so LLVM can put them into read-only memory. At the
+//! same time, we need to make memory that could be mutated by the program mutable to avoid
+//! incorrect compilations. To achieve this, we do a type-based traversal of the final value,
+//! tracking mutable and shared references and `UnsafeCell` to determine the current mutability.
+//! (In principle, we could skip this type-based part for `const` and promoteds, as they need to be
+//! always immutable. At least for `const` however we use this opportunity to reject any `const`
+//! that contains allocations whose mutability we cannot identify.)
+
+use super::validity::RefTracking;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty::{self, layout::TyAndLayout, Ty};
+
+use rustc_ast::Mutability;
+
+use super::{
+ AllocId, Allocation, ConstAllocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy,
+ ValueVisitor,
+};
+use crate::const_eval;
+
+/// Shorthand for the machine bounds the interner needs: a compile-time machine
+/// whose provenance is plain `AllocId`s and whose memory map is the simple
+/// `FxHashMap` used by const eval.
+pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
+    'mir,
+    'tcx,
+    MemoryKind = T,
+    Provenance = AllocId,
+    ExtraFnVal = !,
+    FrameExtra = (),
+    AllocExtra = (),
+    MemoryMap = FxHashMap<AllocId, (MemoryKind<T>, Allocation)>,
+>;
+
+/// Type-based visitor driving the interning walk over a final const-eval value.
+struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>> {
+    /// The ectx from which we intern.
+    ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
+    /// Previously encountered safe references.
+    ref_tracking: &'rt mut RefTracking<(MPlaceTy<'tcx>, InternMode)>,
+    /// A list of all encountered allocations. After type-based interning, we traverse this list to
+    /// also intern allocations that are only referenced by a raw pointer or inside a union.
+    leftover_allocations: &'rt mut FxHashSet<AllocId>,
+    /// The root kind of the value that we're looking at. This field is never mutated for a
+    /// particular allocation. It is primarily used to make as many allocations as possible
+    /// read-only so LLVM can place them in const memory.
+    mode: InternMode,
+    /// This field stores whether we are *currently* inside an `UnsafeCell`. This can affect
+    /// the intern mode of references we encounter.
+    inside_unsafe_cell: bool,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
+enum InternMode {
+    /// A static and its current mutability. Below shared references inside a `static mut`,
+    /// this is *immutable*, and below mutable references inside an `UnsafeCell`, this
+    /// is *mutable*.
+    Static(hir::Mutability),
+    /// A `const`. Allocations reached in this mode are always made immutable.
+    Const,
+}
+
+/// Signalling data structure to ensure we don't recurse
+/// into the memory of other constants or statics.
+/// Returned by `intern_shallow` when the allocation was not in the local memory map.
+struct IsStaticOrFn;
+
+/// Intern an allocation without looking at its children.
+/// `mode` is the mode of the environment where we found this pointer.
+/// `mutability` is the mutability of the place to be interned; even if that says
+/// `immutable` things might become mutable if `ty` is not frozen.
+/// `ty` can be `None` if there is no potential interior mutability
+/// to account for (e.g. for vtables).
+///
+/// Returns `Some(IsStaticOrFn)` if the allocation was *not* in the local memory
+/// map (it already lives in the global map, or is dangling), `None` otherwise.
+fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>(
+    ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
+    leftover_allocations: &'rt mut FxHashSet<AllocId>,
+    alloc_id: AllocId,
+    mode: InternMode,
+    ty: Option<Ty<'tcx>>,
+) -> Option<IsStaticOrFn> {
+    trace!("intern_shallow {:?} with {:?}", alloc_id, mode);
+    // remove allocation
+    let tcx = ecx.tcx;
+    let Some((kind, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) else {
+        // Pointer not found in local memory map. It is either a pointer to the global
+        // map, or dangling.
+        // If the pointer is dangling (neither in local nor global memory), we leave it
+        // to validation to error -- it has the much better error messages, pointing out where
+        // in the value the dangling reference lies.
+        // The `delay_span_bug` ensures that we don't forget such a check in validation.
+        if tcx.try_get_global_alloc(alloc_id).is_none() {
+            tcx.sess.delay_span_bug(ecx.tcx.span, "tried to intern dangling pointer");
+        }
+        // treat dangling pointers like other statics
+        // just to stop trying to recurse into them
+        return Some(IsStaticOrFn);
+    };
+    // This match is just a canary for future changes to `MemoryKind`, which most likely need
+    // changes in this function.
+    match kind {
+        MemoryKind::Stack
+        | MemoryKind::Machine(const_eval::MemoryKind::Heap)
+        | MemoryKind::CallerLocation => {}
+    }
+    // Set allocation mutability as appropriate. This is used by LLVM to put things into
+    // read-only memory, and also by Miri when evaluating other globals that
+    // access this one.
+    if let InternMode::Static(mutability) = mode {
+        // For this, we need to take into account `UnsafeCell`. When `ty` is `None`, we assume
+        // no interior mutability.
+        let frozen = ty.map_or(true, |ty| ty.is_freeze(ecx.tcx, ecx.param_env));
+        // For statics, allocation mutability is the combination of place mutability and
+        // type mutability.
+        // The entire allocation needs to be mutable if it contains an `UnsafeCell` anywhere.
+        let immutable = mutability == Mutability::Not && frozen;
+        if immutable {
+            alloc.mutability = Mutability::Not;
+        } else {
+            // Just making sure we are not "upgrading" an immutable allocation to mutable.
+            assert_eq!(alloc.mutability, Mutability::Mut);
+        }
+    } else {
+        // No matter what, *constants are never mutable*. Mutating them is UB.
+        // See const_eval::machine::MemoryExtra::can_access_statics for why
+        // immutability is so important.
+
+        // Validation will ensure that there is no `UnsafeCell` on an immutable allocation.
+        alloc.mutability = Mutability::Not;
+    };
+    // link the alloc id to the actual allocation
+    // Also queue every allocation this one points to; anything not reached by the
+    // type-based walk will be interned from `leftover_allocations` later.
+    leftover_allocations.extend(alloc.relocations().iter().map(|&(_, alloc_id)| alloc_id));
+    let alloc = tcx.intern_const_alloc(alloc);
+    tcx.set_alloc_id_memory(alloc_id, alloc);
+    None
+}
+
+impl<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>
+    InternVisitor<'rt, 'mir, 'tcx, M>
+{
+    /// Convenience wrapper forwarding to the free `intern_shallow` with this visitor's state.
+    fn intern_shallow(
+        &mut self,
+        alloc_id: AllocId,
+        mode: InternMode,
+        ty: Option<Ty<'tcx>>,
+    ) -> Option<IsStaticOrFn> {
+        intern_shallow(self.ecx, self.leftover_allocations, alloc_id, mode, ty)
+    }
+}
+
impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>
    ValueVisitor<'mir, 'tcx, M> for InternVisitor<'rt, 'mir, 'tcx, M>
{
    type V = MPlaceTy<'tcx>;

    #[inline(always)]
    fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
        &self.ecx
    }

    /// Visits an aggregate, walking its fields to find references that need interning,
    /// while tracking whether we cross an `UnsafeCell` boundary.
    fn visit_aggregate(
        &mut self,
        mplace: &MPlaceTy<'tcx>,
        fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
    ) -> InterpResult<'tcx> {
        // We want to walk the aggregate to look for references to intern. While doing that we
        // also need to take special care of interior mutability.
        //
        // As an optimization, however, if the allocation does not contain any references: we don't
        // need to do the walk. It can be costly for big arrays for example (e.g. issue #93215).
        let is_walk_needed = |mplace: &MPlaceTy<'tcx>| -> InterpResult<'tcx, bool> {
            // ZSTs cannot contain pointers, we can avoid the interning walk.
            if mplace.layout.is_zst() {
                return Ok(false);
            }

            // Now, check whether this allocation could contain references.
            //
            // Note, this check may sometimes not be cheap, so we only do it when the walk we'd like
            // to avoid could be expensive: on the potentially larger types, arrays and slices,
            // rather than on all aggregates unconditionally.
            if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
                let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
                    // We do the walk if we can't determine the size of the mplace: we may be
                    // dealing with extern types here in the future.
                    return Ok(true);
                };

                // If there are no relocations in this allocation, it does not contain references
                // that point to another allocation, and we can avoid the interning walk.
                if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
                    if !alloc.has_relocations() {
                        return Ok(false);
                    }
                } else {
                    // We're encountering a ZST here, and can avoid the walk as well.
                    return Ok(false);
                }
            }

            // In the general case, we do the walk.
            Ok(true)
        };

        // If this allocation contains no references to intern, we avoid the potentially costly
        // walk.
        //
        // We can do this before the checks for interior mutability below, because only references
        // are relevant in that situation, and we're checking if there are any here.
        if !is_walk_needed(mplace)? {
            return Ok(());
        }

        if let Some(def) = mplace.layout.ty.ty_adt_def() {
            if def.is_unsafe_cell() {
                // We are crossing over an `UnsafeCell`, we can mutate again. This means that
                // References we encounter inside here are interned as pointing to mutable
                // allocations.
                // Remember the `old` value to handle nested `UnsafeCell`.
                let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
                let walked = self.walk_aggregate(mplace, fields);
                // Restore the previous flag even if the walk errored out.
                self.inside_unsafe_cell = old;
                return walked;
            }
        }

        self.walk_aggregate(mplace, fields)
    }

    /// Visits a value: references are shallow-interned and queued in `ref_tracking` for
    /// typed traversal; anything else is walked recursively.
    fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
        // Handle Reference types, as these are the only relocations supported by const eval.
        // Raw pointers (and boxes) are handled by the `leftover_relocations` logic.
        let tcx = self.ecx.tcx;
        let ty = mplace.layout.ty;
        if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() {
            let value = self.ecx.read_immediate(&mplace.into())?;
            let mplace = self.ecx.ref_to_mplace(&value)?;
            assert_eq!(mplace.layout.ty, referenced_ty);
            // Handle trait object vtables.
            if let ty::Dynamic(..) =
                tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
            {
                let ptr = mplace.meta.unwrap_meta().to_pointer(&tcx)?;
                if let Some(alloc_id) = ptr.provenance {
                    // Explicitly choose const mode here, since vtables are immutable, even
                    // if the reference of the fat pointer is mutable.
                    self.intern_shallow(alloc_id, InternMode::Const, None);
                } else {
                    // Validation will error (with a better message) on an invalid vtable pointer.
                    // Let validation show the error message, but make sure it *does* error.
                    tcx.sess
                        .delay_span_bug(tcx.span, "vtables pointers cannot be integer pointers");
                }
            }
            // Check if we have encountered this pointer+layout combination before.
            // Only recurse for allocation-backed pointers.
            if let Some(alloc_id) = mplace.ptr.provenance {
                // Compute the mode with which we intern this. Our goal here is to make as many
                // statics as we can immutable so they can be placed in read-only memory by LLVM.
                let ref_mode = match self.mode {
                    InternMode::Static(mutbl) => {
                        // In statics, merge outer mutability with reference mutability and
                        // take into account whether we are in an `UnsafeCell`.

                        // The only way a mutable reference actually works as a mutable reference is
                        // by being in a `static mut` directly or behind another mutable reference.
                        // If there's an immutable reference or we are inside a `static`, then our
                        // mutable reference is equivalent to an immutable one. As an example:
                        // `&&mut Foo` is semantically equivalent to `&&Foo`
                        match ref_mutability {
                            _ if self.inside_unsafe_cell => {
                                // Inside an `UnsafeCell` is like inside a `static mut`, the "outer"
                                // mutability does not matter.
                                InternMode::Static(ref_mutability)
                            }
                            Mutability::Not => {
                                // A shared reference, things become immutable.
                                // We do *not* consider `freeze` here: `intern_shallow` considers
                                // `freeze` for the actual mutability of this allocation; the intern
                                // mode for references contained in this allocation is tracked more
                                // precisely when traversing the referenced data (by tracking
                                // `UnsafeCell`). This makes sure that `&(&i32, &Cell<i32>)` still
                                // has the left inner reference interned into a read-only
                                // allocation.
                                InternMode::Static(Mutability::Not)
                            }
                            Mutability::Mut => {
                                // Mutable reference.
                                InternMode::Static(mutbl)
                            }
                        }
                    }
                    InternMode::Const => {
                        // Ignore `UnsafeCell`, everything is immutable. Validity does some sanity
                        // checking for mutable references that we encounter -- they must all be
                        // ZST.
                        InternMode::Const
                    }
                };
                match self.intern_shallow(alloc_id, ref_mode, Some(referenced_ty)) {
                    // No need to recurse, these are interned already and statics may have
                    // cycles, so we don't want to recurse there
                    Some(IsStaticOrFn) => {}
                    // intern everything referenced by this value. The mutability is taken from the
                    // reference. It is checked above that mutable references only happen in
                    // `static mut`
                    None => self.ref_tracking.track((mplace, ref_mode), || ()),
                }
            }
            Ok(())
        } else {
            // Not a reference -- proceed recursively.
            self.walk_value(mplace)
        }
    }
}
+
/// The kind of item a top-level allocation originates from; this determines the
/// intern mode used for it and everything it references.
#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
pub enum InternKind {
    /// The `mutability` of the static, ignoring the type which may have interior mutability.
    Static(hir::Mutability),
    /// A `const` item (also used for array lengths and similar anonymous constants).
    Constant,
    /// A value promoted from within a function body.
    Promoted,
}
+
/// Intern `ret` and everything it references.
///
/// This *cannot raise an interpreter error*. Doing so is left to validation, which
/// tracks where in the value we are and thus can show much better error messages.
/// Any errors here would anyway be turned into `const_err` lints, whereas validation failures
/// are hard errors.
///
/// Returns `Err` only when a dangling pointer is found in the final value; all other
/// problems are deferred (via `delay_span_bug` / leftover interning) to validation.
#[tracing::instrument(level = "debug", skip(ecx))]
pub fn intern_const_alloc_recursive<
    'mir,
    'tcx: 'mir,
    M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>,
>(
    ecx: &mut InterpCx<'mir, 'tcx, M>,
    intern_kind: InternKind,
    ret: &MPlaceTy<'tcx>,
) -> Result<(), ErrorGuaranteed> {
    let tcx = ecx.tcx;
    let base_intern_mode = match intern_kind {
        InternKind::Static(mutbl) => InternMode::Static(mutbl),
        // `Constant` includes array lengths.
        InternKind::Constant | InternKind::Promoted => InternMode::Const,
    };

    // Type based interning.
    // `ref_tracking` tracks typed references we have already interned and still need to crawl for
    // more typed information inside them.
    // `leftover_allocations` collects *all* allocations we see, because some might not
    // be available in a typed way. They get interned at the end.
    let mut ref_tracking = RefTracking::empty();
    let leftover_allocations = &mut FxHashSet::default();

    // start with the outermost allocation
    intern_shallow(
        ecx,
        leftover_allocations,
        // The outermost allocation must exist, because we allocated it with
        // `Memory::allocate`.
        ret.ptr.provenance.unwrap(),
        base_intern_mode,
        Some(ret.layout.ty),
    );

    ref_tracking.track((*ret, base_intern_mode), || ());

    // Drain the worklist of typed references until no more are discovered.
    while let Some(((mplace, mode), _)) = ref_tracking.todo.pop() {
        let res = InternVisitor {
            ref_tracking: &mut ref_tracking,
            ecx,
            mode,
            leftover_allocations,
            inside_unsafe_cell: false,
        }
        .visit_value(&mplace);
        // We deliberately *ignore* interpreter errors here. When there is a problem, the remaining
        // references are "leftover"-interned, and later validation will show a proper error
        // and point at the right part of the value causing the problem.
        match res {
            Ok(()) => {}
            Err(error) => {
                ecx.tcx.sess.delay_span_bug(
                    ecx.tcx.span,
                    &format!(
                        "error during interning should later cause validation failure: {}",
                        error
                    ),
                );
            }
        }
    }

    // Intern the rest of the allocations as mutable. These might be inside unions, padding, raw
    // pointers, ... So we can't intern them according to their type rules

    let mut todo: Vec<_> = leftover_allocations.iter().cloned().collect();
    debug!(?todo);
    debug!("dead_alloc_map: {:#?}", ecx.memory.dead_alloc_map);
    while let Some(alloc_id) = todo.pop() {
        if let Some((_, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) {
            // We can't call the `intern_shallow` method here, as its logic is tailored to safe
            // references and a `leftover_allocations` set (where we only have a todo-list here).
            // So we hand-roll the interning logic here again.
            match intern_kind {
                // Statics may contain mutable allocations even behind relocations.
                // Even for immutable statics it would be ok to have mutable allocations behind
                // raw pointers, e.g. for `static FOO: *const AtomicUsize = &AtomicUsize::new(42)`.
                InternKind::Static(_) => {}
                // Raw pointers in promoteds may only point to immutable things so we mark
                // everything as immutable.
                // It is UB to mutate through a raw pointer obtained via an immutable reference:
                // Since all references and pointers inside a promoted must by their very definition
                // be created from an immutable reference (and promotion also excludes interior
                // mutability), mutating through them would be UB.
                // There's no way we can check whether the user is using raw pointers correctly,
                // so all we can do is mark this as immutable here.
                InternKind::Promoted => {
                    // See const_eval::machine::MemoryExtra::can_access_statics for why
                    // immutability is so important.
                    alloc.mutability = Mutability::Not;
                }
                InternKind::Constant => {
                    // If it's a constant, we should not have any "leftovers" as everything
                    // is tracked by const-checking.
                    // FIXME: downgrade this to a warning? It rejects some legitimate consts,
                    // such as `const CONST_RAW: *const Vec<i32> = &Vec::new() as *const _;`.
                    ecx.tcx
                        .sess
                        .span_err(ecx.tcx.span, "untyped pointers are not allowed in constant");
                    // For better errors later, mark the allocation as immutable.
                    alloc.mutability = Mutability::Not;
                }
            }
            let alloc = tcx.intern_const_alloc(alloc);
            tcx.set_alloc_id_memory(alloc_id, alloc);
            // Queue everything this allocation points to, so the whole reachable graph
            // ends up interned.
            for &(_, alloc_id) in alloc.inner().relocations().iter() {
                if leftover_allocations.insert(alloc_id) {
                    todo.push(alloc_id);
                }
            }
        } else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) {
            // Codegen does not like dangling pointers, and generally `tcx` assumes that
            // all allocations referenced anywhere actually exist. So, make sure we error here.
            let reported = ecx
                .tcx
                .sess
                .span_err(ecx.tcx.span, "encountered dangling pointer in final constant");
            return Err(reported);
        } else if ecx.tcx.try_get_global_alloc(alloc_id).is_none() {
            // We have hit an `AllocId` that is neither in local or global memory and isn't
            // marked as dangling by local memory. That should be impossible.
            span_bug!(ecx.tcx.span, "encountered unknown alloc id {:?}", alloc_id);
        }
    }
    Ok(())
}
+
impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
    InterpCx<'mir, 'tcx, M>
{
    /// A helper function that allocates memory for the layout given and gives you access to mutate
    /// it. Once your own mutation code is done, the backing `Allocation` is removed from the
    /// current `Memory` and returned.
    pub fn intern_with_temp_alloc(
        &mut self,
        layout: TyAndLayout<'tcx>,
        f: impl FnOnce(
            &mut InterpCx<'mir, 'tcx, M>,
            &PlaceTy<'tcx, M::Provenance>,
        ) -> InterpResult<'tcx, ()>,
    ) -> InterpResult<'tcx, ConstAllocation<'tcx>> {
        // Allocate a fresh temporary and let the caller initialize it.
        let dest = self.allocate(layout, MemoryKind::Stack)?;
        f(self, &dest.into())?;
        // Detach the allocation from machine memory; `allocate` guarantees provenance exists.
        let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
        // Interned allocations are immutable.
        alloc.mutability = Mutability::Not;
        Ok(self.tcx.intern_const_alloc(alloc))
    }
}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
new file mode 100644
index 000000000..08209eb79
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -0,0 +1,696 @@
+//! Intrinsics and other functions that the miri engine executes without
+//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
+//! and miri.
+
+use std::convert::TryFrom;
+
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::{
+ self,
+ interpret::{ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar},
+ BinOp,
+};
+use rustc_middle::ty;
+use rustc_middle::ty::layout::LayoutOf as _;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_target::abi::{Abi, Align, Primitive, Size};
+
+use super::{
+ util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
+ Pointer,
+};
+
+mod caller_location;
+mod type_name;
+
/// Directly evaluates the bit-manipulation intrinsics (`ctpop`, `ctlz`, `cttz`, `bswap`,
/// `bitreverse`) on the raw `bits` of an integer whose primitive layout is `kind`.
///
/// `bits` holds the operand zero-extended to 128 bits; the result is truncated back
/// to the operand's size.
fn numeric_intrinsic<Prov>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Prov> {
    let size = match kind {
        Primitive::Int(integer, _) => integer.size(),
        _ => bug!("invalid `{}` argument: {:?}", name, bits),
    };
    // Number of padding bits above the operand's actual width within the u128.
    let extra = 128 - u128::from(size.bits());
    let bits_out = match name {
        sym::ctpop => u128::from(bits.count_ones()),
        // Subtract `extra` so the zero-extension padding does not count as leading zeros.
        sym::ctlz => u128::from(bits.leading_zeros()) - extra,
        sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
        // Shift left first so the swap/reverse operates on the operand's own width.
        sym::bswap => (bits << extra).swap_bytes(),
        sym::bitreverse => (bits << extra).reverse_bits(),
        _ => bug!("not a numeric intrinsic: {}", name),
    };
    Scalar::from_uint(bits_out, size)
}
+
/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
pub(crate) fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
    // All nullary intrinsics operate on their first (and only) type parameter.
    let tp_ty = substs.type_at(0);
    let name = tcx.item_name(def_id);
    Ok(match name {
        sym::type_name => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            let alloc = type_name::alloc_type_name(tcx, tp_ty);
            ConstValue::Slice { data: alloc, start: 0, end: alloc.inner().len() }
        }
        sym::needs_drop => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env))
        }
        sym::pref_align_of => {
            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
            ConstValue::from_machine_usize(layout.align.pref.bytes(), &tcx)
        }
        sym::type_id => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_u64(tcx.type_id_hash(tp_ty))
        }
        sym::variant_count => match tp_ty.kind() {
            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
            ty::Adt(ref adt, _) => {
                ConstValue::from_machine_usize(adt.variants().len() as u64, &tcx)
            }
            // Not-yet-inferred or generic types: we cannot know the variant count.
            ty::Projection(_)
            | ty::Opaque(_, _)
            | ty::Param(_)
            | ty::Bound(_, _)
            | ty::Placeholder(_)
            | ty::Infer(_) => throw_inval!(TooGeneric),
            // Every non-ADT type has zero variants.
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Foreign(_)
            | ty::Str
            | ty::Array(_, _)
            | ty::Slice(_)
            | ty::RawPtr(_)
            | ty::Ref(_, _, _)
            | ty::FnDef(_, _)
            | ty::FnPtr(_)
            | ty::Dynamic(_, _)
            | ty::Closure(_, _)
            | ty::Generator(_, _, _)
            | ty::GeneratorWitness(_)
            | ty::Never
            | ty::Tuple(_)
            | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
        },
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Returns `true` if emulation happened.
    /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
    /// intrinsic handling.
    ///
    /// `ret` is the basic block to jump to after a non-diverging intrinsic completes;
    /// `None` means the intrinsic is diverging.
    pub fn emulate_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
        ret: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, bool> {
        let substs = instance.substs;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        // First handle intrinsics without return place.
        let ret = match ret {
            None => match intrinsic_name {
                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
                sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
                // Unsupported diverging intrinsic.
                _ => return Ok(false),
            },
            Some(p) => p,
        };

        match intrinsic_name {
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let location = self.alloc_caller_location_for_span(span);
                self.write_immediate(location.to_ref(self), dest)?;
            }

            sym::min_align_of_val | sym::size_of_val => {
                // Avoid `deref_operand` -- this is not a deref, the ptr does not have to be
                // dereferenceable!
                let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
                let (size, align) = self
                    .size_and_align_of_mplace(&place)?
                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;

                let result = match intrinsic_name {
                    sym::min_align_of_val => align.bytes(),
                    sym::size_of_val => size.bytes(),
                    _ => bug!(),
                };

                self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
            }

            // These are handled as nullary intrinsics via a separate const-eval query;
            // see `eval_nullary_intrinsic`.
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let gid = GlobalId { instance, promoted: None };
                let ty = match intrinsic_name {
                    sym::pref_align_of | sym::variant_count => self.tcx.types.usize,
                    sym::needs_drop => self.tcx.types.bool,
                    sym::type_id => self.tcx.types.u64,
                    sym::type_name => self.tcx.mk_static_str(),
                    _ => bug!(),
                };
                let val =
                    self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
                let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
                self.copy_op(&val, dest, /*allow_transmute*/ false)?;
            }

            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = substs.type_at(0);
                let layout_of = self.layout_of(ty)?;
                let val = self.read_scalar(&args[0])?.check_init()?;
                let bits = val.to_bits(layout_of.size)?;
                let kind = match layout_of.abi {
                    Abi::Scalar(scalar) => scalar.primitive(),
                    _ => span_bug!(
                        self.cur_span(),
                        "{} called on invalid type {:?}",
                        intrinsic_name,
                        ty
                    ),
                };
                // The `_nonzero` variants are UB on zero input; map them to the base
                // intrinsic after that check.
                let (nonzero, intrinsic_name) = match intrinsic_name {
                    sym::cttz_nonzero => (true, sym::cttz),
                    sym::ctlz_nonzero => (true, sym::ctlz),
                    other => (false, other),
                };
                if nonzero && bits == 0 {
                    throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
                }
                let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
                self.write_scalar(out_val, dest)?;
            }
            sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
                let lhs = self.read_immediate(&args[0])?;
                let rhs = self.read_immediate(&args[1])?;
                let bin_op = match intrinsic_name {
                    sym::add_with_overflow => BinOp::Add,
                    sym::sub_with_overflow => BinOp::Sub,
                    sym::mul_with_overflow => BinOp::Mul,
                    _ => bug!(),
                };
                self.binop_with_overflow(
                    bin_op, /*force_overflow_checks*/ true, &lhs, &rhs, dest,
                )?;
            }
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let val = self.saturating_arith(
                    if intrinsic_name == sym::saturating_add { BinOp::Add } else { BinOp::Sub },
                    &l,
                    &r,
                )?;
                self.write_scalar(val, dest)?;
            }
            sym::discriminant_value => {
                let place = self.deref_operand(&args[0])?;
                let discr_val = self.read_discriminant(&place.into())?.0;
                self.write_scalar(discr_val, dest)?;
            }
            sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::unchecked_div
            | sym::unchecked_rem => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let bin_op = match intrinsic_name {
                    sym::unchecked_shl => BinOp::Shl,
                    sym::unchecked_shr => BinOp::Shr,
                    sym::unchecked_add => BinOp::Add,
                    sym::unchecked_sub => BinOp::Sub,
                    sym::unchecked_mul => BinOp::Mul,
                    sym::unchecked_div => BinOp::Div,
                    sym::unchecked_rem => BinOp::Rem,
                    _ => bug!(),
                };
                // The `unchecked_*` intrinsics are UB on overflow, so report it as such.
                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
                if overflowed {
                    let layout = self.layout_of(substs.type_at(0))?;
                    let r_val = r.to_scalar()?.to_bits(layout.size)?;
                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
                    } else {
                        throw_ub_format!("overflow executing `{}`", intrinsic_name);
                    }
                }
                self.write_scalar(val, dest)?;
            }
            sym::rotate_left | sym::rotate_right => {
                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
                let layout = self.layout_of(substs.type_at(0))?;
                let val = self.read_scalar(&args[0])?.check_init()?;
                let val_bits = val.to_bits(layout.size)?;
                let raw_shift = self.read_scalar(&args[1])?.check_init()?;
                let raw_shift_bits = raw_shift.to_bits(layout.size)?;
                let width_bits = u128::from(layout.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                let truncated_bits = self.truncate(result_bits, layout);
                let result = Scalar::from_uint(truncated_bits, layout.size);
                self.write_scalar(result, dest)?;
            }
            sym::copy => {
                self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
            }
            sym::write_bytes => {
                self.write_bytes_intrinsic(&args[0], &args[1], &args[2])?;
            }
            sym::offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::arith_offset => {
                // Unlike `offset`, this is a wrapping operation with no inbounds requirement.
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let a = self.read_pointer(&args[0])?;
                let b = self.read_pointer(&args[1])?;

                let usize_layout = self.layout_of(self.tcx.types.usize)?;
                let isize_layout = self.layout_of(self.tcx.types.isize)?;

                // Get offsets for both that are at least relative to the same base.
                let (a_offset, b_offset) =
                    match (self.ptr_try_get_alloc_id(a), self.ptr_try_get_alloc_id(b)) {
                        (Err(a), Err(b)) => {
                            // Neither pointer points to an allocation.
                            // If these are inequal or null, this *will* fail the deref check below.
                            (a, b)
                        }
                        (Err(_), _) | (_, Err(_)) => {
                            // We managed to find a valid allocation for one pointer, but not the other.
                            // That means they are definitely not pointing to the same allocation.
                            throw_ub_format!(
                                "`{}` called on pointers into different allocations",
                                intrinsic_name
                            );
                        }
                        (Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _))) => {
                            // Found allocation for both. They must be into the same allocation.
                            if a_alloc_id != b_alloc_id {
                                throw_ub_format!(
                                    "`{}` called on pointers into different allocations",
                                    intrinsic_name
                                );
                            }
                            // Use these offsets for distance calculation.
                            (a_offset.bytes(), b_offset.bytes())
                        }
                    };

                // Compute distance.
                let dist = {
                    // Addresses are unsigned, so this is a `usize` computation. We have to do the
                    // overflow check separately anyway.
                    let (val, overflowed, _ty) = {
                        let a_offset = ImmTy::from_uint(a_offset, usize_layout);
                        let b_offset = ImmTy::from_uint(b_offset, usize_layout);
                        self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
                    };
                    if overflowed {
                        // a < b
                        if intrinsic_name == sym::ptr_offset_from_unsigned {
                            throw_ub_format!(
                                "`{}` called when first pointer has smaller offset than second: {} < {}",
                                intrinsic_name,
                                a_offset,
                                b_offset,
                            );
                        }
                        // The signed form of the intrinsic allows this. If we interpret the
                        // difference as isize, we'll get the proper signed difference. If that
                        // seems *positive*, they were more than isize::MAX apart.
                        let dist = val.to_machine_isize(self)?;
                        if dist >= 0 {
                            throw_ub_format!(
                                "`{}` called when first pointer is too far before second",
                                intrinsic_name
                            );
                        }
                        dist
                    } else {
                        // b >= a
                        let dist = val.to_machine_isize(self)?;
                        // If converting to isize produced a *negative* result, we had an overflow
                        // because they were more than isize::MAX apart.
                        if dist < 0 {
                            throw_ub_format!(
                                "`{}` called when first pointer is too far ahead of second",
                                intrinsic_name
                            );
                        }
                        dist
                    }
                };

                // Check that the range between them is dereferenceable ("in-bounds or one past the
                // end of the same allocation"). This is like the check in ptr_offset_inbounds.
                let min_ptr = if dist >= 0 { b } else { a };
                self.check_ptr_access_align(
                    min_ptr,
                    Size::from_bytes(dist.unsigned_abs()),
                    Align::ONE,
                    CheckInAllocMsg::OffsetFromTest,
                )?;

                // Perform division by size to compute return value.
                let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
                    assert!(0 <= dist && dist <= self.machine_isize_max());
                    usize_layout
                } else {
                    assert!(self.machine_isize_min() <= dist && dist <= self.machine_isize_max());
                    isize_layout
                };
                let pointee_layout = self.layout_of(substs.type_at(0))?;
                // If ret_layout is unsigned, we checked that so is the distance, so we are good.
                let val = ImmTy::from_int(dist, ret_layout);
                let size = ImmTy::from_int(pointee_layout.size.bytes(), ret_layout);
                self.exact_div(&val, &size, dest)?;
            }

            sym::transmute => {
                self.copy_op(&args[0], dest, /*allow_transmute*/ true)?;
            }
            sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
                let ty = instance.substs.type_at(0);
                let layout = self.layout_of(ty)?;

                // For *all* intrinsics we first check `is_uninhabited` to give a more specific
                // error message.
                if layout.abi.is_uninhabited() {
                    // The run-time intrinsic panics just to get a good backtrace; here we abort
                    // since there is no problem showing a backtrace even for aborts.
                    M::abort(
                        self,
                        format!(
                            "aborted execution: attempted to instantiate uninhabited type `{}`",
                            ty
                        ),
                    )?;
                }

                if intrinsic_name == sym::assert_zero_valid {
                    let should_panic = !self.tcx.permits_zero_init(layout);

                    if should_panic {
                        M::abort(
                            self,
                            format!(
                                "aborted execution: attempted to zero-initialize type `{}`, which is invalid",
                                ty
                            ),
                        )?;
                    }
                }

                if intrinsic_name == sym::assert_uninit_valid {
                    let should_panic = !self.tcx.permits_uninit_init(layout);

                    if should_panic {
                        M::abort(
                            self,
                            format!(
                                "aborted execution: attempted to leave type `{}` uninitialized, which is invalid",
                                ty
                            ),
                        )?;
                    }
                }
            }
            sym::simd_insert => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let elem = &args[2];
                let (input, input_len) = self.operand_to_simd(&args[0])?;
                let (dest, dest_len) = self.place_to_simd(dest)?;
                assert_eq!(input_len, dest_len, "Return vector length must match input length");
                assert!(
                    index < dest_len,
                    "Index `{}` must be in bounds of vector with length {}`",
                    index,
                    dest_len
                );

                // Copy the input vector element-wise, substituting `elem` at `index`.
                for i in 0..dest_len {
                    let place = self.mplace_index(&dest, i)?;
                    let value = if i == index {
                        elem.clone()
                    } else {
                        self.mplace_index(&input, i)?.into()
                    };
                    self.copy_op(&value, &place.into(), /*allow_transmute*/ false)?;
                }
            }
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let (input, input_len) = self.operand_to_simd(&args[0])?;
                assert!(
                    index < input_len,
                    "index `{}` must be in bounds of vector with length `{}`",
                    index,
                    input_len
                );
                self.copy_op(
                    &self.mplace_index(&input, index)?.into(),
                    dest,
                    /*allow_transmute*/ false,
                )?;
            }
            sym::likely | sym::unlikely | sym::black_box => {
                // These just return their argument
                self.copy_op(&args[0], dest, /*allow_transmute*/ false)?;
            }
            sym::assume => {
                let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
            }
            sym::raw_eq => {
                let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
                self.write_scalar(result, dest)?;
            }

            sym::vtable_size => {
                let ptr = self.read_pointer(&args[0])?;
                let (size, _align) = self.get_vtable_size_and_align(ptr)?;
                self.write_scalar(Scalar::from_machine_usize(size.bytes(), self), dest)?;
            }
            sym::vtable_align => {
                let ptr = self.read_pointer(&args[0])?;
                let (_size, align) = self.get_vtable_size_and_align(ptr)?;
                self.write_scalar(Scalar::from_machine_usize(align.bytes(), self), dest)?;
            }

            // Not emulated here; let the machine (or caller) handle it.
            _ => return Ok(false),
        }

        trace!("{:?}", self.dump_place(**dest));
        // Emulation succeeded: continue execution at the return block.
        self.go_to_block(ret);
        Ok(true)
    }
+
    /// Implements the `exact_div` intrinsic: divides `a` by `b` and writes the quotient
    /// to `dest`, raising UB if the division has a remainder.
    pub fn exact_div(
        &mut self,
        a: &ImmTy<'tcx, M::Provenance>,
        b: &ImmTy<'tcx, M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        // Performs an exact division, resulting in undefined behavior where
        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
        // First, check x % y != 0 (or if that computation overflows).
        let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
        assert!(!overflow); // All overflow is UB, so this should never return on overflow.
        if res.assert_bits(a.layout.size) != 0 {
            throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b)
        }
        // `Rem` says this is all right, so we can let `Div` do its job.
        self.binop_ignore_overflow(BinOp::Div, &a, &b, dest)
    }
+
    /// Implements the `saturating_add`/`saturating_sub` intrinsics: performs `l <mir_op> r`,
    /// clamping to the type's min/max value on overflow instead of wrapping.
    pub fn saturating_arith(
        &self,
        mir_op: BinOp,
        l: &ImmTy<'tcx, M::Provenance>,
        r: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        // Only addition and subtraction have saturating intrinsics.
        assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
        let (val, overflowed, _ty) = self.overflowing_binary_op(mir_op, l, r)?;
        Ok(if overflowed {
            let size = l.layout.size;
            let num_bits = size.bits();
            if l.layout.abi.is_signed() {
                // For signed ints the saturated value depends on the sign of the first
                // term since the sign of the second term can be inferred from this and
                // the fact that the operation has overflowed (if either is 0 no
                // overflow can occur)
                let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
                let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
                if first_term_positive {
                    // Negative overflow not possible since the positive first term
                    // can only increase an (in range) negative term for addition
                    // or corresponding negated positive term for subtraction
                    Scalar::from_int(size.signed_int_max(), size)
                } else {
                    // Positive overflow not possible for similar reason
                    // max negative
                    Scalar::from_int(size.signed_int_min(), size)
                }
            } else {
                // unsigned
                if matches!(mir_op, BinOp::Add) {
                    // max unsigned
                    Scalar::from_uint(size.unsigned_int_max(), size)
                } else {
                    // underflow to 0
                    Scalar::from_uint(0u128, size)
                }
            }
        } else {
            // No overflow: the wrapped result is already exact.
            val
        })
    }
+
    /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
    /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
    /// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
    pub fn ptr_offset_inbounds(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        pointee_ty: Ty<'tcx>,
        offset_count: i64,
    ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
        // We cannot overflow i64 as a type's size must be <= isize::MAX.
        let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
        // The computed offset, in bytes, must not overflow an isize.
        // `checked_mul` enforces a too small bound, but no actual allocation can be big enough for
        // the difference to be noticeable.
        let offset_bytes =
            offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
        // The offset being in bounds cannot rely on "wrapping around" the address space.
        // So, first rule out overflows in the pointer arithmetic.
        let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
        // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
        // memory between these pointers must be accessible. Note that we do not require the
        // pointers to be properly aligned (unlike a read/write operation).
        let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
        // This call handles checking for integer/null pointers.
        self.check_ptr_access_align(
            min_ptr,
            Size::from_bytes(offset_bytes.unsigned_abs()),
            Align::ONE,
            CheckInAllocMsg::PointerArithmeticTest,
        )?;
        Ok(offset_ptr)
    }
+
    /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
    ///
    /// Note that `count` is in units of `T`, not bytes; `nonoverlapping` selects the
    /// `copy_nonoverlapping` semantics (overlap is then an error).
    pub(crate) fn copy_intrinsic(
        &mut self,
        src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
        dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
        count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let count = self.read_scalar(&count)?.to_machine_usize(self)?;
        // The element type is the pointee of the `src` pointer argument.
        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
        let (size, align) = (layout.size, layout.align.abi);
        // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
        // but no actual allocation can be big enough for the difference to be noticeable.
        let size = size.checked_mul(count, self).ok_or_else(|| {
            err_ub_format!(
                "overflow computing total size of `{}`",
                if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
            )
        })?;

        let src = self.read_pointer(&src)?;
        let dst = self.read_pointer(&dst)?;

        self.mem_copy(src, align, dst, align, size, nonoverlapping)
    }
+
    /// Implements the `write_bytes` intrinsic: sets `count * size_of::<T>()` bytes
    /// starting at `*dst` to the value `byte`.
    pub(crate) fn write_bytes_intrinsic(
        &mut self,
        dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
        byte: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
        count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
    ) -> InterpResult<'tcx> {
        // The element type is the pointee of the `dst` pointer argument.
        let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap().ty)?;

        let dst = self.read_pointer(&dst)?;
        let byte = self.read_scalar(&byte)?.to_u8()?;
        let count = self.read_scalar(&count)?.to_machine_usize(self)?;

        // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
        // but no actual allocation can be big enough for the difference to be noticeable.
        let len = layout
            .size
            .checked_mul(count, self)
            .ok_or_else(|| err_ub_format!("overflow computing total size of `write_bytes`"))?;

        let bytes = std::iter::repeat(byte).take(len.bytes_usize());
        self.write_bytes_ptr(dst, bytes)
    }
+
    /// Implements the `raw_eq` intrinsic: compares the raw bytes of the two (sized)
    /// values behind `lhs` and `rhs`, returning a boolean scalar.
    pub(crate) fn raw_eq_intrinsic(
        &mut self,
        lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
        rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        // Both arguments have the same pointee type; comparing bytes requires a known size.
        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
        assert!(!layout.is_unsized());

        let lhs = self.read_pointer(lhs)?;
        let rhs = self.read_pointer(rhs)?;
        let lhs_bytes = self.read_bytes_ptr(lhs, layout.size)?;
        let rhs_bytes = self.read_bytes_ptr(rhs, layout.size)?;
        Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
new file mode 100644
index 000000000..5864b9215
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
@@ -0,0 +1,130 @@
+use std::convert::TryFrom;
+
+use rustc_ast::Mutability;
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::mir::TerminatorKind;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::subst::Subst;
+use rustc_span::{Span, Symbol};
+
+use crate::interpret::{
+ intrinsics::{InterpCx, Machine},
+ MPlaceTy, MemoryKind, Scalar,
+};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Walks up the callstack from the intrinsic's callsite, searching for the first callsite in a
+ /// frame which is not `#[track_caller]`.
+ pub(crate) fn find_closest_untracked_caller_location(&self) -> Span {
+ for frame in self.stack().iter().rev() {
+ debug!("find_closest_untracked_caller_location: checking frame {:?}", frame.instance);
+
+ // Assert that the frame we look at is actually executing code currently
+ // (`loc` is `Err` when we are unwinding and the frame does not require cleanup).
+ let loc = frame.loc.unwrap();
+
+ // This could be a non-`Call` terminator (such as `Drop`), or not a terminator at all
+ // (such as `box`). Use the normal span by default.
+ let mut source_info = *frame.body.source_info(loc);
+
+ // If this is a `Call` terminator, use the `fn_span` instead.
+ let block = &frame.body.basic_blocks()[loc.block];
+ if loc.statement_index == block.statements.len() {
+ debug!(
+ "find_closest_untracked_caller_location: got terminator {:?} ({:?})",
+ block.terminator(),
+ block.terminator().kind
+ );
+ if let TerminatorKind::Call { fn_span, .. } = block.terminator().kind {
+ source_info.span = fn_span;
+ }
+ }
+
+ // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
+ // If so, the starting `source_info.span` is in the innermost inlined
+ // function, and will be replaced with outer callsite spans as long
+ // as the inlined functions were `#[track_caller]`.
+ loop {
+ let scope_data = &frame.body.source_scopes[source_info.scope];
+
+ if let Some((callee, callsite_span)) = scope_data.inlined {
+ // Stop inside the most nested non-`#[track_caller]` function,
+ // before ever reaching its caller (which is irrelevant).
+ if !callee.def.requires_caller_location(*self.tcx) {
+ return source_info.span;
+ }
+ source_info.span = callsite_span;
+ }
+
+ // Skip past all of the parents with `inlined: None`.
+ match scope_data.inlined_parent_scope {
+ Some(parent) => source_info.scope = parent,
+ None => break,
+ }
+ }
+
+ // Stop inside the most nested non-`#[track_caller]` function,
+ // before ever reaching its caller (which is irrelevant).
+ if !frame.instance.def.requires_caller_location(*self.tcx) {
+ return source_info.span;
+ }
+ }
+
+ span_bug!(self.cur_span(), "no non-`#[track_caller]` frame found")
+ }
+
+ /// Allocate a `const core::panic::Location` with the provided filename and line/column numbers.
+ pub(crate) fn alloc_caller_location(
+ &mut self,
+ filename: Symbol,
+ line: u32,
+ col: u32,
+ ) -> MPlaceTy<'tcx, M::Provenance> {
+ let loc_details = &self.tcx.sess.opts.unstable_opts.location_detail;
+ let file = if loc_details.file {
+ self.allocate_str(filename.as_str(), MemoryKind::CallerLocation, Mutability::Not)
+ } else {
+ // FIXME: This creates a new allocation each time. It might be preferable to
+ // perform this allocation only once, and re-use the `MPlaceTy`.
+ // See https://github.com/rust-lang/rust/pull/89920#discussion_r730012398
+ self.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not)
+ };
+ let line = if loc_details.line { Scalar::from_u32(line) } else { Scalar::from_u32(0) };
+ let col = if loc_details.column { Scalar::from_u32(col) } else { Scalar::from_u32(0) };
+
+ // Allocate memory for `CallerLocation` struct.
+ let loc_ty = self
+ .tcx
+ .bound_type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None))
+ .subst(*self.tcx, self.tcx.mk_substs([self.tcx.lifetimes.re_erased.into()].iter()));
+ let loc_layout = self.layout_of(loc_ty).unwrap();
+ // This can fail if rustc runs out of memory right here. Trying to emit an error would be
+ // pointless, since that would require allocating more memory than a Location.
+ let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
+
+ // Initialize fields.
+ self.write_immediate(file.to_ref(self), &self.mplace_field(&location, 0).unwrap().into())
+ .expect("writing to memory we just allocated cannot fail");
+ self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into())
+ .expect("writing to memory we just allocated cannot fail");
+ self.write_scalar(col, &self.mplace_field(&location, 2).unwrap().into())
+ .expect("writing to memory we just allocated cannot fail");
+
+ location
+ }
+
+ pub(crate) fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) {
+ let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+ let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
+ (
+ Symbol::intern(&caller.file.name.prefer_remapped().to_string_lossy()),
+ u32::try_from(caller.line).unwrap(),
+ u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
+ )
+ }
+
+ pub fn alloc_caller_location_for_span(&mut self, span: Span) -> MPlaceTy<'tcx, M::Provenance> {
+ let (file, line, column) = self.location_triple_for_span(span);
+ self.alloc_caller_location(file, line, column)
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
new file mode 100644
index 000000000..f9847742f
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
@@ -0,0 +1,196 @@
+use rustc_data_structures::intern::Interned;
+use rustc_hir::def_id::CrateNum;
+use rustc_hir::definitions::DisambiguatedDefPathData;
+use rustc_middle::mir::interpret::{Allocation, ConstAllocation};
+use rustc_middle::ty::{
+ self,
+ print::{PrettyPrinter, Print, Printer},
+ subst::{GenericArg, GenericArgKind},
+ Ty, TyCtxt,
+};
+use std::fmt::Write;
+
+struct AbsolutePathPrinter<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ path: String,
+}
+
+impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
+ type Error = std::fmt::Error;
+
+ type Path = Self;
+ type Region = Self;
+ type Type = Self;
+ type DynExistential = Self;
+ type Const = Self;
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn print_region(self, _region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
+ Ok(self)
+ }
+
+ fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+ match *ty.kind() {
+ // Types without identity.
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Array(_, _)
+ | ty::Slice(_)
+ | ty::RawPtr(_)
+ | ty::Ref(_, _, _)
+ | ty::FnPtr(_)
+ | ty::Never
+ | ty::Tuple(_)
+ | ty::Dynamic(_, _) => self.pretty_print_type(ty),
+
+ // Placeholders (all printed as `_` to uniformize them).
+ ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error(_) => {
+ write!(self, "_")?;
+ Ok(self)
+ }
+
+ // Types with identity (print the module path).
+ ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), substs)
+ | ty::FnDef(def_id, substs)
+ | ty::Opaque(def_id, substs)
+ | ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs })
+ | ty::Closure(def_id, substs)
+ | ty::Generator(def_id, substs, _) => self.print_def_path(def_id, substs),
+ ty::Foreign(def_id) => self.print_def_path(def_id, &[]),
+
+ ty::GeneratorWitness(_) => bug!("type_name: unexpected `GeneratorWitness`"),
+ }
+ }
+
+ fn print_const(self, ct: ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
+ self.pretty_print_const(ct, false)
+ }
+
+ fn print_dyn_existential(
+ mut self,
+ predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Result<Self::DynExistential, Self::Error> {
+ let mut first = true;
+ for p in predicates {
+ if !first {
+ write!(self, "+")?;
+ }
+ first = false;
+ self = p.print(self)?;
+ }
+ Ok(self)
+ }
+
+ fn path_crate(mut self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
+ self.path.push_str(self.tcx.crate_name(cnum).as_str());
+ Ok(self)
+ }
+
+ fn path_qualified(
+ self,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ self.pretty_path_qualified(self_ty, trait_ref)
+ }
+
+ fn path_append_impl(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ _disambiguated_data: &DisambiguatedDefPathData,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ self.pretty_path_append_impl(
+ |mut cx| {
+ cx = print_prefix(cx)?;
+
+ cx.path.push_str("::");
+
+ Ok(cx)
+ },
+ self_ty,
+ trait_ref,
+ )
+ }
+
+ fn path_append(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ disambiguated_data: &DisambiguatedDefPathData,
+ ) -> Result<Self::Path, Self::Error> {
+ self = print_prefix(self)?;
+
+ write!(self.path, "::{}", disambiguated_data.data).unwrap();
+
+ Ok(self)
+ }
+
+ fn path_generic_args(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ args: &[GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ self = print_prefix(self)?;
+ let args =
+ args.iter().cloned().filter(|arg| !matches!(arg.unpack(), GenericArgKind::Lifetime(_)));
+ if args.clone().next().is_some() {
+ self.generic_delimiters(|cx| cx.comma_sep(args))
+ } else {
+ Ok(self)
+ }
+ }
+}
+
+impl<'tcx> PrettyPrinter<'tcx> for AbsolutePathPrinter<'tcx> {
+ fn should_print_region(&self, _region: ty::Region<'_>) -> bool {
+ false
+ }
+ fn comma_sep<T>(mut self, mut elems: impl Iterator<Item = T>) -> Result<Self, Self::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = Self::Error>,
+ {
+ if let Some(first) = elems.next() {
+ self = first.print(self)?;
+ for elem in elems {
+ self.path.push_str(", ");
+ self = elem.print(self)?;
+ }
+ }
+ Ok(self)
+ }
+
+ fn generic_delimiters(
+ mut self,
+ f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ ) -> Result<Self, Self::Error> {
+ write!(self, "<")?;
+
+ self = f(self)?;
+
+ write!(self, ">")?;
+
+ Ok(self)
+ }
+}
+
+impl Write for AbsolutePathPrinter<'_> {
+ fn write_str(&mut self, s: &str) -> std::fmt::Result {
+ self.path.push_str(s);
+ Ok(())
+ }
+}
+
+/// Directly returns an `Allocation` containing an absolute path representation of the given type.
+pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> {
+ let path = AbsolutePathPrinter { tcx, path: String::new() }.print_type(ty).unwrap().path;
+ let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes());
+ tcx.intern_const_alloc(alloc)
+}
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
new file mode 100644
index 000000000..71ccd1799
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -0,0 +1,525 @@
+//! This module contains everything needed to instantiate an interpreter.
+//! This separation exists to ensure that no fancy miri features like
+//! interpreting common C functions leak into CTFE.
+
+use std::borrow::{Borrow, Cow};
+use std::fmt::Debug;
+use std::hash::Hash;
+
+use rustc_middle::mir;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::def_id::DefId;
+use rustc_target::abi::Size;
+use rustc_target::spec::abi::Abi as CallAbi;
+
+use super::{
+ AllocId, AllocRange, Allocation, ConstAllocation, Frame, ImmTy, InterpCx, InterpResult,
+ MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
+};
+
+/// Data returned by Machine::stack_pop,
+/// to provide further control over the popping of the stack frame
+#[derive(Eq, PartialEq, Debug, Copy, Clone)]
+pub enum StackPopJump {
+ /// Indicates that no special handling should be
+ /// done - we'll either return normally or unwind
+ /// based on the terminator for the function
+ /// we're leaving.
+ Normal,
+
+ /// Indicates that we should *not* jump to the return/unwind address, as the callback already
+ /// took care of everything.
+ NoJump,
+}
+
+/// Whether this kind of memory is allowed to leak
+pub trait MayLeak: Copy {
+ fn may_leak(self) -> bool;
+}
+
+/// The functionality needed by memory to manage its allocations
+pub trait AllocMap<K: Hash + Eq, V> {
+ /// Tests if the map contains the given key.
+ /// Deliberately takes `&mut` because that is sufficient, and some implementations
+ /// can be more efficient then (using `RefCell::get_mut`).
+ fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
+ where
+ K: Borrow<Q>;
+
+ /// Inserts a new entry into the map.
+ fn insert(&mut self, k: K, v: V) -> Option<V>;
+
+ /// Removes an entry from the map.
+ fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
+ where
+ K: Borrow<Q>;
+
+ /// Returns data based on the keys and values in the map.
+ fn filter_map_collect<T>(&self, f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T>;
+
+ /// Returns a reference to entry `k`. If no such entry exists, call
+ /// `vacant` and either forward its error, or add its result to the map
+ /// and return a reference to *that*.
+ fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E>;
+
+ /// Returns a mutable reference to entry `k`. If no such entry exists, call
+ /// `vacant` and either forward its error, or add its result to the map
+ /// and return a reference to *that*.
+ fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E>;
+
+ /// Read-only lookup.
+ fn get(&self, k: K) -> Option<&V> {
+ self.get_or(k, || Err(())).ok()
+ }
+
+ /// Mutable lookup.
+ fn get_mut(&mut self, k: K) -> Option<&mut V> {
+ self.get_mut_or(k, || Err(())).ok()
+ }
+}
+
+/// Methods of this trait signifies a point where CTFE evaluation would fail
+/// and some use case dependent behaviour can instead be applied.
+pub trait Machine<'mir, 'tcx>: Sized {
+ /// Additional memory kinds a machine wishes to distinguish from the builtin ones
+ type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;
+
+ /// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
+ type Provenance: Provenance + Eq + Hash + 'static;
+
+ /// When getting the AllocId of a pointer, some extra data is also obtained from the provenance
+ /// that is passed to memory access hooks so they can do things with it.
+ type ProvenanceExtra: Copy + 'static;
+
+ /// Machines can define extra (non-instance) things that represent values of function pointers.
+ /// For example, Miri uses this to return a function pointer from `dlsym`
+ /// that can later be called to execute the right thing.
+ type ExtraFnVal: Debug + Copy;
+
+ /// Extra data stored in every call frame.
+ type FrameExtra;
+
+ /// Extra data stored in every allocation.
+ type AllocExtra: Debug + Clone + 'static;
+
+ /// Memory's allocation map
+ type MemoryMap: AllocMap<
+ AllocId,
+ (MemoryKind<Self::MemoryKind>, Allocation<Self::Provenance, Self::AllocExtra>),
+ > + Default
+ + Clone;
+
+ /// The memory kind to use for copied global memory (held in `tcx`) --
+ /// or None if such memory should not be mutated and thus any such attempt will cause
+ /// a `ModifiedStatic` error to be raised.
+ /// Statics are copied under two circumstances: When they are mutated, and when
+ /// `adjust_allocation` (see below) returns an owned allocation
+ /// that is added to the memory so that the work is not done twice.
+ const GLOBAL_KIND: Option<Self::MemoryKind>;
+
+ /// Should the machine panic on allocation failures?
+ const PANIC_ON_ALLOC_FAIL: bool;
+
+ /// Whether memory accesses should be alignment-checked.
+ fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+ /// Whether, when checking alignment, we should `force_int` and thus support
+ /// custom alignment logic based on whatever the integer address happens to be.
+ ///
+ /// Requires Provenance::OFFSET_IS_ADDR to be true.
+ fn force_int_for_alignment_check(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+ /// Whether to enforce the validity invariant
+ fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+ /// Whether to enforce integers and floats being initialized.
+ fn enforce_number_init(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+ /// Whether function calls should be [ABI](CallAbi)-checked.
+ fn enforce_abi(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+ true
+ }
+
+ /// Whether CheckedBinOp MIR statements should actually check for overflow.
+ fn checked_binop_checks_overflow(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+ /// Entry point for obtaining the MIR of anything that should get evaluated.
+ /// So not just functions and shims, but also const/static initializers, anonymous
+ /// constants, ...
+ fn load_mir(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ instance: ty::InstanceDef<'tcx>,
+ ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+ Ok(ecx.tcx.instance_mir(instance))
+ }
+
+ /// Entry point to all function calls.
+ ///
+ /// Returns either the mir to use for the call, or `None` if execution should
+ /// just proceed (which usually means this hook did all the work that the
+ /// called function should usually have done). In the latter case, it is
+ /// this hook's responsibility to advance the instruction pointer!
+ /// (This is to support functions like `__rust_maybe_catch_panic` that neither find a MIR
+ /// nor just jump to `ret`, but instead push their own stack frame.)
+ /// Passing `dest`and `ret` in the same `Option` proved very annoying when only one of them
+ /// was used.
+ fn find_mir_or_eval_fn(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ instance: ty::Instance<'tcx>,
+ abi: CallAbi,
+ args: &[OpTy<'tcx, Self::Provenance>],
+ destination: &PlaceTy<'tcx, Self::Provenance>,
+ target: Option<mir::BasicBlock>,
+ unwind: StackPopUnwind,
+ ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>>;
+
+ /// Execute `fn_val`. It is the hook's responsibility to advance the instruction
+ /// pointer as appropriate.
+ fn call_extra_fn(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ fn_val: Self::ExtraFnVal,
+ abi: CallAbi,
+ args: &[OpTy<'tcx, Self::Provenance>],
+ destination: &PlaceTy<'tcx, Self::Provenance>,
+ target: Option<mir::BasicBlock>,
+ unwind: StackPopUnwind,
+ ) -> InterpResult<'tcx>;
+
+ /// Directly process an intrinsic without pushing a stack frame. It is the hook's
+ /// responsibility to advance the instruction pointer as appropriate.
+ fn call_intrinsic(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ instance: ty::Instance<'tcx>,
+ args: &[OpTy<'tcx, Self::Provenance>],
+ destination: &PlaceTy<'tcx, Self::Provenance>,
+ target: Option<mir::BasicBlock>,
+ unwind: StackPopUnwind,
+ ) -> InterpResult<'tcx>;
+
+ /// Called to evaluate `Assert` MIR terminators that trigger a panic.
+ fn assert_panic(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ msg: &mir::AssertMessage<'tcx>,
+ unwind: Option<mir::BasicBlock>,
+ ) -> InterpResult<'tcx>;
+
+ /// Called to evaluate `Abort` MIR terminator.
+ fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _msg: String) -> InterpResult<'tcx, !> {
+ throw_unsup_format!("aborting execution is not supported")
+ }
+
+ /// Called for all binary operations where the LHS has pointer type.
+ ///
+ /// Returns a (value, overflowed) pair if the operation succeeded
+ fn binary_ptr_op(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ bin_op: mir::BinOp,
+ left: &ImmTy<'tcx, Self::Provenance>,
+ right: &ImmTy<'tcx, Self::Provenance>,
+ ) -> InterpResult<'tcx, (Scalar<Self::Provenance>, bool, Ty<'tcx>)>;
+
+ /// Called to read the specified `local` from the `frame`.
+ /// Since reading a ZST is not actually accessing memory or locals, this is never invoked
+ /// for ZST reads.
+ #[inline]
+ fn access_local<'a>(
+ frame: &'a Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
+ local: mir::Local,
+ ) -> InterpResult<'tcx, &'a Operand<Self::Provenance>>
+ where
+ 'tcx: 'mir,
+ {
+ frame.locals[local].access()
+ }
+
+ /// Called to write the specified `local` from the `frame`.
+ /// Since writing a ZST is not actually accessing memory or locals, this is never invoked
+ /// for ZST reads.
+ #[inline]
+ fn access_local_mut<'a>(
+ ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+ frame: usize,
+ local: mir::Local,
+ ) -> InterpResult<'tcx, &'a mut Operand<Self::Provenance>>
+ where
+ 'tcx: 'mir,
+ {
+ ecx.stack_mut()[frame].locals[local].access_mut()
+ }
+
+ /// Called before a basic block terminator is executed.
+ /// You can use this to detect endlessly running programs.
+ #[inline]
+ fn before_terminator(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Called before a global allocation is accessed.
+ /// `def_id` is `Some` if this is the "lazy" allocation of a static.
+ #[inline]
+ fn before_access_global(
+ _tcx: TyCtxt<'tcx>,
+ _machine: &Self,
+ _alloc_id: AllocId,
+ _allocation: ConstAllocation<'tcx>,
+ _static_def_id: Option<DefId>,
+ _is_write: bool,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Return the `AllocId` for the given thread-local static in the current thread.
+ fn thread_local_static_base_pointer(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ def_id: DefId,
+ ) -> InterpResult<'tcx, Pointer<Self::Provenance>> {
+ throw_unsup!(ThreadLocalStatic(def_id))
+ }
+
+ /// Return the root pointer for the given `extern static`.
+ fn extern_static_base_pointer(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ def_id: DefId,
+ ) -> InterpResult<'tcx, Pointer<Self::Provenance>>;
+
+ /// Return a "base" pointer for the given allocation: the one that is used for direct
+ /// accesses to this static/const/fn allocation, or the one returned from the heap allocator.
+ ///
+ /// Not called on `extern` or thread-local statics (those use the methods above).
+ fn adjust_alloc_base_pointer(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ ptr: Pointer,
+ ) -> Pointer<Self::Provenance>;
+
+ /// "Int-to-pointer cast"
+ fn ptr_from_addr_cast(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ addr: u64,
+ ) -> InterpResult<'tcx, Pointer<Option<Self::Provenance>>>;
+
+ /// Marks a pointer as exposed, allowing it's provenance
+ /// to be recovered. "Pointer-to-int cast"
+ fn expose_ptr(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ ptr: Pointer<Self::Provenance>,
+ ) -> InterpResult<'tcx>;
+
+ /// Convert a pointer with provenance into an allocation-offset pair
+ /// and extra provenance info.
+ ///
+ /// The returned `AllocId` must be the same as `ptr.provenance.get_alloc_id()`.
+ ///
+ /// When this fails, that means the pointer does not point to a live allocation.
+ fn ptr_get_alloc(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ ptr: Pointer<Self::Provenance>,
+ ) -> Option<(AllocId, Size, Self::ProvenanceExtra)>;
+
+ /// Called to adjust allocations to the Provenance and AllocExtra of this machine.
+ ///
+ /// The way we construct allocations is to always first construct it without extra and then add
+ /// the extra. This keeps uniform code paths for handling both allocations created by CTFE for
+ /// globals, and allocations created by Miri during evaluation.
+ ///
+ /// `kind` is the kind of the allocation being adjusted; it can be `None` when
+ /// it's a global and `GLOBAL_KIND` is `None`.
+ ///
+ /// This should avoid copying if no work has to be done! If this returns an owned
+ /// allocation (because a copy had to be done to adjust things), machine memory will
+ /// cache the result. (This relies on `AllocMap::get_or` being able to add the
+ /// owned allocation to the map even when the map is shared.)
+ ///
+ /// This must only fail if `alloc` contains relocations.
+ fn adjust_allocation<'b>(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ id: AllocId,
+ alloc: Cow<'b, Allocation>,
+ kind: Option<MemoryKind<Self::MemoryKind>>,
+ ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra>>>;
+
+ /// Hook for performing extra checks on a memory read access.
+ ///
+ /// Takes read-only access to the allocation so we can keep all the memory read
+ /// operations take `&self`. Use a `RefCell` in `AllocExtra` if you
+ /// need to mutate.
+ #[inline(always)]
+ fn memory_read(
+ _tcx: TyCtxt<'tcx>,
+ _machine: &Self,
+ _alloc_extra: &Self::AllocExtra,
+ _prov: (AllocId, Self::ProvenanceExtra),
+ _range: AllocRange,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Hook for performing extra checks on a memory write access.
+ #[inline(always)]
+ fn memory_written(
+ _tcx: TyCtxt<'tcx>,
+ _machine: &mut Self,
+ _alloc_extra: &mut Self::AllocExtra,
+ _prov: (AllocId, Self::ProvenanceExtra),
+ _range: AllocRange,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Hook for performing extra operations on a memory deallocation.
+ #[inline(always)]
+ fn memory_deallocated(
+ _tcx: TyCtxt<'tcx>,
+ _machine: &mut Self,
+ _alloc_extra: &mut Self::AllocExtra,
+ _prov: (AllocId, Self::ProvenanceExtra),
+ _range: AllocRange,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Executes a retagging operation.
+ #[inline]
+ fn retag(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _kind: mir::RetagKind,
+ _place: &PlaceTy<'tcx, Self::Provenance>,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Called immediately before a new stack frame gets pushed.
+ fn init_frame_extra(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ frame: Frame<'mir, 'tcx, Self::Provenance>,
+ ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>;
+
+ /// Borrow the current thread's stack.
+ fn stack<'a>(
+ ecx: &'a InterpCx<'mir, 'tcx, Self>,
+ ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>];
+
+ /// Mutably borrow the current thread's stack.
+ fn stack_mut<'a>(
+ ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+ ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>;
+
+ /// Called immediately after a stack frame got pushed and its locals got initialized.
+ fn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Called immediately after a stack frame got popped, but before jumping back to the caller.
+ /// The `locals` have already been destroyed!
+ fn after_stack_pop(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _frame: Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
+ unwinding: bool,
+ ) -> InterpResult<'tcx, StackPopJump> {
+ // By default, we do not support unwinding from panics
+ assert!(!unwinding);
+ Ok(StackPopJump::Normal)
+ }
+}
+
+// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
+// (CTFE and ConstProp) use the same instance. Here, we share that code.
+pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
+ type Provenance = AllocId;
+ type ProvenanceExtra = ();
+
+ type ExtraFnVal = !;
+
+ type MemoryMap =
+ rustc_data_structures::fx::FxHashMap<AllocId, (MemoryKind<Self::MemoryKind>, Allocation)>;
+ const GLOBAL_KIND: Option<Self::MemoryKind> = None; // no copying of globals from `tcx` to machine memory
+
+ type AllocExtra = ();
+ type FrameExtra = ();
+
+ #[inline(always)]
+ fn enforce_alignment(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+ // We do not check for alignment to avoid having to carry an `Align`
+ // in `ConstValue::ByRef`.
+ false
+ }
+
+ #[inline(always)]
+ fn force_int_for_alignment_check(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+ // We do not support `force_int`.
+ false
+ }
+
+ #[inline(always)]
+ fn enforce_validity(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+ false // for now, we don't enforce validity
+ }
+
+ #[inline(always)]
+ fn enforce_number_init(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+ true
+ }
+
+ #[inline(always)]
+ fn checked_binop_checks_overflow(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+ true
+ }
+
+ #[inline(always)]
+ fn call_extra_fn(
+ _ecx: &mut InterpCx<$mir, $tcx, Self>,
+ fn_val: !,
+ _abi: CallAbi,
+ _args: &[OpTy<$tcx>],
+ _destination: &PlaceTy<$tcx, Self::Provenance>,
+ _target: Option<mir::BasicBlock>,
+ _unwind: StackPopUnwind,
+ ) -> InterpResult<$tcx> {
+ match fn_val {}
+ }
+
+ #[inline(always)]
+ fn adjust_allocation<'b>(
+ _ecx: &InterpCx<$mir, $tcx, Self>,
+ _id: AllocId,
+ alloc: Cow<'b, Allocation>,
+ _kind: Option<MemoryKind<Self::MemoryKind>>,
+ ) -> InterpResult<$tcx, Cow<'b, Allocation<Self::Provenance>>> {
+ Ok(alloc)
+ }
+
+ fn extern_static_base_pointer(
+ ecx: &InterpCx<$mir, $tcx, Self>,
+ def_id: DefId,
+ ) -> InterpResult<$tcx, Pointer> {
+ // Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
+ Ok(Pointer::new(ecx.tcx.create_static_alloc(def_id), Size::ZERO))
+ }
+
+ #[inline(always)]
+ fn adjust_alloc_base_pointer(
+ _ecx: &InterpCx<$mir, $tcx, Self>,
+ ptr: Pointer<AllocId>,
+ ) -> Pointer<AllocId> {
+ ptr
+ }
+
+ #[inline(always)]
+ fn ptr_from_addr_cast(
+ _ecx: &InterpCx<$mir, $tcx, Self>,
+ addr: u64,
+ ) -> InterpResult<$tcx, Pointer<Option<AllocId>>> {
+ // Allow these casts, but make the pointer not dereferenceable.
+ // (I.e., they behave like transmutation.)
+ Ok(Pointer::from_addr(addr))
+ }
+
+ #[inline(always)]
+ fn ptr_get_alloc(
+ _ecx: &InterpCx<$mir, $tcx, Self>,
+ ptr: Pointer<AllocId>,
+ ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
+ // We know `offset` is relative to the allocation, so we can use `into_parts`.
+ let (alloc_id, offset) = ptr.into_parts();
+ Some((alloc_id, offset, ()))
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
new file mode 100644
index 000000000..ed2c4edf9
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -0,0 +1,1224 @@
+//! The memory subsystem.
+//!
+//! Generally, we use `Pointer` to denote memory addresses. However, some operations
+//! have a "size"-like parameter, and they take `Scalar` for the address because
+//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
+//! integer. It is crucial that these operations call `check_align` *before*
+//! short-circuiting the empty case!
+
+use std::assert_matches::assert_matches;
+use std::borrow::Cow;
+use std::collections::VecDeque;
+use std::fmt;
+use std::ptr;
+
+use rustc_ast::Mutability;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::mir::display_allocation;
+use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
+use rustc_target::abi::{Align, HasDataLayout, Size};
+
+use super::{
+ alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc, InterpCx,
+ InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
+ ScalarMaybeUninit,
+};
+
+/// The "kind" of an interpreter-local allocation, controlling which deallocation
+/// paths are legal for it. `T` is the machine's own set of extra kinds
+/// (`M::MemoryKind`), folded in via the `Machine` variant.
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum MemoryKind<T> {
+    /// Stack memory. Error if deallocated except during a stack pop.
+    Stack,
+    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
+    CallerLocation,
+    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
+    Machine(T),
+}
+
+impl<T: MayLeak> MayLeak for MemoryKind<T> {
+    /// Whether it is acceptable for memory of this kind to still be live when
+    /// the interpreter shuts down (see `leak_report`).
+    #[inline]
+    fn may_leak(self) -> bool {
+        match self {
+            // Stack memory is freed by the stack-pop machinery; leaking it is a bug.
+            MemoryKind::Stack => false,
+            // `caller_location` allocations live for the whole execution.
+            MemoryKind::CallerLocation => true,
+            // Defer to the machine for its own kinds.
+            MemoryKind::Machine(k) => k.may_leak(),
+        }
+    }
+}
+
+/// Human-readable names for memory kinds, used in diagnostics
+/// (e.g. the "deallocating ... which is {kind} memory" errors).
+impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            MemoryKind::Stack => write!(f, "stack variable"),
+            MemoryKind::CallerLocation => write!(f, "caller location"),
+            MemoryKind::Machine(m) => write!(f, "{}", m),
+        }
+    }
+}
+
+/// The return value of `get_alloc_info` indicates the "kind" of the allocation.
+pub enum AllocKind {
+    /// A regular live data allocation.
+    LiveData,
+    /// A function allocation (that fn ptrs point to). Zero-sized.
+    Function,
+    /// A (symbolic) vtable allocation. No data can be accessed through it.
+    VTable,
+    /// A dead allocation; only its size/align are still known (via `dead_alloc_map`).
+    Dead,
+}
+
+/// The value of a function pointer.
+#[derive(Debug, Copy, Clone)]
+pub enum FnVal<'tcx, Other> {
+    /// A regular, monomorphized function instance.
+    Instance(Instance<'tcx>),
+    /// A machine-specific "extra" function value (e.g. a foreign function),
+    /// registered in `Memory::extra_fn_ptr_map`.
+    Other(Other),
+}
+
+impl<'tcx, Other> FnVal<'tcx, Other> {
+    /// Extract the `Instance`, erroring out for machine-specific ("foreign")
+    /// function values that have no instance.
+    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
+        match self {
+            FnVal::Instance(instance) => Ok(instance),
+            FnVal::Other(_) => {
+                throw_unsup_format!("'foreign' function pointers are not supported in this context")
+            }
+        }
+    }
+}
+
+// `Memory` has to depend on the `Machine` because some of its operations
+// (e.g., `get`) call a `Machine` hook.
+/// The interpreter's view of memory: local allocations, extra function
+/// pointers, and bookkeeping for deallocated allocations.
+pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    /// Allocations local to this instance of the miri engine. The kind
+    /// helps ensure that the same mechanism is used for allocation and
+    /// deallocation. When an allocation is not found here, it is a
+    /// global and looked up in the `tcx` for read access. Some machines may
+    /// have to mutate this map even on a read-only access to a global (because
+    /// they do pointer provenance tracking and the allocations in `tcx` have
+    /// the wrong type), so we let the machine override this type.
+    /// Either way, if the machine allows writing to a global, doing so will
+    /// create a copy of the global allocation here.
+    // FIXME: this should not be public, but interning currently needs access to it
+    pub(super) alloc_map: M::MemoryMap,
+
+    /// Map for "extra" function pointers (see `FnVal::Other`),
+    /// keyed by the `AllocId` reserved for them in `create_fn_alloc_ptr`.
+    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,
+
+    /// To be able to compare pointers with null, and to check alignment for accesses
+    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
+    /// that do not exist any more.
+    // FIXME: this should not be public, but interning currently needs access to it
+    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,
+}
+
+/// A reference to some allocation that was already bounds-checked for the given region
+/// and had the on-access machine hooks run.
+#[derive(Copy, Clone)]
+pub struct AllocRef<'a, 'tcx, Prov, Extra> {
+    alloc: &'a Allocation<Prov, Extra>,
+    /// The (absolute) sub-range of `alloc` that was checked; all read methods
+    /// take ranges/offsets relative to this.
+    range: AllocRange,
+    tcx: TyCtxt<'tcx>,
+    /// Kept around so errors can name the allocation (`to_interp_error`).
+    alloc_id: AllocId,
+}
+/// A reference to some allocation that was already bounds-checked for the given region
+/// and had the on-access machine hooks run.
+pub struct AllocRefMut<'a, 'tcx, Prov, Extra> {
+    alloc: &'a mut Allocation<Prov, Extra>,
+    /// The (absolute) sub-range of `alloc` that was checked; all write methods
+    /// take ranges/offsets relative to this.
+    range: AllocRange,
+    tcx: TyCtxt<'tcx>,
+    /// Kept around so errors can name the allocation (`to_interp_error`).
+    alloc_id: AllocId,
+}
+
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+    /// Create an empty memory: no local allocations, no extra fn ptrs,
+    /// nothing deallocated yet.
+    pub fn new() -> Self {
+        Memory {
+            alloc_map: M::MemoryMap::default(),
+            extra_fn_ptr_map: FxHashMap::default(),
+            dead_alloc_map: FxHashMap::default(),
+        }
+    }
+
+    /// This is used by [priroda](https://github.com/oli-obk/priroda)
+    pub fn alloc_map(&self) -> &M::MemoryMap {
+        &self.alloc_map
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
+    /// the machine pointer to the allocation. Must never be used
+    /// for any other pointers, nor for TLS statics.
+    ///
+    /// Using the resulting pointer represents a *direct* access to that memory
+    /// (e.g. by directly using a `static`),
+    /// as opposed to access through a pointer that was created by the program.
+    ///
+    /// This function can fail only if `ptr` points to an `extern static`.
+    #[inline]
+    pub fn global_base_pointer(
+        &self,
+        ptr: Pointer<AllocId>,
+    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+        let alloc_id = ptr.provenance;
+        // We need to handle `extern static`.
+        match self.tcx.try_get_global_alloc(alloc_id) {
+            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
+                bug!("global memory cannot point to thread-local static")
+            }
+            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
+                return M::extern_static_base_pointer(self, def_id);
+            }
+            _ => {}
+        }
+        // And we need to get the provenance.
+        Ok(M::adjust_alloc_base_pointer(self, ptr))
+    }
+
+    /// Obtain a machine pointer to the given function value. `Instance`s get the
+    /// tcx-deduplicated function allocation; "other" values get a fresh `AllocId`
+    /// registered in `extra_fn_ptr_map`.
+    pub fn create_fn_alloc_ptr(
+        &mut self,
+        fn_val: FnVal<'tcx, M::ExtraFnVal>,
+    ) -> Pointer<M::Provenance> {
+        let id = match fn_val {
+            FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
+            FnVal::Other(extra) => {
+                // FIXME(RalfJung): Should we have a cache here?
+                let id = self.tcx.reserve_alloc_id();
+                let old = self.memory.extra_fn_ptr_map.insert(id, extra);
+                assert!(old.is_none());
+                id
+            }
+        };
+        // Functions are global allocations, so make sure we get the right base pointer.
+        // We know this is not an `extern static` so this cannot fail.
+        self.global_base_pointer(Pointer::from(id)).unwrap()
+    }
+
+    /// Allocate a fresh, uninitialized block of the given size and alignment.
+    pub fn allocate_ptr(
+        &mut self,
+        size: Size,
+        align: Align,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+        let alloc = Allocation::uninit(size, align, M::PANIC_ON_ALLOC_FAIL)?;
+        // We can `unwrap` since `alloc` contains no pointers.
+        Ok(self.allocate_raw_ptr(alloc, kind).unwrap())
+    }
+
+    /// Allocate a fresh block initialized with the given bytes and mutability.
+    pub fn allocate_bytes_ptr(
+        &mut self,
+        bytes: &[u8],
+        align: Align,
+        kind: MemoryKind<M::MemoryKind>,
+        mutability: Mutability,
+    ) -> Pointer<M::Provenance> {
+        let alloc = Allocation::from_bytes(bytes, align, mutability);
+        // We can `unwrap` since `alloc` contains no pointers.
+        self.allocate_raw_ptr(alloc, kind).unwrap()
+    }
+
+    /// This can fail only if `alloc` contains relocations.
+    pub fn allocate_raw_ptr(
+        &mut self,
+        alloc: Allocation,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+        let id = self.tcx.reserve_alloc_id();
+        debug_assert_ne!(
+            Some(kind),
+            M::GLOBAL_KIND.map(MemoryKind::Machine),
+            "dynamically allocating global memory"
+        );
+        // Let the machine attach its extra state/provenance to the new allocation.
+        let alloc = M::adjust_allocation(self, id, Cow::Owned(alloc), Some(kind))?;
+        self.memory.alloc_map.insert(id, (kind, alloc.into_owned()));
+        Ok(M::adjust_alloc_base_pointer(self, Pointer::from(id)))
+    }
+
+    /// Grow or shrink the allocation behind `ptr` to `new_size`/`new_align`,
+    /// preserving the overlapping contents. The old pointer is deallocated,
+    /// so any copies of it become dangling.
+    pub fn reallocate_ptr(
+        &mut self,
+        ptr: Pointer<Option<M::Provenance>>,
+        old_size_and_align: Option<(Size, Align)>,
+        new_size: Size,
+        new_align: Align,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
+        if offset.bytes() != 0 {
+            throw_ub_format!(
+                "reallocating {:?} which does not point to the beginning of an object",
+                ptr
+            );
+        }
+
+        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
+        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
+        let new_ptr = self.allocate_ptr(new_size, new_align, kind)?;
+        let old_size = match old_size_and_align {
+            Some((size, _align)) => size,
+            None => self.get_alloc_raw(alloc_id)?.size(),
+        };
+        // This will also call the access hooks.
+        self.mem_copy(
+            ptr,
+            Align::ONE,
+            new_ptr.into(),
+            Align::ONE,
+            old_size.min(new_size),
+            /*nonoverlapping*/ true,
+        )?;
+        self.deallocate_ptr(ptr, old_size_and_align, kind)?;
+
+        Ok(new_ptr)
+    }
+
+    /// Deallocate the allocation behind `ptr`. Errors (UB) if `ptr` does not point
+    /// to the start of a live, mutable, interpreter-local allocation of the given
+    /// `kind`, or if the caller-provided size/align do not match the allocation.
+    #[instrument(skip(self), level = "debug")]
+    pub fn deallocate_ptr(
+        &mut self,
+        ptr: Pointer<Option<M::Provenance>>,
+        old_size_and_align: Option<(Size, Align)>,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx> {
+        let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr)?;
+        trace!("deallocating: {alloc_id:?}");
+
+        if offset.bytes() != 0 {
+            throw_ub_format!(
+                "deallocating {:?} which does not point to the beginning of an object",
+                ptr
+            );
+        }
+
+        let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
+            // Deallocating global memory -- always an error
+            return Err(match self.tcx.try_get_global_alloc(alloc_id) {
+                Some(GlobalAlloc::Function(..)) => {
+                    err_ub_format!("deallocating {alloc_id:?}, which is a function")
+                }
+                Some(GlobalAlloc::VTable(..)) => {
+                    err_ub_format!("deallocating {alloc_id:?}, which is a vtable")
+                }
+                Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
+                    err_ub_format!("deallocating {alloc_id:?}, which is static memory")
+                }
+                None => err_ub!(PointerUseAfterFree(alloc_id)),
+            }
+            .into());
+        };
+
+        debug!(?alloc);
+
+        if alloc.mutability == Mutability::Not {
+            throw_ub_format!("deallocating immutable allocation {alloc_id:?}");
+        }
+        if alloc_kind != kind {
+            throw_ub_format!(
+                "deallocating {alloc_id:?}, which is {alloc_kind} memory, using {kind} deallocation operation"
+            );
+        }
+        if let Some((size, align)) = old_size_and_align {
+            if size != alloc.size() || align != alloc.align {
+                throw_ub_format!(
+                    "incorrect layout on deallocation: {alloc_id:?} has size {} and alignment {}, but gave size {} and alignment {}",
+                    alloc.size().bytes(),
+                    alloc.align.bytes(),
+                    size.bytes(),
+                    align.bytes(),
+                )
+            }
+        }
+
+        // Let the machine take some extra action
+        let size = alloc.size();
+        M::memory_deallocated(
+            *self.tcx,
+            &mut self.machine,
+            &mut alloc.extra,
+            (alloc_id, prov),
+            alloc_range(Size::ZERO, size),
+        )?;
+
+        // Don't forget to remember size and align of this now-dead allocation
+        let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
+        if old.is_some() {
+            bug!("Nothing can be deallocated twice");
+        }
+
+        Ok(())
+    }
+
+    /// Internal helper function to determine the allocation and offset of a pointer (if any).
+    /// Alignment is only checked if the machine requests it (`M::enforce_alignment`).
+    #[inline(always)]
+    fn get_ptr_access(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        size: Size,
+        align: Align,
+    ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
+        let align = M::enforce_alignment(&self).then_some(align);
+        self.check_and_deref_ptr(
+            ptr,
+            size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+            |alloc_id, offset, prov| {
+                let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
+                Ok((size, align, (alloc_id, offset, prov)))
+            },
+        )
+    }
+
+    /// Check if the given pointer points to live memory of given `size` and `align`
+    /// (ignoring `M::enforce_alignment`). The caller can control the error message for the
+    /// out-of-bounds case.
+    #[inline(always)]
+    pub fn check_ptr_access_align(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        size: Size,
+        align: Align,
+        msg: CheckInAllocMsg,
+    ) -> InterpResult<'tcx> {
+        self.check_and_deref_ptr(ptr, size, Some(align), msg, |alloc_id, _, _| {
+            let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
+            Ok((size, align, ()))
+        })?;
+        Ok(())
+    }
+
+    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
+    /// to the allocation it points to. Supports both shared and mutable references, as the actual
+    /// checking is offloaded to a helper closure. `align` defines whether and which alignment check
+    /// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
+    fn check_and_deref_ptr<T>(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        size: Size,
+        align: Option<Align>,
+        msg: CheckInAllocMsg,
+        alloc_size: impl FnOnce(
+            AllocId,
+            Size,
+            M::ProvenanceExtra,
+        ) -> InterpResult<'tcx, (Size, Align, T)>,
+    ) -> InterpResult<'tcx, Option<T>> {
+        // Check that `offset` is a multiple of `align`, reporting the largest
+        // alignment `offset` does satisfy on failure.
+        fn check_offset_align<'tcx>(offset: u64, align: Align) -> InterpResult<'tcx> {
+            if offset % align.bytes() == 0 {
+                Ok(())
+            } else {
+                // The biggest power of two through which `offset` is divisible.
+                let offset_pow2 = 1 << offset.trailing_zeros();
+                throw_ub!(AlignmentCheckFailed {
+                    has: Align::from_bytes(offset_pow2).unwrap(),
+                    required: align,
+                })
+            }
+        }
+
+        Ok(match self.ptr_try_get_alloc_id(ptr) {
+            Err(addr) => {
+                // We couldn't get a proper allocation. This is only okay if the access size is 0,
+                // and the address is not null.
+                if size.bytes() > 0 || addr == 0 {
+                    throw_ub!(DanglingIntPointer(addr, msg));
+                }
+                // Must be aligned.
+                if let Some(align) = align {
+                    check_offset_align(addr, align)?;
+                }
+                None
+            }
+            Ok((alloc_id, offset, prov)) => {
+                let (alloc_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
+                // Test bounds. This also ensures non-null.
+                // It is sufficient to check this for the end pointer. Also check for overflow!
+                if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
+                    throw_ub!(PointerOutOfBounds {
+                        alloc_id,
+                        alloc_size,
+                        ptr_offset: self.machine_usize_to_isize(offset.bytes()),
+                        ptr_size: size,
+                        msg,
+                    })
+                }
+                // Ensure we never consider the null pointer dereferenceable.
+                if M::Provenance::OFFSET_IS_ADDR {
+                    assert_ne!(ptr.addr(), Size::ZERO);
+                }
+                // Test align. Check this last; if both bounds and alignment are violated
+                // we want the error to be about the bounds.
+                if let Some(align) = align {
+                    if M::force_int_for_alignment_check(self) {
+                        // `force_int_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
+                        check_offset_align(ptr.addr().bytes(), align)?;
+                    } else {
+                        // Check allocation alignment and offset alignment.
+                        if alloc_align.bytes() < align.bytes() {
+                            throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
+                        }
+                        check_offset_align(offset.bytes(), align)?;
+                    }
+                }
+
+                // We can still be zero-sized in this branch, in which case we have to
+                // return `None`.
+                if size.bytes() == 0 { None } else { Some(ret_val) }
+            }
+        })
+    }
+}
+
+/// Allocation accessors
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Helper function to obtain a global (tcx) allocation.
+    /// This attempts to return a reference to an existing allocation if
+    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
+    /// this machine use the same pointer provenance, so it is indirected through
+    /// `M::adjust_allocation`.
+    fn get_global_alloc(
+        &self,
+        id: AllocId,
+        is_write: bool,
+    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra>>> {
+        let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
+            Some(GlobalAlloc::Memory(mem)) => {
+                // Memory of a constant or promoted or anonymous memory referenced by a static.
+                (mem, None)
+            }
+            Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
+            Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
+            None => throw_ub!(PointerUseAfterFree(id)),
+            Some(GlobalAlloc::Static(def_id)) => {
+                assert!(self.tcx.is_static(def_id));
+                assert!(!self.tcx.is_thread_local_static(def_id));
+                // Notice that every static has two `AllocId` that will resolve to the same
+                // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
+                // and the other one maps to `GlobalAlloc::Memory`, this is returned by
+                // `eval_static_initializer` and it is the "resolved" ID.
+                // The resolved ID is never used by the interpreted program, it is hidden.
+                // This is relied upon for soundness of const-patterns; a pointer to the resolved
+                // ID would "sidestep" the checks that make sure consts do not point to statics!
+                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
+                // contains a reference to memory that was created during its evaluation (i.e., not
+                // to another static), those inner references only exist in "resolved" form.
+                if self.tcx.is_foreign_item(def_id) {
+                    // This is unreachable in Miri, but can happen in CTFE where we actually *do* support
+                    // referencing arbitrary (declared) extern statics.
+                    throw_unsup!(ReadExternStatic(def_id));
+                }
+
+                // Use a precise span for better cycle errors.
+                (self.tcx.at(self.cur_span()).eval_static_initializer(def_id)?, Some(def_id))
+            }
+        };
+        M::before_access_global(*self.tcx, &self.machine, id, alloc, def_id, is_write)?;
+        // We got tcx memory. Let the machine initialize its "extra" stuff.
+        M::adjust_allocation(
+            self,
+            id, // always use the ID we got as input, not the "hidden" one.
+            Cow::Borrowed(alloc.inner()),
+            M::GLOBAL_KIND.map(MemoryKind::Machine),
+        )
+    }
+
+    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
+    /// The caller is responsible for calling the access hooks!
+    fn get_alloc_raw(
+        &self,
+        id: AllocId,
+    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra>> {
+        // The error type of the inner closure here is somewhat funny. We have two
+        // ways of "erroring": An actual error, or because we got a reference from
+        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
+        // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
+        let a = self.memory.alloc_map.get_or(id, || {
+            let alloc = self.get_global_alloc(id, /*is_write*/ false).map_err(Err)?;
+            match alloc {
+                Cow::Borrowed(alloc) => {
+                    // We got a ref, cheaply return that as an "error" so that the
+                    // map does not get mutated.
+                    Err(Ok(alloc))
+                }
+                Cow::Owned(alloc) => {
+                    // Need to put it into the map and return a ref to that
+                    let kind = M::GLOBAL_KIND.expect(
+                        "I got a global allocation that I have to copy but the machine does \
+                            not expect that to happen",
+                    );
+                    Ok((MemoryKind::Machine(kind), alloc))
+                }
+            }
+        });
+        // Now unpack that funny error type
+        match a {
+            Ok(a) => Ok(&a.1),
+            Err(a) => a,
+        }
+    }
+
+ /// "Safe" (bounds and align-checked) allocation access.
+ pub fn get_ptr_alloc<'a>(
+ &'a self,
+ ptr: Pointer<Option<M::Provenance>>,
+ size: Size,
+ align: Align,
+ ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
+ let align = M::enforce_alignment(self).then_some(align);
+ let ptr_and_alloc = self.check_and_deref_ptr(
+ ptr,
+ size,
+ align,
+ CheckInAllocMsg::MemoryAccessTest,
+ |alloc_id, offset, prov| {
+ let alloc = self.get_alloc_raw(alloc_id)?;
+ Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
+ },
+ )?;
+ if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
+ let range = alloc_range(offset, size);
+ M::memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?;
+ Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
+ } else {
+ // Even in this branch we have to be sure that we actually access the allocation, in
+ // order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of
+ // magically pulling *any* ZST value from the ether. However, the `get_raw` above is
+ // always called when `ptr` has an `AllocId`.
+ Ok(None)
+ }
+ }
+
+ /// Return the `extra` field of the given allocation.
+ pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
+ Ok(&self.get_alloc_raw(id)?.extra)
+ }
+
+ /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
+ /// The caller is responsible for calling the access hooks!
+ ///
+ /// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
+ /// allocation.
+ fn get_alloc_raw_mut(
+ &mut self,
+ id: AllocId,
+ ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra>, &mut M)> {
+ // We have "NLL problem case #3" here, which cannot be worked around without loss of
+ // efficiency even for the common case where the key is in the map.
+ // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
+ // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`.)
+ if self.memory.alloc_map.get_mut(id).is_none() {
+ // Slow path.
+ // Allocation not found locally, go look global.
+ let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
+ let kind = M::GLOBAL_KIND.expect(
+ "I got a global allocation that I have to copy but the machine does \
+ not expect that to happen",
+ );
+ self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
+ }
+
+ let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
+ if alloc.mutability == Mutability::Not {
+ throw_ub!(WriteToReadOnly(id))
+ }
+ Ok((alloc, &mut self.machine))
+ }
+
+ /// "Safe" (bounds and align-checked) allocation access.
+ pub fn get_ptr_alloc_mut<'a>(
+ &'a mut self,
+ ptr: Pointer<Option<M::Provenance>>,
+ size: Size,
+ align: Align,
+ ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
+ let parts = self.get_ptr_access(ptr, size, align)?;
+ if let Some((alloc_id, offset, prov)) = parts {
+ let tcx = *self.tcx;
+ // FIXME: can we somehow avoid looking up the allocation twice here?
+ // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
+ let (alloc, machine) = self.get_alloc_raw_mut(alloc_id)?;
+ let range = alloc_range(offset, size);
+ M::memory_written(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
+ Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }))
+ } else {
+ Ok(None)
+ }
+ }
+
+ /// Return the `extra` field of the given allocation.
+ pub fn get_alloc_extra_mut<'a>(
+ &'a mut self,
+ id: AllocId,
+ ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
+ let (alloc, machine) = self.get_alloc_raw_mut(id)?;
+ Ok((&mut alloc.extra, machine))
+ }
+
+    /// Obtain the size and alignment of an allocation, even if that allocation has
+    /// been deallocated.
+    pub fn get_alloc_info(&self, id: AllocId) -> (Size, Align, AllocKind) {
+        // # Regular allocations
+        // Don't use `self.get_raw` here as that will
+        // a) cause cycles in case `id` refers to a static
+        // b) duplicate a global's allocation in miri
+        if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
+            return (alloc.size(), alloc.align, AllocKind::LiveData);
+        }
+
+        // # Function pointers
+        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
+        if self.get_fn_alloc(id).is_some() {
+            return (Size::ZERO, Align::ONE, AllocKind::Function);
+        }
+
+        // # Statics
+        // Can't do this in the match argument, we may get cycle errors since the lock would
+        // be held throughout the match.
+        match self.tcx.try_get_global_alloc(id) {
+            Some(GlobalAlloc::Static(def_id)) => {
+                assert!(self.tcx.is_static(def_id));
+                assert!(!self.tcx.is_thread_local_static(def_id));
+                // Use size and align of the type.
+                let ty = self.tcx.type_of(def_id);
+                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
+                assert!(!layout.is_unsized());
+                (layout.size, layout.align.abi, AllocKind::LiveData)
+            }
+            Some(GlobalAlloc::Memory(alloc)) => {
+                // Need to duplicate the logic here, because the global allocations have
+                // different associated types than the interpreter-local ones.
+                let alloc = alloc.inner();
+                (alloc.size(), alloc.align, AllocKind::LiveData)
+            }
+            Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
+            Some(GlobalAlloc::VTable(..)) => {
+                // No data to be accessed here. But vtables are pointer-aligned.
+                return (Size::ZERO, self.tcx.data_layout.pointer_align.abi, AllocKind::VTable);
+            }
+            // The rest must be dead.
+            None => {
+                // Deallocated pointers are allowed, we should be able to find
+                // them in the map.
+                let (size, align) = *self
+                    .memory
+                    .dead_alloc_map
+                    .get(&id)
+                    .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
+                (size, align, AllocKind::Dead)
+            }
+        }
+    }
+
+    /// Obtain the size and alignment of a live allocation.
+    /// Errors (UB: use-after-free) for dead allocations.
+    pub fn get_live_alloc_size_and_align(&self, id: AllocId) -> InterpResult<'tcx, (Size, Align)> {
+        let (size, align, kind) = self.get_alloc_info(id);
+        if matches!(kind, AllocKind::Dead) {
+            throw_ub!(PointerUseAfterFree(id))
+        }
+        Ok((size, align))
+    }
+
+    /// Look up the function value behind an `AllocId`, checking the machine's
+    /// extra fn ptr map first and the tcx's global allocations second.
+    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
+        if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
+            Some(FnVal::Other(*extra))
+        } else {
+            match self.tcx.try_get_global_alloc(id) {
+                Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
+                _ => None,
+            }
+        }
+    }
+
+    /// Resolve a pointer to the function it points to.
+    /// Errors (UB) if the pointer has a nonzero offset or does not point to a function.
+    pub fn get_ptr_fn(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
+        trace!("get_ptr_fn({:?})", ptr);
+        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
+        if offset.bytes() != 0 {
+            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
+        }
+        self.get_fn_alloc(alloc_id)
+            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))).into())
+    }
+
+    /// Resolve a pointer to the vtable it points to, returning the vtable's
+    /// self type and trait. Errors (UB) for nonzero offsets or non-vtable pointers.
+    pub fn get_ptr_vtable(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+    ) -> InterpResult<'tcx, (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>)> {
+        trace!("get_ptr_vtable({:?})", ptr);
+        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr)?;
+        if offset.bytes() != 0 {
+            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
+        }
+        match self.tcx.try_get_global_alloc(alloc_id) {
+            Some(GlobalAlloc::VTable(ty, trait_ref)) => Ok((ty, trait_ref)),
+            _ => throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset))),
+        }
+    }
+
+    /// Mark the given (local) allocation as immutable; later writes to it are UB.
+    pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
+        self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
+        Ok(())
+    }
+
+    /// Create a lazy debug printer that prints the given allocation and all allocations it points
+    /// to, recursively.
+    #[must_use]
+    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> {
+        self.dump_allocs(vec![id])
+    }
+
+    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
+    /// recursively.
+    #[must_use]
+    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> {
+        allocs.sort();
+        allocs.dedup();
+        DumpAllocs { ecx: self, allocs }
+    }
+
+    /// Print leaked memory. Allocations reachable from `static_roots` or a `Global` allocation
+    /// are not considered leaked. Leaks whose kind `may_leak()` returns true are not reported.
+    /// Returns the number of leaked allocations.
+    pub fn leak_report(&self, static_roots: &[AllocId]) -> usize {
+        // Collect the set of allocations that are *reachable* from `Global` allocations.
+        let reachable = {
+            let mut reachable = FxHashSet::default();
+            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
+            let mut todo: Vec<_> =
+                self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
+                    if Some(kind) == global_kind { Some(id) } else { None }
+                });
+            todo.extend(static_roots);
+            while let Some(id) = todo.pop() {
+                if reachable.insert(id) {
+                    // This is a new allocation, add its relocations to `todo`.
+                    if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
+                        todo.extend(
+                            alloc.relocations().values().filter_map(|prov| prov.get_alloc_id()),
+                        );
+                    }
+                }
+            }
+            reachable
+        };
+
+        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
+        let leaks: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
+            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
+        });
+        let n = leaks.len();
+        if n > 0 {
+            eprintln!("The following memory was leaked: {:?}", self.dump_allocs(leaks));
+        }
+        n
+    }
+}
+
+#[doc(hidden)]
+/// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
+/// The actual printing happens lazily, in the `Debug` impl below.
+pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    ecx: &'a InterpCx<'mir, 'tcx, M>,
+    /// Sorted, deduplicated roots to start printing from (see `dump_allocs`).
+    allocs: Vec<AllocId>,
+}
+
+impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
+    /// Breadth-first print of the root allocations and everything they
+    /// (transitively) point to, each allocation printed at most once.
+    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // Cannot be a closure because it is generic in `Prov`, `Extra`.
+        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra>(
+            fmt: &mut std::fmt::Formatter<'_>,
+            tcx: TyCtxt<'tcx>,
+            allocs_to_print: &mut VecDeque<AllocId>,
+            alloc: &Allocation<Prov, Extra>,
+        ) -> std::fmt::Result {
+            // Enqueue every allocation this one points to before printing it.
+            for alloc_id in alloc.relocations().values().filter_map(|prov| prov.get_alloc_id()) {
+                allocs_to_print.push_back(alloc_id);
+            }
+            write!(fmt, "{}", display_allocation(tcx, alloc))
+        }
+
+        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
+        // `allocs_printed` contains all allocations that we have already printed.
+        let mut allocs_printed = FxHashSet::default();
+
+        while let Some(id) = allocs_to_print.pop_front() {
+            if !allocs_printed.insert(id) {
+                // Already printed, so skip this.
+                continue;
+            }
+
+            write!(fmt, "{id:?}")?;
+            match self.ecx.memory.alloc_map.get(id) {
+                Some(&(kind, ref alloc)) => {
+                    // normal alloc
+                    write!(fmt, " ({}, ", kind)?;
+                    write_allocation_track_relocs(
+                        &mut *fmt,
+                        *self.ecx.tcx,
+                        &mut allocs_to_print,
+                        alloc,
+                    )?;
+                }
+                None => {
+                    // global alloc
+                    match self.ecx.tcx.try_get_global_alloc(id) {
+                        Some(GlobalAlloc::Memory(alloc)) => {
+                            write!(fmt, " (unchanged global, ")?;
+                            write_allocation_track_relocs(
+                                &mut *fmt,
+                                *self.ecx.tcx,
+                                &mut allocs_to_print,
+                                alloc.inner(),
+                            )?;
+                        }
+                        Some(GlobalAlloc::Function(func)) => {
+                            write!(fmt, " (fn: {func})")?;
+                        }
+                        Some(GlobalAlloc::VTable(ty, Some(trait_ref))) => {
+                            write!(fmt, " (vtable: impl {trait_ref} for {ty})")?;
+                        }
+                        Some(GlobalAlloc::VTable(ty, None)) => {
+                            write!(fmt, " (vtable: impl <auto trait> for {ty})")?;
+                        }
+                        Some(GlobalAlloc::Static(did)) => {
+                            write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
+                        }
+                        None => {
+                            write!(fmt, " (deallocated)")?;
+                        }
+                    }
+                }
+            }
+            writeln!(fmt)?;
+        }
+        Ok(())
+    }
+}
+
+/// Reading and writing.
+impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> {
+    /// `range` is relative to this allocation reference, not the base of the allocation.
+    pub fn write_scalar(
+        &mut self,
+        range: AllocRange,
+        val: ScalarMaybeUninit<Prov>,
+    ) -> InterpResult<'tcx> {
+        let range = self.range.subrange(range);
+        debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
+        Ok(self
+            .alloc
+            .write_scalar(&self.tcx, range, val)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
+
+    /// Write a pointer-sized scalar.
+    /// `offset` is relative to this allocation reference, not the base of the allocation.
+    pub fn write_ptr_sized(
+        &mut self,
+        offset: Size,
+        val: ScalarMaybeUninit<Prov>,
+    ) -> InterpResult<'tcx> {
+        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
+    }
+
+    /// Mark the entire referenced range as uninitialized
+    pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
+        Ok(self
+            .alloc
+            .write_uninit(&self.tcx, self.range)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
+}
+
+impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
+    /// `range` is relative to this allocation reference, not the base of the allocation.
+    pub fn read_scalar(
+        &self,
+        range: AllocRange,
+        read_provenance: bool,
+    ) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> {
+        let range = self.range.subrange(range); // rebase `range` onto the underlying allocation
+        let res = self
+            .alloc
+            .read_scalar(&self.tcx, range, read_provenance)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?;
+        debug!("read_scalar at {:?}{range:?}: {res:?}", self.alloc_id);
+        Ok(res)
+    }
+
+    /// `range` is relative to this allocation reference, not the base of the allocation.
+    pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> {
+        self.read_scalar(range, /*read_provenance*/ false)
+    }
+
+    /// `offset` is relative to this allocation reference, not the base of the allocation.
+    pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> {
+        self.read_scalar(
+            alloc_range(offset, self.tcx.data_layout().pointer_size),
+            /*read_provenance*/ true,
+        )
+    }
+
+    /// `range` is relative to this allocation reference, not the base of the allocation.
+    pub fn check_bytes(
+        &self,
+        range: AllocRange,
+        allow_uninit: bool,
+        allow_ptr: bool,
+    ) -> InterpResult<'tcx> {
+        Ok(self
+            .alloc
+            .check_bytes(&self.tcx, self.range.subrange(range), allow_uninit, allow_ptr)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
+
+    /// Returns whether the allocation has relocations in the entire range of the `AllocRef`.
+    pub(crate) fn has_relocations(&self) -> bool {
+        self.alloc.has_relocations(&self.tcx, self.range)
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Reads the given number of bytes from memory. Returns them as a slice.
+    ///
+    /// Performs appropriate bounds checks.
+    pub fn read_bytes_ptr(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        size: Size,
+    ) -> InterpResult<'tcx, &[u8]> {
+        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
+            // zero-sized access
+            return Ok(&[]);
+        };
+        // Side-step AllocRef and directly access the underlying bytes more efficiently.
+        // (We are staying inside the bounds here so all is good.)
+        Ok(alloc_ref
+            .alloc
+            .get_bytes(&alloc_ref.tcx, alloc_ref.range)
+            .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
+    }
+
+    /// Writes the given stream of bytes into memory.
+    ///
+    /// Performs appropriate bounds checks.
+    pub fn write_bytes_ptr(
+        &mut self,
+        ptr: Pointer<Option<M::Provenance>>,
+        src: impl IntoIterator<Item = u8>,
+    ) -> InterpResult<'tcx> {
+        let mut src = src.into_iter();
+        let (lower, upper) = src.size_hint();
+        let len = upper.expect("can only write bounded iterators");
+        assert_eq!(lower, len, "can only write iterators with a precise length");
+
+        let size = Size::from_bytes(len);
+        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size, Align::ONE)? else {
+            // zero-sized access
+            assert_matches!(
+                src.next(),
+                None,
+                "iterator said it was empty but returned an element"
+            );
+            return Ok(());
+        };
+
+        // Side-step AllocRef and directly access the underlying bytes more efficiently.
+        // (We are staying inside the bounds here so all is good.)
+        let alloc_id = alloc_ref.alloc_id;
+        let bytes = alloc_ref
+            .alloc
+            .get_bytes_mut(&alloc_ref.tcx, alloc_ref.range)
+            .map_err(move |e| e.to_interp_error(alloc_id))?;
+        // `zip` would stop when the first iterator ends; we want to definitely
+        // cover all of `bytes`.
+        for dest in bytes {
+            *dest = src.next().expect("iterator was shorter than it said it would be");
+        }
+        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
+        Ok(())
+    }
+
+    pub fn mem_copy(
+        &mut self,
+        src: Pointer<Option<M::Provenance>>,
+        src_align: Align,
+        dest: Pointer<Option<M::Provenance>>,
+        dest_align: Align,
+        size: Size,
+        nonoverlapping: bool,
+    ) -> InterpResult<'tcx> {
+        // Convenience wrapper: a single copy.
+        self.mem_copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
+    }
+
+    pub fn mem_copy_repeatedly(
+        &mut self,
+        src: Pointer<Option<M::Provenance>>,
+        src_align: Align,
+        dest: Pointer<Option<M::Provenance>>,
+        dest_align: Align,
+        size: Size,
+        num_copies: u64,
+        nonoverlapping: bool,
+    ) -> InterpResult<'tcx> {
+        let tcx = self.tcx;
+        // We need to do our own bounds-checks.
+        let src_parts = self.get_ptr_access(src, size, src_align)?;
+        let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
+
+        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
+        // and once below to get the underlying `&[mut] Allocation`.
+
+        // Source alloc preparations and access hooks.
+        let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
+            // Zero-sized *source*, that means dst is also zero-sized and we have nothing to do.
+            return Ok(());
+        };
+        let src_alloc = self.get_alloc_raw(src_alloc_id)?;
+        let src_range = alloc_range(src_offset, size);
+        M::memory_read(*tcx, &self.machine, &src_alloc.extra, (src_alloc_id, src_prov), src_range)?;
+        // We need the `dest` ptr for the next operation, so we get it now.
+        // We already did the source checks and called the hooks so we are good to return early.
+        let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
+            // Zero-sized *destination*.
+            return Ok(());
+        };
+
+        // This checks relocation edges on the src, which needs to happen before
+        // `prepare_relocation_copy`.
+        let src_bytes = src_alloc
+            .get_bytes_with_uninit_and_ptr(&tcx, src_range)
+            .map_err(|e| e.to_interp_error(src_alloc_id))?
+            .as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
+        // first copy the relocations to a temporary buffer, because
+        // `get_bytes_mut` will clear the relocations, which is correct,
+        // since we don't want to keep any relocations at the target.
+        let relocations =
+            src_alloc.prepare_relocation_copy(self, src_range, dest_offset, num_copies);
+        // Prepare a copy of the initialization mask.
+        let compressed = src_alloc.compress_uninit_range(src_range);
+
+        // Destination alloc preparations and access hooks.
+        let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
+        let dest_range = alloc_range(dest_offset, size * num_copies);
+        M::memory_written(
+            *tcx,
+            extra,
+            &mut dest_alloc.extra,
+            (dest_alloc_id, dest_prov),
+            dest_range,
+        )?;
+        let dest_bytes = dest_alloc
+            .get_bytes_mut_ptr(&tcx, dest_range)
+            .map_err(|e| e.to_interp_error(dest_alloc_id))?
+            .as_mut_ptr();
+
+        if compressed.no_bytes_init() {
+            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
+            // is marked as uninitialized but we otherwise omit changing the byte representation which may
+            // be arbitrary for uninitialized bytes.
+            // This also avoids writing to the target bytes so that the backing allocation is never
+            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
+            // operating systems this can avoid physically allocating the page.
+            dest_alloc
+                .write_uninit(&tcx, dest_range)
+                .map_err(|e| e.to_interp_error(dest_alloc_id))?;
+            // We can forget about the relocations, this is all not initialized anyway.
+            return Ok(());
+        }
+
+        // SAFETY: The above indexing would have panicked if there weren't at least `size` bytes
+        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
+        // `dest` could possibly overlap.
+        // The pointers above remain valid even if the `HashMap` table is moved around because they
+        // point into the `Vec` storing the bytes.
+        unsafe {
+            if src_alloc_id == dest_alloc_id {
+                if nonoverlapping {
+                    // `Size` additions
+                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
+                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
+                    {
+                        throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
+                    }
+                }
+
+                for i in 0..num_copies {
+                    ptr::copy(
+                        src_bytes,
+                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
+                        size.bytes_usize(),
+                    );
+                }
+            } else {
+                for i in 0..num_copies {
+                    ptr::copy_nonoverlapping(
+                        src_bytes,
+                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
+                        size.bytes_usize(),
+                    );
+                }
+            }
+        }
+
+        // now fill in all the "init" data
+        dest_alloc.mark_compressed_init_range(
+            &compressed,
+            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
+            num_copies,
+        );
+        // copy the relocations to the destination
+        dest_alloc.mark_relocation_range(relocations);
+
+        Ok(())
+    }
+}
+
+/// Machine pointer introspection.
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Test if this value might be null.
+    /// If the machine does not support ptr-to-int casts, this is conservative.
+    pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
+        Ok(match scalar.try_to_int() {
+            Ok(int) => int.is_null(),
+            Err(_) => {
+                // Can only happen during CTFE.
+                let ptr = scalar.to_pointer(self)?;
+                match self.ptr_try_get_alloc_id(ptr) {
+                    Ok((alloc_id, offset, _)) => {
+                        let (size, _align, _kind) = self.get_alloc_info(alloc_id);
+                        // If the pointer is out-of-bounds, it may be null.
+                        // Note that one-past-the-end (offset == size) is still inbounds, and never null.
+                        offset > size
+                    }
+                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
+                }
+            }
+        })
+    }
+
+    /// Turning a "maybe pointer" into a proper pointer (and some information
+    /// about where it points), or an absolute address.
+    pub fn ptr_try_get_alloc_id(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+    ) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
+        match ptr.into_pointer_or_addr() {
+            Ok(ptr) => match M::ptr_get_alloc(self, ptr) {
+                Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
+                None => {
+                    assert!(M::Provenance::OFFSET_IS_ADDR);
+                    let (_, addr) = ptr.into_parts();
+                    Err(addr.bytes()) // no allocation: with `OFFSET_IS_ADDR`, the offset *is* the address
+                }
+            },
+            Err(addr) => Err(addr.bytes()), // no provenance at all: just an absolute address
+        }
+    }
+
+    /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
+    #[inline(always)]
+    pub fn ptr_get_alloc_id(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+    ) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
+        self.ptr_try_get_alloc_id(ptr).map_err(|offset| {
+            err_ub!(DanglingIntPointer(offset, CheckInAllocMsg::InboundsTest)).into()
+        })
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
new file mode 100644
index 000000000..2e356f67b
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -0,0 +1,34 @@
+//! An interpreter for MIR, used in CTFE and by Miri.
+
+mod cast;
+mod eval_context;
+mod intern;
+mod intrinsics;
+mod machine;
+mod memory;
+mod operand;
+mod operator;
+mod place;
+mod projection;
+mod step;
+mod terminator;
+mod traits;
+mod util;
+mod validity;
+mod visitor;
+
+pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
+
+pub use self::eval_context::{
+    Frame, FrameInfo, InterpCx, LocalState, LocalValue, StackPopCleanup, StackPopUnwind,
+};
+pub use self::intern::{intern_const_alloc_recursive, InternKind};
+pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
+pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
+pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
+pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
+pub use self::validity::{CtfeValidationMode, RefTracking};
+pub use self::visitor::{MutValueVisitor, Value, ValueVisitor};
+
+pub(crate) use self::intrinsics::eval_nullary_intrinsic;
+use eval_context::{from_known_layout, mir_assign_valid_types}; // shared by sibling modules via `super::`
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
new file mode 100644
index 000000000..94ba62c16
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -0,0 +1,831 @@
+//! Functions concerning immediate values and operands, and reading from operands.
+//! All high-level functions to read from memory work on operands as sources.
+
+use std::fmt::Write;
+
+use rustc_hir::def::Namespace;
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
+use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
+use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty};
+use rustc_middle::{mir, ty};
+use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding};
+use rustc_target::abi::{VariantIdx, Variants};
+
+use super::{
+ alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
+ InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Place, PlaceTy, Pointer,
+ Provenance, Scalar, ScalarMaybeUninit,
+};
+
+/// An `Immediate` represents a single immediate self-contained Rust value.
+///
+/// For optimization of a few very common cases, there is also a representation for a pair of
+/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
+/// operations and wide pointers. This idea was taken from rustc's codegen.
+/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
+/// defined on `Immediate`, and do not have to work with a `Place`.
+#[derive(Copy, Clone, Debug)]
+pub enum Immediate<Prov: Provenance = AllocId> {
+    /// A single scalar value (must have *initialized* `Scalar` ABI).
+    /// FIXME: we also currently often use this for ZST.
+    /// `ScalarMaybeUninit` should reject ZST, and we should use `Uninit` for them instead.
+    Scalar(ScalarMaybeUninit<Prov>),
+    /// A pair of two scalar values (must have `ScalarPair` ABI where both fields are
+    /// `Scalar::Initialized`).
+    ScalarPair(ScalarMaybeUninit<Prov>, ScalarMaybeUninit<Prov>),
+    /// A value of fully uninitialized memory. Can have any size and layout.
+    Uninit,
+}
+
+impl<Prov: Provenance> From<ScalarMaybeUninit<Prov>> for Immediate<Prov> {
+    #[inline(always)]
+    fn from(val: ScalarMaybeUninit<Prov>) -> Self {
+        Immediate::Scalar(val)
+    }
+}
+
+impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
+    #[inline(always)]
+    fn from(val: Scalar<Prov>) -> Self {
+        Immediate::Scalar(val.into()) // lift `Scalar` into `ScalarMaybeUninit` first
+    }
+}
+
+impl<'tcx, Prov: Provenance> Immediate<Prov> {
+    pub fn from_pointer(p: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
+        Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx))
+    }
+
+    pub fn from_maybe_pointer(p: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
+        Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx))
+    }
+
+    pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
+        Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into()) // (data, length)
+    }
+
+    pub fn new_dyn_trait(
+        val: Scalar<Prov>,
+        vtable: Pointer<Option<Prov>>,
+        cx: &impl HasDataLayout,
+    ) -> Self {
+        Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_maybe_pointer(vtable, cx)) // (data, vtable)
+    }
+
+    #[inline]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Prov> {
+        match self {
+            Immediate::Scalar(val) => val,
+            Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
+            Immediate::Uninit => ScalarMaybeUninit::Uninit,
+        }
+    }
+
+    #[inline]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Prov>> {
+        self.to_scalar_or_uninit().check_init() // errors out on `Uninit`
+    }
+
+    #[inline]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn to_scalar_or_uninit_pair(self) -> (ScalarMaybeUninit<Prov>, ScalarMaybeUninit<Prov>) {
+        match self {
+            Immediate::ScalarPair(val1, val2) => (val1, val2),
+            Immediate::Scalar(..) => bug!("Got a scalar where a scalar pair was expected"),
+            Immediate::Uninit => (ScalarMaybeUninit::Uninit, ScalarMaybeUninit::Uninit),
+        }
+    }
+
+    #[inline]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Prov>, Scalar<Prov>)> {
+        let (val1, val2) = self.to_scalar_or_uninit_pair();
+        Ok((val1.check_init()?, val2.check_init()?))
+    }
+}
+
+// ScalarPair needs a type to interpret, so we often have an immediate and a type together
+// as input for binary and cast operations.
+#[derive(Clone, Debug)]
+pub struct ImmTy<'tcx, Prov: Provenance = AllocId> {
+    imm: Immediate<Prov>,
+    pub layout: TyAndLayout<'tcx>,
+}
+
+impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        /// Helper function for printing a scalar to a FmtPrinter.
+        fn p<'a, 'tcx, Prov: Provenance>(
+            cx: FmtPrinter<'a, 'tcx>,
+            s: ScalarMaybeUninit<Prov>,
+            ty: Ty<'tcx>,
+        ) -> Result<FmtPrinter<'a, 'tcx>, std::fmt::Error> {
+            match s {
+                ScalarMaybeUninit::Scalar(Scalar::Int(int)) => {
+                    cx.pretty_print_const_scalar_int(int, ty, true)
+                }
+                ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => {
+                    // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
+                    // print what it points to, which would fail since it has no access to the local
+                    // memory.
+                    cx.pretty_print_const_pointer(ptr, ty, true)
+                }
+                ScalarMaybeUninit::Uninit => cx.typed_value(
+                    |mut this| {
+                        this.write_str("uninit ")?;
+                        Ok(this)
+                    },
+                    |this| this.print_type(ty),
+                    " ",
+                ),
+            }
+        }
+        ty::tls::with(|tcx| {
+            match self.imm {
+                Immediate::Scalar(s) => {
+                    if let Some(ty) = tcx.lift(self.layout.ty) {
+                        let cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+                        f.write_str(&p(cx, s, ty)?.into_buffer())?;
+                        return Ok(());
+                    }
+                    write!(f, "{:x}: {}", s, self.layout.ty)
+                }
+                Immediate::ScalarPair(a, b) => {
+                    // FIXME(oli-obk): at least print tuples and slices nicely
+                    write!(f, "({:x}, {:x}): {}", a, b, self.layout.ty)
+                }
+                Immediate::Uninit => {
+                    write!(f, "uninit: {}", self.layout.ty)
+                }
+            }
+        })
+    }
+}
+
+impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
+    type Target = Immediate<Prov>;
+    #[inline(always)]
+    fn deref(&self) -> &Immediate<Prov> {
+        &self.imm
+    }
+}
+
+/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
+/// or still in memory. The latter is an optimization, to delay reading that chunk of
+/// memory and to avoid having to store arbitrary-sized data here.
+#[derive(Copy, Clone, Debug)]
+pub enum Operand<Prov: Provenance = AllocId> {
+    Immediate(Immediate<Prov>), // the value itself
+    Indirect(MemPlace<Prov>), // the value is stored in memory at this place
+}
+
+#[derive(Clone, Debug)]
+pub struct OpTy<'tcx, Prov: Provenance = AllocId> {
+    op: Operand<Prov>, // Keep this private; it helps enforce invariants.
+    pub layout: TyAndLayout<'tcx>,
+    /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
+    /// it needs to have a different alignment than the field type would usually have.
+    /// So we represent this here with a separate field that "overwrites" `layout.align`.
+    /// This means `layout.align` should never be used for an `OpTy`!
+    /// `None` means "alignment does not matter since this is a by-value operand"
+    /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
+    /// Also CTFE ignores alignment anyway, so this is for Miri only.
+    pub align: Option<Align>,
+}
+
+impl<'tcx, Prov: Provenance> std::ops::Deref for OpTy<'tcx, Prov> {
+    type Target = Operand<Prov>;
+    #[inline(always)]
+    fn deref(&self) -> &Operand<Prov> {
+        &self.op
+    }
+}
+
+impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
+        OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout, align: Some(mplace.align) }
+    }
+}
+
+impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
+        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
+    }
+}
+
+impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
+        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
+    }
+}
+
+impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(val: ImmTy<'tcx, Prov>) -> Self {
+        OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None } // by-value: no alignment
+    }
+}
+
+impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
+    #[inline]
+    pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+        ImmTy { imm: val.into(), layout }
+    }
+
+    #[inline]
+    pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+        ImmTy { imm, layout }
+    }
+
+    #[inline]
+    pub fn uninit(layout: TyAndLayout<'tcx>) -> Self {
+        ImmTy { imm: Immediate::Uninit, layout }
+    }
+
+    #[inline]
+    pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
+        Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout)) // `None` if `i` does not fit
+    }
+    #[inline]
+    pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
+        Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
+    }
+
+    #[inline]
+    pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
+        Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout)) // `None` if `i` does not fit
+    }
+
+    #[inline]
+    pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
+        Self::from_scalar(Scalar::from_int(i, layout.size), layout)
+    }
+
+    #[inline]
+    pub fn to_const_int(self) -> ConstInt {
+        assert!(self.layout.ty.is_integral()); // only integral types have a `ConstInt`
+        let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
+        ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
+    }
+}
+
+impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
+    pub fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
+        if self.layout.is_unsized() {
+            // There are no unsized immediates.
+            self.assert_mem_place().len(cx)
+        } else {
+            match self.layout.fields {
+                abi::FieldsShape::Array { count, .. } => Ok(count),
+                _ => bug!("len not supported on sized type {:?}", self.layout.ty),
+            }
+        }
+    }
+
+    pub fn offset_with_meta(
+        &self,
+        offset: Size,
+        meta: MemPlaceMeta<Prov>,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        match self.try_as_mplace() {
+            Ok(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
+            Err(imm) => {
+                assert!(
+                    matches!(*imm, Immediate::Uninit),
+                    "Scalar/ScalarPair cannot be offset into"
+                );
+                assert!(!meta.has_meta()); // no place to store metadata here
+                // Every part of an uninit is uninit.
+                Ok(ImmTy::uninit(layout).into())
+            }
+        }
+    }
+
+    pub fn offset(
+        &self,
+        offset: Size,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        assert!(!layout.is_unsized()); // unsized targets need metadata; use `offset_with_meta`
+        self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
+    /// Returns `None` if the layout does not permit loading this as a value.
+    ///
+    /// This is an internal function; call `read_immediate` instead.
+    fn read_immediate_from_mplace_raw(
+        &self,
+        mplace: &MPlaceTy<'tcx, M::Provenance>,
+        force: bool,
+    ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::Provenance>>> {
+        if mplace.layout.is_unsized() {
+            // Don't touch unsized
+            return Ok(None);
+        }
+
+        let Some(alloc) = self.get_place_alloc(mplace)? else {
+            // zero-sized type can be left uninit
+            return Ok(Some(ImmTy::uninit(mplace.layout)));
+        };
+
+        // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
+        // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
+        // and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
+        // case where some of the bytes are initialized and others are not. So, we need an extra
+        // check that walks over the type of `mplace` to make sure it is truly correct to treat this
+        // like a `Scalar` (or `ScalarPair`).
+        let scalar_layout = match mplace.layout.abi {
+            // `if` does not work nested inside patterns, making this a bit awkward to express.
+            Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => Some(s),
+            Abi::Scalar(s) if force => Some(s.primitive()), // lossy load, for validity checking only
+            _ => None,
+        };
+        if let Some(s) = scalar_layout {
+            let size = s.size(self);
+            assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
+            let scalar = alloc
+                .read_scalar(alloc_range(Size::ZERO, size), /*read_provenance*/ s.is_ptr())?;
+            return Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }));
+        }
+        let scalar_pair_layout = match mplace.layout.abi {
+            Abi::ScalarPair(
+                abi::Scalar::Initialized { value: a, .. },
+                abi::Scalar::Initialized { value: b, .. },
+            ) => Some((a, b)),
+            Abi::ScalarPair(a, b) if force => Some((a.primitive(), b.primitive())), // lossy load
+            _ => None,
+        };
+        if let Some((a, b)) = scalar_pair_layout {
+            // We checked `ptr_align` above, so all fields will have the alignment they need.
+            // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
+            // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
+            let (a_size, b_size) = (a.size(self), b.size(self));
+            let b_offset = a_size.align_to(b.align(self).abi);
+            assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
+            let a_val = alloc.read_scalar(
+                alloc_range(Size::ZERO, a_size),
+                /*read_provenance*/ a.is_ptr(),
+            )?;
+            let b_val = alloc
+                .read_scalar(alloc_range(b_offset, b_size), /*read_provenance*/ b.is_ptr())?;
+            return Ok(Some(ImmTy {
+                imm: Immediate::ScalarPair(a_val, b_val),
+                layout: mplace.layout,
+            }));
+        }
+        // Neither a scalar nor scalar pair.
+        return Ok(None);
+    }
+
+    /// Try returning an immediate for the operand. If the layout does not permit loading this as an
+    /// immediate, return where in memory we can find the data.
+    /// Note that for a given layout, this operation will either always fail or always
+    /// succeed! Whether it succeeds depends on whether the layout can be represented
+    /// in an `Immediate`, not on which data is stored there currently.
+    ///
+    /// If `force` is `true`, then even scalars with fields that can be uninit will be
+    /// read. This means the load is lossy and should not be written back!
+    /// This flag exists only for validity checking.
+    ///
+    /// This is an internal function that should not usually be used; call `read_immediate` instead.
+    /// ConstProp needs it, though.
+    pub fn read_immediate_raw(
+        &self,
+        src: &OpTy<'tcx, M::Provenance>,
+        force: bool,
+    ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::Provenance>, MPlaceTy<'tcx, M::Provenance>>> {
+        Ok(match src.try_as_mplace() {
+            Ok(ref mplace) => {
+                if let Some(val) = self.read_immediate_from_mplace_raw(mplace, force)? {
+                    Ok(val)
+                } else {
+                    Err(*mplace)
+                }
+            }
+            Err(val) => Ok(val),
+        })
+    }
+
+    /// Read an immediate from a place, asserting that that is possible with the given layout.
+    #[inline(always)]
+    pub fn read_immediate(
+        &self,
+        op: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+        if let Ok(imm) = self.read_immediate_raw(op, /*force*/ false)? {
+            Ok(imm)
+        } else {
+            span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
+        }
+    }
+
+    /// Read a scalar from a place.
+    pub fn read_scalar(
+        &self,
+        op: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, ScalarMaybeUninit<M::Provenance>> {
+        Ok(self.read_immediate(op)?.to_scalar_or_uninit())
+    }
+
+    /// Read a pointer from a place.
+    pub fn read_pointer(
+        &self,
+        op: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
+        self.read_scalar(op)?.to_pointer(self)
+    }
+
+    /// Turn the wide MPlace into a string (must already be dereferenced!).
+    pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> {
+        let len = mplace.len(self)?;
+        let bytes = self.read_bytes_ptr(mplace.ptr, Size::from_bytes(len))?;
+        let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
+        Ok(str)
+    }
+
+    /// Converts a repr(simd) operand into an operand where `place_index` accesses the SIMD elements.
+    /// Also returns the number of elements.
+    ///
+    /// Can (but does not always) trigger UB if `op` is uninitialized.
+    pub fn operand_to_simd(
+        &self,
+        op: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
+        // Basically we just transmute this place into an array following simd_size_and_type.
+        // This only works in memory, but repr(simd) types should never be immediates anyway.
+        assert!(op.layout.ty.is_simd());
+        match op.try_as_mplace() {
+            Ok(mplace) => self.mplace_to_simd(&mplace),
+            Err(imm) => match *imm {
+                Immediate::Uninit => {
+                    throw_ub!(InvalidUninitBytes(None))
+                }
+                Immediate::Scalar(..) | Immediate::ScalarPair(..) => {
+                    bug!("arrays/slices can never have Scalar/ScalarPair layout")
+                }
+            },
+        }
+    }
+
+    /// Read from a local. Will not actually access the local if reading from a ZST.
+    /// Will not access memory, instead an indirect `Operand` is returned.
+    ///
+    /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
+    /// OpTy from a local.
+    pub fn local_to_op(
+        &self,
+        frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
+        local: mir::Local,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        let layout = self.layout_of_local(frame, local, layout)?;
+        let op = if layout.is_zst() {
+            // Bypass `access_local` (helps in ConstProp)
+            Operand::Immediate(Immediate::Uninit)
+        } else {
+            *M::access_local(frame, local)?
+        };
+        Ok(OpTy { op, layout, align: Some(layout.align.abi) })
+    }
+
+    /// Every place can be read from, so we can turn them into an operand.
+    /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
+    /// will never actually read from memory.
+    #[inline(always)]
+    pub fn place_to_op(
+        &self,
+        place: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        let op = match **place {
+            Place::Ptr(mplace) => Operand::Indirect(mplace),
+            Place::Local { frame, local } => {
+                *self.local_to_op(&self.stack()[frame], local, None)?
+            }
+        };
+        Ok(OpTy { op, layout: place.layout, align: Some(place.align) })
+    }
+
+    /// Evaluate a place with the goal of reading from it. This lets us sometimes
+    /// avoid allocations.
+    pub fn eval_place_to_op(
+        &self,
+        mir_place: mir::Place<'tcx>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        // Do not use the layout passed in as argument if the base we are looking at
+        // here is not the entire place.
+        let layout = if mir_place.projection.is_empty() { layout } else { None };
+
+        let mut op = self.local_to_op(self.frame(), mir_place.local, layout)?;
+        // Using `try_fold` turned out to be bad for performance, hence the loop.
+        for elem in mir_place.projection.iter() {
+            op = self.operand_projection(&op, elem)? // apply one projection step at a time
+        }
+
+        trace!("eval_place_to_op: got {:?}", *op);
+        // Sanity-check the type we ended up with.
+        debug_assert!(
+            mir_assign_valid_types(
+                *self.tcx,
+                self.param_env,
+                self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
+                    mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
+                )?)?,
+                op.layout,
+            ),
+            "eval_place of a MIR place with type {:?} produced an interpreter operand with type {:?}",
+            mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
+            op.layout.ty,
+        );
+        Ok(op)
+    }
+
+ /// Evaluate the operand, returning a place where you can then find the data.
+ /// If you already know the layout, you can save two table lookups
+ /// by passing it in here.
+ #[inline]
+ pub fn eval_operand(
+ &self,
+ mir_op: &mir::Operand<'tcx>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ use rustc_middle::mir::Operand::*;
+ let op = match *mir_op {
+ // FIXME: do some more logic on `move` to invalidate the old location
+ Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?, // currently `Copy` and `Move` are treated identically (see FIXME)
+
+ Constant(ref constant) => {
+ let val =
+ self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal)?; // substitute the current frame's generics first
+
+ // This can still fail:
+ // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
+ // checked yet.
+ // * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
+ self.mir_const_to_op(&val, layout)?
+ }
+ };
+ trace!("{:?}: {:?}", mir_op, *op);
+ Ok(op)
+ }
+
+ /// Evaluate a bunch of operands at once
+ pub(super) fn eval_operands(
+ &self,
+ ops: &[mir::Operand<'tcx>],
+ ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::Provenance>>> {
+ ops.iter().map(|op| self.eval_operand(op, None)).collect() // collecting into `InterpResult<Vec<_>>` short-circuits at the first error
+ }
+
+ // Used when the miri-engine runs into a constant and for extracting information from constants
+ // in patterns via the `const_eval` module
+ /// The `val` and `layout` are assumed to already be in our interpreter
+ /// "universe" (param_env).
+ pub fn const_to_op(
+ &self,
+ c: ty::Const<'tcx>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ match c.kind() {
+ ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric), // not monomorphic enough to evaluate yet
+ ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => {
+ throw_inval!(AlreadyReported(reported)) // an error was already emitted for this const; do not report twice
+ }
+ ty::ConstKind::Unevaluated(uv) => {
+ let instance = self.resolve(uv.def, uv.substs)?; // resolve the const's definition with its substs
+ Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
+ }
+ ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
+ span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", c) // these kinds must not reach const-eval
+ }
+ ty::ConstKind::Value(valtree) => {
+ let ty = c.ty();
+ let const_val = self.tcx.valtree_to_const_val((ty, valtree)); // lower the valtree to a `ConstValue` first
+ self.const_val_to_op(const_val, ty, layout)
+ }
+ }
+ }
+
+ pub fn mir_const_to_op(
+ &self,
+ val: &mir::ConstantKind<'tcx>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ match val {
+ mir::ConstantKind::Ty(ct) => self.const_to_op(*ct, layout), // type-system constant: may still need evaluation
+ mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, *ty, layout), // already-evaluated value: convert directly
+ }
+ }
+
+ pub(crate) fn const_val_to_op(
+ &self,
+ val_val: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ // Other cases need layout.
+ let adjust_scalar = |scalar| -> InterpResult<'tcx, _> {
+ Ok(match scalar {
+ Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size), // re-anchor the global pointer via `global_base_pointer`
+ Scalar::Int(int) => Scalar::Int(int), // plain integers pass through unchanged
+ })
+ };
+ let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?; // compute the layout only if the caller did not supply one
+ let op = match val_val {
+ ConstValue::ByRef { alloc, offset } => {
+ let id = self.tcx.create_memory_alloc(alloc); // register the allocation with the tcx to obtain an `AllocId`
+ // We rely on mutability being set correctly in that allocation to prevent writes
+ // where none should happen.
+ let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
+ Operand::Indirect(MemPlace::from_ptr(ptr.into()))
+ }
+ ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
+ ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit), // zero-sized values carry no data
+ ConstValue::Slice { data, start, end } => {
+ // We rely on mutability being set correctly in `data` to prevent writes
+ // where none should happen.
+ let ptr = Pointer::new(
+ self.tcx.create_memory_alloc(data),
+ Size::from_bytes(start), // offset: `start`
+ );
+ Operand::Immediate(Immediate::new_slice(
+ Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
+ u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
+ self,
+ ))
+ }
+ };
+ Ok(OpTy { op, layout, align: Some(layout.align.abi) })
+ }
+
+ /// Read discriminant, return the runtime value as well as the variant index.
+ /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
+ pub fn read_discriminant(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
+ trace!("read_discriminant_value {:#?}", op.layout);
+ // Get type and layout of the discriminant.
+ let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?; // layout of the *logical* discriminant type, not the in-memory tag
+ trace!("discriminant type: {:?}", discr_layout.ty);
+
+ // We use "discriminant" to refer to the value associated with a particular enum variant.
+ // This is not to be confused with its "variant index", which is just determining its position in the
+ // declared list of variants -- they can differ with explicitly assigned discriminants.
+ // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
+ // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
+ let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
+ Variants::Single { index } => { // no tag stored in memory: the variant is statically known
+ let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
+ Some(discr) => {
+ // This type actually has discriminants.
+ assert_eq!(discr.ty, discr_layout.ty);
+ Scalar::from_uint(discr.val, discr_layout.size)
+ }
+ None => {
+ // On a type without actual discriminants, variant is 0.
+ assert_eq!(index.as_u32(), 0);
+ Scalar::from_uint(index.as_u32(), discr_layout.size)
+ }
+ };
+ return Ok((discr, index)); // early return: nothing needs to be read from memory
+ }
+ Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
+ (tag, tag_encoding, tag_field)
+ }
+ };
+
+ // There are *three* layouts that come into play here:
+ // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
+ // the `Scalar` we return.
+ // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
+ // and used to interpret the value we read from the tag field.
+ // For the return value, a cast to `discr_layout` is performed.
+ // - The field storing the tag has a layout, which is very similar to `tag_layout` but
+ // may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
+
+ // Get layout for tag.
+ let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?; // the integer type the tag is stored as
+
+ // Read tag and sanity-check `tag_layout`.
+ let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
+ assert_eq!(tag_layout.size, tag_val.layout.size);
+ assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
+ trace!("tag value: {}", tag_val);
+
+ // Figure out which discriminant and variant this corresponds to.
+ Ok(match *tag_encoding {
+ TagEncoding::Direct => {
+ let scalar = tag_val.to_scalar()?;
+ // Generate a specific error if `tag_val` is not an integer.
+ // (`tag_bits` itself is only used for error messages below.)
+ let tag_bits = scalar
+ .try_to_int()
+ .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
+ .assert_bits(tag_layout.size);
+ // Cast bits from tag layout to discriminant layout.
+ // After the checks we did above, this cannot fail, as
+ // discriminants are int-like.
+ let discr_val =
+ self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
+ let discr_bits = discr_val.assert_bits(discr_layout.size);
+ // Convert discriminant to variant index, and catch invalid discriminants.
+ let index = match *op.layout.ty.kind() {
+ ty::Adt(adt, _) => {
+ adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
+ }
+ ty::Generator(def_id, substs, _) => {
+ let substs = substs.as_generator();
+ substs
+ .discriminants(def_id, *self.tcx)
+ .find(|(_, var)| var.val == discr_bits)
+ }
+ _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
+ }
+ .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?; // no variant matched the value: the tag is invalid (UB)
+ // Return the cast value, and the index.
+ (discr_val, index.0)
+ }
+ TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+ let tag_val = tag_val.to_scalar()?; // shadow: only the scalar is needed from here on
+ // Compute the variant this niche value/"tag" corresponds to. With niche layout,
+ // discriminant (encoded in niche/tag) and variant index are the same.
+ let variants_start = niche_variants.start().as_u32();
+ let variants_end = niche_variants.end().as_u32();
+ let variant = match tag_val.try_to_int() {
+ Err(dbg_val) => {
+ // So this is a pointer then, and casting to an int failed.
+ // Can only happen during CTFE.
+ // The niche must be just 0, and the ptr not null, then we know this is
+ // okay. Everything else, we conservatively reject.
+ let ptr_valid = niche_start == 0
+ && variants_start == variants_end
+ && !self.scalar_may_be_null(tag_val)?;
+ if !ptr_valid {
+ throw_ub!(InvalidTag(dbg_val))
+ }
+ dataful_variant // a (non-null) pointer value always encodes the dataful variant
+ }
+ Ok(tag_bits) => {
+ let tag_bits = tag_bits.assert_bits(tag_layout.size);
+ // We need to use machine arithmetic to get the relative variant idx:
+ // variant_index_relative = tag_val - niche_start_val
+ let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
+ let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+ let variant_index_relative_val =
+ self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?; // wrapping subtraction at tag width
+ let variant_index_relative = variant_index_relative_val
+ .to_scalar()?
+ .assert_bits(tag_val.layout.size);
+ // Check if this is in the range that indicates an actual discriminant.
+ if variant_index_relative <= u128::from(variants_end - variants_start) {
+ let variant_index_relative = u32::try_from(variant_index_relative)
+ .expect("we checked that this fits into a u32");
+ // Then computing the absolute variant idx should not overflow any more.
+ let variant_index = variants_start
+ .checked_add(variant_index_relative)
+ .expect("overflow computing absolute variant idx");
+ let variants_len = op
+ .layout
+ .ty
+ .ty_adt_def()
+ .expect("tagged layout for non adt")
+ .variants()
+ .len();
+ assert!(usize::try_from(variant_index).unwrap() < variants_len); // sanity: index must name a declared variant
+ VariantIdx::from_u32(variant_index)
+ } else {
+ dataful_variant // value outside the niche range encodes the dataful variant
+ }
+ }
+ };
+ // Compute the size of the scalar we need to return.
+ // No need to cast, because the variant index directly serves as discriminant and is
+ // encoded in the tag.
+ (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
+ }
+ })
+ }
+}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ // These are in alphabetical order, which is easy to maintain.
+ rustc_data_structures::static_assert_size!(Immediate, 56); // compile-time checks; only enforced on 64-bit x86_64
+ rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
+ rustc_data_structures::static_assert_size!(Operand, 64);
+ rustc_data_structures::static_assert_size!(OpTy<'_>, 88);
+}
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
new file mode 100644
index 000000000..f9912d706
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -0,0 +1,463 @@
+use std::convert::TryFrom;
+
+use rustc_apfloat::Float;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{InterpResult, Scalar};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, FloatTy, Ty};
+use rustc_target::abi::Abi;
+
+use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Applies the binary operation `op` to the two operands and writes a tuple of the result
+ /// and a boolean signifying the potential overflow to the destination.
+ ///
+ /// `force_overflow_checks` indicates whether overflow checks should be done even when
+ /// `tcx.sess.overflow_checks()` is `false`.
+ pub fn binop_with_overflow(
+ &mut self,
+ op: mir::BinOp,
+ force_overflow_checks: bool,
+ left: &ImmTy<'tcx, M::Provenance>,
+ right: &ImmTy<'tcx, M::Provenance>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
+ debug_assert_eq!(
+ self.tcx.intern_tup(&[ty, self.tcx.types.bool]),
+ dest.layout.ty,
+ "type mismatch for result of {:?}",
+ op,
+ ); // the destination must have type `(ty, bool)`
+ // As per https://github.com/rust-lang/rust/pull/98738, we always return `false` in the 2nd
+ // component when overflow checking is disabled.
+ let overflowed =
+ overflowed && (force_overflow_checks || M::checked_binop_checks_overflow(self)); // the machine hook decides whether checks apply
+ // Write the result to `dest`.
+ if let Abi::ScalarPair(..) = dest.layout.abi {
+ // We can use the optimized path and avoid `place_field` (which might do
+ // `force_allocation`).
+ let pair = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
+ self.write_immediate(pair, dest)?;
+ } else {
+ assert!(self.tcx.sess.opts.unstable_opts.randomize_layout); // only layout randomization can make `(int, bool)` not a ScalarPair
+ // With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to
+ // do a component-wise write here. This code path is slower than the above because
+ // `place_field` will have to `force_allocate` locals here.
+ let val_field = self.place_field(&dest, 0)?;
+ self.write_scalar(val, &val_field)?;
+ let overflowed_field = self.place_field(&dest, 1)?;
+ self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
+ }
+ Ok(())
+ }
+
+ /// Applies the binary operation `op` to the arguments and writes the result to the
+ /// destination.
+ pub fn binop_ignore_overflow(
+ &mut self,
+ op: mir::BinOp,
+ left: &ImmTy<'tcx, M::Provenance>,
+ right: &ImmTy<'tcx, M::Provenance>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?; // the overflow flag is deliberately discarded here
+ assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
+ self.write_scalar(val, dest)
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ fn binary_char_op(
+ &self,
+ bin_op: mir::BinOp,
+ l: char,
+ r: char,
+ ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+ use rustc_middle::mir::BinOp::*;
+
+ let res = match bin_op {
+ Eq => l == r,
+ Ne => l != r,
+ Lt => l < r,
+ Le => l <= r,
+ Gt => l > r,
+ Ge => l >= r,
+ _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op), // only comparisons are defined on `char`
+ };
+ (Scalar::from_bool(res), false, self.tcx.types.bool) // comparisons never overflow and always yield `bool`
+ }
+
+ fn binary_bool_op(
+ &self,
+ bin_op: mir::BinOp,
+ l: bool,
+ r: bool,
+ ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+ use rustc_middle::mir::BinOp::*;
+
+ let res = match bin_op {
+ Eq => l == r,
+ Ne => l != r,
+ Lt => l < r,
+ Le => l <= r,
+ Gt => l > r,
+ Ge => l >= r,
+ BitAnd => l & r, // unlike `char`, bools additionally support the bitwise ops
+ BitOr => l | r,
+ BitXor => l ^ r,
+ _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
+ };
+ (Scalar::from_bool(res), false, self.tcx.types.bool) // never overflows; result is always `bool`
+ }
+
+ fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>(
+ &self,
+ bin_op: mir::BinOp,
+ ty: Ty<'tcx>,
+ l: F,
+ r: F,
+ ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+ use rustc_middle::mir::BinOp::*;
+
+ let (val, ty) = match bin_op {
+ Eq => (Scalar::from_bool(l == r), self.tcx.types.bool), // comparisons produce `bool`
+ Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
+ Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
+ Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
+ Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
+ Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
+ Add => ((l + r).value.into(), ty), // `.value` discards the apfloat status flags
+ Sub => ((l - r).value.into(), ty),
+ Mul => ((l * r).value.into(), ty),
+ Div => ((l / r).value.into(), ty),
+ Rem => ((l % r).value.into(), ty),
+ _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
+ };
+ (val, false, ty) // float ops never report overflow
+ }
+
+ fn binary_int_op(
+ &self,
+ bin_op: mir::BinOp,
+ // passing in raw bits
+ l: u128,
+ left_layout: TyAndLayout<'tcx>,
+ r: u128,
+ right_layout: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ use rustc_middle::mir::BinOp::*;
+
+ // Shift ops can have an RHS with a different numeric type.
+ if bin_op == Shl || bin_op == Shr {
+ let size = u128::from(left_layout.size.bits());
+ // Even if `r` is signed, we treat it as if it was unsigned (i.e., we use its
+ // zero-extended form). This matches the codegen backend:
+ // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/base.rs#L315-L317>.
+ // The overflow check is also ignorant to the sign:
+ // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/mir/rvalue.rs#L728>.
+ // This would behave rather strangely if we had integer types of size 256: a shift by
+ // -1i8 would actually shift by 255, but that would *not* be considered overflowing. A
+ // shift by -1i16 though would be considered overflowing. If we had integers of size
+ // 512, then a shift by -1i8 would even produce a different result than one by -1i16:
+ // the first shifts by 255, the latter by u16::MAX % 512 = 511. Lucky enough, our
+ // integers are maximally 128bits wide, so negative shifts *always* overflow and we have
+ // consistent results for the same value represented at different bit widths.
+ assert!(size <= 128);
+ let overflow = r >= size; // overflow is reported, but the (masked) shift still happens below
+ // The shift offset is implicitly masked to the type size, to make sure this operation
+ // is always defined. This is the one MIR operator that does *not* directly map to a
+ // single LLVM operation. See
+ // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/common.rs#L131-L158>
+ // for the corresponding truncation in our codegen backends.
+ let r = r % size;
+ let r = u32::try_from(r).unwrap(); // we masked so this will always fit
+ let result = if left_layout.abi.is_signed() {
+ let l = self.sign_extend(l, left_layout) as i128; // reinterpret the raw bits as signed for an arithmetic shift
+ let result = match bin_op {
+ Shl => l.checked_shl(r).unwrap(), // cannot fail: `r < size <= 128`
+ Shr => l.checked_shr(r).unwrap(),
+ _ => bug!(),
+ };
+ result as u128
+ } else {
+ match bin_op {
+ Shl => l.checked_shl(r).unwrap(),
+ Shr => l.checked_shr(r).unwrap(),
+ _ => bug!(),
+ }
+ };
+ let truncated = self.truncate(result, left_layout); // mask the result back to the LHS width
+ return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
+ }
+
+ // For the remaining ops, the types must be the same on both sides
+ if left_layout.ty != right_layout.ty {
+ span_bug!(
+ self.cur_span(),
+ "invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
+ bin_op,
+ l,
+ left_layout.ty,
+ r,
+ right_layout.ty,
+ )
+ }
+
+ let size = left_layout.size;
+
+ // Operations that need special treatment for signed integers
+ if left_layout.abi.is_signed() {
+ let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
+ Lt => Some(i128::lt),
+ Le => Some(i128::le),
+ Gt => Some(i128::gt),
+ Ge => Some(i128::ge),
+ _ => None,
+ };
+ if let Some(op) = op {
+ let l = self.sign_extend(l, left_layout) as i128; // signed comparison needs sign-extended values
+ let r = self.sign_extend(r, right_layout) as i128;
+ return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
+ }
+ let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
+ Div if r == 0 => throw_ub!(DivisionByZero), // zero bits mean zero regardless of signedness
+ Rem if r == 0 => throw_ub!(RemainderByZero),
+ Div => Some(i128::overflowing_div),
+ Rem => Some(i128::overflowing_rem),
+ Add => Some(i128::overflowing_add),
+ Sub => Some(i128::overflowing_sub),
+ Mul => Some(i128::overflowing_mul),
+ _ => None,
+ };
+ if let Some(op) = op {
+ let l = self.sign_extend(l, left_layout) as i128;
+ let r = self.sign_extend(r, right_layout) as i128;
+
+ // We need a special check for overflowing Rem and Div since they are *UB*
+ // on overflow, which can happen with "int_min $OP -1".
+ if matches!(bin_op, Rem | Div) {
+ if l == size.signed_int_min() && r == -1 {
+ if bin_op == Rem {
+ throw_ub!(RemainderOverflow)
+ } else {
+ throw_ub!(DivisionOverflow)
+ }
+ }
+ }
+
+ let (result, oflo) = op(l, r); // overflow as detected at i128 width
+ // This may be out-of-bounds for the result type, so we have to truncate ourselves.
+ // If that truncation loses any information, we have an overflow.
+ let result = result as u128;
+ let truncated = self.truncate(result, left_layout);
+ return Ok((
+ Scalar::from_uint(truncated, size),
+ oflo || self.sign_extend(truncated, left_layout) != result, // also overflow if truncation lost information
+ left_layout.ty,
+ ));
+ }
+ }
+
+ let (val, ty) = match bin_op {
+ Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
+ Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
+
+ Lt => (Scalar::from_bool(l < r), self.tcx.types.bool), // unsigned comparison on the raw bits
+ Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
+ Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
+ Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
+
+ BitOr => (Scalar::from_uint(l | r, size), left_layout.ty), // bitwise ops are sign-agnostic
+ BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
+ BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),
+
+ Add | Sub | Mul | Rem | Div => {
+ assert!(!left_layout.abi.is_signed()); // signed arithmetic was fully handled above
+ let op: fn(u128, u128) -> (u128, bool) = match bin_op {
+ Add => u128::overflowing_add,
+ Sub => u128::overflowing_sub,
+ Mul => u128::overflowing_mul,
+ Div if r == 0 => throw_ub!(DivisionByZero),
+ Rem if r == 0 => throw_ub!(RemainderByZero),
+ Div => u128::overflowing_div,
+ Rem => u128::overflowing_rem,
+ _ => bug!(),
+ };
+ let (result, oflo) = op(l, r);
+ // Truncate to target type.
+ // If that truncation loses any information, we have an overflow.
+ let truncated = self.truncate(result, left_layout);
+ return Ok((
+ Scalar::from_uint(truncated, size),
+ oflo || truncated != result,
+ left_layout.ty,
+ ));
+ }
+
+ _ => span_bug!(
+ self.cur_span(),
+ "invalid binary op {:?}: {:?}, {:?} (both {:?})",
+ bin_op,
+ l,
+ r,
+ right_layout.ty,
+ ),
+ };
+
+ Ok((val, false, ty))
+ }
+
+ /// Returns the result of the specified operation, whether it overflowed, and
+ /// the result type.
+ pub fn overflowing_binary_op(
+ &self,
+ bin_op: mir::BinOp,
+ left: &ImmTy<'tcx, M::Provenance>,
+ right: &ImmTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ trace!(
+ "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
+ bin_op,
+ *left,
+ left.layout.ty,
+ *right,
+ right.layout.ty
+ );
+
+ match left.layout.ty.kind() { // dispatch on the LHS type
+ ty::Char => {
+ assert_eq!(left.layout.ty, right.layout.ty); // char ops require both sides to have the same type
+ let left = left.to_scalar()?;
+ let right = right.to_scalar()?;
+ Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
+ }
+ ty::Bool => {
+ assert_eq!(left.layout.ty, right.layout.ty);
+ let left = left.to_scalar()?;
+ let right = right.to_scalar()?;
+ Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
+ }
+ ty::Float(fty) => {
+ assert_eq!(left.layout.ty, right.layout.ty);
+ let ty = left.layout.ty;
+ let left = left.to_scalar()?;
+ let right = right.to_scalar()?;
+ Ok(match fty {
+ FloatTy::F32 => {
+ self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
+ }
+ FloatTy::F64 => {
+ self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
+ }
+ })
+ }
+ _ if left.layout.ty.is_integral() => {
+ // the RHS type can be different, e.g. for shifts -- but it has to be integral, too
+ assert!(
+ right.layout.ty.is_integral(),
+ "Unexpected types for BinOp: {:?} {:?} {:?}",
+ left.layout.ty,
+ bin_op,
+ right.layout.ty
+ );
+
+ let l = left.to_scalar()?.to_bits(left.layout.size)?; // raw bits; signedness is handled inside `binary_int_op`
+ let r = right.to_scalar()?.to_bits(right.layout.size)?;
+ self.binary_int_op(bin_op, l, left.layout, r, right.layout)
+ }
+ _ if left.layout.ty.is_any_ptr() => {
+ // The RHS type must be a `pointer` *or an integer type* (for `Offset`).
+ // (Even when both sides are pointers, their type might differ, see issue #91636)
+ assert!(
+ right.layout.ty.is_any_ptr() || right.layout.ty.is_integral(),
+ "Unexpected types for BinOp: {:?} {:?} {:?}",
+ left.layout.ty,
+ bin_op,
+ right.layout.ty
+ );
+
+ M::binary_ptr_op(self, bin_op, left, right) // pointer semantics are delegated to the machine hook
+ }
+ _ => span_bug!(
+ self.cur_span(),
+ "Invalid MIR: bad LHS type for binop: {:?}",
+ left.layout.ty
+ ),
+ }
+ }
+
+ /// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
+ #[inline]
+ pub fn binary_op(
+ &self,
+ bin_op: mir::BinOp,
+ left: &ImmTy<'tcx, M::Provenance>,
+ right: &ImmTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+ let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?; // overflow flag dropped on purpose
+ Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
+ }
+
+ /// Returns the result of the specified operation, whether it overflowed, and
+ /// the result type.
+ pub fn overflowing_unary_op(
+ &self,
+ un_op: mir::UnOp,
+ val: &ImmTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ use rustc_middle::mir::UnOp::*;
+
+ let layout = val.layout;
+ let val = val.to_scalar()?;
+ trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);
+
+ match layout.ty.kind() {
+ ty::Bool => {
+ let val = val.to_bool()?;
+ let res = match un_op {
+ Not => !val,
+ _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
+ };
+ Ok((Scalar::from_bool(res), false, self.tcx.types.bool)) // bool negation cannot overflow
+ }
+ ty::Float(fty) => {
+ let res = match (un_op, fty) {
+ (Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
+ (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
+ _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
+ };
+ Ok((res, false, layout.ty)) // float negation cannot overflow
+ }
+ _ => {
+ assert!(layout.ty.is_integral());
+ let val = val.to_bits(layout.size)?;
+ let (res, overflow) = match un_op {
+ Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
+ Neg => {
+ // arithmetic negation
+ assert!(layout.abi.is_signed()); // MIR `Neg` only exists for signed ints here
+ let val = self.sign_extend(val, layout) as i128;
+ let (res, overflow) = val.overflowing_neg(); // overflows exactly for `iN::MIN`
+ let res = res as u128;
+ // Truncate to target type.
+ // If that truncation loses any information, we have an overflow.
+ let truncated = self.truncate(res, layout);
+ (truncated, overflow || self.sign_extend(truncated, layout) != res)
+ }
+ };
+ Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
+ }
+ }
+ }
+
+ pub fn unary_op(
+ &self,
+ un_op: mir::UnOp,
+ val: &ImmTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+ let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?; // overflow flag dropped on purpose
+ Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
new file mode 100644
index 000000000..f4571a1ca
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -0,0 +1,900 @@
+//! Computations on places -- field projections, going from mir::Place, and writing
+//! into a place.
+//! All high-level functions to write to memory work on places as destinations.
+
+use std::hash::Hash;
+
+use rustc_ast::Mutability;
+use rustc_middle::mir;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
+use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding, VariantIdx};
+
+use super::{
+ alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
+ ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
+ Pointer, Provenance, Scalar, ScalarMaybeUninit,
+};
+
+#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
+/// Information required for the sound usage of a `MemPlace`.
+pub enum MemPlaceMeta<Prov: Provenance = AllocId> {
+    /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
+    Meta(Scalar<Prov>),
+    /// `Sized` types or unsized `extern type`
+    /// (an `extern type` is unsized but still carries no metadata).
+    None,
+}
+
+impl<Prov: Provenance> MemPlaceMeta<Prov> {
+    /// Returns the metadata scalar.
+    ///
+    /// ICEs (via `bug!`) if there is no metadata; only call this when the
+    /// place's type is known to require a wide pointer.
+    pub fn unwrap_meta(self) -> Scalar<Prov> {
+        match self {
+            Self::Meta(s) => s,
+            Self::None => {
+                bug!("expected wide pointer extra data (e.g. slice length or trait object vtable)")
+            }
+        }
+    }
+
+    /// Whether this place carries any metadata (i.e., is a wide-pointer place).
+    pub fn has_meta(self) -> bool {
+        match self {
+            Self::Meta(_) => true,
+            Self::None => false,
+        }
+    }
+}
+
+/// An untyped place: a (possibly wide) pointer into interpreter memory,
+/// with no layout information attached.
+#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
+pub struct MemPlace<Prov: Provenance = AllocId> {
+    /// The pointer can be a pure integer, with the `None` provenance.
+    pub ptr: Pointer<Option<Prov>>,
+    /// Metadata for unsized places. Interpretation is up to the type.
+    /// Must not be present for sized types, but can be missing for unsized types
+    /// (e.g., `extern type`).
+    pub meta: MemPlaceMeta<Prov>,
+}
+
+/// A MemPlace with its layout. Constructing it is only possible in this module.
+#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
+pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
+    // Kept private so that all construction goes through this module; outside
+    // code reaches the raw `MemPlace` via the `Deref` impl.
+    mplace: MemPlace<Prov>,
+    pub layout: TyAndLayout<'tcx>,
+    /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
+    /// it needs to have a different alignment than the field type would usually have.
+    /// So we represent this here with a separate field that "overwrites" `layout.align`.
+    /// This means `layout.align` should never be used for a `MPlaceTy`!
+    pub align: Align,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum Place<Prov: Provenance = AllocId> {
+    /// A place referring to a value allocated in the `Memory` system.
+    Ptr(MemPlace<Prov>),
+
+    /// To support alloc-free locals, we are able to write directly to a local.
+    /// (Without that optimization, we'd just always be a `MemPlace`.)
+    /// `frame` indexes the call stack; `local` names the variable in that frame.
+    Local { frame: usize, local: mir::Local },
+}
+
+/// A general `Place` (in-memory or local) together with its layout.
+#[derive(Clone, Debug)]
+pub struct PlaceTy<'tcx, Prov: Provenance = AllocId> {
+    place: Place<Prov>, // Keep this private; it helps enforce invariants.
+    pub layout: TyAndLayout<'tcx>,
+    /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
+    /// it needs to have a different alignment than the field type would usually have.
+    /// So we represent this here with a separate field that "overwrites" `layout.align`.
+    /// This means `layout.align` should never be used for a `PlaceTy`!
+    pub align: Align,
+}
+
+// `PlaceTy` transparently exposes its (private) untyped `Place`.
+impl<'tcx, Prov: Provenance> std::ops::Deref for PlaceTy<'tcx, Prov> {
+    type Target = Place<Prov>;
+    #[inline(always)]
+    fn deref(&self) -> &Place<Prov> {
+        &self.place
+    }
+}
+
+// `MPlaceTy` transparently exposes its (private) untyped `MemPlace`.
+impl<'tcx, Prov: Provenance> std::ops::Deref for MPlaceTy<'tcx, Prov> {
+    type Target = MemPlace<Prov>;
+    #[inline(always)]
+    fn deref(&self) -> &MemPlace<Prov> {
+        &self.mplace
+    }
+}
+
+// Every in-memory place is trivially also a general place; these conversions
+// (by value, by shared ref, by mutable ref) all wrap it in `Place::Ptr`.
+impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
+        PlaceTy { place: Place::Ptr(*mplace), layout: mplace.layout, align: mplace.align }
+    }
+}
+
+impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
+        PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
+    }
+}
+
+impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
+        PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
+    }
+}
+
+impl<Prov: Provenance> MemPlace<Prov> {
+    /// Creates a thin place (no metadata) from a pointer.
+    #[inline(always)]
+    pub fn from_ptr(ptr: Pointer<Option<Prov>>) -> Self {
+        MemPlace { ptr, meta: MemPlaceMeta::None }
+    }
+
+    /// Adjust the provenance of the main pointer (metadata is unaffected).
+    pub fn map_provenance(self, f: impl FnOnce(Option<Prov>) -> Option<Prov>) -> Self {
+        MemPlace { ptr: self.ptr.map_provenance(f), ..self }
+    }
+
+    /// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
+    /// This is the inverse of `ref_to_mplace`.
+    #[inline(always)]
+    pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
+        match self.meta {
+            MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
+            MemPlaceMeta::Meta(meta) => {
+                Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx).into(), meta.into())
+            }
+        }
+    }
+
+    /// Offsets the pointer by `offset` (fallible pointer arithmetic) and
+    /// installs `meta` as the metadata for the pointee at the new location.
+    #[inline]
+    pub fn offset_with_meta<'tcx>(
+        self,
+        offset: Size,
+        meta: MemPlaceMeta<Prov>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        Ok(MemPlace { ptr: self.ptr.offset(offset, cx)?, meta })
+    }
+}
+
+impl<Prov: Provenance> Place<Prov> {
+    /// Asserts that this points to some local variable.
+    /// Returns the frame idx and the variable idx.
+    ///
+    /// ICEs (via `bug!`) if this is a `Place::Ptr`.
+    #[inline]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn assert_local(&self) -> (usize, mir::Local) {
+        match self {
+            Place::Local { frame, local } => (*frame, *local),
+            _ => bug!("assert_local: expected Place::Local, got {:?}", self),
+        }
+    }
+}
+
+impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
+    /// Produces a MemPlace that works for ZST but nothing else.
+    /// Conceptually this is a new allocation, but it doesn't actually create an allocation so you
+    /// don't need to worry about memory leaks.
+    #[inline]
+    pub fn fake_alloc_zst(layout: TyAndLayout<'tcx>) -> Self {
+        assert!(layout.is_zst());
+        let align = layout.align.abi;
+        let ptr = Pointer::from_addr(align.bytes()); // no provenance, absolute address
+        MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align }
+    }
+
+    /// Offsets the place and switches to the given `layout`/`meta` for the new
+    /// pointee. The stored alignment is restricted for the offset, so the result
+    /// remains correct for (possibly packed) fields.
+    #[inline]
+    pub fn offset_with_meta(
+        &self,
+        offset: Size,
+        meta: MemPlaceMeta<Prov>,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        Ok(MPlaceTy {
+            mplace: self.mplace.offset_with_meta(offset, meta, cx)?,
+            align: self.align.restrict_for_offset(offset),
+            layout,
+        })
+    }
+
+    /// Like `offset_with_meta`, but for sized pointees (which need no metadata).
+    pub fn offset(
+        &self,
+        offset: Size,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        assert!(!layout.is_unsized());
+        self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+    }
+
+    /// Creates a thin place from a pointer that is assumed to be aligned for `layout`.
+    #[inline]
+    pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
+        MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
+    }
+
+    /// Like `from_aligned_ptr`, but additionally attaches the given metadata.
+    #[inline]
+    pub fn from_aligned_ptr_with_meta(
+        ptr: Pointer<Option<Prov>>,
+        layout: TyAndLayout<'tcx>,
+        meta: MemPlaceMeta<Prov>,
+    ) -> Self {
+        let mut mplace = MemPlace::from_ptr(ptr);
+        mplace.meta = meta;
+
+        MPlaceTy { mplace, layout, align: layout.align.abi }
+    }
+
+    /// Returns the element count: read from the metadata for unsized slices/str,
+    /// or from the layout's `FieldsShape::Array` for sized array-like types.
+    #[inline]
+    pub(crate) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
+        if self.layout.is_unsized() {
+            // We need to consult `meta` metadata
+            match self.layout.ty.kind() {
+                ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_machine_usize(cx),
+                _ => bug!("len not supported on unsized type {:?}", self.layout.ty),
+            }
+        } else {
+            // Go through the layout. There are lots of types that support a length,
+            // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
+            match self.layout.fields {
+                abi::FieldsShape::Array { count, .. } => Ok(count),
+                _ => bug!("len not supported on sized type {:?}", self.layout.ty),
+            }
+        }
+    }
+
+    /// Returns the vtable pointer stored in the metadata.
+    /// ICEs unless the place's type is `dyn Trait`.
+    #[inline]
+    pub(super) fn vtable(&self) -> Scalar<Prov> {
+        match self.layout.ty.kind() {
+            ty::Dynamic(..) => self.mplace.meta.unwrap_meta(),
+            _ => bug!("vtable not supported on type {:?}", self.layout.ty),
+        }
+    }
+}
+
+// These are defined here because they produce a place.
+// These are defined here because they produce a place.
+impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
+    #[inline(always)]
+    /// Note: do not call `as_ref` on the resulting place. This function should only be used to
+    /// read from the resulting mplace, not to get its address back.
+    pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+        match **self {
+            Operand::Indirect(mplace) => {
+                // NOTE(review): indirect operands appear to always carry an
+                // alignment, making this `unwrap` infallible — confirm at the
+                // `Operand` construction sites.
+                Ok(MPlaceTy { mplace, layout: self.layout, align: self.align.unwrap() })
+            }
+            Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)),
+        }
+    }
+
+    #[inline(always)]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    /// Note: do not call `as_ref` on the resulting place. This function should only be used to
+    /// read from the resulting mplace, not to get its address back.
+    pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
+        self.try_as_mplace().unwrap()
+    }
+}
+
+impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
+    /// A place is either an mplace or some local.
+    /// Returns the (frame, local) pair in the `Err` case.
+    #[inline]
+    pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Prov>, (usize, mir::Local)> {
+        match **self {
+            Place::Ptr(mplace) => Ok(MPlaceTy { mplace, layout: self.layout, align: self.align }),
+            Place::Local { frame, local } => Err((frame, local)),
+        }
+    }
+
+    /// Like `try_as_mplace`, but panics if the place is a local.
+    #[inline(always)]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Prov> {
+        self.try_as_mplace().unwrap()
+    }
+}
+
+// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
+impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
+where
+ Prov: Provenance + Eq + Hash + 'static,
+ M: Machine<'mir, 'tcx, Provenance = Prov>,
+{
+ /// Take a value, which represents a (thin or wide) reference, and make it a place.
+ /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
+ ///
+ /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
+ /// want to ever use the place for memory access!
+ /// Generally prefer `deref_operand`.
+ pub fn ref_to_mplace(
+ &self,
+ val: &ImmTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ let pointee_type =
+ val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
+ let layout = self.layout_of(pointee_type)?;
+ let (ptr, meta) = match **val {
+ Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None),
+ Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta.check_init()?)),
+ Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
+ };
+
+ let mplace = MemPlace { ptr: ptr.to_pointer(self)?, meta };
+ // When deref'ing a pointer, the *static* alignment given by the type is what matters.
+ let align = layout.align.abi;
+ Ok(MPlaceTy { mplace, layout, align })
+ }
+
+ /// Take an operand, representing a pointer, and dereference it to a place -- that
+ /// will always be a MemPlace. Lives in `place.rs` because it creates a place.
+ #[instrument(skip(self), level = "debug")]
+ pub fn deref_operand(
+ &self,
+ src: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ let val = self.read_immediate(src)?;
+ trace!("deref to {} on {:?}", val.layout.ty, *val);
+
+ if val.layout.ty.is_box() {
+ bug!("dereferencing {:?}", val.layout.ty);
+ }
+
+ let mplace = self.ref_to_mplace(&val)?;
+ self.check_mplace_access(mplace, CheckInAllocMsg::DerefTest)?;
+ Ok(mplace)
+ }
+
+    /// Borrows the allocation backing a sized, meta-less `place` for reading.
+    /// Returns `None` for zero-sized accesses (which need no actual memory).
+    #[inline]
+    pub(super) fn get_place_alloc(
+        &self,
+        place: &MPlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
+        assert!(!place.layout.is_unsized());
+        assert!(!place.meta.has_meta());
+        let size = place.layout.size;
+        self.get_ptr_alloc(place.ptr, size, place.align)
+    }
+
+    /// Mutable counterpart of `get_place_alloc`.
+    #[inline]
+    pub(super) fn get_place_alloc_mut(
+        &mut self,
+        place: &MPlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
+        assert!(!place.layout.is_unsized());
+        assert!(!place.meta.has_meta());
+        let size = place.layout.size;
+        self.get_ptr_alloc_mut(place.ptr, size, place.align)
+    }
+
+    /// Check if this mplace is dereferenceable and sufficiently aligned.
+    fn check_mplace_access(
+        &self,
+        mplace: MPlaceTy<'tcx, M::Provenance>,
+        msg: CheckInAllocMsg,
+    ) -> InterpResult<'tcx> {
+        // For unsized places the dynamic size/align come from the metadata;
+        // otherwise fall back to the static layout.
+        let (size, align) = self
+            .size_and_align_of_mplace(&mplace)?
+            .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
+        assert!(mplace.align <= align, "dynamic alignment less strict than static one?");
+        // Whether alignment is actually enforced is up to the machine.
+        let align = M::enforce_alignment(self).then_some(align);
+        self.check_ptr_access_align(mplace.ptr, size, align.unwrap_or(Align::ONE), msg)?;
+        Ok(())
+    }
+
+    /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
+    /// Also returns the number of elements.
+    pub fn mplace_to_simd(
+        &self,
+        mplace: &MPlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
+        // Basically we just transmute this place into an array following simd_size_and_type.
+        // (Transmuting is okay since this is an in-memory place. We also double-check the size
+        // stays the same.)
+        let (len, e_ty) = mplace.layout.ty.simd_size_and_type(*self.tcx);
+        let array = self.tcx.mk_array(e_ty, len);
+        let layout = self.layout_of(array)?;
+        assert_eq!(layout.size, mplace.layout.size);
+        Ok((MPlaceTy { layout, ..*mplace }, len))
+    }
+
+    /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
+    /// Also returns the number of elements.
+    /// Forces the place into memory first (hence `&mut self`).
+    pub fn place_to_simd(
+        &mut self,
+        place: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
+        let mplace = self.force_allocation(place)?;
+        self.mplace_to_simd(&mplace)
+    }
+
+    /// Builds a `PlaceTy` for the given local of the given frame,
+    /// with the local's own layout and ABI alignment.
+    pub fn local_to_place(
+        &self,
+        frame: usize,
+        local: mir::Local,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+        let layout = self.layout_of_local(&self.stack()[frame], local, None)?;
+        let place = Place::Local { frame, local };
+        Ok(PlaceTy { place, layout, align: layout.align.abi })
+    }
+
+    /// Computes a place. You should only use this if you intend to write into this
+    /// place; for reading, a more efficient alternative is `eval_place_to_op`.
+    #[instrument(skip(self), level = "debug")]
+    pub fn eval_place(
+        &mut self,
+        mir_place: mir::Place<'tcx>,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+        // Start from the base local, then apply each projection in turn.
+        let mut place = self.local_to_place(self.frame_idx(), mir_place.local)?;
+        // Using `try_fold` turned out to be bad for performance, hence the loop.
+        for elem in mir_place.projection.iter() {
+            place = self.place_projection(&place, elem)?
+        }
+
+        trace!("{:?}", self.dump_place(place.place));
+        // Sanity-check the type we ended up with.
+        debug_assert!(
+            mir_assign_valid_types(
+                *self.tcx,
+                self.param_env,
+                self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
+                    mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
+                )?)?,
+                place.layout,
+            ),
+            "eval_place of a MIR place with type {:?} produced an interpreter place with type {:?}",
+            mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
+            place.layout.ty,
+        );
+        Ok(place)
+    }
+
+    /// Write an immediate to a place.
+    /// If the machine enforces validity, the written data is re-validated
+    /// against the destination type afterwards.
+    #[inline(always)]
+    #[instrument(skip(self), level = "debug")]
+    pub fn write_immediate(
+        &mut self,
+        src: Immediate<M::Provenance>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        self.write_immediate_no_validate(src, dest)?;
+
+        if M::enforce_validity(self) {
+            // Data got changed, better make sure it matches the type!
+            self.validate_operand(&self.place_to_op(dest)?)?;
+        }
+
+        Ok(())
+    }
+
+    /// Write a scalar to a place.
+    #[inline(always)]
+    pub fn write_scalar(
+        &mut self,
+        val: impl Into<ScalarMaybeUninit<M::Provenance>>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        self.write_immediate(Immediate::Scalar(val.into()), dest)
+    }
+
+    /// Write a pointer to a place.
+    #[inline(always)]
+    pub fn write_pointer(
+        &mut self,
+        ptr: impl Into<Pointer<Option<M::Provenance>>>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest)
+    }
+
+    /// Write an immediate to a place.
+    /// If you use this you are responsible for validating that things got copied at the
+    /// right type.
+    fn write_immediate_no_validate(
+        &mut self,
+        src: Immediate<M::Provenance>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
+        trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+
+        // See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
+        // but not factored as a separate function.
+        let mplace = match dest.place {
+            Place::Local { frame, local } => {
+                match M::access_local_mut(self, frame, local)? {
+                    Operand::Immediate(local) => {
+                        // Local can be updated in-place.
+                        *local = src;
+                        return Ok(());
+                    }
+                    Operand::Indirect(mplace) => {
+                        // The local is in memory, go on below.
+                        *mplace
+                    }
+                }
+            }
+            Place::Ptr(mplace) => mplace, // already referring to memory
+        };
+
+        // This is already in memory, write there.
+        self.write_immediate_to_mplace_no_validate(src, dest.layout, dest.align, mplace)
+    }
+
+    /// Write an immediate to memory.
+    /// If you use this you are responsible for validating that things got copied at the
+    /// right layout.
+    fn write_immediate_to_mplace_no_validate(
+        &mut self,
+        value: Immediate<M::Provenance>,
+        layout: TyAndLayout<'tcx>,
+        align: Align,
+        dest: MemPlace<M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        // Note that it is really important that the type here is the right one, and matches the
+        // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
+        // to handle padding properly, which is only correct if we never look at this data with the
+        // wrong type.
+
+        let tcx = *self.tcx;
+        let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout, align })? else {
+            // zero-sized access
+            return Ok(());
+        };
+
+        match value {
+            Immediate::Scalar(scalar) => {
+                let Abi::Scalar(s) = layout.abi else { span_bug!(
+                    self.cur_span(),
+                    "write_immediate_to_mplace: invalid Scalar layout: {layout:#?}",
+                )
+                };
+                let size = s.size(&tcx);
+                assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
+                alloc.write_scalar(alloc_range(Size::ZERO, size), scalar)
+            }
+            Immediate::ScalarPair(a_val, b_val) => {
+                // We checked `ptr_align` above, so all fields will have the alignment they need.
+                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
+                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
+                let Abi::ScalarPair(a, b) = layout.abi else { span_bug!(
+                    self.cur_span(),
+                    "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
+                    layout
+                )
+                };
+                // The second component lives at the first offset aligned for it.
+                let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
+                let b_offset = a_size.align_to(b.align(&tcx).abi);
+                assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
+
+                // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
+                // but that does not work: We could be a newtype around a pair, then the
+                // fields do not match the `ScalarPair` components.
+
+                alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?;
+                alloc.write_scalar(alloc_range(b_offset, b_size), b_val)
+            }
+            Immediate::Uninit => alloc.write_uninit(),
+        }
+    }
+
+    /// Marks the entire destination as uninitialized, updating an immediate
+    /// local in place or de-initializing the backing allocation.
+    pub fn write_uninit(&mut self, dest: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+        let mplace = match dest.try_as_mplace() {
+            Ok(mplace) => mplace,
+            Err((frame, local)) => {
+                match M::access_local_mut(self, frame, local)? {
+                    Operand::Immediate(local) => {
+                        *local = Immediate::Uninit;
+                        return Ok(());
+                    }
+                    Operand::Indirect(mplace) => {
+                        // The local is in memory, go on below.
+                        MPlaceTy { mplace: *mplace, layout: dest.layout, align: dest.align }
+                    }
+                }
+            }
+        };
+        let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
+            // Zero-sized access
+            return Ok(());
+        };
+        alloc.write_uninit()?;
+        Ok(())
+    }
+
+    /// Copies the data from an operand to a place.
+    /// `allow_transmute` indicates whether the layouts may disagree.
+    /// If the machine enforces validity, the destination is re-validated afterwards.
+    #[inline(always)]
+    #[instrument(skip(self), level = "debug")]
+    pub fn copy_op(
+        &mut self,
+        src: &OpTy<'tcx, M::Provenance>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+        allow_transmute: bool,
+    ) -> InterpResult<'tcx> {
+        self.copy_op_no_validate(src, dest, allow_transmute)?;
+
+        if M::enforce_validity(self) {
+            // Data got changed, better make sure it matches the type!
+            self.validate_operand(&self.place_to_op(dest)?)?;
+        }
+
+        Ok(())
+    }
+
+    /// Copies the data from an operand to a place.
+    /// `allow_transmute` indicates whether the layouts may disagree.
+    /// Also, if you use this you are responsible for validating that things get copied at the
+    /// right type.
+    #[instrument(skip(self), level = "debug")]
+    fn copy_op_no_validate(
+        &mut self,
+        src: &OpTy<'tcx, M::Provenance>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+        allow_transmute: bool,
+    ) -> InterpResult<'tcx> {
+        // We do NOT compare the types for equality, because well-typed code can
+        // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
+        let layout_compat =
+            mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout);
+        if !allow_transmute && !layout_compat {
+            span_bug!(
+                self.cur_span(),
+                "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
+                src.layout.ty,
+                dest.layout.ty,
+            );
+        }
+
+        // Let us see if the layout is simple so we take a shortcut,
+        // avoid force_allocation.
+        let src = match self.read_immediate_raw(src, /*force*/ false)? {
+            Ok(src_val) => {
+                assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
+                assert!(
+                    !dest.layout.is_unsized(),
+                    "the src is sized, so the dest must also be sized"
+                );
+                assert_eq!(src.layout.size, dest.layout.size);
+                // Yay, we got a value that we can write directly.
+                return if layout_compat {
+                    self.write_immediate_no_validate(*src_val, dest)
+                } else {
+                    // This is tricky. The problematic case is `ScalarPair`: the `src_val` was
+                    // loaded using the offsets defined by `src.layout`. When we put this back into
+                    // the destination, we have to use the same offsets! So (a) we make sure we
+                    // write back to memory, and (b) we use `dest` *with the source layout*.
+                    let dest_mem = self.force_allocation(dest)?;
+                    self.write_immediate_to_mplace_no_validate(
+                        *src_val,
+                        src.layout,
+                        dest_mem.align,
+                        *dest_mem,
+                    )
+                };
+            }
+            Err(mplace) => mplace,
+        };
+        // Slow path, this does not fit into an immediate. Just memcpy.
+        trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+
+        let dest = self.force_allocation(&dest)?;
+        let Some((dest_size, _)) = self.size_and_align_of_mplace(&dest)? else {
+            span_bug!(self.cur_span(), "copy_op needs (dynamically) sized values")
+        };
+        if cfg!(debug_assertions) {
+            let src_size = self.size_and_align_of_mplace(&src)?.unwrap().0;
+            assert_eq!(src_size, dest_size, "Cannot copy differently-sized data");
+        } else {
+            // As a cheap approximation, we compare the fixed parts of the size.
+            assert_eq!(src.layout.size, dest.layout.size);
+        }
+
+        self.mem_copy(
+            src.ptr, src.align, dest.ptr, dest.align, dest_size, /*nonoverlapping*/ false,
+        )
+    }
+
+    /// Ensures that a place is in memory, and returns where it is.
+    /// If the place currently refers to a local that doesn't yet have a matching allocation,
+    /// create such an allocation.
+    /// This is essentially `force_to_memplace`.
+    #[instrument(skip(self), level = "debug")]
+    pub fn force_allocation(
+        &mut self,
+        place: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        let mplace = match place.place {
+            Place::Local { frame, local } => {
+                match M::access_local_mut(self, frame, local)? {
+                    &mut Operand::Immediate(local_val) => {
+                        // We need to make an allocation.
+
+                        // We need the layout of the local. We can NOT use the layout we got,
+                        // that might e.g., be an inner field of a struct with `Scalar` layout,
+                        // that has different alignment than the outer field.
+                        let local_layout =
+                            self.layout_of_local(&self.stack()[frame], local, None)?;
+                        if local_layout.is_unsized() {
+                            throw_unsup_format!("unsized locals are not supported");
+                        }
+                        let mplace = *self.allocate(local_layout, MemoryKind::Stack)?;
+                        if !matches!(local_val, Immediate::Uninit) {
+                            // Preserve old value. (As an optimization, we can skip this if it was uninit.)
+                            // We don't have to validate as we can assume the local
+                            // was already valid for its type.
+                            self.write_immediate_to_mplace_no_validate(
+                                local_val,
+                                local_layout,
+                                local_layout.align.abi,
+                                mplace,
+                            )?;
+                        }
+                        // Now we can call `access_mut` again, asserting it goes well,
+                        // and actually overwrite things.
+                        *M::access_local_mut(self, frame, local).unwrap() =
+                            Operand::Indirect(mplace);
+                        mplace
+                    }
+                    &mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
+                }
+            }
+            Place::Ptr(mplace) => mplace,
+        };
+        // Return with the original layout, so that the caller can go on
+        Ok(MPlaceTy { mplace, layout: place.layout, align: place.align })
+    }
+
+    /// Allocates fresh (stack-kind-configurable) memory sized and aligned for
+    /// `layout` and returns it as an aligned place. Sized types only.
+    pub fn allocate(
+        &mut self,
+        layout: TyAndLayout<'tcx>,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        assert!(!layout.is_unsized());
+        let ptr = self.allocate_ptr(layout.size, layout.align.abi, kind)?;
+        Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
+    }
+
+    /// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation.
+    /// The string's byte length becomes the wide pointer's metadata.
+    pub fn allocate_str(
+        &mut self,
+        str: &str,
+        kind: MemoryKind<M::MemoryKind>,
+        mutbl: Mutability,
+    ) -> MPlaceTy<'tcx, M::Provenance> {
+        let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl);
+        let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self);
+        let mplace = MemPlace { ptr: ptr.into(), meta: MemPlaceMeta::Meta(meta) };
+
+        let ty = self.tcx.mk_ref(
+            self.tcx.lifetimes.re_static,
+            ty::TypeAndMut { ty: self.tcx.types.str_, mutbl },
+        );
+        let layout = self.layout_of(ty).unwrap();
+        MPlaceTy { mplace, layout, align: layout.align.abi }
+    }
+
+    /// Writes the discriminant of the given variant.
+    #[instrument(skip(self), level = "debug")]
+    pub fn write_discriminant(
+        &mut self,
+        variant_index: VariantIdx,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        // This must be an enum or generator.
+        match dest.layout.ty.kind() {
+            ty::Adt(adt, _) => assert!(adt.is_enum()),
+            ty::Generator(..) => {}
+            _ => span_bug!(
+                self.cur_span(),
+                "write_discriminant called on non-variant-type (neither enum nor generator)"
+            ),
+        }
+        // Layout computation excludes uninhabited variants from consideration
+        // therefore there's no way to represent those variants in the given layout.
+        // Essentially, uninhabited variants do not have a tag that corresponds to their
+        // discriminant, so we cannot do anything here.
+        // When evaluating we will always error before even getting here, but ConstProp 'executes'
+        // dead code, so we cannot ICE here.
+        if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
+            throw_ub!(UninhabitedEnumVariantWritten)
+        }
+
+        match dest.layout.variants {
+            abi::Variants::Single { index } => {
+                // Single-variant layouts store no tag; nothing to write.
+                assert_eq!(index, variant_index);
+            }
+            abi::Variants::Multiple {
+                tag_encoding: TagEncoding::Direct,
+                tag: tag_layout,
+                tag_field,
+                ..
+            } => {
+                // No need to validate the discriminant here because the
+                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
+
+                let discr_val =
+                    dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
+
+                // raw discriminants for enums are isize or bigger during
+                // their computation, but the in-memory tag is the smallest possible
+                // representation
+                let size = tag_layout.size(self);
+                let tag_val = size.truncate(discr_val);
+
+                let tag_dest = self.place_field(dest, tag_field)?;
+                self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
+            }
+            abi::Variants::Multiple {
+                tag_encoding:
+                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+                tag: tag_layout,
+                tag_field,
+                ..
+            } => {
+                // No need to validate the discriminant here because the
+                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
+
+                // The dataful variant is encoded by leaving its data as-is;
+                // only the other variants get a niche tag written.
+                if variant_index != dataful_variant {
+                    let variants_start = niche_variants.start().as_u32();
+                    let variant_index_relative = variant_index
+                        .as_u32()
+                        .checked_sub(variants_start)
+                        .expect("overflow computing relative variant idx");
+                    // We need to use machine arithmetic when taking into account `niche_start`:
+                    // tag_val = variant_index_relative + niche_start_val
+                    let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
+                    let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+                    let variant_index_relative_val =
+                        ImmTy::from_uint(variant_index_relative, tag_layout);
+                    let tag_val = self.binary_op(
+                        mir::BinOp::Add,
+                        &variant_index_relative_val,
+                        &niche_start_val,
+                    )?;
+                    // Write result.
+                    let niche_dest = self.place_field(dest, tag_field)?;
+                    self.write_immediate(*tag_val, &niche_dest)?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Turns the result of a const evaluation (`ConstAlloc`) into a typed,
+    /// aligned place pointing at the corresponding global allocation.
+    pub fn raw_const_to_mplace(
+        &self,
+        raw: ConstAlloc<'tcx>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        // This must be an allocation in `tcx`
+        let _ = self.tcx.global_alloc(raw.alloc_id);
+        let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
+        let layout = self.layout_of(raw.ty)?;
+        Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
+    }
+
+    /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
+    pub(super) fn unpack_dyn_trait(
+        &self,
+        mplace: &MPlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        let vtable = mplace.vtable().to_pointer(self)?; // also sanity checks the type
+        let (ty, _) = self.get_ptr_vtable(vtable)?;
+        let layout = self.layout_of(ty)?;
+
+        // The data pointer stays the same; only the (vtable) metadata is
+        // dropped, since the concrete type is now statically known.
+        let mplace = MPlaceTy {
+            mplace: MemPlace { meta: MemPlaceMeta::None, ..**mplace },
+            layout,
+            align: layout.align.abi,
+        };
+        Ok(mplace)
+    }
+}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+// (Gated to 64-bit x86 since the expected sizes are target-dependent.)
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+    use super::*;
+    // These are in alphabetical order, which is easy to maintain.
+    // Bump a number here only deliberately, with a justification.
+    rustc_data_structures::static_assert_size!(MemPlaceMeta, 24);
+    rustc_data_structures::static_assert_size!(MemPlace, 40);
+    rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 64);
+    rustc_data_structures::static_assert_size!(Place, 48);
+    rustc_data_structures::static_assert_size!(PlaceTy<'_>, 72);
+}
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
new file mode 100644
index 000000000..742339f2b
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -0,0 +1,391 @@
+//! This file implements "place projections"; basically a symmetric API for 3 types: MPlaceTy, OpTy, PlaceTy.
+//!
+//! OpTy and PlaceTy generally work by "let's see if we are actually an MPlaceTy, and do something custom if not".
+//! For PlaceTy, the custom thing is basically always to call `force_allocation` and then use the MPlaceTy logic anyway.
+//! For OpTy, the custom thing on field projections has to be pretty clever (since `Operand::Immediate` can have fields),
+//! but for array/slice operations it only has to worry about `Operand::Uninit`. That makes the value part trivial,
+//! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually
+//! implement the logic on OpTy, and MPlaceTy calls that.
+
+use std::hash::Hash;
+
+use rustc_middle::mir;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_target::abi::{self, Abi, VariantIdx};
+
+use super::{
+ ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, PlaceTy,
+ Provenance, Scalar,
+};
+
+// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
+impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
+where
+ Prov: Provenance + Eq + Hash + 'static,
+ M: Machine<'mir, 'tcx, Provenance = Prov>,
+{
+ //# Field access
+
+ /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
+ /// always possible without allocating, so it can take `&self`. Also return the field's layout.
+ /// This supports both struct and array fields, but for arrays the `usize` index type is
+ /// restricting; for indexing into arrays, use `mplace_index` instead.
+ pub fn mplace_field(
+ &self,
+ base: &MPlaceTy<'tcx, M::Provenance>,
+ field: usize,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ let offset = base.layout.fields.offset(field);
+ let field_layout = base.layout.field(self, field);
+
+ // Offset may need adjustment for unsized fields.
+ let (meta, offset) = if field_layout.is_unsized() {
+ // Re-use parent metadata to determine dynamic field layout.
+ // With custom DSTS, this *will* execute user-defined code, but the same
+ // happens at run-time so that's okay.
+ match self.size_and_align_of(&base.meta, &field_layout)? {
+ // The unsized field starts at the next boundary its dynamic alignment allows.
+ Some((_, align)) => (base.meta, offset.align_to(align)),
+ None => {
+ // For unsized types with an extern type tail we perform no adjustments.
+ // NOTE: keep this in sync with `PlaceRef::project_field` in the codegen backend.
+ assert!(matches!(base.meta, MemPlaceMeta::None));
+ (base.meta, offset)
+ }
+ }
+ } else {
+ // base.meta could be present; we might be accessing a sized field of an unsized
+ // struct. The sized field itself carries no metadata.
+ (MemPlaceMeta::None, offset)
+ };
+
+ // We do not look at `base.layout.align` nor `field_layout.align`, unlike
+ // codegen -- mostly to see if we can get away with that
+ base.offset_with_meta(offset, meta, field_layout, self)
+ }
+
+    /// Gets the place of a field inside the place, and also the field's type.
+    /// Just a convenience function, but used quite a bit.
+    /// This is the only projection that might have a side-effect: We cannot project
+    /// into the field of a local `ScalarPair`, we have to first allocate it.
+    pub fn place_field(
+        &mut self,
+        base: &PlaceTy<'tcx, M::Provenance>,
+        field: usize,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+        // FIXME: We could try to be smarter and avoid allocation for fields that span the
+        // entire place.
+        let mplace = self.force_allocation(base)?;
+        let field_place = self.mplace_field(&mplace, field)?;
+        Ok(field_place.into())
+    }
+
+ /// Projects to a field of an operand. For in-memory operands this defers to
+ /// `mplace_field`; for immediates it extracts the field value directly, mirroring
+ /// what codegen's `extract_field` does.
+ pub fn operand_field(
+ &self,
+ base: &OpTy<'tcx, M::Provenance>,
+ field: usize,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ let base = match base.try_as_mplace() {
+ Ok(ref mplace) => {
+ // We can reuse the mplace field computation logic for indirect operands.
+ let field = self.mplace_field(mplace, field)?;
+ return Ok(field.into());
+ }
+ Err(value) => value,
+ };
+
+ let field_layout = base.layout.field(self, field);
+ let offset = base.layout.fields.offset(field);
+ // This makes several assumptions about what layouts we will encounter; we match what
+ // codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
+ let field_val: Immediate<_> = match (*base, base.layout.abi) {
+ // the field contains no information, can be left uninit
+ _ if field_layout.is_zst() => Immediate::Uninit,
+ // the field covers the entire type
+ _ if field_layout.size == base.layout.size => {
+ // The ABIs must agree for the value to be reusable as-is.
+ assert!(match (base.layout.abi, field_layout.abi) {
+ (Abi::Scalar(..), Abi::Scalar(..)) => true,
+ (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
+ _ => false,
+ });
+ assert!(offset.bytes() == 0);
+ *base
+ }
+ // extract fields from types with `ScalarPair` ABI
+ (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+ assert!(matches!(field_layout.abi, Abi::Scalar(..)));
+ // Offset 0 selects the first scalar; the second scalar sits after the
+ // first, padded up to the second's alignment.
+ Immediate::from(if offset.bytes() == 0 {
+ debug_assert_eq!(field_layout.size, a.size(self));
+ a_val
+ } else {
+ debug_assert_eq!(offset, a.size(self).align_to(b.align(self).abi));
+ debug_assert_eq!(field_layout.size, b.size(self));
+ b_val
+ })
+ }
+ _ => span_bug!(
+ self.cur_span(),
+ "invalid field access on immediate {}, layout {:#?}",
+ base,
+ base.layout
+ ),
+ };
+
+ Ok(ImmTy::from_immediate(field_val, field_layout).into())
+ }
+
+ //# Downcasting
+
+    /// Project an in-memory place to the given enum variant.
+    /// This never touches memory; it merely swaps in the variant's layout.
+    pub fn mplace_downcast(
+        &self,
+        base: &MPlaceTy<'tcx, M::Provenance>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        // Downcasts only change the layout.
+        // (In particular, no check about whether this is even the active variant -- that's by design,
+        // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
+        assert!(!base.meta.has_meta());
+        let mut downcasted = *base;
+        downcasted.layout = downcasted.layout.for_variant(self, variant);
+        Ok(downcasted)
+    }
+
+    /// Project a place to the given enum variant; only the layout changes.
+    pub fn place_downcast(
+        &self,
+        base: &PlaceTy<'tcx, M::Provenance>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+        // Compute the variant's layout, then return a copy of `base` using it.
+        let variant_layout = base.layout.for_variant(self, variant);
+        let mut downcasted = base.clone();
+        downcasted.layout = variant_layout;
+        Ok(downcasted)
+    }
+
+    /// Project an operand to the given enum variant; only the layout changes.
+    pub fn operand_downcast(
+        &self,
+        base: &OpTy<'tcx, M::Provenance>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        // Compute the variant's layout, then return a copy of `base` using it.
+        let variant_layout = base.layout.for_variant(self, variant);
+        let mut downcasted = base.clone();
+        downcasted.layout = variant_layout;
+        Ok(downcasted)
+    }
+
+ //# Slice indexing
+
+    /// Index into an array- or slice-typed operand with a dynamic index,
+    /// performing the bounds check against the dynamic length.
+    #[inline(always)]
+    pub fn operand_index(
+        &self,
+        base: &OpTy<'tcx, M::Provenance>,
+        index: u64,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        // Not using the layout method because we want to compute on u64
+        match base.layout.fields {
+            abi::FieldsShape::Array { stride, count: _ } => {
+                // `count` is nonsense for slices, use the dynamic length instead.
+                let len = base.len(self)?;
+                if index >= len {
+                    // This can only be reached in ConstProp and non-rustc-MIR.
+                    throw_ub!(BoundsCheckFailed { len, index });
+                }
+                let offset = stride * index; // `Size` multiplication
+                // All fields have the same layout.
+                let field_layout = base.layout.field(self, 0);
+                base.offset(offset, field_layout, self)
+            }
+            // Fixed: the diagnostic used to claim `mplace_index` was called,
+            // which is a different function; name this one so ICEs point at
+            // the right place.
+            _ => span_bug!(
+                self.cur_span(),
+                "`operand_index` called on non-array type {:?}",
+                base.layout.ty
+            ),
+        }
+    }
+
+ /// Iterates over all fields of an array. Much more efficient than doing the
+ /// same by repeatedly calling `operand_index`.
+ pub fn operand_array_fields<'a>(
+ &self,
+ base: &'a OpTy<'tcx, Prov>,
+ ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, OpTy<'tcx, Prov>>> + 'a> {
+ let len = base.len(self)?; // also asserts that we have a type where this makes sense
+ let abi::FieldsShape::Array { stride, .. } = base.layout.fields else {
+ span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
+ };
+ // All elements share one layout, computed once up front.
+ let field_layout = base.layout.field(self, 0);
+ let dl = &self.tcx.data_layout;
+ // Capture the data layout (rather than `self`) for the offset computation
+ // inside the returned closure.
+ // `Size` multiplication
+ Ok((0..len).map(move |i| base.offset(stride * i, field_layout, dl)))
+ }
+
+    /// Index into an array that lives in memory.
+    pub fn mplace_index(
+        &self,
+        base: &MPlaceTy<'tcx, M::Provenance>,
+        index: u64,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        // Defer to the operand-based implementation; an element of an
+        // in-memory array is itself always in memory.
+        let elem = self.operand_index(&base.into(), index)?;
+        Ok(elem.assert_mem_place())
+    }
+
+    /// Index into an array place. Forces the base into memory first.
+    pub fn place_index(
+        &mut self,
+        base: &PlaceTy<'tcx, M::Provenance>,
+        index: u64,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+        // There's not a lot we can do here, since we cannot have a place to a part of a local. If
+        // we are accessing the only element of a 1-element array, it's still the entire local...
+        // that doesn't seem worth it.
+        let mplace = self.force_allocation(base)?;
+        let elem = self.mplace_index(&mplace, index)?;
+        Ok(elem.into())
+    }
+
+ //# ConstantIndex support
+
+ /// Project to a `ConstantIndex` element of an array/slice operand: an element at a
+ /// constant `offset` from the start (`from_end == false`) or from the end
+ /// (`from_end == true`), after checking the array is at least `min_length` long.
+ fn operand_constant_index(
+ &self,
+ base: &OpTy<'tcx, M::Provenance>,
+ offset: u64,
+ min_length: u64,
+ from_end: bool,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ let n = base.len(self)?;
+ if n < min_length {
+ // This can only be reached in ConstProp and non-rustc-MIR.
+ throw_ub!(BoundsCheckFailed { len: min_length, index: n });
+ }
+
+ let index = if from_end {
+ // `offset` counts backwards from the end; `n >= min_length >= offset`
+ // guarantees the subtraction cannot underflow.
+ assert!(0 < offset && offset <= min_length);
+ n.checked_sub(offset).unwrap()
+ } else {
+ assert!(offset < min_length);
+ offset
+ };
+
+ self.operand_index(base, index)
+ }
+
+    /// `ConstantIndex` projection on a place. Forces the base into memory, then
+    /// defers to the operand-based implementation.
+    fn place_constant_index(
+        &mut self,
+        base: &PlaceTy<'tcx, M::Provenance>,
+        offset: u64,
+        min_length: u64,
+        from_end: bool,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+        let mplace = self.force_allocation(base)?;
+        let elem = self.operand_constant_index(&mplace.into(), offset, min_length, from_end)?;
+        Ok(elem.assert_mem_place().into())
+    }
+
+ //# Subslicing
+
+ /// Project to a subslice of an array/slice operand. `from` is the first included
+ /// element; `to` is either the first excluded element (`from_end == false`) or the
+ /// number of elements excluded from the end (`from_end == true`).
+ fn operand_subslice(
+ &self,
+ base: &OpTy<'tcx, M::Provenance>,
+ from: u64,
+ to: u64,
+ from_end: bool,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ let len = base.len(self)?; // also asserts that we have a type where this makes sense
+ let actual_to = if from_end {
+ if from.checked_add(to).map_or(true, |to| to > len) {
+ // This can only be reached in ConstProp and non-rustc-MIR.
+ throw_ub!(BoundsCheckFailed { len: len, index: from.saturating_add(to) });
+ }
+ len.checked_sub(to).unwrap()
+ } else {
+ to
+ };
+
+ // Not using layout method because that works with usize, and does not work with slices
+ // (that have count 0 in their layout).
+ let from_offset = match base.layout.fields {
+ abi::FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
+ _ => {
+ span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout)
+ }
+ };
+
+ // Compute meta and new layout
+ let inner_len = actual_to.checked_sub(from).unwrap();
+ let (meta, ty) = match base.layout.ty.kind() {
+ // It is not nice to match on the type, but that seems to be the only way to
+ // implement this.
+ // Subslicing an array yields a shorter array type, with no metadata needed.
+ ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(*inner, inner_len)),
+ // Subslicing a slice keeps the type but needs the new length as metadata.
+ ty::Slice(..) => {
+ let len = Scalar::from_machine_usize(inner_len, self);
+ (MemPlaceMeta::Meta(len), base.layout.ty)
+ }
+ _ => {
+ span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty)
+ }
+ };
+ let layout = self.layout_of(ty)?;
+ base.offset_with_meta(from_offset, meta, layout, self)
+ }
+
+    /// `Subslice` projection on a place. Forces the base into memory, then defers
+    /// to the operand-based implementation.
+    pub fn place_subslice(
+        &mut self,
+        base: &PlaceTy<'tcx, M::Provenance>,
+        from: u64,
+        to: u64,
+        from_end: bool,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+        let mplace = self.force_allocation(base)?;
+        let sub = self.operand_subslice(&mplace.into(), from, to, from_end)?;
+        Ok(sub.assert_mem_place().into())
+    }
+
+ //# Applying a general projection
+
+ /// Projects into a place, applying a single MIR projection element.
+ /// Dispatches to the specialized `place_*` methods above.
+ #[instrument(skip(self), level = "trace")]
+ pub fn place_projection(
+ &mut self,
+ base: &PlaceTy<'tcx, M::Provenance>,
+ proj_elem: mir::PlaceElem<'tcx>,
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+ use rustc_middle::mir::ProjectionElem::*;
+ Ok(match proj_elem {
+ Field(field, _) => self.place_field(base, field.index())?,
+ Downcast(_, variant) => self.place_downcast(base, variant)?,
+ // Deref reads the pointer value, so go through the operand path.
+ Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
+ Index(local) => {
+ // The index is stored in a `usize` local; read it out first.
+ let layout = self.layout_of(self.tcx.types.usize)?;
+ let n = self.local_to_op(self.frame(), local, Some(layout))?;
+ let n = self.read_scalar(&n)?.to_machine_usize(self)?;
+ self.place_index(base, n)?
+ }
+ ConstantIndex { offset, min_length, from_end } => {
+ self.place_constant_index(base, offset, min_length, from_end)?
+ }
+ Subslice { from, to, from_end } => self.place_subslice(base, from, to, from_end)?,
+ })
+ }
+
+ /// Projects into an operand, applying a single MIR projection element.
+ /// Dispatches to the specialized `operand_*` methods above.
+ #[instrument(skip(self), level = "trace")]
+ pub fn operand_projection(
+ &self,
+ base: &OpTy<'tcx, M::Provenance>,
+ proj_elem: mir::PlaceElem<'tcx>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ use rustc_middle::mir::ProjectionElem::*;
+ Ok(match proj_elem {
+ Field(field, _) => self.operand_field(base, field.index())?,
+ Downcast(_, variant) => self.operand_downcast(base, variant)?,
+ Deref => self.deref_operand(base)?.into(),
+ Index(local) => {
+ // The index is stored in a `usize` local; read it out first.
+ let layout = self.layout_of(self.tcx.types.usize)?;
+ let n = self.local_to_op(self.frame(), local, Some(layout))?;
+ let n = self.read_scalar(&n)?.to_machine_usize(self)?;
+ self.operand_index(base, n)?
+ }
+ ConstantIndex { offset, min_length, from_end } => {
+ self.operand_constant_index(base, offset, min_length, from_end)?
+ }
+ Subslice { from, to, from_end } => self.operand_subslice(base, from, to, from_end)?,
+ })
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
new file mode 100644
index 000000000..fea158a9f
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -0,0 +1,320 @@
+//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
+//!
+//! The main entry point is the `step` method.
+
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{InterpResult, Scalar};
+use rustc_middle::ty::layout::LayoutOf;
+
+use super::{InterpCx, Machine};
+
+/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
+/// same type as the result.
+#[inline]
+fn binop_left_homogeneous(op: mir::BinOp) -> bool {
+    use rustc_middle::mir::BinOp::*;
+    // Exhaustive match on purpose: adding a `BinOp` variant must force a
+    // decision here.
+    match op {
+        // Comparisons produce `bool`, regardless of the operand type.
+        Eq | Ne | Lt | Le | Gt | Ge => false,
+        // Everything else returns a value of the LHS type.
+        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
+    }
+}
+/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
+/// same type as the LHS.
+#[inline]
+fn binop_right_homogeneous(op: mir::BinOp) -> bool {
+    use rustc_middle::mir::BinOp::*;
+    // Exhaustive match on purpose: adding a `BinOp` variant must force a
+    // decision here.
+    match op {
+        // `Offset` and the shifts take an RHS whose type may differ from the LHS.
+        Offset | Shl | Shr => false,
+        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Drive the interpreter by stepping until there is nothing left to do
+    /// (or an error is raised).
+    pub fn run(&mut self) -> InterpResult<'tcx> {
+        loop {
+            if !self.step()? {
+                return Ok(());
+            }
+        }
+    }
+
+ /// Returns `true` as long as there are more things to do.
+ ///
+ /// This is used by [priroda](https://github.com/oli-obk/priroda)
+ ///
+ /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`
+ #[inline(always)]
+ pub fn step(&mut self) -> InterpResult<'tcx, bool> {
+ // Empty stack means the interpretation is complete.
+ if self.stack().is_empty() {
+ return Ok(false);
+ }
+
+ let Ok(loc) = self.frame().loc else {
+ // We are unwinding and this fn has no cleanup code.
+ // Just go on unwinding.
+ trace!("unwinding: skipping frame");
+ self.pop_stack_frame(/* unwinding */ true)?;
+ return Ok(true);
+ };
+ let basic_block = &self.body().basic_blocks()[loc.block];
+
+ // Execute the next statement of the current basic block, if any are left.
+ if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
+ let old_frames = self.frame_idx();
+ self.statement(stmt)?;
+ // Make sure we are not updating `statement_index` of the wrong frame.
+ assert_eq!(old_frames, self.frame_idx());
+ // Advance the program counter.
+ self.frame_mut().loc.as_mut().unwrap().statement_index += 1;
+ return Ok(true);
+ }
+
+ // All statements done; give the machine a hook, then run the terminator.
+ M::before_terminator(self)?;
+
+ let terminator = basic_block.terminator();
+ self.terminator(terminator)?;
+ Ok(true)
+ }
+
+ /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
+ /// statement counter.
+ ///
+ /// This does NOT move the statement counter forward, the caller has to do that!
+ pub fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
+ info!("{:?}", stmt);
+
+ use rustc_middle::mir::StatementKind::*;
+
+ match &stmt.kind {
+ // `place = rvalue` -- the bulk of the work happens in `eval_rvalue_into_place`.
+ Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,
+
+ // Overwrite the enum discriminant of `place`.
+ SetDiscriminant { place, variant_index } => {
+ let dest = self.eval_place(**place)?;
+ self.write_discriminant(*variant_index, &dest)?;
+ }
+
+ // Reset `place` to uninitialized bytes.
+ Deinit(place) => {
+ let dest = self.eval_place(**place)?;
+ self.write_uninit(&dest)?;
+ }
+
+ // Mark locals as alive
+ StorageLive(local) => {
+ self.storage_live(*local)?;
+ }
+
+ // Mark locals as dead
+ StorageDead(local) => {
+ self.storage_dead(*local)?;
+ }
+
+ // No dynamic semantics attached to `FakeRead`; MIR
+ // interpreter is solely intended for borrowck'ed code.
+ FakeRead(..) => {}
+
+ // Stacked Borrows.
+ Retag(kind, place) => {
+ let dest = self.eval_place(**place)?;
+ M::retag(self, *kind, &dest)?;
+ }
+
+ // Call CopyNonOverlapping
+ CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping { src, dst, count }) => {
+ let src = self.eval_operand(src, None)?;
+ let dst = self.eval_operand(dst, None)?;
+ let count = self.eval_operand(count, None)?;
+ self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)?;
+ }
+
+ // Statements we do not track.
+ AscribeUserType(..) => {}
+
+ // Currently, Miri discards Coverage statements. Coverage statements are only injected
+ // via an optional compile time MIR pass and have no side effects. Since Coverage
+ // statements don't exist at the source level, it is safe for Miri to ignore them, even
+ // for undefined behavior (UB) checks.
+ //
+ // A coverage counter inside a const expression (for example, a counter injected in a
+ // const function) is discarded when the const is evaluated at compile time. Whether
+ // this should change, and/or how to implement a const eval counter, is a subject of the
+ // following issue:
+ //
+ // FIXME(#73156): Handle source code coverage in const eval
+ Coverage(..) => {}
+
+ // Defined to do nothing. These are added by optimization passes, to avoid changing the
+ // size of MIR constantly.
+ Nop => {}
+ }
+
+ Ok(())
+ }
+
+ /// Evaluate an assignment statement.
+ ///
+ /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
+ /// type writes its results directly into the memory specified by the place.
+ pub fn eval_rvalue_into_place(
+ &mut self,
+ rvalue: &mir::Rvalue<'tcx>,
+ place: mir::Place<'tcx>,
+ ) -> InterpResult<'tcx> {
+ let dest = self.eval_place(place)?;
+ // FIXME: ensure some kind of non-aliasing between LHS and RHS?
+ // Also see https://github.com/rust-lang/rust/issues/68364.
+
+ use rustc_middle::mir::Rvalue::*;
+ match *rvalue {
+ // The machine supplies the base pointer of the thread-local static.
+ ThreadLocalRef(did) => {
+ let ptr = M::thread_local_static_base_pointer(self, did)?;
+ self.write_pointer(ptr, &dest)?;
+ }
+
+ Use(ref operand) => {
+ // Avoid recomputing the layout
+ let op = self.eval_operand(operand, Some(dest.layout))?;
+ self.copy_op(&op, &dest, /*allow_transmute*/ false)?;
+ }
+
+ CopyForDeref(ref place) => {
+ let op = self.eval_place_to_op(*place, Some(dest.layout))?;
+ self.copy_op(&op, &dest, /* allow_transmute*/ false)?;
+ }
+
+ BinaryOp(bin_op, box (ref left, ref right)) => {
+ // Reuse known layouts where the homogeneity of the operator allows it.
+ let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
+ let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
+ let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
+ let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
+ self.binop_ignore_overflow(bin_op, &left, &right, &dest)?;
+ }
+
+ CheckedBinaryOp(bin_op, box (ref left, ref right)) => {
+ // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
+ let left = self.read_immediate(&self.eval_operand(left, None)?)?;
+ let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
+ let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
+ self.binop_with_overflow(
+ bin_op, /*force_overflow_checks*/ false, &left, &right, &dest,
+ )?;
+ }
+
+ UnaryOp(un_op, ref operand) => {
+ // The operand always has the same type as the result.
+ let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
+ let val = self.unary_op(un_op, &val)?;
+ assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
+ self.write_immediate(*val, &dest)?;
+ }
+
+ // Only array aggregates reach the interpreter; the rest are lowered earlier.
+ Aggregate(box ref kind, ref operands) => {
+ assert!(matches!(kind, mir::AggregateKind::Array(..)));
+
+ // Write each operand into the corresponding array element.
+ for (field_index, operand) in operands.iter().enumerate() {
+ let op = self.eval_operand(operand, None)?;
+ let field_dest = self.place_field(&dest, field_index)?;
+ self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
+ }
+ }
+
+ Repeat(ref operand, _) => {
+ let src = self.eval_operand(operand, None)?;
+ assert!(!src.layout.is_unsized());
+ let dest = self.force_allocation(&dest)?;
+ let length = dest.len(self)?;
+
+ if length == 0 {
+ // Nothing to copy... but let's still make sure that `dest` as a place is valid.
+ self.get_place_alloc_mut(&dest)?;
+ } else {
+ // Write the src to the first element.
+ let first = self.mplace_field(&dest, 0)?;
+ self.copy_op(&src, &first.into(), /*allow_transmute*/ false)?;
+
+ // This is performance-sensitive code for big static/const arrays! So we
+ // avoid writing each operand individually and instead just make many copies
+ // of the first element.
+ let elem_size = first.layout.size;
+ let first_ptr = first.ptr;
+ let rest_ptr = first_ptr.offset(elem_size, self)?;
+ // For the alignment of `rest_ptr`, we crucially do *not* use `first.align` as
+ // that place might be more aligned than its type mandates (a `u8` array could
+ // be 4-aligned if it sits at the right spot in a struct). Instead we use
+ // `first.layout.align`, i.e., the alignment given by the type.
+ self.mem_copy_repeatedly(
+ first_ptr,
+ first.align,
+ rest_ptr,
+ first.layout.align.abi,
+ elem_size,
+ length - 1,
+ /*nonoverlapping:*/ true,
+ )?;
+ }
+ }
+
+ Len(place) => {
+ // The length lives in the metadata, so the place must be in memory.
+ let src = self.eval_place(place)?;
+ let mplace = self.force_allocation(&src)?;
+ let len = mplace.len(self)?;
+ self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?;
+ }
+
+ AddressOf(_, place) | Ref(_, _, place) => {
+ // Taking a reference/pointer requires the place to be in memory.
+ let src = self.eval_place(place)?;
+ let place = self.force_allocation(&src)?;
+ self.write_immediate(place.to_ref(self), &dest)?;
+ }
+
+ NullaryOp(null_op, ty) => {
+ let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty)?;
+ let layout = self.layout_of(ty)?;
+ if layout.is_unsized() {
+ // FIXME: This should be a span_bug (#80742)
+ self.tcx.sess.delay_span_bug(
+ self.frame().current_span(),
+ &format!("Nullary MIR operator called for unsized type {}", ty),
+ );
+ throw_inval!(SizeOfUnsizedType(ty));
+ }
+ let val = match null_op {
+ mir::NullOp::SizeOf => layout.size.bytes(),
+ mir::NullOp::AlignOf => layout.align.abi.bytes(),
+ };
+ self.write_scalar(Scalar::from_machine_usize(val, self), &dest)?;
+ }
+
+ // Copy the operand's immediate value into the destination as-is.
+ ShallowInitBox(ref operand, _) => {
+ let src = self.eval_operand(operand, None)?;
+ let v = self.read_immediate(&src)?;
+ self.write_immediate(*v, &dest)?;
+ }
+
+ Cast(cast_kind, ref operand, cast_ty) => {
+ let src = self.eval_operand(operand, None)?;
+ let cast_ty =
+ self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty)?;
+ self.cast(&src, cast_kind, cast_ty, &dest)?;
+ }
+
+ Discriminant(place) => {
+ let op = self.eval_place_to_op(place, None)?;
+ let discr_val = self.read_discriminant(&op)?.0;
+ self.write_scalar(discr_val, &dest)?;
+ }
+ }
+
+ trace!("{:?}", self.dump_place(*dest));
+
+ Ok(())
+ }
+
+    /// Evaluate the given terminator. Will also adjust the stack frame and statement position accordingly.
+    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
+        info!("{:?}", terminator.kind);
+
+        self.eval_terminator(terminator)?;
+        // If the stack is gone, there is nothing left to report.
+        if self.stack().is_empty() {
+            return Ok(());
+        }
+        // Log where execution continues, unless we are unwinding without cleanup.
+        if let Ok(loc) = self.frame().loc {
+            info!("// executing {:?}", loc.block);
+        }
+        Ok(())
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
new file mode 100644
index 000000000..d563e35f9
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -0,0 +1,671 @@
+use std::borrow::Cow;
+
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
+use rustc_middle::ty::Instance;
+use rustc_middle::{
+ mir,
+ ty::{self, Ty},
+};
+use rustc_target::abi;
+use rustc_target::abi::call::{ArgAbi, ArgAttribute, ArgAttributes, FnAbi, PassMode};
+use rustc_target::spec::abi::Abi;
+
+use super::{
+ FnVal, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemoryKind, OpTy, Operand,
+ PlaceTy, Scalar, StackPopCleanup, StackPopUnwind,
+};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Evaluate a single MIR terminator: transfer control (and possibly push/pop
+ /// stack frames) according to the terminator's kind.
+ pub(super) fn eval_terminator(
+ &mut self,
+ terminator: &mir::Terminator<'tcx>,
+ ) -> InterpResult<'tcx> {
+ use rustc_middle::mir::TerminatorKind::*;
+ match terminator.kind {
+ Return => {
+ self.pop_stack_frame(/* unwinding */ false)?
+ }
+
+ Goto { target } => self.go_to_block(target),
+
+ SwitchInt { ref discr, ref targets, switch_ty } => {
+ let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
+ trace!("SwitchInt({:?})", *discr);
+ assert_eq!(discr.layout.ty, switch_ty);
+
+ // Branch to the `otherwise` case by default, if no match is found.
+ assert!(!targets.iter().is_empty());
+ let mut target_block = targets.otherwise();
+
+ for (const_int, target) in targets.iter() {
+ // Compare using MIR BinOp::Eq, to also support pointer values.
+ // (Avoiding `self.binary_op` as that does some redundant layout computation.)
+ let res = self
+ .overflowing_binary_op(
+ mir::BinOp::Eq,
+ &discr,
+ &ImmTy::from_uint(const_int, discr.layout),
+ )?
+ .0;
+ if res.to_bool()? {
+ target_block = target;
+ break;
+ }
+ }
+
+ self.go_to_block(target_block);
+ }
+
+ Call {
+ ref func,
+ ref args,
+ destination,
+ target,
+ ref cleanup,
+ from_hir_call: _,
+ fn_span: _,
+ } => {
+ // Remember where we are so we can later assert progress was made.
+ let old_stack = self.frame_idx();
+ let old_loc = self.frame().loc;
+ let func = self.eval_operand(func, None)?;
+ let args = self.eval_operands(args)?;
+
+ let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
+ let fn_sig =
+ self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
+ // Arguments beyond the declared inputs are the C-variadic extra args.
+ let extra_args = &args[fn_sig.inputs().len()..];
+ let extra_args = self.tcx.mk_type_list(extra_args.iter().map(|arg| arg.layout.ty));
+
+ let (fn_val, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
+ ty::FnPtr(_sig) => {
+ let fn_ptr = self.read_pointer(&func)?;
+ let fn_val = self.get_ptr_fn(fn_ptr)?;
+ (fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
+ }
+ ty::FnDef(def_id, substs) => {
+ let instance =
+ self.resolve(ty::WithOptConstParam::unknown(def_id), substs)?;
+ (
+ FnVal::Instance(instance),
+ self.fn_abi_of_instance(instance, extra_args)?,
+ instance.def.requires_caller_location(*self.tcx),
+ )
+ }
+ _ => span_bug!(
+ terminator.source_info.span,
+ "invalid callee of type {:?}",
+ func.layout.ty
+ ),
+ };
+
+ let destination = self.eval_place(destination)?;
+ self.eval_fn_call(
+ fn_val,
+ (fn_sig.abi, fn_abi),
+ &args,
+ with_caller_location,
+ &destination,
+ target,
+ match (cleanup, fn_abi.can_unwind) {
+ (Some(cleanup), true) => StackPopUnwind::Cleanup(*cleanup),
+ (None, true) => StackPopUnwind::Skip,
+ (_, false) => StackPopUnwind::NotAllowed,
+ },
+ )?;
+ // Sanity-check that `eval_fn_call` either pushed a new frame or
+ // did a jump to another block.
+ if self.frame_idx() == old_stack && self.frame().loc == old_loc {
+ span_bug!(terminator.source_info.span, "evaluating this call made no progress");
+ }
+ }
+
+ Drop { place, target, unwind } => {
+ let place = self.eval_place(place)?;
+ let ty = place.layout.ty;
+ trace!("TerminatorKind::drop: {:?}, type {}", place, ty);
+
+ let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
+ self.drop_in_place(&place, instance, target, unwind)?;
+ }
+
+ Assert { ref cond, expected, ref msg, target, cleanup } => {
+ let cond_val =
+ self.read_immediate(&self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?;
+ if expected == cond_val {
+ self.go_to_block(target);
+ } else {
+ // Let the machine decide how to report the panic (e.g. CTFE errors out).
+ M::assert_panic(self, msg, cleanup)?;
+ }
+ }
+
+ Abort => {
+ M::abort(self, "the program aborted execution".to_owned())?;
+ }
+
+ // When we encounter Resume, we've finished unwinding
+ // cleanup for the current stack frame. We pop it in order
+ // to continue unwinding the next frame
+ Resume => {
+ trace!("unwinding: resuming from cleanup");
+ // By definition, a Resume terminator means
+ // that we're unwinding
+ self.pop_stack_frame(/* unwinding */ true)?;
+ return Ok(());
+ }
+
+ // It is UB to ever encounter this.
+ Unreachable => throw_ub!(Unreachable),
+
+ // These should never occur for MIR we actually run.
+ DropAndReplace { .. }
+ | FalseEdge { .. }
+ | FalseUnwind { .. }
+ | Yield { .. }
+ | GeneratorDrop => span_bug!(
+ terminator.source_info.span,
+ "{:#?} should have been eliminated by MIR pass",
+ terminator.kind
+ ),
+
+ // Inline assembly can't be interpreted.
+ InlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
+ }
+
+ Ok(())
+ }
+
+ /// Returns whether an argument with the caller's ABI may be passed to a callee
+ /// expecting `callee_abi`. This is a heuristic: it compares the layouts, the
+ /// padding, and the `PassMode`s, and is deliberately conservative (returning
+ /// `false` in unclear cases).
+ fn check_argument_compat(
+ caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ ) -> bool {
+ // Heuristic for type comparison.
+ let layout_compat = || {
+ if caller_abi.layout.ty == callee_abi.layout.ty {
+ // No question
+ return true;
+ }
+ if caller_abi.layout.is_unsized() || callee_abi.layout.is_unsized() {
+ // No, no, no. We require the types to *exactly* match for unsized arguments. If
+ // these are somehow unsized "in a different way" (say, `dyn Trait` vs `[i32]`),
+ // then who knows what happens.
+ return false;
+ }
+ if caller_abi.layout.size != callee_abi.layout.size
+ || caller_abi.layout.align.abi != callee_abi.layout.align.abi
+ {
+ // This cannot go well...
+ return false;
+ }
+ // The rest *should* be okay, but we are extra conservative.
+ match (caller_abi.layout.abi, callee_abi.layout.abi) {
+ // Different valid ranges are okay (once we enforce validity,
+ // that will take care to make it UB to leave the range, just
+ // like for transmute).
+ (abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => {
+ caller.primitive() == callee.primitive()
+ }
+ (
+ abi::Abi::ScalarPair(caller1, caller2),
+ abi::Abi::ScalarPair(callee1, callee2),
+ ) => {
+ caller1.primitive() == callee1.primitive()
+ && caller2.primitive() == callee2.primitive()
+ }
+ // Be conservative
+ _ => false,
+ }
+ };
+ // Padding must be fully equal.
+ let pad_compat = || caller_abi.pad == callee_abi.pad;
+ // When comparing the PassMode, we have to be smart about comparing the attributes.
+ let arg_attr_compat = |a1: ArgAttributes, a2: ArgAttributes| {
+ // There's only one regular attribute that matters for the call ABI: InReg.
+ // Everything else is things like noalias, dereferencable, nonnull, ...
+ // (This also applies to pointee_size, pointee_align.)
+ if a1.regular.contains(ArgAttribute::InReg) != a2.regular.contains(ArgAttribute::InReg)
+ {
+ return false;
+ }
+ // We also compare the sign extension mode -- this could let the callee make assumptions
+ // about bits that conceptually were not even passed.
+ if a1.arg_ext != a2.arg_ext {
+ return false;
+ }
+ return true;
+ };
+ // Two `PassMode`s are compatible when they agree structurally and their
+ // attributes pass `arg_attr_compat`.
+ let mode_compat = || match (caller_abi.mode, callee_abi.mode) {
+ (PassMode::Ignore, PassMode::Ignore) => true,
+ (PassMode::Direct(a1), PassMode::Direct(a2)) => arg_attr_compat(a1, a2),
+ (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => {
+ arg_attr_compat(a1, a2) && arg_attr_compat(b1, b2)
+ }
+ (PassMode::Cast(c1), PassMode::Cast(c2)) => c1 == c2,
+ (
+ PassMode::Indirect { attrs: a1, extra_attrs: None, on_stack: s1 },
+ PassMode::Indirect { attrs: a2, extra_attrs: None, on_stack: s2 },
+ ) => arg_attr_compat(a1, a2) && s1 == s2,
+ (
+ PassMode::Indirect { attrs: a1, extra_attrs: Some(e1), on_stack: s1 },
+ PassMode::Indirect { attrs: a2, extra_attrs: Some(e2), on_stack: s2 },
+ ) => arg_attr_compat(a1, a2) && arg_attr_compat(e1, e2) && s1 == s2,
+ _ => false,
+ };
+
+ // All three checks must pass; log a trace on mismatch for debugging.
+ if layout_compat() && pad_compat() && mode_compat() {
+ return true;
+ }
+ trace!(
+ "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
+ caller_abi,
+ callee_abi
+ );
+ return false;
+ }
+
+    /// Initialize a single callee argument, checking the types for compatibility.
+    ///
+    /// `caller_args` yields the caller's remaining (not yet consumed) operands together with
+    /// their ABI descriptions; `callee_abi` and `callee_arg` describe the parameter being
+    /// initialized. Arguments whose `PassMode` is `Ignore` consume nothing from `caller_args`.
+    fn pass_argument<'x, 'y>(
+        &mut self,
+        caller_args: &mut impl Iterator<
+            Item = (&'x OpTy<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
+        >,
+        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        callee_arg: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx>
+    where
+        'tcx: 'x,
+        'tcx: 'y,
+    {
+        if matches!(callee_abi.mode, PassMode::Ignore) {
+            // This one is skipped.
+            return Ok(());
+        }
+        // Find next caller arg.
+        let (caller_arg, caller_abi) = caller_args.next().ok_or_else(|| {
+            err_ub_format!("calling a function with fewer arguments than it requires")
+        })?;
+        // Now, check
+        if !Self::check_argument_compat(caller_abi, callee_abi) {
+            throw_ub_format!(
+                "calling a function with argument of type {:?} passing data of type {:?}",
+                callee_arg.layout.ty,
+                caller_arg.layout.ty
+            )
+        }
+        // Special handling for unsized parameters.
+        if caller_arg.layout.is_unsized() {
+            // `check_argument_compat` ensures that both have the same type, so we know they will use the metadata the same way.
+            assert_eq!(caller_arg.layout.ty, callee_arg.layout.ty);
+            // We have to properly pre-allocate the memory for the callee.
+            // So let's tear down some wrappers.
+            // This all has to be in memory, there are no immediate unsized values.
+            let src = caller_arg.assert_mem_place();
+            // The destination cannot be one of these "spread args".
+            let (dest_frame, dest_local) = callee_arg.assert_local();
+            // We are just initializing things, so there can't be anything here yet.
+            assert!(matches!(
+                *self.local_to_op(&self.stack()[dest_frame], dest_local, None)?,
+                Operand::Immediate(Immediate::Uninit)
+            ));
+            // Allocate enough memory to hold `src`.
+            let Some((size, align)) = self.size_and_align_of_mplace(&src)? else {
+                span_bug!(self.cur_span(), "unsized fn arg with `extern` type tail should not be allowed")
+            };
+            let ptr = self.allocate_ptr(size, align, MemoryKind::Stack)?;
+            let dest_place =
+                MPlaceTy::from_aligned_ptr_with_meta(ptr.into(), callee_arg.layout, src.meta);
+            // Update the local to be that new place.
+            *M::access_local_mut(self, dest_frame, dest_local)? = Operand::Indirect(*dest_place);
+        }
+        // We allow some transmutes here.
+        // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
+        // is true for all `copy_op`, but there are a lot of special cases for argument passing
+        // specifically.)
+        self.copy_op(&caller_arg, callee_arg, /*allow_transmute*/ true)
+    }
+
+    /// Call this function -- pushing the stack frame and initializing the arguments.
+    ///
+    /// `caller_fn_abi` is used to determine if all the arguments are passed the proper way.
+    /// However, we also need `caller_abi` to determine if we need to do untupling of arguments.
+    ///
+    /// `with_caller_location` indicates whether the caller passed a caller location. Miri
+    /// implements caller locations without argument passing, but to match `FnAbi` we need to know
+    /// when those arguments are present.
+    ///
+    /// Note that `unwind` may be downgraded to `NotAllowed` if the callee's ABI says it
+    /// cannot unwind.
+    pub(crate) fn eval_fn_call(
+        &mut self,
+        fn_val: FnVal<'tcx, M::ExtraFnVal>,
+        (caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>),
+        args: &[OpTy<'tcx, M::Provenance>],
+        with_caller_location: bool,
+        destination: &PlaceTy<'tcx, M::Provenance>,
+        target: Option<mir::BasicBlock>,
+        mut unwind: StackPopUnwind,
+    ) -> InterpResult<'tcx> {
+        trace!("eval_fn_call: {:#?}", fn_val);
+
+        let instance = match fn_val {
+            FnVal::Instance(instance) => instance,
+            FnVal::Other(extra) => {
+                return M::call_extra_fn(
+                    self,
+                    extra,
+                    caller_abi,
+                    args,
+                    destination,
+                    target,
+                    unwind,
+                );
+            }
+        };
+
+        match instance.def {
+            ty::InstanceDef::Intrinsic(def_id) => {
+                assert!(self.tcx.is_intrinsic(def_id));
+                // caller_fn_abi is not relevant here, we interpret the arguments directly for each intrinsic.
+                M::call_intrinsic(self, instance, args, destination, target, unwind)
+            }
+            ty::InstanceDef::VTableShim(..)
+            | ty::InstanceDef::ReifyShim(..)
+            | ty::InstanceDef::ClosureOnceShim { .. }
+            | ty::InstanceDef::FnPtrShim(..)
+            | ty::InstanceDef::DropGlue(..)
+            | ty::InstanceDef::CloneShim(..)
+            | ty::InstanceDef::Item(_) => {
+                // We need MIR for this fn
+                let Some((body, instance)) =
+                    M::find_mir_or_eval_fn(self, instance, caller_abi, args, destination, target, unwind)? else {
+                    return Ok(());
+                };
+
+                // Compute callee information using the `instance` returned by
+                // `find_mir_or_eval_fn`.
+                // FIXME: for variadic support, do we have to somehow determine callee's extra_args?
+                let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
+
+                if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic {
+                    throw_unsup_format!("calling a c-variadic function is not supported");
+                }
+
+                if M::enforce_abi(self) {
+                    if caller_fn_abi.conv != callee_fn_abi.conv {
+                        throw_ub_format!(
+                            "calling a function with calling convention {:?} using calling convention {:?}",
+                            callee_fn_abi.conv,
+                            caller_fn_abi.conv
+                        )
+                    }
+                }
+
+                if !matches!(unwind, StackPopUnwind::NotAllowed) && !callee_fn_abi.can_unwind {
+                    // The callee cannot unwind.
+                    unwind = StackPopUnwind::NotAllowed;
+                }
+
+                self.push_stack_frame(
+                    instance,
+                    body,
+                    destination,
+                    StackPopCleanup::Goto { ret: target, unwind },
+                )?;
+
+                // If an error is raised here, pop the frame again to get an accurate backtrace.
+                // To this end, we wrap it all in a `try` block.
+                let res: InterpResult<'tcx> = try {
+                    trace!(
+                        "caller ABI: {:?}, args: {:#?}",
+                        caller_abi,
+                        args.iter()
+                            .map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
+                            .collect::<Vec<_>>()
+                    );
+                    trace!(
+                        "spread_arg: {:?}, locals: {:#?}",
+                        body.spread_arg,
+                        body.args_iter()
+                            .map(|local| (
+                                local,
+                                self.layout_of_local(self.frame(), local, None).unwrap().ty
+                            ))
+                            .collect::<Vec<_>>()
+                    );
+
+                    // In principle, we have two iterators: Where the arguments come from, and where
+                    // they go to.
+
+                    // For where they come from: If the ABI is RustCall, we untuple the
+                    // last incoming argument. These two iterators do not have the same type,
+                    // so to keep the code paths uniform we accept an allocation
+                    // (for RustCall ABI only).
+                    let caller_args: Cow<'_, [OpTy<'tcx, M::Provenance>]> =
+                        if caller_abi == Abi::RustCall && !args.is_empty() {
+                            // Untuple
+                            let (untuple_arg, args) = args.split_last().unwrap();
+                            trace!("eval_fn_call: Will pass last argument by untupling");
+                            Cow::from(
+                                args.iter()
+                                    .map(|a| Ok(a.clone()))
+                                    .chain(
+                                        (0..untuple_arg.layout.fields.count())
+                                            .map(|i| self.operand_field(untuple_arg, i)),
+                                    )
+                                    .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::Provenance>>>>(
+                                    )?,
+                            )
+                        } else {
+                            // Plain arg passing
+                            Cow::from(args)
+                        };
+                    // If `with_caller_location` is set we pretend there is an extra argument (that
+                    // we will not pass).
+                    assert_eq!(
+                        caller_args.len() + if with_caller_location { 1 } else { 0 },
+                        caller_fn_abi.args.len(),
+                        "mismatch between caller ABI and caller arguments",
+                    );
+                    let mut caller_args = caller_args
+                        .iter()
+                        .zip(caller_fn_abi.args.iter())
+                        .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore));
+
+                    // Now we have to spread them out across the callee's locals,
+                    // taking into account the `spread_arg`. If we could write
+                    // this is a single iterator (that handles `spread_arg`), then
+                    // `pass_argument` would be the loop body. It takes care to
+                    // not advance `caller_iter` for ZSTs.
+                    let mut callee_args_abis = callee_fn_abi.args.iter();
+                    for local in body.args_iter() {
+                        let dest = self.eval_place(mir::Place::from(local))?;
+                        if Some(local) == body.spread_arg {
+                            // Must be a tuple
+                            for i in 0..dest.layout.fields.count() {
+                                let dest = self.place_field(&dest, i)?;
+                                let callee_abi = callee_args_abis.next().unwrap();
+                                self.pass_argument(&mut caller_args, callee_abi, &dest)?;
+                            }
+                        } else {
+                            // Normal argument
+                            let callee_abi = callee_args_abis.next().unwrap();
+                            self.pass_argument(&mut caller_args, callee_abi, &dest)?;
+                        }
+                    }
+                    // If the callee needs a caller location, pretend we consume one more argument from the ABI.
+                    if instance.def.requires_caller_location(*self.tcx) {
+                        callee_args_abis.next().unwrap();
+                    }
+                    // Now we should have no more caller args or callee arg ABIs
+                    assert!(
+                        callee_args_abis.next().is_none(),
+                        "mismatch between callee ABI and callee body arguments"
+                    );
+                    if caller_args.next().is_some() {
+                        throw_ub_format!("calling a function with more arguments than it expected")
+                    }
+                    // Don't forget to check the return type!
+                    if !Self::check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret) {
+                        throw_ub_format!(
+                            "calling a function with return type {:?} passing \
+                                    return place of type {:?}",
+                            callee_fn_abi.ret.layout.ty,
+                            caller_fn_abi.ret.layout.ty,
+                        )
+                    }
+                };
+                match res {
+                    Err(err) => {
+                        self.stack_mut().pop();
+                        Err(err)
+                    }
+                    Ok(()) => Ok(()),
+                }
+            }
+            // cannot use the shim here, because that will only result in infinite recursion
+            ty::InstanceDef::Virtual(def_id, idx) => {
+                let mut args = args.to_vec();
+                // We have to implement all "object safe receivers". So we have to go search for a
+                // pointer or `dyn Trait` type, but it could be wrapped in newtypes. So recursively
+                // unwrap those newtypes until we are there.
+                let mut receiver = args[0].clone();
+                let receiver_place = loop {
+                    match receiver.layout.ty.kind() {
+                        ty::Ref(..) | ty::RawPtr(..) => break self.deref_operand(&receiver)?,
+                        ty::Dynamic(..) => break receiver.assert_mem_place(), // no immediate unsized values
+                        _ => {
+                            // Not there yet, search for the only non-ZST field.
+                            let mut non_zst_field = None;
+                            for i in 0..receiver.layout.fields.count() {
+                                let field = self.operand_field(&receiver, i)?;
+                                if !field.layout.is_zst() {
+                                    assert!(
+                                        non_zst_field.is_none(),
+                                        "multiple non-ZST fields in dyn receiver type {}",
+                                        receiver.layout.ty
+                                    );
+                                    non_zst_field = Some(field);
+                                }
+                            }
+                            receiver = non_zst_field.unwrap_or_else(|| {
+                                panic!(
+                                    "no non-ZST fields in dyn receiver type {}",
+                                    receiver.layout.ty
+                                )
+                            });
+                        }
+                    }
+                };
+                // Obtain the underlying trait we are working on.
+                let receiver_tail = self
+                    .tcx
+                    .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env);
+                let ty::Dynamic(data, ..) = receiver_tail.kind() else {
+                    span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
+                };
+
+                // Get the required information from the vtable.
+                let vptr = receiver_place.meta.unwrap_meta().to_pointer(self)?;
+                let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
+                if dyn_trait != data.principal() {
+                    throw_ub_format!(
+                        "`dyn` call on a pointer whose vtable does not match its type"
+                    );
+                }
+
+                // Now determine the actual method to call. We can do that in two different ways and
+                // compare them to ensure everything fits.
+                let Some(ty::VtblEntry::Method(fn_inst)) = self.get_vtable_entries(vptr)?.get(idx).copied() else {
+                    throw_ub_format!("`dyn` call trying to call something that is not a method")
+                };
+                if cfg!(debug_assertions) {
+                    let tcx = *self.tcx;
+
+                    let trait_def_id = tcx.trait_of_item(def_id).unwrap();
+                    let virtual_trait_ref =
+                        ty::TraitRef::from_method(tcx, trait_def_id, instance.substs);
+                    assert_eq!(
+                        receiver_tail,
+                        virtual_trait_ref.self_ty(),
+                        "mismatch in underlying dyn trait computation within Miri and MIR building",
+                    );
+                    let existential_trait_ref =
+                        ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref);
+                    let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty);
+
+                    let concrete_method = Instance::resolve_for_vtable(
+                        tcx,
+                        self.param_env,
+                        def_id,
+                        instance.substs.rebase_onto(tcx, trait_def_id, concrete_trait_ref.substs),
+                    )
+                    .unwrap();
+                    assert_eq!(fn_inst, concrete_method);
+                }
+
+                // `*mut receiver_place.layout.ty` is almost the layout that we
+                // want for args[0]: We have to project to field 0 because we want
+                // a thin pointer.
+                assert!(receiver_place.layout.is_unsized());
+                let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
+                let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0);
+                // Adjust receiver argument.
+                args[0] = OpTy::from(ImmTy::from_immediate(
+                    Scalar::from_maybe_pointer(receiver_place.ptr, self).into(),
+                    this_receiver_ptr,
+                ));
+                trace!("Patched receiver operand to {:#?}", args[0]);
+                // recurse with concrete function
+                self.eval_fn_call(
+                    FnVal::Instance(fn_inst),
+                    (caller_abi, caller_fn_abi),
+                    &args,
+                    with_caller_location,
+                    destination,
+                    target,
+                    unwind,
+                )
+            }
+        }
+    }
+
+    /// Evaluates the drop glue for `place`: resolves the concrete drop implementation
+    /// (looking through `dyn Trait` via the vtable if needed) and calls it via `eval_fn_call`.
+    ///
+    /// `target` is the basic block to continue in after the drop returns; `unwind` is the
+    /// optional cleanup block used if the drop implementation unwinds.
+    fn drop_in_place(
+        &mut self,
+        place: &PlaceTy<'tcx, M::Provenance>,
+        instance: ty::Instance<'tcx>,
+        target: mir::BasicBlock,
+        unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx> {
+        trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
+        // We take the address of the object. This may well be unaligned, which is fine
+        // for us here. However, unaligned accesses will probably make the actual drop
+        // implementation fail -- a problem shared by rustc.
+        let place = self.force_allocation(place)?;
+
+        let (instance, place) = match place.layout.ty.kind() {
+            ty::Dynamic(..) => {
+                // Dropping a trait object. Need to find actual drop fn.
+                let place = self.unpack_dyn_trait(&place)?;
+                let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
+                (instance, place)
+            }
+            _ => (instance, place),
+        };
+        let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
+
+        // The drop glue takes a single argument: a mutable pointer to the place being dropped.
+        let arg = ImmTy::from_immediate(
+            place.to_ref(self),
+            self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
+        );
+        // Drop glue returns `()`, so a fake zero-sized return place suffices.
+        let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?);
+
+        self.eval_fn_call(
+            FnVal::Instance(instance),
+            (Abi::Rust, fn_abi),
+            &[arg.into()],
+            // `with_caller_location`: drop glue does not take a caller location.
+            false,
+            &ret.into(),
+            Some(target),
+            match unwind {
+                Some(cleanup) => StackPopUnwind::Cleanup(cleanup),
+                None => StackPopUnwind::Skip,
+            },
+        )
+    }
diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs
new file mode 100644
index 000000000..b3a511d5a
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/traits.rs
@@ -0,0 +1,59 @@
+use rustc_middle::mir::interpret::{InterpResult, Pointer};
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_target::abi::{Align, Size};
+
+use super::util::ensure_monomorphic_enough;
+use super::{InterpCx, Machine};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
+    /// objects.
+    ///
+    /// The `poly_trait_ref` encodes the erased self type. Hence, if we are making an object
+    /// `Foo<Trait>` from a value of type `Foo<T>`, then `poly_trait_ref` would map `T: Trait`.
+    /// `None` here means that this is an auto trait without any methods, so we only need the
+    /// basic vtable (drop, size, align).
+    pub fn get_vtable_ptr(
+        &self,
+        ty: Ty<'tcx>,
+        poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+    ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
+        trace!("get_vtable(trait_ref={:?})", poly_trait_ref);
+
+        // Vtables are identified by (type, erased trait ref) only, so erase regions first.
+        let (ty, poly_trait_ref) = self.tcx.erase_regions((ty, poly_trait_ref));
+
+        // All vtables must be monomorphic, bail out otherwise.
+        ensure_monomorphic_enough(*self.tcx, ty)?;
+        ensure_monomorphic_enough(*self.tcx, poly_trait_ref)?;
+
+        // Create the (symbolic) vtable allocation and return a pointer into it.
+        let vtable_symbolic_allocation = self.tcx.create_vtable_alloc(ty, poly_trait_ref);
+        let vtable_ptr = self.global_base_pointer(Pointer::from(vtable_symbolic_allocation))?;
+        Ok(vtable_ptr.into())
+    }
+
+    /// Returns a high-level representation of the entries of the given vtable.
+    ///
+    /// For a vtable without a principal trait (auto trait only), this is just
+    /// `TyCtxt::COMMON_VTABLE_ENTRIES` (the basic drop/size/align entries).
+    pub fn get_vtable_entries(
+        &self,
+        vtable: Pointer<Option<M::Provenance>>,
+    ) -> InterpResult<'tcx, &'tcx [ty::VtblEntry<'tcx>]> {
+        let (ty, poly_trait_ref) = self.get_ptr_vtable(vtable)?;
+        Ok(if let Some(poly_trait_ref) = poly_trait_ref {
+            let trait_ref = poly_trait_ref.with_self_ty(*self.tcx, ty);
+            let trait_ref = self.tcx.erase_regions(trait_ref);
+            self.tcx.vtable_entries(trait_ref)
+        } else {
+            TyCtxt::COMMON_VTABLE_ENTRIES
+        })
+    }
+
+    /// Returns the size and alignment of the concrete type the given vtable was created for.
+    pub fn get_vtable_size_and_align(
+        &self,
+        vtable: Pointer<Option<M::Provenance>>,
+    ) -> InterpResult<'tcx, (Size, Align)> {
+        let (ty, _trait_ref) = self.get_ptr_vtable(vtable)?;
+        let layout = self.layout_of(ty)?;
+        assert!(!layout.is_unsized(), "there are no vtables for unsized types");
+        Ok((layout.size, layout.align.abi))
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
new file mode 100644
index 000000000..2bc521d5b
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -0,0 +1,73 @@
+use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor};
+use std::convert::TryInto;
+use std::ops::ControlFlow;
+
+/// Checks whether a type contains generic parameters which require substitution.
+///
+/// In case it does, returns a `TooGeneric` const eval error. Note that due to polymorphization
+/// types may be "concrete enough" even though they still contain generic parameters in
+/// case these parameters are unused.
+pub(crate) fn ensure_monomorphic_enough<'tcx, T>(tcx: TyCtxt<'tcx>, ty: T) -> InterpResult<'tcx>
+where
+    T: TypeVisitable<'tcx>,
+{
+    debug!("ensure_monomorphic_enough: ty={:?}", ty);
+    if !ty.needs_subst() {
+        return Ok(());
+    }
+
+    // Break value returned by the visitor below when a *used* generic parameter is found.
+    struct FoundParam;
+    // Walks a type and breaks on the first used `ty::Param` / `ty::ConstKind::Param`.
+    struct UsedParamsNeedSubstVisitor<'tcx> {
+        tcx: TyCtxt<'tcx>,
+    }
+
+    impl<'tcx> TypeVisitor<'tcx> for UsedParamsNeedSubstVisitor<'tcx> {
+        type BreakTy = FoundParam;
+
+        fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+            if !ty.needs_subst() {
+                return ControlFlow::CONTINUE;
+            }
+
+            match *ty.kind() {
+                ty::Param(_) => ControlFlow::Break(FoundParam),
+                ty::Closure(def_id, substs)
+                | ty::Generator(def_id, substs, ..)
+                | ty::FnDef(def_id, substs) => {
+                    // Polymorphization may mark some of this item's generic parameters as
+                    // unused; substs for those parameters may stay generic.
+                    let instance = ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id));
+                    let unused_params = self.tcx.unused_generic_params(instance);
+                    for (index, subst) in substs.into_iter().enumerate() {
+                        let index = index
+                            .try_into()
+                            .expect("more generic parameters than can fit into a `u32`");
+                        let is_used = unused_params.contains(index).map_or(true, |unused| !unused);
+                        // Only recurse when generic parameters in fns, closures and generators
+                        // are used and require substitution.
+                        // Just in case there are closures or generators within this subst,
+                        // recurse.
+                        if is_used && subst.needs_subst() {
+                            return subst.visit_with(self);
+                        }
+                    }
+                    ControlFlow::CONTINUE
+                }
+                _ => ty.super_visit_with(self),
+            }
+        }
+
+        fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+            // Consts can also be generic (`ty::ConstKind::Param`).
+            match c.kind() {
+                ty::ConstKind::Param(..) => ControlFlow::Break(FoundParam),
+                _ => c.super_visit_with(self),
+            }
+        }
+    }
+
+    let mut vis = UsedParamsNeedSubstVisitor { tcx };
+    if matches!(ty.visit_with(&mut vis), ControlFlow::Break(FoundParam)) {
+        throw_inval!(TooGeneric);
+    } else {
+        Ok(())
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
new file mode 100644
index 000000000..0e50d1ed4
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -0,0 +1,986 @@
+//! Check the validity invariant of a given value, and tell the user
+//! where in the value it got violated.
+//! In const context, this goes even further and tries to approximate const safety.
+//! That's useful because it means other passes (e.g. promotion) can rely on `const`s
+//! to be const-safe.
+
+use std::convert::TryFrom;
+use std::fmt::Write;
+use std::num::NonZeroUsize;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_middle::mir::interpret::InterpError;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::DUMMY_SP;
+use rustc_target::abi::{Abi, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange};
+
+use std::hash::Hash;
+
+use super::{
+ alloc_range, CheckInAllocMsg, GlobalAlloc, Immediate, InterpCx, InterpResult, MPlaceTy,
+ Machine, MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor,
+};
+
+/// Emits a validation failure for the value at path `$where`: builds a message of the
+/// form "encountered <what>[, but expected <expected>]" and throws
+/// `err_ub!(ValidationFailure)` with it.
+macro_rules! throw_validation_failure {
+    ($where:expr, { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )?) => {{
+        let mut msg = String::new();
+        msg.push_str("encountered ");
+        write!(&mut msg, $($what_fmt),+).unwrap();
+        $(
+            msg.push_str(", but expected ");
+            write!(&mut msg, $($expected_fmt),+).unwrap();
+        )?
+        // Only render the path string when the path is non-empty (a failure at the root
+        // of the value has no projections to print).
+        let path = rustc_middle::ty::print::with_no_trimmed_paths!({
+            let where_ = &$where;
+            if !where_.is_empty() {
+                let mut path = String::new();
+                write_path(&mut path, where_);
+                Some(path)
+            } else {
+                None
+            }
+        });
+        throw_ub!(ValidationFailure { path, msg })
+    }};
+}
+
+/// If $e throws an error matching the pattern, throw a validation failure.
+/// Other errors are passed back to the caller, unchanged -- and if they reach the root of
+/// the visitor, we make sure only validation errors and `InvalidProgram` errors are left.
+/// This lets you use the patterns as a kind of validation list, asserting which errors
+/// can possibly happen:
+///
+/// On success, the macro evaluates to the `Ok` value of `$e`.
+///
+/// ```
+/// let v = try_validation!(some_fn(), some_path, {
+///     Foo | Bar | Baz => { "some failure" },
+/// });
+/// ```
+///
+/// An additional expected parameter can also be added to the failure message:
+///
+/// ```
+/// let v = try_validation!(some_fn(), some_path, {
+///     Foo | Bar | Baz => { "some failure" } expected { "something that wasn't a failure" },
+/// });
+/// ```
+///
+/// An additional nicety is that both parameters actually take format args, so you can just write
+/// the format string in directly:
+///
+/// ```
+/// let v = try_validation!(some_fn(), some_path, {
+///     Foo | Bar | Baz => { "{:?}", some_failure } expected { "{}", expected_value },
+/// });
+/// ```
+///
+macro_rules! try_validation {
+    ($e:expr, $where:expr,
+    $( $( $p:pat_param )|+ => { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )? ),+ $(,)?
+    ) => {{
+        match $e {
+            Ok(x) => x,
+            // We catch the error and turn it into a validation failure. We are okay with
+            // allocation here as this can only slow down builds that fail anyway.
+            Err(e) => match e.kind() {
+                $(
+                    $($p)|+ =>
+                       throw_validation_failure!(
+                            $where,
+                            { $( $what_fmt ),+ } $( expected { $( $expected_fmt ),+ } )?
+                        )
+                ),+,
+                #[allow(unreachable_patterns)]
+                _ => Err::<!, _>(e)?,
+            }
+        }
+    }};
+}
+
+/// We want to show a nice path to the invalid field for diagnostics,
+/// but avoid string operations in the happy case where no error happens.
+/// So we track a `Vec<PathElem>` where `PathElem` contains all the data we
+/// need to later print something for the user.
+///
+/// See `write_path` for how each element is rendered.
+#[derive(Copy, Clone, Debug)]
+pub enum PathElem {
+    /// A named field (of a struct, union, or enum variant); printed as `.name`.
+    Field(Symbol),
+    /// Downcast to the enum variant with the given name.
+    Variant(Symbol),
+    /// Downcast to a generator state, identified by its variant index.
+    GeneratorState(VariantIdx),
+    /// A variable captured by a closure or generator.
+    CapturedVar(Symbol),
+    /// An array/slice element; printed as `[i]`.
+    ArrayElem(usize),
+    /// A tuple field; printed as `.i`.
+    TupleElem(usize),
+    /// A pointer dereference; printed as `.<deref>`.
+    Deref,
+    /// The discriminant ("tag") of an enum.
+    EnumTag,
+    /// The discriminant ("tag") of a generator.
+    GeneratorTag,
+    /// Downcast of a `dyn Trait` value to its concrete type.
+    DynDowncast,
+}
+
+/// Extra things to check for during validation of CTFE results.
+pub enum CtfeValidationMode {
+    /// Regular validation, nothing special happening.
+    ///
+    /// (This is still CTFE validation; runtime validation is indicated by
+    /// `ctfe_mode: None` in `ValidityVisitor`.)
+    Regular,
+    /// Validation of a `const`.
+    /// `inner` says if this is an inner, indirect allocation (as opposed to the top-level const
+    /// allocation). Being an inner allocation makes a difference because the top-level allocation
+    /// of a `const` is copied for each use, but the inner allocations are implicitly shared.
+    /// `allow_static_ptrs` says if pointers to statics are permitted (which is the case for promoteds in statics).
+    Const { inner: bool, allow_static_ptrs: bool },
+}
+
+/// State for tracking recursive validation of references
+pub struct RefTracking<T, PATH = ()> {
+    /// Everything that has already been (or is queued to be) validated,
+    /// so we never visit the same place twice.
+    pub seen: FxHashSet<T>,
+    /// Worklist of places still to validate, each with the path that led to it.
+    pub todo: Vec<(T, PATH)>,
+}
+
+impl<T: Copy + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH> {
+    /// Creates an empty tracker: nothing seen, nothing queued.
+    pub fn empty() -> Self {
+        RefTracking { seen: FxHashSet::default(), todo: vec![] }
+    }
+    /// Creates a tracker with `op` both marked as seen and queued as the initial work item.
+    pub fn new(op: T) -> Self {
+        let mut ref_tracking_for_consts =
+            RefTracking { seen: FxHashSet::default(), todo: vec![(op, PATH::default())] };
+        ref_tracking_for_consts.seen.insert(op);
+        ref_tracking_for_consts
+    }
+
+    /// Queues `op` for later validation, unless it has been seen before.
+    /// `path` is only evaluated when `op` is actually new.
+    pub fn track(&mut self, op: T, path: impl FnOnce() -> PATH) {
+        if self.seen.insert(op) {
+            trace!("Recursing below ptr {:#?}", op);
+            let path = path();
+            // Remember to come back to this later.
+            self.todo.push((op, path));
+        }
+    }
+}
+
+/// Format a path
+///
+/// Renders each element of `path` into `out` (e.g. `.foo.<deref>[3]`). Note that the
+/// root of the path (the value being validated) is not printed.
+fn write_path(out: &mut String, path: &[PathElem]) {
+    use self::PathElem::*;
+
+    for elem in path.iter() {
+        match elem {
+            Field(name) => write!(out, ".{}", name),
+            EnumTag => write!(out, ".<enum-tag>"),
+            Variant(name) => write!(out, ".<enum-variant({})>", name),
+            GeneratorTag => write!(out, ".<generator-tag>"),
+            GeneratorState(idx) => write!(out, ".<generator-state({})>", idx.index()),
+            CapturedVar(name) => write!(out, ".<captured-var({})>", name),
+            TupleElem(idx) => write!(out, ".{}", idx),
+            ArrayElem(idx) => write!(out, "[{}]", idx),
+            // `.<deref>` does not match Rust syntax, but it is more readable for long paths -- and
+            // some of the other items here also are not Rust syntax. Actually we can't
+            // even use the usual syntax because we are just showing the projections,
+            // not the root.
+            Deref => write!(out, ".<deref>"),
+            DynDowncast => write!(out, ".<dyn-downcast>"),
+        }
+        .unwrap()
+    }
+}
+
+// Formats such that a sentence like "expected something {}" to mean
+// "expected something <in the given range>" makes sense.
+// `max_hi` is the largest value the underlying type can hold. A range with `lo > hi`
+// "wraps around" the end of the type, i.e. valid values sit at both ends.
+fn wrapping_range_format(r: WrappingRange, max_hi: u128) -> String {
+    let WrappingRange { start: lo, end: hi } = r;
+    assert!(hi <= max_hi);
+    if lo > hi {
+        // Wrapping range: everything outside `hi+1..lo` is valid.
+        format!("less or equal to {}, or greater or equal to {}", hi, lo)
+    } else if lo == hi {
+        format!("equal to {}", lo)
+    } else if lo == 0 {
+        assert!(hi < max_hi, "should not be printing if the range covers everything");
+        format!("less or equal to {}", hi)
+    } else if hi == max_hi {
+        assert!(lo > 0, "should not be printing if the range covers everything");
+        format!("greater or equal to {}", lo)
+    } else {
+        format!("in the range {:?}", r)
+    }
+}
+
+/// The visitor that drives validation of a single value (implements `ValueVisitor`).
+struct ValidityVisitor<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    /// The `path` may be pushed to, but the part that is present when a function
+    /// starts must not be changed! `visit_fields` and `visit_array` rely on
+    /// this stack discipline.
+    path: Vec<PathElem>,
+    /// If present, pointers encountered during validation are queued here so the caller
+    /// can validate their pointees as well (see `RefTracking`). `None` disables recursion.
+    ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>>,
+    /// `None` indicates this is not validating for CTFE (but for runtime).
+    ctfe_mode: Option<CtfeValidationMode>,
+    /// The interpreter context used to read memory, compute layouts, etc.
+    ecx: &'rt InterpCx<'mir, 'tcx, M>,
+}
+
+impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M> {
+    /// Maps "projecting to `field` of `layout`" to the `PathElem` shown in diagnostics.
+    ///
+    /// The "field" may be an enum/generator tag, a captured variable of a closure/generator,
+    /// a tuple or ADT field, an array/slice element, or a `dyn` downcast; anything else is
+    /// not an aggregate and is a bug.
+    fn aggregate_field_path_elem(&mut self, layout: TyAndLayout<'tcx>, field: usize) -> PathElem {
+        // First, check if we are projecting to a variant.
+        match layout.variants {
+            Variants::Multiple { tag_field, .. } => {
+                if tag_field == field {
+                    return match layout.ty.kind() {
+                        ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag,
+                        ty::Generator(..) => PathElem::GeneratorTag,
+                        _ => bug!("non-variant type {:?}", layout.ty),
+                    };
+                }
+            }
+            Variants::Single { .. } => {}
+        }
+
+        // Now we know we are projecting to a field, so figure out which one.
+        match layout.ty.kind() {
+            // generators and closures.
+            ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
+                let mut name = None;
+                // FIXME this should be more descriptive i.e. CapturePlace instead of CapturedVar
+                // https://github.com/rust-lang/project-rfc-2229/issues/46
+                if let Some(local_def_id) = def_id.as_local() {
+                    let tables = self.ecx.tcx.typeck(local_def_id);
+                    if let Some(captured_place) =
+                        tables.closure_min_captures_flattened(local_def_id).nth(field)
+                    {
+                        // Sometimes the index is beyond the number of upvars (seen
+                        // for a generator).
+                        let var_hir_id = captured_place.get_root_variable();
+                        let node = self.ecx.tcx.hir().get(var_hir_id);
+                        if let hir::Node::Pat(pat) = node {
+                            if let hir::PatKind::Binding(_, _, ident, _) = pat.kind {
+                                name = Some(ident.name);
+                            }
+                        }
+                    }
+                }
+
+                PathElem::CapturedVar(name.unwrap_or_else(|| {
+                    // Fall back to showing the field index.
+                    sym::integer(field)
+                }))
+            }
+
+            // tuples
+            ty::Tuple(_) => PathElem::TupleElem(field),
+
+            // enums
+            ty::Adt(def, ..) if def.is_enum() => {
+                // we might be projecting *to* a variant, or to a field *in* a variant.
+                match layout.variants {
+                    Variants::Single { index } => {
+                        // Inside a variant
+                        PathElem::Field(def.variant(index).fields[field].name)
+                    }
+                    Variants::Multiple { .. } => bug!("we handled variants above"),
+                }
+            }
+
+            // other ADTs
+            ty::Adt(def, _) => PathElem::Field(def.non_enum_variant().fields[field].name),
+
+            // arrays/slices
+            ty::Array(..) | ty::Slice(..) => PathElem::ArrayElem(field),
+
+            // dyn traits
+            ty::Dynamic(..) => PathElem::DynDowncast,
+
+            // nothing else has an aggregate layout
+            _ => bug!("aggregate_field_path_elem: got non-aggregate type {:?}", layout.ty),
+        }
+    }
+
+    /// Runs `f` with `elem` pushed onto `self.path`, truncating the path back afterwards.
+    /// If `f` errors, the `?` propagates before the truncation, so the path still contains
+    /// `elem` for diagnostics.
+    fn with_elem<R>(
+        &mut self,
+        elem: PathElem,
+        f: impl FnOnce(&mut Self) -> InterpResult<'tcx, R>,
+    ) -> InterpResult<'tcx, R> {
+        // Remember the old state
+        let path_len = self.path.len();
+        // Record new element
+        self.path.push(elem);
+        // Perform operation
+        let r = f(self)?;
+        // Undo changes
+        self.path.truncate(path_len);
+        // Done
+        Ok(r)
+    }
+
+    /// Validates the metadata of a wide pointer whose pointee has type `pointee`:
+    /// a genuine vtable pointer for `dyn Trait` tails, a machine-usize length for
+    /// slices/`str`. `extern type` tails are unsized but carry no metadata to check.
+    fn check_wide_ptr_meta(
+        &mut self,
+        meta: MemPlaceMeta<M::Provenance>,
+        pointee: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx> {
+        let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
+        match tail.kind() {
+            ty::Dynamic(..) => {
+                let vtable = meta.unwrap_meta().to_pointer(self.ecx)?;
+                // Make sure it is a genuine vtable pointer.
+                let (_ty, _trait) = try_validation!(
+                    self.ecx.get_ptr_vtable(vtable),
+                    self.path,
+                    err_ub!(DanglingIntPointer(..)) |
+                    err_ub!(InvalidVTablePointer(..)) =>
+                        { "{vtable}" } expected { "a vtable pointer" },
+                );
+                // FIXME: check if the type/trait match what ty::Dynamic says?
+            }
+            ty::Slice(..) | ty::Str => {
+                let _len = meta.unwrap_meta().to_machine_usize(self.ecx)?;
+                // We do not check that `len * elem_size <= isize::MAX`:
+                // that is only required for references, and there it falls out of the
+                // "dereferenceable" check performed by Stacked Borrows.
+            }
+            ty::Foreign(..) => {
+                // Unsized, but not wide.
+            }
+            _ => bug!("Unexpected unsized type tail: {:?}", tail),
+        }
+
+        Ok(())
+    }
+
+ /// Check a reference or `Box`.
+ fn check_safe_pointer(
+ &mut self,
+ value: &OpTy<'tcx, M::Provenance>,
+ kind: &str,
+ ) -> InterpResult<'tcx> {
+ let value = self.ecx.read_immediate(value)?;
+ // Handle wide pointers.
+ // Check metadata early, for better diagnostics
+ let place = try_validation!(
+ self.ecx.ref_to_mplace(&value),
+ self.path,
+ err_ub!(InvalidUninitBytes(None)) => { "uninitialized {}", kind },
+ );
+ if place.layout.is_unsized() {
+ self.check_wide_ptr_meta(place.meta, place.layout)?;
+ }
+ // Make sure this is dereferenceable and all.
+ let size_and_align = try_validation!(
+ self.ecx.size_and_align_of_mplace(&place),
+ self.path,
+ err_ub!(InvalidMeta(msg)) => { "invalid {} metadata: {}", kind, msg },
+ );
+ let (size, align) = size_and_align
+ // for the purpose of validity, consider foreign types to have
+ // alignment and size determined by the layout (size will be 0,
+ // alignment should take attributes into account).
+ .unwrap_or_else(|| (place.layout.size, place.layout.align.abi));
+ // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
+ try_validation!(
+ self.ecx.check_ptr_access_align(
+ place.ptr,
+ size,
+ align,
+ CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
+ ),
+ self.path,
+ err_ub!(AlignmentCheckFailed { required, has }) =>
+ {
+ "an unaligned {kind} (required {} byte alignment but found {})",
+ required.bytes(),
+ has.bytes()
+ },
+ err_ub!(DanglingIntPointer(0, _)) =>
+ { "a null {kind}" },
+ err_ub!(DanglingIntPointer(i, _)) =>
+ { "a dangling {kind} (address {i:#x} is unallocated)" },
+ err_ub!(PointerOutOfBounds { .. }) =>
+ { "a dangling {kind} (going beyond the bounds of its allocation)" },
+ // This cannot happen during const-eval (because interning already detects
+ // dangling pointers), but it can happen in Miri.
+ err_ub!(PointerUseAfterFree(..)) =>
+ { "a dangling {kind} (use-after-free)" },
+ );
+ // Do not allow pointers to uninhabited types.
+ if place.layout.abi.is_uninhabited() {
+ throw_validation_failure!(self.path,
+ { "a {kind} pointing to uninhabited type {}", place.layout.ty }
+ )
+ }
+ // Recursive checking
+ if let Some(ref mut ref_tracking) = self.ref_tracking {
+ // Proceed recursively even for ZST, no reason to skip them!
+ // `!` is a ZST and we want to validate it.
+ if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr) {
+ // Special handling for pointers to statics (irrespective of their type).
+ let alloc_kind = self.ecx.tcx.try_get_global_alloc(alloc_id);
+ if let Some(GlobalAlloc::Static(did)) = alloc_kind {
+ assert!(!self.ecx.tcx.is_thread_local_static(did));
+ assert!(self.ecx.tcx.is_static(did));
+ if matches!(
+ self.ctfe_mode,
+ Some(CtfeValidationMode::Const { allow_static_ptrs: false, .. })
+ ) {
+ // See const_eval::machine::MemoryExtra::can_access_statics for why
+ // this check is so important.
+ // This check is reachable when the const just referenced the static,
+ // but never read it (so we never entered `before_access_global`).
+ throw_validation_failure!(self.path,
+ { "a {} pointing to a static variable", kind }
+ );
+ }
+ // We skip checking other statics. These statics must be sound by
+ // themselves, and the only way to get broken statics here is by using
+ // unsafe code.
+                        // The reason we don't check other statics is twofold. For one, in all
+ // sound cases, the static was already validated on its own, and second, we
+ // trigger cycle errors if we try to compute the value of the other static
+ // and that static refers back to us.
+ // We might miss const-invalid data,
+ // but things are still sound otherwise (in particular re: consts
+ // referring to statics).
+ return Ok(());
+ }
+ }
+ let path = &self.path;
+ ref_tracking.track(place, || {
+ // We need to clone the path anyway, make sure it gets created
+ // with enough space for the additional `Deref`.
+ let mut new_path = Vec::with_capacity(path.len() + 1);
+ new_path.extend(path);
+ new_path.push(PathElem::Deref);
+ new_path
+ });
+ }
+ Ok(())
+ }
+
+ fn read_scalar(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, ScalarMaybeUninit<M::Provenance>> {
+ self.ecx.read_scalar(op)
+ }
+
+ fn read_immediate_forced(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ Ok(*self.ecx.read_immediate_raw(op, /*force*/ true)?.unwrap())
+ }
+
+ /// Check if this is a value of primitive type, and if yes check the validity of the value
+ /// at that type. Return `true` if the type is indeed primitive.
+ fn try_visit_primitive(
+ &mut self,
+ value: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, bool> {
+ // Go over all the primitive types
+ let ty = value.layout.ty;
+ match ty.kind() {
+ ty::Bool => {
+ let value = self.read_scalar(value)?;
+ try_validation!(
+ value.to_bool(),
+ self.path,
+ err_ub!(InvalidBool(..)) | err_ub!(InvalidUninitBytes(None)) =>
+ { "{:x}", value } expected { "a boolean" },
+ );
+ Ok(true)
+ }
+ ty::Char => {
+ let value = self.read_scalar(value)?;
+ try_validation!(
+ value.to_char(),
+ self.path,
+ err_ub!(InvalidChar(..)) | err_ub!(InvalidUninitBytes(None)) =>
+ { "{:x}", value } expected { "a valid unicode scalar value (in `0..=0x10FFFF` but not in `0xD800..=0xDFFF`)" },
+ );
+ Ok(true)
+ }
+ ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
+ let value = self.read_scalar(value)?;
+ // NOTE: Keep this in sync with the array optimization for int/float
+ // types below!
+ if M::enforce_number_init(self.ecx) {
+ try_validation!(
+ value.check_init(),
+ self.path,
+ err_ub!(InvalidUninitBytes(..)) =>
+ { "{:x}", value } expected { "initialized bytes" }
+ );
+ }
+ // As a special exception we *do* match on a `Scalar` here, since we truly want
+ // to know its underlying representation (and *not* cast it to an integer).
+ let is_ptr = value.check_init().map_or(false, |v| matches!(v, Scalar::Ptr(..)));
+ if is_ptr {
+ throw_validation_failure!(self.path,
+ { "{:x}", value } expected { "plain (non-pointer) bytes" }
+ )
+ }
+ Ok(true)
+ }
+ ty::RawPtr(..) => {
+ // We are conservative with uninit for integers, but try to
+ // actually enforce the strict rules for raw pointers (mostly because
+ // that lets us re-use `ref_to_mplace`).
+ let place = try_validation!(
+ self.ecx.read_immediate(value).and_then(|ref i| self.ecx.ref_to_mplace(i)),
+ self.path,
+ err_ub!(InvalidUninitBytes(None)) => { "uninitialized raw pointer" },
+ );
+ if place.layout.is_unsized() {
+ self.check_wide_ptr_meta(place.meta, place.layout)?;
+ }
+ Ok(true)
+ }
+ ty::Ref(_, ty, mutbl) => {
+ if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. }))
+ && *mutbl == hir::Mutability::Mut
+ {
+ // A mutable reference inside a const? That does not seem right (except if it is
+ // a ZST).
+ let layout = self.ecx.layout_of(*ty)?;
+ if !layout.is_zst() {
+ throw_validation_failure!(self.path, { "mutable reference in a `const`" });
+ }
+ }
+ self.check_safe_pointer(value, "reference")?;
+ Ok(true)
+ }
+ ty::FnPtr(_sig) => {
+ let value = try_validation!(
+ self.ecx.read_scalar(value).and_then(|v| v.check_init()),
+ self.path,
+ err_ub!(InvalidUninitBytes(None)) => { "uninitialized bytes" } expected { "a proper pointer or integer value" },
+ );
+
+ // If we check references recursively, also check that this points to a function.
+ if let Some(_) = self.ref_tracking {
+ let ptr = value.to_pointer(self.ecx)?;
+ let _fn = try_validation!(
+ self.ecx.get_ptr_fn(ptr),
+ self.path,
+ err_ub!(DanglingIntPointer(..)) |
+ err_ub!(InvalidFunctionPointer(..)) =>
+ { "{ptr}" } expected { "a function pointer" },
+ );
+ // FIXME: Check if the signature matches
+ } else {
+ // Otherwise (for standalone Miri), we have to still check it to be non-null.
+ if self.ecx.scalar_may_be_null(value)? {
+ throw_validation_failure!(self.path, { "a null function pointer" });
+ }
+ }
+ Ok(true)
+ }
+ ty::Never => throw_validation_failure!(self.path, { "a value of the never type `!`" }),
+ ty::Foreign(..) | ty::FnDef(..) => {
+ // Nothing to check.
+ Ok(true)
+ }
+ // The above should be all the primitive types. The rest is compound, we
+ // check them by visiting their fields/variants.
+ ty::Adt(..)
+ | ty::Tuple(..)
+ | ty::Array(..)
+ | ty::Slice(..)
+ | ty::Str
+ | ty::Dynamic(..)
+ | ty::Closure(..)
+ | ty::Generator(..) => Ok(false),
+ // Some types only occur during typechecking, they have no layout.
+ // We should not see them here and we could not check them anyway.
+ ty::Error(_)
+ | ty::Infer(..)
+ | ty::Placeholder(..)
+ | ty::Bound(..)
+ | ty::Param(..)
+ | ty::Opaque(..)
+ | ty::Projection(..)
+ | ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty),
+ }
+ }
+
+ fn visit_scalar(
+ &mut self,
+ scalar: ScalarMaybeUninit<M::Provenance>,
+ scalar_layout: ScalarAbi,
+ ) -> InterpResult<'tcx> {
+ // We check `is_full_range` in a slightly complicated way because *if* we are checking
+ // number validity, then we want to ensure that `Scalar::Initialized` is indeed initialized,
+ // i.e. that we go over the `check_init` below.
+ let size = scalar_layout.size(self.ecx);
+ let is_full_range = match scalar_layout {
+ ScalarAbi::Initialized { .. } => {
+ if M::enforce_number_init(self.ecx) {
+ false // not "full" since uninit is not accepted
+ } else {
+ scalar_layout.is_always_valid(self.ecx)
+ }
+ }
+ ScalarAbi::Union { .. } => true,
+ };
+ if is_full_range {
+            // Nothing to check. Crucially, we don't even `read_scalar` until here, since that would
+ // fail for `Union` scalars!
+ return Ok(());
+ }
+ // We have something to check: it must at least be initialized.
+ let valid_range = scalar_layout.valid_range(self.ecx);
+ let WrappingRange { start, end } = valid_range;
+ let max_value = size.unsigned_int_max();
+ assert!(end <= max_value);
+ let value = try_validation!(
+ scalar.check_init(),
+ self.path,
+ err_ub!(InvalidUninitBytes(None)) => { "{:x}", scalar }
+ expected { "something {}", wrapping_range_format(valid_range, max_value) },
+ );
+ let bits = match value.try_to_int() {
+ Ok(int) => int.assert_bits(size),
+ Err(_) => {
+ // So this is a pointer then, and casting to an int failed.
+ // Can only happen during CTFE.
+ // We support 2 kinds of ranges here: full range, and excluding zero.
+ if start == 1 && end == max_value {
+ // Only null is the niche. So make sure the ptr is NOT null.
+ if self.ecx.scalar_may_be_null(value)? {
+ throw_validation_failure!(self.path,
+ { "a potentially null pointer" }
+ expected {
+ "something that cannot possibly fail to be {}",
+ wrapping_range_format(valid_range, max_value)
+ }
+ )
+ } else {
+ return Ok(());
+ }
+ } else if scalar_layout.is_always_valid(self.ecx) {
+                    // Easy. (This is reachable if `enforce_number_init` is set.)
+ return Ok(());
+ } else {
+ // Conservatively, we reject, because the pointer *could* have a bad
+ // value.
+ throw_validation_failure!(self.path,
+ { "a pointer" }
+ expected {
+ "something that cannot possibly fail to be {}",
+ wrapping_range_format(valid_range, max_value)
+ }
+ )
+ }
+ }
+ };
+ // Now compare.
+ if valid_range.contains(bits) {
+ Ok(())
+ } else {
+ throw_validation_failure!(self.path,
+ { "{}", bits }
+ expected { "something {}", wrapping_range_format(valid_range, max_value) }
+ )
+ }
+ }
+}
+
+impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
+ for ValidityVisitor<'rt, 'mir, 'tcx, M>
+{
+ type V = OpTy<'tcx, M::Provenance>;
+
+ #[inline(always)]
+ fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
+ &self.ecx
+ }
+
+ fn read_discriminant(
+ &mut self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, VariantIdx> {
+ self.with_elem(PathElem::EnumTag, move |this| {
+ Ok(try_validation!(
+ this.ecx.read_discriminant(op),
+ this.path,
+ err_ub!(InvalidTag(val)) =>
+ { "{:x}", val } expected { "a valid enum tag" },
+ err_ub!(InvalidUninitBytes(None)) =>
+ { "uninitialized bytes" } expected { "a valid enum tag" },
+ )
+ .1)
+ })
+ }
+
+ #[inline]
+ fn visit_field(
+ &mut self,
+ old_op: &OpTy<'tcx, M::Provenance>,
+ field: usize,
+ new_op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ let elem = self.aggregate_field_path_elem(old_op.layout, field);
+ self.with_elem(elem, move |this| this.visit_value(new_op))
+ }
+
+ #[inline]
+ fn visit_variant(
+ &mut self,
+ old_op: &OpTy<'tcx, M::Provenance>,
+ variant_id: VariantIdx,
+ new_op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ let name = match old_op.layout.ty.kind() {
+ ty::Adt(adt, _) => PathElem::Variant(adt.variant(variant_id).name),
+ // Generators also have variants
+ ty::Generator(..) => PathElem::GeneratorState(variant_id),
+ _ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty),
+ };
+ self.with_elem(name, move |this| this.visit_value(new_op))
+ }
+
+ #[inline(always)]
+ fn visit_union(
+ &mut self,
+ op: &OpTy<'tcx, M::Provenance>,
+ _fields: NonZeroUsize,
+ ) -> InterpResult<'tcx> {
+ // Special check preventing `UnsafeCell` inside unions in the inner part of constants.
+ if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. })) {
+ if !op.layout.ty.is_freeze(self.ecx.tcx.at(DUMMY_SP), self.ecx.param_env) {
+ throw_validation_failure!(self.path, { "`UnsafeCell` in a `const`" });
+ }
+ }
+ Ok(())
+ }
+
+ #[inline]
+ fn visit_box(&mut self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+ self.check_safe_pointer(op, "box")?;
+ Ok(())
+ }
+
+ #[inline]
+ fn visit_value(&mut self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+ trace!("visit_value: {:?}, {:?}", *op, op.layout);
+
+ // Check primitive types -- the leaves of our recursive descent.
+ if self.try_visit_primitive(op)? {
+ return Ok(());
+ }
+
+ // Special check preventing `UnsafeCell` in the inner part of constants
+ if let Some(def) = op.layout.ty.ty_adt_def() {
+ if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. }))
+ && def.is_unsafe_cell()
+ {
+ throw_validation_failure!(self.path, { "`UnsafeCell` in a `const`" });
+ }
+ }
+
+ // Recursively walk the value at its type.
+ self.walk_value(op)?;
+
+ // *After* all of this, check the ABI. We need to check the ABI to handle
+ // types like `NonNull` where the `Scalar` info is more restrictive than what
+ // the fields say (`rustc_layout_scalar_valid_range_start`).
+ // But in most cases, this will just propagate what the fields say,
+ // and then we want the error to point at the field -- so, first recurse,
+ // then check ABI.
+ //
+ // FIXME: We could avoid some redundant checks here. For newtypes wrapping
+ // scalars, we do the same check on every "level" (e.g., first we check
+ // MyNewtype and then the scalar in there).
+ match op.layout.abi {
+ Abi::Uninhabited => {
+ throw_validation_failure!(self.path,
+ { "a value of uninhabited type {:?}", op.layout.ty }
+ );
+ }
+ Abi::Scalar(scalar_layout) => {
+ // We use a 'forced' read because we always need a `Immediate` here
+ // and treating "partially uninit" as "fully uninit" is fine for us.
+ let scalar = self.read_immediate_forced(op)?.to_scalar_or_uninit();
+ self.visit_scalar(scalar, scalar_layout)?;
+ }
+ Abi::ScalarPair(a_layout, b_layout) => {
+ // There is no `rustc_layout_scalar_valid_range_start` for pairs, so
+ // we would validate these things as we descend into the fields,
+ // but that can miss bugs in layout computation. Layout computation
+ // is subtle due to enums having ScalarPair layout, where one field
+ // is the discriminant.
+ if cfg!(debug_assertions) {
+ // We use a 'forced' read because we always need a `Immediate` here
+ // and treating "partially uninit" as "fully uninit" is fine for us.
+ let (a, b) = self.read_immediate_forced(op)?.to_scalar_or_uninit_pair();
+ self.visit_scalar(a, a_layout)?;
+ self.visit_scalar(b, b_layout)?;
+ }
+ }
+ Abi::Vector { .. } => {
+ // No checks here, we assume layout computation gets this right.
+ // (This is harder to check since Miri does not represent these as `Immediate`. We
+ // also cannot use field projections since this might be a newtype around a vector.)
+ }
+ Abi::Aggregate { .. } => {
+ // Nothing to do.
+ }
+ }
+
+ Ok(())
+ }
+
+ fn visit_aggregate(
+ &mut self,
+ op: &OpTy<'tcx, M::Provenance>,
+ fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
+ ) -> InterpResult<'tcx> {
+ match op.layout.ty.kind() {
+ ty::Str => {
+ let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
+ let len = mplace.len(self.ecx)?;
+ try_validation!(
+ self.ecx.read_bytes_ptr(mplace.ptr, Size::from_bytes(len)),
+ self.path,
+ err_ub!(InvalidUninitBytes(..)) => { "uninitialized data in `str`" },
+ );
+ }
+ ty::Array(tys, ..) | ty::Slice(tys)
+ // This optimization applies for types that can hold arbitrary bytes (such as
+ // integer and floating point types) or for structs or tuples with no fields.
+ // FIXME(wesleywiser) This logic could be extended further to arbitrary structs
+ // or tuples made up of integer/floating point types or inhabited ZSTs with no
+ // padding.
+ if matches!(tys.kind(), ty::Int(..) | ty::Uint(..) | ty::Float(..))
+ =>
+ {
+ // Optimized handling for arrays of integer/float type.
+
+ // This is the length of the array/slice.
+ let len = op.len(self.ecx)?;
+ // This is the element type size.
+ let layout = self.ecx.layout_of(*tys)?;
+ // This is the size in bytes of the whole array. (This checks for overflow.)
+ let size = layout.size * len;
+ // If the size is 0, there is nothing to check.
+                // (`size` can only be 0 if `len` is 0, and empty arrays are always valid.)
+ if size == Size::ZERO {
+ return Ok(());
+ }
+ // Now that we definitely have a non-ZST array, we know it lives in memory.
+ let mplace = match op.try_as_mplace() {
+ Ok(mplace) => mplace,
+ Err(imm) => match *imm {
+ Immediate::Uninit =>
+ throw_validation_failure!(self.path, { "uninitialized bytes" }),
+ Immediate::Scalar(..) | Immediate::ScalarPair(..) =>
+ bug!("arrays/slices can never have Scalar/ScalarPair layout"),
+ }
+ };
+
+ // Optimization: we just check the entire range at once.
+ // NOTE: Keep this in sync with the handling of integer and float
+                // types above, in `try_visit_primitive`.
+ // In run-time mode, we accept pointers in here. This is actually more
+ // permissive than a per-element check would be, e.g., we accept
+ // a &[u8] that contains a pointer even though bytewise checking would
+ // reject it. However, that's good: We don't inherently want
+ // to reject those pointers, we just do not have the machinery to
+ // talk about parts of a pointer.
+ // We also accept uninit, for consistency with the slow path.
+ let alloc = self.ecx.get_ptr_alloc(mplace.ptr, size, mplace.align)?.expect("we already excluded size 0");
+
+ match alloc.check_bytes(
+ alloc_range(Size::ZERO, size),
+ /*allow_uninit*/ !M::enforce_number_init(self.ecx),
+ /*allow_ptr*/ false,
+ ) {
+ // In the happy case, we needn't check anything else.
+ Ok(()) => {}
+ // Some error happened, try to provide a more detailed description.
+ Err(err) => {
+ // For some errors we might be able to provide extra information.
+ // (This custom logic does not fit the `try_validation!` macro.)
+ match err.kind() {
+ err_ub!(InvalidUninitBytes(Some((_alloc_id, access)))) => {
+ // Some byte was uninitialized, determine which
+ // element that byte belongs to so we can
+ // provide an index.
+ let i = usize::try_from(
+ access.uninit.start.bytes() / layout.size.bytes(),
+ )
+ .unwrap();
+ self.path.push(PathElem::ArrayElem(i));
+
+ throw_validation_failure!(self.path, { "uninitialized bytes" })
+ }
+
+ // Propagate upwards (that will also check for unexpected errors).
+ _ => return Err(err),
+ }
+ }
+ }
+ }
+ // Fast path for arrays and slices of ZSTs. We only need to check a single ZST element
+ // of an array and not all of them, because there's only a single value of a specific
+ // ZST type, so either validation fails for all elements or none.
+ ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(*tys)?.is_zst() => {
+ // Validate just the first element (if any).
+ self.walk_aggregate(op, fields.take(1))?
+ }
+ _ => {
+ self.walk_aggregate(op, fields)? // default handler
+ }
+ }
+ Ok(())
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ fn validate_operand_internal(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ path: Vec<PathElem>,
+ ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>>,
+ ctfe_mode: Option<CtfeValidationMode>,
+ ) -> InterpResult<'tcx> {
+ trace!("validate_operand_internal: {:?}, {:?}", *op, op.layout.ty);
+
+ // Construct a visitor
+ let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };
+
+ // Run it.
+ match visitor.visit_value(&op) {
+ Ok(()) => Ok(()),
+ // Pass through validation failures.
+ Err(err) if matches!(err.kind(), err_ub!(ValidationFailure { .. })) => Err(err),
+ // Complain about any other kind of UB error -- those are bad because we'd like to
+ // report them in a way that shows *where* in the value the issue lies.
+ Err(err) if matches!(err.kind(), InterpError::UndefinedBehavior(_)) => {
+ err.print_backtrace();
+ bug!("Unexpected Undefined Behavior error during validation: {}", err);
+ }
+ // Pass through everything else.
+ Err(err) => Err(err),
+ }
+ }
+
+ /// This function checks the data at `op` to be const-valid.
+ /// `op` is assumed to cover valid memory if it is an indirect operand.
+ /// It will error if the bits at the destination do not match the ones described by the layout.
+ ///
+ /// `ref_tracking` is used to record references that we encounter so that they
+ /// can be checked recursively by an outside driving loop.
+ ///
+ /// `constant` controls whether this must satisfy the rules for constants:
+ /// - no pointers to statics.
+ /// - no `UnsafeCell` or non-ZST `&mut`.
+ #[inline(always)]
+ pub fn const_validate_operand(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ path: Vec<PathElem>,
+ ref_tracking: &mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>,
+ ctfe_mode: CtfeValidationMode,
+ ) -> InterpResult<'tcx> {
+ self.validate_operand_internal(op, path, Some(ref_tracking), Some(ctfe_mode))
+ }
+
+ /// This function checks the data at `op` to be runtime-valid.
+ /// `op` is assumed to cover valid memory if it is an indirect operand.
+ /// It will error if the bits at the destination do not match the ones described by the layout.
+ #[inline(always)]
+ pub fn validate_operand(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+ self.validate_operand_internal(op, vec![], None, None)
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
new file mode 100644
index 000000000..aee1f93b1
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -0,0 +1,534 @@
+//! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound
+//! types until we arrive at the leaves, with custom handling for primitive types.
+
+use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::{FieldsShape, VariantIdx, Variants};
+
+use std::num::NonZeroUsize;
+
+use super::{InterpCx, MPlaceTy, Machine, OpTy, PlaceTy};
+
+/// A thing that we can project into, and that has a layout.
+/// This wouldn't have to depend on `Machine` but with the current type inference,
+/// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
+pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Sized {
+ /// Gets this value's layout.
+ fn layout(&self) -> TyAndLayout<'tcx>;
+
+ /// Makes this into an `OpTy`, in a cheap way that is good for reading.
+ fn to_op_for_read(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
+
+ /// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
+ fn to_op_for_proj(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ self.to_op_for_read(ecx)
+ }
+
+ /// Creates this from an `OpTy`.
+ ///
+ /// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
+
+ /// Projects to the given enum variant.
+ fn project_downcast(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self>;
+
+ /// Projects to the n-th field.
+ fn project_field(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self>;
+}
+
+/// A thing that we can project into given *mutable* access to `ecx`, and that has a layout.
+/// This wouldn't have to depend on `Machine` but with the current type inference,
+/// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
+pub trait ValueMut<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Sized {
+ /// Gets this value's layout.
+ fn layout(&self) -> TyAndLayout<'tcx>;
+
+ /// Makes this into an `OpTy`, in a cheap way that is good for reading.
+ fn to_op_for_read(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
+
+ /// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
+ fn to_op_for_proj(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
+
+ /// Creates this from an `OpTy`.
+ ///
+ /// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
+
+ /// Projects to the given enum variant.
+ fn project_downcast(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self>;
+
+ /// Projects to the n-th field.
+ fn project_field(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self>;
+}
+
+// We cannot have a general impl which shows that Value implies ValueMut. (When we do, it says we
+// cannot `impl ValueMut for PlaceTy` because some downstream crate could `impl Value for PlaceTy`.)
+// So we have some copy-paste here. (We could have a macro but since we only have 2 types with this
+// double-impl, that would barely make the code shorter, if at all.)
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::Provenance> {
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ #[inline(always)]
+ fn to_op_for_read(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.clone())
+ }
+
+ #[inline(always)]
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
+ op.clone()
+ }
+
+ #[inline(always)]
+ fn project_downcast(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.operand_downcast(self, variant)
+ }
+
+ #[inline(always)]
+ fn project_field(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.operand_field(self, field)
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
+ for OpTy<'tcx, M::Provenance>
+{
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ #[inline(always)]
+ fn to_op_for_read(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.clone())
+ }
+
+ #[inline(always)]
+ fn to_op_for_proj(
+ &self,
+ _ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.clone())
+ }
+
+ #[inline(always)]
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
+ op.clone()
+ }
+
+ #[inline(always)]
+ fn project_downcast(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.operand_downcast(self, variant)
+ }
+
+ #[inline(always)]
+ fn project_field(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.operand_field(self, field)
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
+ for MPlaceTy<'tcx, M::Provenance>
+{
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ #[inline(always)]
+ fn to_op_for_read(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.into())
+ }
+
+ #[inline(always)]
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
+ // assert is justified because our `to_op_for_read` only ever produces `Indirect` operands.
+ op.assert_mem_place()
+ }
+
+ #[inline(always)]
+ fn project_downcast(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.mplace_downcast(self, variant)
+ }
+
+ #[inline(always)]
+ fn project_field(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.mplace_field(self, field)
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
+ for MPlaceTy<'tcx, M::Provenance>
+{
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ #[inline(always)]
+ fn to_op_for_read(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.into())
+ }
+
+ #[inline(always)]
+ fn to_op_for_proj(
+ &self,
+ _ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.into())
+ }
+
+ #[inline(always)]
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
+ // assert is justified because our `to_op_for_proj` only ever produces `Indirect` operands.
+ op.assert_mem_place()
+ }
+
+ #[inline(always)]
+ fn project_downcast(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.mplace_downcast(self, variant)
+ }
+
+ #[inline(always)]
+ fn project_field(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.mplace_field(self, field)
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
+ for PlaceTy<'tcx, M::Provenance>
+{
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ #[inline(always)]
+ fn to_op_for_read(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ // We `force_allocation` here so that `from_op` below can work.
+ ecx.place_to_op(self)
+ }
+
+ #[inline(always)]
+ fn to_op_for_proj(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ // We `force_allocation` here so that `from_op` below can work.
+ Ok(ecx.force_allocation(self)?.into())
+ }
+
+ #[inline(always)]
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
+        // assert is justified because our `to_op_for_read`/`to_op_for_proj` only ever produce `Indirect` operands.
+ op.assert_mem_place().into()
+ }
+
+ #[inline(always)]
+ fn project_downcast(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.place_downcast(self, variant)
+ }
+
+ #[inline(always)]
+ fn project_field(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.place_field(self, field)
+ }
+}
+
+/// Generates a value-visitor trait from a single definition. The optional
+/// `$mutability` token (either `mut` or nothing) decides whether `ecx()` hands
+/// out a shared or a mutable reference to the interpreter context; the macro is
+/// invoked twice below to produce `ValueVisitor` and `MutValueVisitor`.
+macro_rules! make_value_visitor {
+    ($visitor_trait:ident, $value_trait:ident, $($mutability:ident)?) => {
+        // How to traverse a value and what to do when we are at the leaves.
+        pub trait $visitor_trait<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
+            type V: $value_trait<'mir, 'tcx, M>;
+
+            /// The visitor must have an `InterpCx` in it.
+            fn ecx(&$($mutability)? self)
+                -> &$($mutability)? InterpCx<'mir, 'tcx, M>;
+
+            /// `read_discriminant` can be hooked for better error messages.
+            #[inline(always)]
+            fn read_discriminant(
+                &mut self,
+                op: &OpTy<'tcx, M::Provenance>,
+            ) -> InterpResult<'tcx, VariantIdx> {
+                // Only the variant index is needed here; the discriminant value is dropped.
+                Ok(self.ecx().read_discriminant(op)?.1)
+            }
+
+            // Recursive actions, ready to be overloaded.
+            /// Visits the given value, dispatching as appropriate to more specialized visitors.
+            #[inline(always)]
+            fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
+            {
+                self.walk_value(v)
+            }
+            /// Visits the given value as a union. No automatic recursion can happen here.
+            #[inline(always)]
+            fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx>
+            {
+                Ok(())
+            }
+            /// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
+            /// The type of `v` will be a raw pointer, but this is a field of `Box<T>` and the
+            /// pointee type is the actual `T`.
+            #[inline(always)]
+            fn visit_box(&mut self, _v: &Self::V) -> InterpResult<'tcx>
+            {
+                Ok(())
+            }
+            /// Visits this value as an aggregate, you are getting an iterator yielding
+            /// all the fields (still in an `InterpResult`, you have to do error handling yourself).
+            /// Recurses into the fields.
+            #[inline(always)]
+            fn visit_aggregate(
+                &mut self,
+                v: &Self::V,
+                fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+            ) -> InterpResult<'tcx> {
+                self.walk_aggregate(v, fields)
+            }
+
+            /// Called each time we recurse down to a field of a "product-like" aggregate
+            /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
+            /// and new (inner) value.
+            /// This gives the visitor the chance to track the stack of nested fields that
+            /// we are descending through.
+            #[inline(always)]
+            fn visit_field(
+                &mut self,
+                _old_val: &Self::V,
+                _field: usize,
+                new_val: &Self::V,
+            ) -> InterpResult<'tcx> {
+                self.visit_value(new_val)
+            }
+            /// Called when recursing into an enum variant.
+            /// This gives the visitor the chance to track the stack of nested fields that
+            /// we are descending through.
+            #[inline(always)]
+            fn visit_variant(
+                &mut self,
+                _old_val: &Self::V,
+                _variant: VariantIdx,
+                new_val: &Self::V,
+            ) -> InterpResult<'tcx> {
+                self.visit_value(new_val)
+            }
+
+            // Default recursors. Not meant to be overloaded.
+            fn walk_aggregate(
+                &mut self,
+                v: &Self::V,
+                fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+            ) -> InterpResult<'tcx> {
+                // Now iterate over it.
+                for (idx, field_val) in fields.enumerate() {
+                    self.visit_field(v, idx, &field_val?)?;
+                }
+                Ok(())
+            }
+            // Dispatch order: (1) special-case types whose static layout is insufficient
+            // (trait objects, `Box`), (2) visit the fields per `FieldsShape`, and then
+            // (3) descend into the active variant for multi-variant layouts.
+            fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
+            {
+                let ty = v.layout().ty;
+                trace!("walk_value: type: {ty}");
+
+                // Special treatment for special types, where the (static) layout is not sufficient.
+                match *ty.kind() {
+                    // If it is a trait object, switch to the real type that was used to create it.
+                    ty::Dynamic(..) => {
+                        // unsized values are never immediate, so we can assert_mem_place
+                        let op = v.to_op_for_read(self.ecx())?;
+                        let dest = op.assert_mem_place();
+                        let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?;
+                        trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
+                        // recurse with the inner type
+                        return self.visit_field(&v, 0, &$value_trait::from_op(&inner_mplace.into()));
+                    },
+                    // Slices do not need special handling here: they have `Array` field
+                    // placement with length 0, so we enter the `Array` case below which
+                    // indirectly uses the metadata to determine the actual length.
+
+                    // However, `Box`... let's talk about `Box`.
+                    ty::Adt(def, ..) if def.is_box() => {
+                        // `Box` is a hybrid primitive-library-defined type that one the one hand is
+                        // a dereferenceable pointer, on the other hand has *basically arbitrary
+                        // user-defined layout* since the user controls the 'allocator' field. So it
+                        // cannot be treated like a normal pointer, since it does not fit into an
+                        // `Immediate`. Yeah, it is quite terrible. But many visitors want to do
+                        // something with "all boxed pointers", so we handle this mess for them.
+                        //
+                        // When we hit a `Box`, we do not do the usual `visit_aggregate`; instead,
+                        // we (a) call `visit_box` on the pointer value, and (b) recurse on the
+                        // allocator field. We also assert tons of things to ensure we do not miss
+                        // any other fields.
+
+                        // `Box` has two fields: the pointer we care about, and the allocator.
+                        assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields");
+                        let (unique_ptr, alloc) =
+                            (v.project_field(self.ecx(), 0)?, v.project_field(self.ecx(), 1)?);
+                        // Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`...
+                        // (which means another 2 fields, the second of which is a `PhantomData`)
+                        assert_eq!(unique_ptr.layout().fields.count(), 2);
+                        let (nonnull_ptr, phantom) = (
+                            unique_ptr.project_field(self.ecx(), 0)?,
+                            unique_ptr.project_field(self.ecx(), 1)?,
+                        );
+                        assert!(
+                            phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
+                            "2nd field of `Unique` should be PhantomData but is {:?}",
+                            phantom.layout().ty,
+                        );
+                        // ... that contains a `NonNull`... (gladly, only a single field here)
+                        assert_eq!(nonnull_ptr.layout().fields.count(), 1);
+                        let raw_ptr = nonnull_ptr.project_field(self.ecx(), 0)?; // the actual raw ptr
+                        // ... whose only field finally is a raw ptr we can dereference.
+                        self.visit_box(&raw_ptr)?;
+
+                        // The second `Box` field is the allocator, which we recursively check for validity
+                        // like in regular structs.
+                        self.visit_field(v, 1, &alloc)?;
+
+                        // We visited all parts of this one.
+                        return Ok(());
+                    }
+                    _ => {},
+                };
+
+                // Visit the fields of this value.
+                match v.layout().fields {
+                    // Primitives have no fields; nothing to do here.
+                    FieldsShape::Primitive => {}
+                    FieldsShape::Union(fields) => {
+                        // Unions cannot be recursed into generically; hand control to the visitor.
+                        self.visit_union(v, fields)?;
+                    }
+                    FieldsShape::Arbitrary { ref offsets, .. } => {
+                        // FIXME: We collect in a vec because otherwise there are lifetime
+                        // errors: Projecting to a field needs access to `ecx`.
+                        let fields: Vec<InterpResult<'tcx, Self::V>> =
+                            (0..offsets.len()).map(|i| {
+                                v.project_field(self.ecx(), i)
+                            })
+                            .collect();
+                        self.visit_aggregate(v, fields.into_iter())?;
+                    }
+                    FieldsShape::Array { .. } => {
+                        // Let's get an mplace (or immediate) first.
+                        // This might `force_allocate` if `v` is a `PlaceTy`, but `place_index` does that anyway.
+                        let op = v.to_op_for_proj(self.ecx())?;
+                        // Now we can go over all the fields.
+                        // This uses the *run-time length*, i.e., if we are a slice,
+                        // the dynamic info from the metadata is used.
+                        let iter = self.ecx().operand_array_fields(&op)?
+                            .map(|f| f.and_then(|f| {
+                                Ok($value_trait::from_op(&f))
+                            }));
+                        self.visit_aggregate(v, iter)?;
+                    }
+                }
+
+                match v.layout().variants {
+                    // If this is a multi-variant layout, find the right variant and proceed
+                    // with *its* fields.
+                    Variants::Multiple { .. } => {
+                        let op = v.to_op_for_read(self.ecx())?;
+                        let idx = self.read_discriminant(&op)?;
+                        let inner = v.project_downcast(self.ecx(), idx)?;
+                        trace!("walk_value: variant layout: {:#?}", inner.layout());
+                        // recurse with the inner type
+                        self.visit_variant(v, idx, &inner)
+                    }
+                    // For single-variant layouts, we already did anything there is to do.
+                    Variants::Single { .. } => Ok(())
+                }
+            }
+        }
+    }
+}
+
+// Instantiate the visitor machinery twice: a by-shared-ref visitor over `Value`,
+// and a by-mutable-ref visitor over `ValueMut` (the trailing `mut` token fills
+// the macro's `$mutability` slot).
+make_value_visitor!(ValueVisitor, Value,);
+make_value_visitor!(MutValueVisitor, ValueMut, mut);
diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs
new file mode 100644
index 000000000..72ac6af68
--- /dev/null
+++ b/compiler/rustc_const_eval/src/lib.rs
@@ -0,0 +1,68 @@
+/*!
+
+Rust MIR: a lowered representation of Rust.
+
+*/
+
+#![feature(assert_matches)]
+#![feature(box_patterns)]
+#![feature(control_flow_enum)]
+#![feature(decl_macro)]
+#![feature(exact_size_is_empty)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(map_try_insert)]
+#![feature(min_specialization)]
+#![feature(slice_ptr_get)]
+#![feature(option_get_or_insert_default)]
+#![feature(never_type)]
+#![feature(trait_alias)]
+#![feature(trusted_len)]
+#![feature(trusted_step)]
+#![feature(try_blocks)]
+#![feature(yeet_expr)]
+#![feature(is_some_with)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+pub mod const_eval;
+mod errors;
+pub mod interpret;
+mod might_permit_raw_init;
+pub mod transform;
+pub mod util;
+
+use rustc_middle::ty;
+use rustc_middle::ty::query::Providers;
+use rustc_target::abi::InitKind;
+
+/// Hooks this crate's implementations into the query system.
+///
+/// Fills in the const-eval related entries of `providers`; further entries are
+/// installed by `const_eval::provide`.
+pub fn provide(providers: &mut Providers) {
+    const_eval::provide(providers);
+    providers.eval_to_const_value_raw = const_eval::eval_to_const_value_raw_provider;
+    providers.eval_to_allocation_raw = const_eval::eval_to_allocation_raw_provider;
+    providers.const_caller_location = const_eval::const_caller_location;
+    providers.permits_uninit_init =
+        |tcx, ty| might_permit_raw_init::might_permit_raw_init(tcx, ty, InitKind::Uninit);
+    providers.permits_zero_init =
+        |tcx, ty| might_permit_raw_init::might_permit_raw_init(tcx, ty, InitKind::Zero);
+    providers.eval_to_valtree = |tcx, key| {
+        let (param_env, raw) = key.into_parts();
+        const_eval::eval_to_valtree(tcx, param_env, raw)
+    };
+    // Valtree-to-constval conversion does not depend on the environment.
+    providers.valtree_to_const_val = |tcx, (ty, valtree)| {
+        const_eval::valtree_to_const_value(tcx, ty::ParamEnv::empty().and(ty), valtree)
+    };
+    providers.try_destructure_mir_constant = |tcx, key| {
+        let (param_env, value) = key.into_parts();
+        // Destructuring may fail; the query result is an `Option`.
+        const_eval::try_destructure_mir_constant(tcx, param_env, value).ok()
+    };
+    providers.deref_mir_constant = |tcx, key| {
+        let (param_env, value) = key.into_parts();
+        const_eval::deref_mir_constant(tcx, param_env, value)
+    };
+}
diff --git a/compiler/rustc_const_eval/src/might_permit_raw_init.rs b/compiler/rustc_const_eval/src/might_permit_raw_init.rs
new file mode 100644
index 000000000..f971c2238
--- /dev/null
+++ b/compiler/rustc_const_eval/src/might_permit_raw_init.rs
@@ -0,0 +1,40 @@
+use crate::const_eval::CompileTimeInterpreter;
+use crate::interpret::{InterpCx, MemoryKind, OpTy};
+use rustc_middle::ty::layout::LayoutCx;
+use rustc_middle::ty::{layout::TyAndLayout, ParamEnv, TyCtxt};
+use rustc_session::Limit;
+use rustc_target::abi::InitKind;
+
+/// Implements the `permits_uninit_init` / `permits_zero_init` queries: may a
+/// value of type `ty` be left uninitialized (resp. be all-zero bytes)?
+///
+/// Without `-Zstrict-init-checks` this is a cheap layout-based heuristic; with
+/// it, a value is actually materialized in the CTFE machine and run through the
+/// full validity check.
+pub fn might_permit_raw_init<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    ty: TyAndLayout<'tcx>,
+    kind: InitKind,
+) -> bool {
+    if !tcx.sess.opts.unstable_opts.strict_init_checks {
+        // Fast path: purely layout-based reasoning, no interpreter involved.
+        let layout_cx = LayoutCx { tcx, param_env: ParamEnv::reveal_all() };
+        return ty.might_permit_raw_init(&layout_cx, kind);
+    }
+
+    // Strict mode: build an actual value of this layout and validate it.
+    let machine = CompileTimeInterpreter::new(Limit::new(0), false);
+    let mut ecx = InterpCx::new(tcx, rustc_span::DUMMY_SP, ParamEnv::reveal_all(), machine);
+
+    let allocated = ecx
+        .allocate(ty, MemoryKind::Machine(crate::const_eval::MemoryKind::Heap))
+        .expect("OOM: failed to allocate for uninit check");
+
+    // For the zero-init check, fill the fresh allocation with zero bytes first;
+    // for the uninit check it is validated exactly as allocated.
+    if kind == InitKind::Zero {
+        let zeroes = std::iter::repeat(0_u8).take(ty.layout.size().bytes_usize());
+        ecx.write_bytes_ptr(allocated.ptr, zeroes)
+            .expect("failed to write bytes for zero valid check");
+    }
+
+    let op: OpTy<'_, _> = allocated.into();
+
+    // Assume that if it failed, it's a validation failure.
+    ecx.validate_operand(&op).is_ok()
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
new file mode 100644
index 000000000..0adb88a18
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -0,0 +1,1032 @@
+//! The `Visitor` responsible for actually checking a `mir::Body` for invalid operations.
+
+use rustc_errors::{Diagnostic, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_index::bit_set::BitSet;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
+use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
+use rustc_middle::ty::{self, adjustment::PointerCast, Instance, InstanceDef, Ty, TyCtxt};
+use rustc_middle::ty::{Binder, TraitPredicate, TraitRef, TypeVisitable};
+use rustc_mir_dataflow::{self, Analysis};
+use rustc_span::{sym, Span, Symbol};
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
+use rustc_trait_selection::traits::SelectionContext;
+
+use std::mem;
+use std::ops::Deref;
+
+use super::ops::{self, NonConstOp, Status};
+use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop, NeedsNonConstDrop};
+use super::resolver::FlowSensitiveAnalysis;
+use super::{ConstCx, Qualif};
+use crate::const_eval::is_unstable_const_fn;
+use crate::errors::UnstableInStable;
+
+type QualifResults<'mir, 'tcx, Q> =
+ rustc_mir_dataflow::ResultsCursor<'mir, 'tcx, FlowSensitiveAnalysis<'mir, 'mir, 'tcx, Q>>;
+
+/// Lazily-built dataflow cursors for the qualif analyses over one body.
+/// Each cursor is only constructed the first time the corresponding
+/// method on `Qualifs` is called, and then cached for later lookups.
+#[derive(Default)]
+pub struct Qualifs<'mir, 'tcx> {
+    // Cursor for the `HasMutInterior` flow-sensitive analysis.
+    has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>,
+    // Cursor for the `NeedsDrop` flow-sensitive analysis.
+    needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>,
+    // Cursor for the `NeedsNonConstDrop` flow-sensitive analysis.
+    needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>,
+}
+
+impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
+    /// Returns `true` if `local` is `NeedsDrop` at the given `Location`.
+    ///
+    /// Only updates the cursor if absolutely necessary
+    pub fn needs_drop(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        local: Local,
+        location: Location,
+    ) -> bool {
+        let ty = ccx.body.local_decls[local].ty;
+        // Peeking into opaque types causes cycles if the current function declares said opaque
+        // type. Thus we avoid short circuiting on the type and instead run the more expensive
+        // analysis that looks at the actual usage within this function
+        if !ty.has_opaque_types() && !NeedsDrop::in_any_value_of_ty(ccx, ty) {
+            return false;
+        }
+
+        // Build the dataflow cursor on first use; later calls reuse it.
+        let needs_drop = self.needs_drop.get_or_insert_with(|| {
+            let ConstCx { tcx, body, .. } = *ccx;
+
+            FlowSensitiveAnalysis::new(NeedsDrop, ccx)
+                .into_engine(tcx, &body)
+                .iterate_to_fixpoint()
+                .into_results_cursor(&body)
+        });
+
+        needs_drop.seek_before_primary_effect(location);
+        needs_drop.get().contains(local)
+    }
+
+    /// Returns `true` if `local` is `NeedsNonConstDrop` at the given `Location`.
+    ///
+    /// Only updates the cursor if absolutely necessary
+    pub fn needs_non_const_drop(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        local: Local,
+        location: Location,
+    ) -> bool {
+        // Cheap type-based short-circuit before running any dataflow.
+        let ty = ccx.body.local_decls[local].ty;
+        if !NeedsNonConstDrop::in_any_value_of_ty(ccx, ty) {
+            return false;
+        }
+
+        // Build the dataflow cursor on first use; later calls reuse it.
+        let needs_non_const_drop = self.needs_non_const_drop.get_or_insert_with(|| {
+            let ConstCx { tcx, body, .. } = *ccx;
+
+            FlowSensitiveAnalysis::new(NeedsNonConstDrop, ccx)
+                .into_engine(tcx, &body)
+                .iterate_to_fixpoint()
+                .into_results_cursor(&body)
+        });
+
+        needs_non_const_drop.seek_before_primary_effect(location);
+        needs_non_const_drop.get().contains(local)
+    }
+
+    /// Returns `true` if `local` is `HasMutInterior` at the given `Location`.
+    ///
+    /// Only updates the cursor if absolutely necessary.
+    pub fn has_mut_interior(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        local: Local,
+        location: Location,
+    ) -> bool {
+        let ty = ccx.body.local_decls[local].ty;
+        // Peeking into opaque types causes cycles if the current function declares said opaque
+        // type. Thus we avoid short circuiting on the type and instead run the more expensive
+        // analysis that looks at the actual usage within this function
+        if !ty.has_opaque_types() && !HasMutInterior::in_any_value_of_ty(ccx, ty) {
+            return false;
+        }
+
+        // Build the dataflow cursor on first use; later calls reuse it.
+        let has_mut_interior = self.has_mut_interior.get_or_insert_with(|| {
+            let ConstCx { tcx, body, .. } = *ccx;
+
+            FlowSensitiveAnalysis::new(HasMutInterior, ccx)
+                .into_engine(tcx, &body)
+                .iterate_to_fixpoint()
+                .into_results_cursor(&body)
+        });
+
+        has_mut_interior.seek_before_primary_effect(location);
+        has_mut_interior.get().contains(local)
+    }
+
+    /// Computes the qualifs of the value sitting in the return place when the body
+    /// reaches its `Return` terminator.
+    fn in_return_place(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        tainted_by_errors: Option<ErrorGuaranteed>,
+    ) -> ConstQualifs {
+        // Find the `Return` terminator if one exists.
+        //
+        // If no `Return` terminator exists, this MIR is divergent. Just return the conservative
+        // qualifs for the return type.
+        let return_block = ccx
+            .body
+            .basic_blocks()
+            .iter_enumerated()
+            .find(|(_, block)| matches!(block.terminator().kind, TerminatorKind::Return))
+            .map(|(bb, _)| bb);
+
+        let Some(return_block) = return_block else {
+            return qualifs::in_any_value_of_ty(ccx, ccx.body.return_ty(), tainted_by_errors);
+        };
+
+        let return_loc = ccx.body.terminator_loc(return_block);
+
+        let custom_eq = match ccx.const_kind() {
+            // We don't care whether a `const fn` returns a value that is not structurally
+            // matchable. Functions calls are opaque and always use type-based qualification, so
+            // this value should never be used.
+            hir::ConstContext::ConstFn => true,
+
+            // If we know that all values of the return type are structurally matchable, there's no
+            // need to run dataflow.
+            // Opaque types do not participate in const generics or pattern matching, so we can safely count them out.
+            _ if ccx.body.return_ty().has_opaque_types()
+                || !CustomEq::in_any_value_of_ty(ccx, ccx.body.return_ty()) =>
+            {
+                false
+            }
+
+            hir::ConstContext::Const | hir::ConstContext::Static(_) => {
+                // Unlike the cursors above, `CustomEq` is only ever queried at the
+                // return location, so a one-shot cursor (not cached on `self`) suffices.
+                let mut cursor = FlowSensitiveAnalysis::new(CustomEq, ccx)
+                    .into_engine(ccx.tcx, &ccx.body)
+                    .iterate_to_fixpoint()
+                    .into_results_cursor(&ccx.body);
+
+                cursor.seek_after_primary_effect(return_loc);
+                cursor.get().contains(RETURN_PLACE)
+            }
+        };
+
+        ConstQualifs {
+            needs_drop: self.needs_drop(ccx, RETURN_PLACE, return_loc),
+            needs_non_const_drop: self.needs_non_const_drop(ccx, RETURN_PLACE, return_loc),
+            has_mut_interior: self.has_mut_interior(ccx, RETURN_PLACE, return_loc),
+            custom_eq,
+            tainted_by_errors,
+        }
+    }
+}
+
+/// The MIR visitor that performs structural const-checking of a single body.
+pub struct Checker<'mir, 'tcx> {
+    /// The const-checking context (body, tcx, param_env, ...); also reachable via `Deref`.
+    ccx: &'mir ConstCx<'mir, 'tcx>,
+    /// Lazily-computed dataflow qualifs for this body.
+    qualifs: Qualifs<'mir, 'tcx>,
+
+    /// The span of the current statement.
+    span: Span,
+
+    /// A set that stores for each local whether it has a `StorageDead` for it somewhere.
+    local_has_storage_dead: Option<BitSet<Local>>,
+
+    /// Set once a "primary" error has been emitted; see `check_body`, which only
+    /// flushes `secondary_errors` when this is still `None`.
+    error_emitted: Option<ErrorGuaranteed>,
+    /// Buffered "secondary" diagnostics, emitted at the end of `check_body` only
+    /// if no primary error occurred.
+    secondary_errors: Vec<Diagnostic>,
+}
+
+impl<'mir, 'tcx> Deref for Checker<'mir, 'tcx> {
+    type Target = ConstCx<'mir, 'tcx>;
+
+    /// Let the checker transparently expose the fields and methods of its
+    /// `ConstCx` (e.g. `self.tcx`, `self.body`, `self.const_kind()`).
+    fn deref(&self) -> &Self::Target {
+        // `ccx` already is a `&'mir ConstCx`, which outlives the borrow of `self`,
+        // so it can be returned directly (the original spelled this `&self.ccx`
+        // and relied on auto-deref of the resulting `&&ConstCx`).
+        self.ccx
+    }
+}
+
+impl<'mir, 'tcx> Checker<'mir, 'tcx> {
+    /// Creates a checker for the body in `ccx`; `span` starts out as the whole body's span.
+    pub fn new(ccx: &'mir ConstCx<'mir, 'tcx>) -> Self {
+        Checker {
+            span: ccx.body.span,
+            ccx,
+            qualifs: Default::default(),
+            local_has_storage_dead: None,
+            error_emitted: None,
+            secondary_errors: Vec::new(),
+        }
+    }
+
+    /// Main entry point: const-checks the entire body, emitting diagnostics as it goes.
+    pub fn check_body(&mut self) {
+        let ConstCx { tcx, body, .. } = *self.ccx;
+        let def_id = self.ccx.def_id();
+
+        // `async` functions cannot be `const fn`. This is checked during AST lowering, so there's
+        // no need to emit duplicate errors here.
+        if self.ccx.is_async() || body.generator.is_some() {
+            tcx.sess.delay_span_bug(body.span, "`async` functions cannot be `const fn`");
+            return;
+        }
+
+        // The local type and predicate checks are not free and only relevant for `const fn`s.
+        if self.const_kind() == hir::ConstContext::ConstFn {
+            for (idx, local) in body.local_decls.iter_enumerated() {
+                // Handle the return place below.
+                if idx == RETURN_PLACE || local.internal {
+                    continue;
+                }
+
+                // Point diagnostics at the local's declaration.
+                self.span = local.source_info.span;
+                self.check_local_or_return_ty(local.ty, idx);
+            }
+
+            // impl trait is gone in MIR, so check the return type of a const fn by its signature
+            // instead of the type of the return place.
+            self.span = body.local_decls[RETURN_PLACE].source_info.span;
+            let return_ty = tcx.fn_sig(def_id).output();
+            self.check_local_or_return_ty(return_ty.skip_binder(), RETURN_PLACE);
+        }
+
+        // `#[rustc_do_not_const_check]` skips the statement/terminator checks entirely.
+        if !tcx.has_attr(def_id.to_def_id(), sym::rustc_do_not_const_check) {
+            self.visit_body(&body);
+        }
+
+        // If we got through const-checking without emitting any "primary" errors, emit any
+        // "secondary" errors if they occurred.
+        let secondary_errors = mem::take(&mut self.secondary_errors);
+        if self.error_emitted.is_none() {
+            for mut error in secondary_errors {
+                self.tcx.sess.diagnostic().emit_diagnostic(&mut error);
+            }
+        } else {
+            // A primary error was emitted, so it is fine to drop the secondary ones.
+            assert!(self.tcx.sess.has_errors().is_some());
+        }
+    }
+
+    /// Returns `true` if `local` has a `StorageDead` statement anywhere in the body.
+    /// The per-body set is computed lazily on first call and then cached.
+    fn local_has_storage_dead(&mut self, local: Local) -> bool {
+        let ccx = self.ccx;
+        self.local_has_storage_dead
+            .get_or_insert_with(|| {
+                // A tiny MIR visitor recording every local mentioned in a `StorageDead`.
+                struct StorageDeads {
+                    locals: BitSet<Local>,
+                }
+                impl<'tcx> Visitor<'tcx> for StorageDeads {
+                    fn visit_statement(&mut self, stmt: &Statement<'tcx>, _: Location) {
+                        if let StatementKind::StorageDead(l) = stmt.kind {
+                            self.locals.insert(l);
+                        }
+                    }
+                }
+                let mut v = StorageDeads { locals: BitSet::new_empty(ccx.body.local_decls.len()) };
+                v.visit_body(ccx.body);
+                v.locals
+            })
+            .contains(local)
+    }
+
+    /// Computes the qualifs of the value in the return place
+    /// (see `Qualifs::in_return_place`).
+    pub fn qualifs_in_return_place(&mut self) -> ConstQualifs {
+        self.qualifs.in_return_place(self.ccx, self.error_emitted)
+    }
+
+    /// Emits an error if an expression cannot be evaluated in the current context.
+    pub fn check_op(&mut self, op: impl NonConstOp<'tcx>) {
+        self.check_op_spanned(op, self.span);
+    }
+
+    /// Emits an error at the given `span` if an expression cannot be evaluated in the current
+    /// context.
+    pub fn check_op_spanned<O: NonConstOp<'tcx>>(&mut self, op: O, span: Span) {
+        // `gate` ends up as `Some(feature)` for an unstable op whose gate is off,
+        // and `None` for an outright forbidden op.
+        let gate = match op.status_in_item(self.ccx) {
+            Status::Allowed => return,
+
+            Status::Unstable(gate) if self.tcx.features().enabled(gate) => {
+                // The gate is enabled, so the op is fine -- unless this is a const-stable
+                // function using the gate without the explicit opt-in attribute.
+                let unstable_in_stable = self.ccx.is_const_stable_const_fn()
+                    && !super::rustc_allow_const_fn_unstable(self.tcx, self.def_id(), gate);
+                if unstable_in_stable {
+                    emit_unstable_in_stable_error(self.ccx, span, gate);
+                }
+
+                return;
+            }
+
+            Status::Unstable(gate) => Some(gate),
+            Status::Forbidden => None,
+        };
+
+        // `-Zunleash-the-miri-inside-of-you` downgrades everything to a warning.
+        if self.tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you {
+            self.tcx.sess.miri_unleashed_feature(span, gate);
+            return;
+        }
+
+        let mut err = op.build_error(self.ccx, span);
+        assert!(err.is_error());
+
+        match op.importance() {
+            ops::DiagnosticImportance::Primary => {
+                let reported = err.emit();
+                self.error_emitted = Some(reported);
+            }
+
+            // Secondary errors are buffered and only flushed at the end of `check_body`
+            // if no primary error was emitted.
+            ops::DiagnosticImportance::Secondary => err.buffer(&mut self.secondary_errors),
+        }
+    }
+
+    /// Checks an access to the given static. Thread-local statics are only
+    /// delayed-bugged here; they are properly rejected at `Rvalue::ThreadLocalRef`.
+    fn check_static(&mut self, def_id: DefId, span: Span) {
+        if self.tcx.is_thread_local_static(def_id) {
+            self.tcx.sess.delay_span_bug(span, "tls access is checked in `Rvalue::ThreadLocalRef");
+        }
+        self.check_op_spanned(ops::StaticAccess, span)
+    }
+
+    /// Walks `ty` and flags any `&mut` reference appearing anywhere inside the
+    /// type of a `const fn` local, argument, or return place.
+    fn check_local_or_return_ty(&mut self, ty: Ty<'tcx>, local: Local) {
+        let kind = self.body.local_kind(local);
+
+        for ty in ty.walk() {
+            let ty = match ty.unpack() {
+                GenericArgKind::Type(ty) => ty,
+
+                // No constraints on lifetimes or constants, except potentially
+                // constants' types, but `walk` will get to them as well.
+                GenericArgKind::Lifetime(_) | GenericArgKind::Const(_) => continue,
+            };
+
+            match *ty.kind() {
+                ty::Ref(_, _, hir::Mutability::Mut) => self.check_op(ops::ty::MutRef(kind)),
+                _ => {}
+            }
+        }
+    }
+
+    /// Checks a mutable borrow (`&mut` or `&raw mut`) of `local`, choosing between
+    /// the transient and non-transient diagnostics.
+    fn check_mut_borrow(&mut self, local: Local, kind: hir::BorrowKind) {
+        match self.const_kind() {
+            // In a const fn all borrows are transient or point to the places given via
+            // references in the arguments (so we already checked them with
+            // TransientMutBorrow/MutBorrow as appropriate).
+            // The borrow checker guarantees that no new non-transient borrows are created.
+            // NOTE: Once we have heap allocations during CTFE we need to figure out
+            // how to prevent `const fn` to create long-lived allocations that point
+            // to mutable memory.
+            hir::ConstContext::ConstFn => self.check_op(ops::TransientMutBorrow(kind)),
+            _ => {
+                // Locals with StorageDead do not live beyond the evaluation and can
+                // thus safely be borrowed without being able to be leaked to the final
+                // value of the constant.
+                if self.local_has_storage_dead(local) {
+                    self.check_op(ops::TransientMutBorrow(kind));
+                } else {
+                    self.check_op(ops::MutBorrow(kind));
+                }
+            }
+        }
+    }
+}
+
+impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
+ fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &BasicBlockData<'tcx>) {
+ trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);
+
+ // We don't const-check basic blocks on the cleanup path since we never unwind during
+ // const-eval: a panic causes an immediate compile error. In other words, cleanup blocks
+ // are unreachable during const-eval.
+ //
+ // We can't be more conservative (e.g., by const-checking cleanup blocks anyways) because
+ // locals that would never be dropped during normal execution are sometimes dropped during
+ // unwinding, which means backwards-incompatible live-drop errors.
+ if block.is_cleanup {
+ return;
+ }
+
+ self.super_basic_block_data(bb, block);
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+ trace!("visit_rvalue: rvalue={:?} location={:?}", rvalue, location);
+
+ // Special-case reborrows to be more like a copy of a reference.
+ match *rvalue {
+ Rvalue::Ref(_, kind, place) => {
+ if let Some(reborrowed_place_ref) = place_as_reborrow(self.tcx, self.body, place) {
+ let ctx = match kind {
+ BorrowKind::Shared => {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow)
+ }
+ BorrowKind::Shallow => {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow)
+ }
+ BorrowKind::Unique => {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::UniqueBorrow)
+ }
+ BorrowKind::Mut { .. } => {
+ PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+ }
+ };
+ self.visit_local(reborrowed_place_ref.local, ctx, location);
+ self.visit_projection(reborrowed_place_ref, ctx, location);
+ return;
+ }
+ }
+ Rvalue::AddressOf(mutbl, place) => {
+ if let Some(reborrowed_place_ref) = place_as_reborrow(self.tcx, self.body, place) {
+ let ctx = match mutbl {
+ Mutability::Not => {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::AddressOf)
+ }
+ Mutability::Mut => PlaceContext::MutatingUse(MutatingUseContext::AddressOf),
+ };
+ self.visit_local(reborrowed_place_ref.local, ctx, location);
+ self.visit_projection(reborrowed_place_ref, ctx, location);
+ return;
+ }
+ }
+ _ => {}
+ }
+
+ self.super_rvalue(rvalue, location);
+
+ match *rvalue {
+ Rvalue::ThreadLocalRef(_) => self.check_op(ops::ThreadLocalAccess),
+
+ Rvalue::Use(_)
+ | Rvalue::CopyForDeref(..)
+ | Rvalue::Repeat(..)
+ | Rvalue::Discriminant(..)
+ | Rvalue::Len(_)
+ | Rvalue::Aggregate(..) => {}
+
+ Rvalue::Ref(_, kind @ BorrowKind::Mut { .. }, ref place)
+ | Rvalue::Ref(_, kind @ BorrowKind::Unique, ref place) => {
+ let ty = place.ty(self.body, self.tcx).ty;
+ let is_allowed = match ty.kind() {
+ // Inside a `static mut`, `&mut [...]` is allowed.
+ ty::Array(..) | ty::Slice(_)
+ if self.const_kind() == hir::ConstContext::Static(hir::Mutability::Mut) =>
+ {
+ true
+ }
+
+ // FIXME(ecstaticmorse): We could allow `&mut []` inside a const context given
+ // that this is merely a ZST and it is already eligible for promotion.
+ // This may require an RFC?
+ /*
+ ty::Array(_, len) if len.try_eval_usize(cx.tcx, cx.param_env) == Some(0)
+ => true,
+ */
+ _ => false,
+ };
+
+ if !is_allowed {
+ if let BorrowKind::Mut { .. } = kind {
+ self.check_mut_borrow(place.local, hir::BorrowKind::Ref)
+ } else {
+ self.check_op(ops::CellBorrow);
+ }
+ }
+ }
+
+ Rvalue::AddressOf(Mutability::Mut, ref place) => {
+ self.check_mut_borrow(place.local, hir::BorrowKind::Raw)
+ }
+
+ Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Shallow, ref place)
+ | Rvalue::AddressOf(Mutability::Not, ref place) => {
+ let borrowed_place_has_mut_interior = qualifs::in_place::<HasMutInterior, _>(
+ &self.ccx,
+ &mut |local| self.qualifs.has_mut_interior(self.ccx, local, location),
+ place.as_ref(),
+ );
+
+ if borrowed_place_has_mut_interior {
+ match self.const_kind() {
+ // In a const fn all borrows are transient or point to the places given via
+ // references in the arguments (so we already checked them with
+ // TransientCellBorrow/CellBorrow as appropriate).
+ // The borrow checker guarantees that no new non-transient borrows are created.
+ // NOTE: Once we have heap allocations during CTFE we need to figure out
+ // how to prevent `const fn` to create long-lived allocations that point
+ // to (interior) mutable memory.
+ hir::ConstContext::ConstFn => self.check_op(ops::TransientCellBorrow),
+ _ => {
+ // Locals with StorageDead are definitely not part of the final constant value, and
+ // it is thus inherently safe to permit such locals to have their
+ // address taken as we can't end up with a reference to them in the
+ // final value.
+ // Note: This is only sound if every local that has a `StorageDead` has a
+ // `StorageDead` in every control flow path leading to a `return` terminator.
+ if self.local_has_storage_dead(place.local) {
+ self.check_op(ops::TransientCellBorrow);
+ } else {
+ self.check_op(ops::CellBorrow);
+ }
+ }
+ }
+ }
+ }
+
+ Rvalue::Cast(
+ CastKind::Pointer(
+ PointerCast::MutToConstPointer
+ | PointerCast::ArrayToPointer
+ | PointerCast::UnsafeFnPointer
+ | PointerCast::ClosureFnPointer(_)
+ | PointerCast::ReifyFnPointer,
+ ),
+ _,
+ _,
+ ) => {
+ // These are all okay; they only change the type, not the data.
+ }
+
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), _, _) => {
+ // Unsizing is implemented for CTFE.
+ }
+
+ Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => {
+ self.check_op(ops::RawPtrToIntCast);
+ }
+ Rvalue::Cast(CastKind::PointerFromExposedAddress, _, _) => {
+ // Since no pointer can ever get exposed (rejected above), this is easy to support.
+ }
+
+ Rvalue::Cast(CastKind::Misc, _, _) => {}
+
+ Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf, _) => {}
+ Rvalue::ShallowInitBox(_, _) => {}
+
+ Rvalue::UnaryOp(_, ref operand) => {
+ let ty = operand.ty(self.body, self.tcx);
+ if is_int_bool_or_char(ty) {
+ // Int, bool, and char operations are fine.
+ } else if ty.is_floating_point() {
+ self.check_op(ops::FloatingPointOp);
+ } else {
+ span_bug!(self.span, "non-primitive type in `Rvalue::UnaryOp`: {:?}", ty);
+ }
+ }
+
+ Rvalue::BinaryOp(op, box (ref lhs, ref rhs))
+ | Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
+ let lhs_ty = lhs.ty(self.body, self.tcx);
+ let rhs_ty = rhs.ty(self.body, self.tcx);
+
+ if is_int_bool_or_char(lhs_ty) && is_int_bool_or_char(rhs_ty) {
+ // Int, bool, and char operations are fine.
+ } else if lhs_ty.is_fn_ptr() || lhs_ty.is_unsafe_ptr() {
+ assert_eq!(lhs_ty, rhs_ty);
+ assert!(
+ op == BinOp::Eq
+ || op == BinOp::Ne
+ || op == BinOp::Le
+ || op == BinOp::Lt
+ || op == BinOp::Ge
+ || op == BinOp::Gt
+ || op == BinOp::Offset
+ );
+
+ self.check_op(ops::RawPtrComparison);
+ } else if lhs_ty.is_floating_point() || rhs_ty.is_floating_point() {
+ self.check_op(ops::FloatingPointOp);
+ } else {
+ span_bug!(
+ self.span,
+ "non-primitive type in `Rvalue::BinaryOp`: {:?} ⚬ {:?}",
+ lhs_ty,
+ rhs_ty
+ );
+ }
+ }
+ }
+ }
+
+ fn visit_operand(&mut self, op: &Operand<'tcx>, location: Location) {
+ self.super_operand(op, location);
+ if let Operand::Constant(c) = op {
+ if let Some(def_id) = c.check_static_ptr(self.tcx) {
+ self.check_static(def_id, self.span);
+ }
+ }
+ }
+ fn visit_projection_elem(
+ &mut self,
+ place_local: Local,
+ proj_base: &[PlaceElem<'tcx>],
+ elem: PlaceElem<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ trace!(
+ "visit_projection_elem: place_local={:?} proj_base={:?} elem={:?} \
+ context={:?} location={:?}",
+ place_local,
+ proj_base,
+ elem,
+ context,
+ location,
+ );
+
+ self.super_projection_elem(place_local, proj_base, elem, context, location);
+
+ match elem {
+ ProjectionElem::Deref => {
+ let base_ty = Place::ty_from(place_local, proj_base, self.body, self.tcx).ty;
+ if base_ty.is_unsafe_ptr() {
+ if proj_base.is_empty() {
+ let decl = &self.body.local_decls[place_local];
+ if let Some(box LocalInfo::StaticRef { def_id, .. }) = decl.local_info {
+ let span = decl.source_info.span;
+ self.check_static(def_id, span);
+ return;
+ }
+ }
+
+ // `*const T` is stable, `*mut T` is not
+ if !base_ty.is_mutable_ptr() {
+ return;
+ }
+
+ self.check_op(ops::RawMutPtrDeref);
+ }
+
+ if context.is_mutating_use() {
+ self.check_op(ops::MutDeref);
+ }
+ }
+
+ ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Downcast(..)
+ | ProjectionElem::Subslice { .. }
+ | ProjectionElem::Field(..)
+ | ProjectionElem::Index(_) => {}
+ }
+ }
+
+ fn visit_source_info(&mut self, source_info: &SourceInfo) {
+ trace!("visit_source_info: source_info={:?}", source_info);
+ self.span = source_info.span;
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ trace!("visit_statement: statement={:?} location={:?}", statement, location);
+
+ self.super_statement(statement, location);
+
+ match statement.kind {
+ StatementKind::Assign(..)
+ | StatementKind::SetDiscriminant { .. }
+ | StatementKind::Deinit(..)
+ | StatementKind::FakeRead(..)
+ | StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Retag { .. }
+ | StatementKind::AscribeUserType(..)
+ | StatementKind::Coverage(..)
+ | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Nop => {}
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ self.super_terminator(terminator, location);
+
+ match &terminator.kind {
+ TerminatorKind::Call { func, args, fn_span, from_hir_call, .. } => {
+ let ConstCx { tcx, body, param_env, .. } = *self.ccx;
+ let caller = self.def_id();
+
+ let fn_ty = func.ty(body, tcx);
+
+ let (mut callee, mut substs) = match *fn_ty.kind() {
+ ty::FnDef(def_id, substs) => (def_id, substs),
+
+ ty::FnPtr(_) => {
+ self.check_op(ops::FnCallIndirect);
+ return;
+ }
+ _ => {
+ span_bug!(terminator.source_info.span, "invalid callee of type {:?}", fn_ty)
+ }
+ };
+
+ // Attempting to call a trait method?
+ if let Some(trait_id) = tcx.trait_of_item(callee) {
+ trace!("attempting to call a trait method");
+ if !self.tcx.features().const_trait_impl {
+ self.check_op(ops::FnCallNonConst {
+ caller,
+ callee,
+ substs,
+ span: *fn_span,
+ from_hir_call: *from_hir_call,
+ });
+ return;
+ }
+
+ let trait_ref = TraitRef::from_method(tcx, trait_id, substs);
+ let poly_trait_pred = Binder::dummy(TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::ConstIfConst,
+ polarity: ty::ImplPolarity::Positive,
+ });
+ let obligation =
+ Obligation::new(ObligationCause::dummy(), param_env, poly_trait_pred);
+
+ let implsrc = tcx.infer_ctxt().enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+ selcx.select(&obligation)
+ });
+
+ match implsrc {
+ Ok(Some(ImplSource::Param(_, ty::BoundConstness::ConstIfConst))) => {
+ debug!(
+ "const_trait_impl: provided {:?} via where-clause in {:?}",
+ trait_ref, param_env
+ );
+ return;
+ }
+ Ok(Some(ImplSource::UserDefined(data))) => {
+ let callee_name = tcx.item_name(callee);
+ if let Some(&did) = tcx
+ .associated_item_def_ids(data.impl_def_id)
+ .iter()
+ .find(|did| tcx.item_name(**did) == callee_name)
+ {
+ // using internal substs is ok here, since this is only
+ // used for the `resolve` call below
+ substs = InternalSubsts::identity_for_item(tcx, did);
+ callee = did;
+ }
+
+ if let hir::Constness::NotConst = tcx.constness(data.impl_def_id) {
+ self.check_op(ops::FnCallNonConst {
+ caller,
+ callee,
+ substs,
+ span: *fn_span,
+ from_hir_call: *from_hir_call,
+ });
+ return;
+ }
+ }
+ _ if !tcx.is_const_fn_raw(callee) => {
+ // At this point, it is only legal when the caller is in a trait
+ // marked with #[const_trait], and the callee is in the same trait.
+ let mut nonconst_call_permission = false;
+ if let Some(callee_trait) = tcx.trait_of_item(callee)
+ && tcx.has_attr(callee_trait, sym::const_trait)
+ && Some(callee_trait) == tcx.trait_of_item(caller.to_def_id())
+ // Can only call methods when it's `<Self as TheTrait>::f`.
+ && tcx.types.self_param == substs.type_at(0)
+ {
+ nonconst_call_permission = true;
+ }
+
+ if !nonconst_call_permission {
+ let obligation = Obligation::new(
+ ObligationCause::dummy_with_span(*fn_span),
+ param_env,
+ tcx.mk_predicate(
+ poly_trait_pred.map_bound(ty::PredicateKind::Trait),
+ ),
+ );
+
+                            // Improve diagnostics by showing what failed. Our requirements are stricter this time
+                            // as we are going to error again anyway.
+ tcx.infer_ctxt().enter(|infcx| {
+ if let Err(e) = implsrc {
+ infcx.report_selection_error(
+ obligation.clone(),
+ &obligation,
+ &e,
+ false,
+ );
+ }
+ });
+
+ self.check_op(ops::FnCallNonConst {
+ caller,
+ callee,
+ substs,
+ span: *fn_span,
+ from_hir_call: *from_hir_call,
+ });
+ return;
+ }
+ }
+ _ => {}
+ }
+
+ // Resolve a trait method call to its concrete implementation, which may be in a
+ // `const` trait impl.
+ let instance = Instance::resolve(tcx, param_env, callee, substs);
+ debug!("Resolving ({:?}) -> {:?}", callee, instance);
+ if let Ok(Some(func)) = instance {
+ if let InstanceDef::Item(def) = func.def {
+ callee = def.did;
+ }
+ }
+ }
+
+ // At this point, we are calling a function, `callee`, whose `DefId` is known...
+
+ // `begin_panic` and `panic_display` are generic functions that accept
+ // types other than str. Check to enforce that only str can be used in
+ // const-eval.
+
+ // const-eval of the `begin_panic` fn assumes the argument is `&str`
+ if Some(callee) == tcx.lang_items().begin_panic_fn() {
+ match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
+ ty::Ref(_, ty, _) if ty.is_str() => return,
+ _ => self.check_op(ops::PanicNonStr),
+ }
+ }
+
+ // const-eval of the `panic_display` fn assumes the argument is `&&str`
+ if Some(callee) == tcx.lang_items().panic_display() {
+ match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
+ ty::Ref(_, ty, _) if matches!(ty.kind(), ty::Ref(_, ty, _) if ty.is_str()) =>
+ {
+ return;
+ }
+ _ => self.check_op(ops::PanicNonStr),
+ }
+ }
+
+ if Some(callee) == tcx.lang_items().exchange_malloc_fn() {
+ self.check_op(ops::HeapAllocation);
+ return;
+ }
+
+ // `async` blocks get lowered to `std::future::from_generator(/* a closure */)`.
+ let is_async_block = Some(callee) == tcx.lang_items().from_generator_fn();
+ if is_async_block {
+ let kind = hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block);
+ self.check_op(ops::Generator(kind));
+ return;
+ }
+
+ let is_intrinsic = tcx.is_intrinsic(callee);
+
+ if !tcx.is_const_fn_raw(callee) {
+ if !tcx.is_const_default_method(callee) {
+                        // To get here we must have already found a const impl for the
+                        // trait; the reason the callee may still be non-const is that
+                        // the impl uses default method bodies.
+ self.check_op(ops::FnCallNonConst {
+ caller,
+ callee,
+ substs,
+ span: *fn_span,
+ from_hir_call: *from_hir_call,
+ });
+ return;
+ }
+ }
+
+ // If the `const fn` we are trying to call is not const-stable, ensure that we have
+ // the proper feature gate enabled.
+ if let Some(gate) = is_unstable_const_fn(tcx, callee) {
+ trace!(?gate, "calling unstable const fn");
+ if self.span.allows_unstable(gate) {
+ return;
+ }
+
+ // Calling an unstable function *always* requires that the corresponding gate
+ // be enabled, even if the function has `#[rustc_allow_const_fn_unstable(the_gate)]`.
+ if !tcx.features().declared_lib_features.iter().any(|&(sym, _)| sym == gate) {
+ self.check_op(ops::FnCallUnstable(callee, Some(gate)));
+ return;
+ }
+
+ // If this crate is not using stability attributes, or the caller is not claiming to be a
+ // stable `const fn`, that is all that is required.
+ if !self.ccx.is_const_stable_const_fn() {
+ trace!("crate not using stability attributes or caller not stably const");
+ return;
+ }
+
+ // Otherwise, we are something const-stable calling a const-unstable fn.
+
+ if super::rustc_allow_const_fn_unstable(tcx, caller, gate) {
+ trace!("rustc_allow_const_fn_unstable gate active");
+ return;
+ }
+
+ self.check_op(ops::FnCallUnstable(callee, Some(gate)));
+ return;
+ }
+
+                // FIXME(ecstaticmorse): For compatibility, we consider `unstable` callees that
+ // have no `rustc_const_stable` attributes to be const-unstable as well. This
+ // should be fixed later.
+ let callee_is_unstable_unmarked = tcx.lookup_const_stability(callee).is_none()
+ && tcx.lookup_stability(callee).map_or(false, |s| s.is_unstable());
+ if callee_is_unstable_unmarked {
+ trace!("callee_is_unstable_unmarked");
+ // We do not use `const` modifiers for intrinsic "functions", as intrinsics are
+ // `extern` functions, and these have no way to get marked `const`. So instead we
+ // use `rustc_const_(un)stable` attributes to mean that the intrinsic is `const`
+ if self.ccx.is_const_stable_const_fn() || is_intrinsic {
+ self.check_op(ops::FnCallUnstable(callee, None));
+ return;
+ }
+ }
+ trace!("permitting call");
+ }
+
+ // Forbid all `Drop` terminators unless the place being dropped is a local with no
+ // projections that cannot be `NeedsNonConstDrop`.
+ TerminatorKind::Drop { place: dropped_place, .. }
+ | TerminatorKind::DropAndReplace { place: dropped_place, .. } => {
+ // If we are checking live drops after drop-elaboration, don't emit duplicate
+ // errors here.
+ if super::post_drop_elaboration::checking_enabled(self.ccx) {
+ return;
+ }
+
+ let mut err_span = self.span;
+ let ty_of_dropped_place = dropped_place.ty(self.body, self.tcx).ty;
+
+ let ty_needs_non_const_drop =
+ qualifs::NeedsNonConstDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place);
+
+ debug!(?ty_of_dropped_place, ?ty_needs_non_const_drop);
+
+ if !ty_needs_non_const_drop {
+ return;
+ }
+
+ let needs_non_const_drop = if let Some(local) = dropped_place.as_local() {
+ // Use the span where the local was declared as the span of the drop error.
+ err_span = self.body.local_decls[local].source_info.span;
+ self.qualifs.needs_non_const_drop(self.ccx, local, location)
+ } else {
+ true
+ };
+
+ if needs_non_const_drop {
+ self.check_op_spanned(
+ ops::LiveDrop { dropped_at: Some(terminator.source_info.span) },
+ err_span,
+ );
+ }
+ }
+
+ TerminatorKind::InlineAsm { .. } => self.check_op(ops::InlineAsm),
+
+ TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => {
+ self.check_op(ops::Generator(hir::GeneratorKind::Gen))
+ }
+
+ TerminatorKind::Abort => {
+ // Cleanup blocks are skipped for const checking (see `visit_basic_block_data`).
+ span_bug!(self.span, "`Abort` terminator outside of cleanup block")
+ }
+
+ TerminatorKind::Assert { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Return
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Unreachable => {}
+ }
+ }
+}
+
+fn place_as_reborrow<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ place: Place<'tcx>,
+) -> Option<PlaceRef<'tcx>> {
+ match place.as_ref().last_projection() {
+ Some((place_base, ProjectionElem::Deref)) => {
+ // A borrow of a `static` also looks like `&(*_1)` in the MIR, but `_1` is a `const`
+ // that points to the allocation for the static. Don't treat these as reborrows.
+ if body.local_decls[place_base.local].is_ref_to_static() {
+ None
+ } else {
+ // Ensure the type being derefed is a reference and not a raw pointer.
+ // This is sufficient to prevent an access to a `static mut` from being marked as a
+ // reborrow, even if the check above were to disappear.
+ let inner_ty = place_base.ty(body, tcx).ty;
+
+ if let ty::Ref(..) = inner_ty.kind() {
+ return Some(place_base);
+ } else {
+ return None;
+ }
+ }
+ }
+ _ => None,
+ }
+}
+
+fn is_int_bool_or_char(ty: Ty<'_>) -> bool {
+ ty.is_bool() || ty.is_integral() || ty.is_char()
+}
+
+fn emit_unstable_in_stable_error(ccx: &ConstCx<'_, '_>, span: Span, gate: Symbol) {
+ let attr_span = ccx.tcx.def_span(ccx.def_id()).shrink_to_lo();
+
+ ccx.tcx.sess.emit_err(UnstableInStable { gate: gate.to_string(), span, attr_span });
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
new file mode 100644
index 000000000..25b420bed
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
@@ -0,0 +1,132 @@
+//! Check the bodies of `const`s, `static`s and `const fn`s for illegal operations.
+//!
+//! This module will eventually replace the parts of `qualify_consts.rs` that check whether a local
+//! has interior mutability or needs to be dropped, as well as the visitor that emits errors when
+//! it finds operations that are invalid in a certain context.
+
+use rustc_attr as attr;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::mir;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::Symbol;
+
+pub use self::qualifs::Qualif;
+
+pub mod check;
+mod ops;
+pub mod post_drop_elaboration;
+pub mod qualifs;
+mod resolver;
+
+/// Information about the item currently being const-checked, as well as a reference to the global
+/// context.
+pub struct ConstCx<'mir, 'tcx> {
+ pub body: &'mir mir::Body<'tcx>,
+ pub tcx: TyCtxt<'tcx>,
+ pub param_env: ty::ParamEnv<'tcx>,
+ pub const_kind: Option<hir::ConstContext>,
+}
+
+impl<'mir, 'tcx> ConstCx<'mir, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, body: &'mir mir::Body<'tcx>) -> Self {
+ let def_id = body.source.def_id().expect_local();
+ let param_env = tcx.param_env(def_id);
+ Self::new_with_param_env(tcx, body, param_env)
+ }
+
+ pub fn new_with_param_env(
+ tcx: TyCtxt<'tcx>,
+ body: &'mir mir::Body<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Self {
+ let const_kind = tcx.hir().body_const_context(body.source.def_id().expect_local());
+ ConstCx { body, tcx, param_env, const_kind }
+ }
+
+ pub fn def_id(&self) -> LocalDefId {
+ self.body.source.def_id().expect_local()
+ }
+
+ /// Returns the kind of const context this `Item` represents (`const`, `static`, etc.).
+ ///
+ /// Panics if this `Item` is not const.
+ pub fn const_kind(&self) -> hir::ConstContext {
+ self.const_kind.expect("`const_kind` must not be called on a non-const fn")
+ }
+
+ pub fn is_const_stable_const_fn(&self) -> bool {
+ self.const_kind == Some(hir::ConstContext::ConstFn)
+ && self.tcx.features().staged_api
+ && is_const_stable_const_fn(self.tcx, self.def_id().to_def_id())
+ }
+
+ fn is_async(&self) -> bool {
+ self.tcx.asyncness(self.def_id()) == hir::IsAsync::Async
+ }
+}
+
+pub fn rustc_allow_const_fn_unstable(
+ tcx: TyCtxt<'_>,
+ def_id: LocalDefId,
+ feature_gate: Symbol,
+) -> bool {
+ let attrs = tcx.hir().attrs(tcx.hir().local_def_id_to_hir_id(def_id));
+ attr::rustc_allow_const_fn_unstable(&tcx.sess, attrs).any(|name| name == feature_gate)
+}
+
+/// Returns `true` if the given `const fn` is "const-stable".
+///
+/// Panics if the given `DefId` does not refer to a `const fn`.
+///
+/// Const-stability is only relevant for `const fn` within a `staged_api` crate. Only "const-stable"
+/// functions can be called in a const-context by users of the stable compiler. "const-stable"
+/// functions are subject to more stringent restrictions than "const-unstable" functions: They
+/// cannot use unstable features and can only call other "const-stable" functions.
+pub fn is_const_stable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ // A default body in a `#[const_trait]` is not const-stable because const
+ // trait fns currently cannot be const-stable. We shouldn't
+ // restrict default bodies to only call const-stable functions.
+ if tcx.is_const_default_method(def_id) {
+ return false;
+ }
+
+ // Const-stability is only relevant for `const fn`.
+ assert!(tcx.is_const_fn_raw(def_id));
+
+    // A function is only const-stable if it has `#[rustc_const_stable]` or the trait it belongs
+    // to is const-stable.
+ match tcx.lookup_const_stability(def_id) {
+ Some(stab) => stab.is_const_stable(),
+ None if is_parent_const_stable_trait(tcx, def_id) => {
+ // Remove this when `#![feature(const_trait_impl)]` is stabilized,
+ // returning `true` unconditionally.
+ tcx.sess.delay_span_bug(
+ tcx.def_span(def_id),
+ "trait implementations cannot be const stable yet",
+ );
+ true
+ }
+ None => false, // By default, items are not const stable.
+ }
+}
+
+fn is_parent_const_stable_trait(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ let local_def_id = def_id.expect_local();
+ let hir_id = tcx.local_def_id_to_hir_id(local_def_id);
+
+ let Some(parent) = tcx.hir().find_parent_node(hir_id) else { return false };
+ let parent_def = tcx.hir().get(parent);
+
+ if !matches!(
+ parent_def,
+ hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const, .. }),
+ ..
+ })
+ ) {
+ return false;
+ }
+
+ tcx.lookup_const_stability(parent.owner).map_or(false, |stab| stab.is_const_stable())
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
new file mode 100644
index 000000000..338022616
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
@@ -0,0 +1,771 @@
+//! Concrete error types for all operations which may be invalid in a certain const context.
+
+use hir::def_id::LocalDefId;
+use rustc_errors::{
+ error_code, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed,
+};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
+use rustc_middle::mir;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::{
+ suggest_constraining_type_param, Adt, Closure, DefIdTree, FnDef, FnPtr, Param, TraitPredicate,
+ Ty,
+};
+use rustc_middle::ty::{Binder, BoundConstness, ImplPolarity, TraitRef};
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::sym;
+use rustc_span::{BytePos, Pos, Span, Symbol};
+use rustc_trait_selection::traits::SelectionContext;
+
+use super::ConstCx;
+use crate::errors::{
+ MutDerefErr, NonConstOpErr, PanicNonStrErr, RawPtrToIntErr, StaticAccessErr,
+ TransientMutBorrowErr, TransientMutBorrowErrRaw,
+};
+use crate::util::{call_kind, CallDesugaringKind, CallKind};
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum Status {
+ Allowed,
+ Unstable(Symbol),
+ Forbidden,
+}
+
+#[derive(Clone, Copy)]
+pub enum DiagnosticImportance {
+ /// An operation that must be removed for const-checking to pass.
+ Primary,
+
+ /// An operation that causes const-checking to fail, but is usually a side-effect of a `Primary` operation elsewhere.
+ Secondary,
+}
+
+/// An operation that is not *always* allowed in a const context.
+pub trait NonConstOp<'tcx>: std::fmt::Debug {
+ /// Returns an enum indicating whether this operation is allowed within the given item.
+ fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Forbidden
+ }
+
+ fn importance(&self) -> DiagnosticImportance {
+ DiagnosticImportance::Primary
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>;
+}
+
+#[derive(Debug)]
+pub struct FloatingPointOp;
+impl<'tcx> NonConstOp<'tcx> for FloatingPointOp {
+ fn status_in_item(&self, ccx: &ConstCx<'_, 'tcx>) -> Status {
+ if ccx.const_kind() == hir::ConstContext::ConstFn {
+ Status::Unstable(sym::const_fn_floating_point_arithmetic)
+ } else {
+ Status::Allowed
+ }
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ feature_err(
+ &ccx.tcx.sess.parse_sess,
+ sym::const_fn_floating_point_arithmetic,
+ span,
+ &format!("floating point arithmetic is not allowed in {}s", ccx.const_kind()),
+ )
+ }
+}
+
+/// A function call where the callee is a pointer.
+#[derive(Debug)]
+pub struct FnCallIndirect;
+impl<'tcx> NonConstOp<'tcx> for FnCallIndirect {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.struct_span_err(
+ span,
+ &format!("function pointer calls are not allowed in {}s", ccx.const_kind()),
+ )
+ }
+}
+
+/// A function call where the callee is not marked as `const`.
+#[derive(Debug, Clone, Copy)]
+pub struct FnCallNonConst<'tcx> {
+ pub caller: LocalDefId,
+ pub callee: DefId,
+ pub substs: SubstsRef<'tcx>,
+ pub span: Span,
+ pub from_hir_call: bool,
+}
+
+impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ _: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let FnCallNonConst { caller, callee, substs, span, from_hir_call } = *self;
+ let ConstCx { tcx, param_env, .. } = *ccx;
+
+ let diag_trait = |err, self_ty: Ty<'_>, trait_id| {
+ let trait_ref = TraitRef::from_method(tcx, trait_id, substs);
+
+ match self_ty.kind() {
+ Param(param_ty) => {
+ debug!(?param_ty);
+ let caller_hir_id = tcx.hir().local_def_id_to_hir_id(caller);
+ if let Some(generics) = tcx.hir().get(caller_hir_id).generics() {
+ let constraint = with_no_trimmed_paths!(format!(
+ "~const {}",
+ trait_ref.print_only_trait_path()
+ ));
+ suggest_constraining_type_param(
+ tcx,
+ generics,
+ err,
+ &param_ty.name.as_str(),
+ &constraint,
+ None,
+ );
+ }
+ }
+ Adt(..) => {
+ let obligation = Obligation::new(
+ ObligationCause::dummy(),
+ param_env,
+ Binder::dummy(TraitPredicate {
+ trait_ref,
+ constness: BoundConstness::NotConst,
+ polarity: ImplPolarity::Positive,
+ }),
+ );
+
+ let implsrc = tcx.infer_ctxt().enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+ selcx.select(&obligation)
+ });
+
+ if let Ok(Some(ImplSource::UserDefined(data))) = implsrc {
+ let span = tcx.def_span(data.impl_def_id);
+ err.span_note(span, "impl defined here, but it is not `const`");
+ }
+ }
+ _ => {}
+ }
+ };
+
+ let call_kind = call_kind(tcx, ccx.param_env, callee, substs, span, from_hir_call, None);
+
+ debug!(?call_kind);
+
+ let mut err = match call_kind {
+ CallKind::Normal { desugaring: Some((kind, self_ty)), .. } => {
+ macro_rules! error {
+ ($fmt:literal) => {
+ struct_span_err!(tcx.sess, span, E0015, $fmt, self_ty, ccx.const_kind())
+ };
+ }
+
+ let mut err = match kind {
+ CallDesugaringKind::ForLoopIntoIter => {
+ error!("cannot convert `{}` into an iterator in {}s")
+ }
+ CallDesugaringKind::QuestionBranch => {
+ error!("`?` cannot determine the branch of `{}` in {}s")
+ }
+ CallDesugaringKind::QuestionFromResidual => {
+ error!("`?` cannot convert from residual of `{}` in {}s")
+ }
+ CallDesugaringKind::TryBlockFromOutput => {
+ error!("`try` block cannot convert `{}` to the result in {}s")
+ }
+ };
+
+ diag_trait(&mut err, self_ty, kind.trait_def_id(tcx));
+ err
+ }
+ CallKind::FnCall { fn_trait_id, self_ty } => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0015,
+ "cannot call non-const closure in {}s",
+ ccx.const_kind(),
+ );
+
+ match self_ty.kind() {
+ FnDef(def_id, ..) => {
+ let span = tcx.def_span(*def_id);
+ if ccx.tcx.is_const_fn_raw(*def_id) {
+ span_bug!(span, "calling const FnDef errored when it shouldn't");
+ }
+
+ err.span_note(span, "function defined here, but it is not `const`");
+ }
+ FnPtr(..) => {
+ err.note(&format!(
+ "function pointers need an RFC before allowed to be called in {}s",
+ ccx.const_kind()
+ ));
+ }
+ Closure(..) => {
+ err.note(&format!(
+ "closures need an RFC before allowed to be called in {}s",
+ ccx.const_kind()
+ ));
+ }
+ _ => {}
+ }
+
+ diag_trait(&mut err, self_ty, fn_trait_id);
+ err
+ }
+ CallKind::Operator { trait_id, self_ty, .. } => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0015,
+ "cannot call non-const operator in {}s",
+ ccx.const_kind()
+ );
+
+ if Some(trait_id) == ccx.tcx.lang_items().eq_trait() {
+ match (substs[0].unpack(), substs[1].unpack()) {
+ (GenericArgKind::Type(self_ty), GenericArgKind::Type(rhs_ty))
+ if self_ty == rhs_ty
+ && self_ty.is_ref()
+ && self_ty.peel_refs().is_primitive() =>
+ {
+ let mut num_refs = 0;
+ let mut tmp_ty = self_ty;
+ while let rustc_middle::ty::Ref(_, inner_ty, _) = tmp_ty.kind() {
+ num_refs += 1;
+ tmp_ty = *inner_ty;
+ }
+ let deref = "*".repeat(num_refs);
+
+ if let Ok(call_str) = ccx.tcx.sess.source_map().span_to_snippet(span) {
+ if let Some(eq_idx) = call_str.find("==") {
+ if let Some(rhs_idx) =
+ call_str[(eq_idx + 2)..].find(|c: char| !c.is_whitespace())
+ {
+ let rhs_pos =
+ span.lo() + BytePos::from_usize(eq_idx + 2 + rhs_idx);
+ let rhs_span = span.with_lo(rhs_pos).with_hi(rhs_pos);
+ err.multipart_suggestion(
+ "consider dereferencing here",
+ vec![
+ (span.shrink_to_lo(), deref.clone()),
+ (rhs_span, deref),
+ ],
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+
+ diag_trait(&mut err, self_ty, trait_id);
+ err
+ }
+ CallKind::DerefCoercion { deref_target, deref_target_ty, self_ty } => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0015,
+ "cannot perform deref coercion on `{}` in {}s",
+ self_ty,
+ ccx.const_kind()
+ );
+
+ err.note(&format!("attempting to deref into `{}`", deref_target_ty));
+
+ // Check first whether the source is accessible (issue #87060)
+ if tcx.sess.source_map().is_span_accessible(deref_target) {
+ err.span_note(deref_target, "deref defined here");
+ }
+
+ diag_trait(&mut err, self_ty, tcx.lang_items().deref_trait().unwrap());
+ err
+ }
+ _ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentV1Methods) => {
+ struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0015,
+ "cannot call non-const formatting macro in {}s",
+ ccx.const_kind(),
+ )
+ }
+ _ => struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0015,
+ "cannot call non-const fn `{}` in {}s",
+ ccx.tcx.def_path_str_with_substs(callee, substs),
+ ccx.const_kind(),
+ ),
+ };
+
+ err.note(&format!(
+ "calls in {}s are limited to constant functions, \
+ tuple structs and tuple variants",
+ ccx.const_kind(),
+ ));
+
+ err
+ }
+}
+
+/// A call to an `#[unstable]` const fn or `#[rustc_const_unstable]` function.
+///
+/// Contains the name of the feature that would allow the use of this function.
+#[derive(Debug)]
+pub struct FnCallUnstable(pub DefId, pub Option<Symbol>);
+
+impl<'tcx> NonConstOp<'tcx> for FnCallUnstable {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let FnCallUnstable(def_id, feature) = *self;
+
+ let mut err = ccx.tcx.sess.struct_span_err(
+ span,
+ &format!("`{}` is not yet stable as a const fn", ccx.tcx.def_path_str(def_id)),
+ );
+
+ if ccx.is_const_stable_const_fn() {
+ err.help("const-stable functions can only call other const-stable functions");
+ } else if ccx.tcx.sess.is_nightly_build() {
+ if let Some(feature) = feature {
+ err.help(&format!(
+ "add `#![feature({})]` to the crate attributes to enable",
+ feature
+ ));
+ }
+ }
+
+ err
+ }
+}
+
+#[derive(Debug)]
+pub struct Generator(pub hir::GeneratorKind);
+impl<'tcx> NonConstOp<'tcx> for Generator {
+ fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+ if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
+ Status::Unstable(sym::const_async_blocks)
+ } else {
+ Status::Forbidden
+ }
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let msg = format!("{}s are not allowed in {}s", self.0, ccx.const_kind());
+ if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
+ feature_err(&ccx.tcx.sess.parse_sess, sym::const_async_blocks, span, &msg)
+ } else {
+ ccx.tcx.sess.struct_span_err(span, &msg)
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct HeapAllocation;
+impl<'tcx> NonConstOp<'tcx> for HeapAllocation {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0010,
+ "allocations are not allowed in {}s",
+ ccx.const_kind()
+ );
+ err.span_label(span, format!("allocation not allowed in {}s", ccx.const_kind()));
+ if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "The value of statics and constants must be known at compile time, \
+ and they live for the entire lifetime of a program. Creating a boxed \
+ value allocates memory on the heap at runtime, and therefore cannot \
+ be done at compile time.",
+ );
+ }
+ err
+ }
+}
+
+#[derive(Debug)]
+pub struct InlineAsm;
+impl<'tcx> NonConstOp<'tcx> for InlineAsm {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0015,
+ "inline assembly is not allowed in {}s",
+ ccx.const_kind()
+ )
+ }
+}
+
+#[derive(Debug)]
+pub struct LiveDrop {
+ pub dropped_at: Option<Span>,
+}
+impl<'tcx> NonConstOp<'tcx> for LiveDrop {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0493,
+ "destructors cannot be evaluated at compile-time"
+ );
+ err.span_label(span, format!("{}s cannot evaluate destructors", ccx.const_kind()));
+ if let Some(span) = self.dropped_at {
+ err.span_label(span, "value is dropped here");
+ }
+ err
+ }
+}
+
+#[derive(Debug)]
+/// A borrow of a type that contains an `UnsafeCell` somewhere. The borrow never escapes to
+/// the final value of the constant.
+pub struct TransientCellBorrow;
+impl<'tcx> NonConstOp<'tcx> for TransientCellBorrow {
+ fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Unstable(sym::const_refs_to_cell)
+ }
+ fn importance(&self) -> DiagnosticImportance {
+ // The cases that cannot possibly work will already emit a `CellBorrow`, so we should
+ // not additionally emit a feature gate error if activating the feature gate won't work.
+ DiagnosticImportance::Secondary
+ }
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ feature_err(
+ &ccx.tcx.sess.parse_sess,
+ sym::const_refs_to_cell,
+ span,
+ "cannot borrow here, since the borrowed element may contain interior mutability",
+ )
+ }
+}
+
+#[derive(Debug)]
+/// A borrow of a type that contains an `UnsafeCell` somewhere. The borrow might escape to
+/// the final value of the constant, and thus we cannot allow this (for now). We may allow
+/// it in the future for static items.
+pub struct CellBorrow;
+impl<'tcx> NonConstOp<'tcx> for CellBorrow {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0492,
+ "{}s cannot refer to interior mutable data",
+ ccx.const_kind(),
+ );
+ err.span_label(
+ span,
+ "this borrow of an interior mutable value may end up in the final value",
+ );
+ if let hir::ConstContext::Static(_) = ccx.const_kind() {
+ err.help(
+ "to fix this, the value can be extracted to a separate \
+ `static` item and then referenced",
+ );
+ }
+ if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "A constant containing interior mutable data behind a reference can allow you
+ to modify that data. This would make multiple uses of a constant to be able to
+ see different values and allow circumventing the `Send` and `Sync` requirements
+ for shared mutable data, which is unsound.",
+ );
+ }
+ err
+ }
+}
+
+#[derive(Debug)]
+/// This op is for `&mut` borrows in the trailing expression of a constant
+/// which uses the "enclosing scopes rule" to leak its locals into anonymous
+/// static or const items.
+pub struct MutBorrow(pub hir::BorrowKind);
+
+impl<'tcx> NonConstOp<'tcx> for MutBorrow {
+ fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Forbidden
+ }
+
+ fn importance(&self) -> DiagnosticImportance {
+ // If there were primary errors (like non-const function calls), do not emit further
+ // errors about mutable references.
+ DiagnosticImportance::Secondary
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let raw = match self.0 {
+ hir::BorrowKind::Raw => "raw ",
+ hir::BorrowKind::Ref => "",
+ };
+
+ let mut err = struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0764,
+ "{}mutable references are not allowed in the final value of {}s",
+ raw,
+ ccx.const_kind(),
+ );
+
+ if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "References in statics and constants may only refer \
+ to immutable values.\n\n\
+ Statics are shared everywhere, and if they refer to \
+ mutable data one might violate memory safety since \
+ holding multiple mutable references to shared data \
+ is not allowed.\n\n\
+ If you really want global mutable state, try using \
+ static mut or a global UnsafeCell.",
+ );
+ }
+ err
+ }
+}
+
+#[derive(Debug)]
+/// A mutable borrow that is *transient*, i.e. one that (in contrast to
+/// `MutBorrow` above) does not flow into the final value of the constant —
+/// NOTE(review): "transient" semantics inferred from the contrast with
+/// `MutBorrow`; confirm against the checker that constructs this op.
+/// Not forbidden outright, only gated behind the `const_mut_refs` feature.
+pub struct TransientMutBorrow(pub hir::BorrowKind);
+
+impl<'tcx> NonConstOp<'tcx> for TransientMutBorrow {
+ fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Unstable(sym::const_mut_refs)
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let kind = ccx.const_kind();
+ // Raw-pointer borrows and reference borrows get distinct feature-gate diagnostics.
+ match self.0 {
+ hir::BorrowKind::Raw => ccx
+ .tcx
+ .sess
+ .create_feature_err(TransientMutBorrowErrRaw { span, kind }, sym::const_mut_refs),
+ hir::BorrowKind::Ref => ccx
+ .tcx
+ .sess
+ .create_feature_err(TransientMutBorrowErr { span, kind }, sym::const_mut_refs),
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct MutDeref;
+impl<'tcx> NonConstOp<'tcx> for MutDeref {
+ fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Unstable(sym::const_mut_refs)
+ }
+
+ fn importance(&self) -> DiagnosticImportance {
+ // Usually a side-effect of a `TransientMutBorrow` somewhere.
+ DiagnosticImportance::Secondary
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx
+ .sess
+ .create_feature_err(MutDerefErr { span, kind: ccx.const_kind() }, sym::const_mut_refs)
+ }
+}
+
+/// A call to a `panic()` lang item where the first argument is _not_ a `&str`.
+#[derive(Debug)]
+pub struct PanicNonStr;
+impl<'tcx> NonConstOp<'tcx> for PanicNonStr {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.create_err(PanicNonStrErr { span })
+ }
+}
+
+/// Comparing raw pointers for equality.
+/// Not currently intended to ever be allowed, even behind a feature gate: operation depends on
+/// allocation base addresses that are not known at compile-time.
+#[derive(Debug)]
+pub struct RawPtrComparison;
+impl<'tcx> NonConstOp<'tcx> for RawPtrComparison {
+ fn build_error(
+ &self,
+ _: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ span_bug!(span, "raw ptr comparison should already be caught in the trait system");
+ }
+}
+
+#[derive(Debug)]
+pub struct RawMutPtrDeref;
+impl<'tcx> NonConstOp<'tcx> for RawMutPtrDeref {
+ fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
+ Status::Unstable(sym::const_mut_refs)
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ feature_err(
+ &ccx.tcx.sess.parse_sess,
+ sym::const_mut_refs,
+ span,
+ &format!("dereferencing raw mutable pointers in {}s is unstable", ccx.const_kind(),),
+ )
+ }
+}
+
+/// Casting raw pointer or function pointer to an integer.
+/// Not currently intended to ever be allowed, even behind a feature gate: operation depends on
+/// allocation base addresses that are not known at compile-time.
+#[derive(Debug)]
+pub struct RawPtrToIntCast;
+impl<'tcx> NonConstOp<'tcx> for RawPtrToIntCast {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.create_err(RawPtrToIntErr { span })
+ }
+}
+
+/// An access to a (non-thread-local) `static`.
+#[derive(Debug)]
+pub struct StaticAccess;
+impl<'tcx> NonConstOp<'tcx> for StaticAccess {
+ fn status_in_item(&self, ccx: &ConstCx<'_, 'tcx>) -> Status {
+ if let hir::ConstContext::Static(_) = ccx.const_kind() {
+ Status::Allowed
+ } else {
+ Status::Forbidden
+ }
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.create_err(StaticAccessErr {
+ span,
+ kind: ccx.const_kind(),
+ teach: ccx.tcx.sess.teach(&error_code!(E0013)).then_some(()),
+ })
+ }
+}
+
+/// An access to a thread-local `static`.
+#[derive(Debug)]
+pub struct ThreadLocalAccess;
+impl<'tcx> NonConstOp<'tcx> for ThreadLocalAccess {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.create_err(NonConstOpErr { span })
+ }
+}
+
+// Types that cannot appear in the signature or locals of a `const fn`.
+pub mod ty {
+ use super::*;
+
+ #[derive(Debug)]
+ pub struct MutRef(pub mir::LocalKind);
+ impl<'tcx> NonConstOp<'tcx> for MutRef {
+ fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Unstable(sym::const_mut_refs)
+ }
+
+ fn importance(&self) -> DiagnosticImportance {
+ match self.0 {
+ mir::LocalKind::Var | mir::LocalKind::Temp => DiagnosticImportance::Secondary,
+ mir::LocalKind::ReturnPointer | mir::LocalKind::Arg => {
+ DiagnosticImportance::Primary
+ }
+ }
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ feature_err(
+ &ccx.tcx.sess.parse_sess,
+ sym::const_mut_refs,
+ span,
+ &format!("mutable references are not allowed in {}s", ccx.const_kind()),
+ )
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
new file mode 100644
index 000000000..4e210f663
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
@@ -0,0 +1,123 @@
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{self, BasicBlock, Location};
+use rustc_middle::ty::TyCtxt;
+use rustc_span::{symbol::sym, Span};
+
+use super::check::Qualifs;
+use super::ops::{self, NonConstOp};
+use super::qualifs::{NeedsNonConstDrop, Qualif};
+use super::ConstCx;
+
+/// Returns `true` if we should use the more precise live drop checker that runs after drop
+/// elaboration.
+pub fn checking_enabled(ccx: &ConstCx<'_, '_>) -> bool {
+ // Const-stable functions must always use the stable live drop checker; for all
+ // other items the precise checker is opt-in via the
+ // `const_precise_live_drops` feature gate.
+ !ccx.is_const_stable_const_fn() && ccx.tcx.features().const_precise_live_drops
+}
+
+/// Look for live drops in a const context.
+///
+/// This is separate from the rest of the const checking logic because it must run after drop
+/// elaboration.
+pub fn check_live_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>) {
+ let def_id = body.source.def_id().expect_local();
+ let const_kind = tcx.hir().body_const_context(def_id);
+ if const_kind.is_none() {
+ return;
+ }
+
+ if tcx.has_attr(def_id.to_def_id(), sym::rustc_do_not_const_check) {
+ return;
+ }
+
+ let ccx = ConstCx { body, tcx, const_kind, param_env: tcx.param_env(def_id) };
+ if !checking_enabled(&ccx) {
+ return;
+ }
+
+ let mut visitor = CheckLiveDrops { ccx: &ccx, qualifs: Qualifs::default() };
+
+ visitor.visit_body(body);
+}
+
+struct CheckLiveDrops<'mir, 'tcx> {
+ ccx: &'mir ConstCx<'mir, 'tcx>,
+ qualifs: Qualifs<'mir, 'tcx>,
+}
+
+// So we can access `body` and `tcx` directly through `self`.
+impl<'mir, 'tcx> std::ops::Deref for CheckLiveDrops<'mir, 'tcx> {
+ type Target = ConstCx<'mir, 'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ // `ccx` is already a `&'mir ConstCx`; return it directly instead of taking
+ // a redundant `&&ConstCx` borrow and relying on auto-deref coercion.
+ self.ccx
+ }
+}
+
+impl CheckLiveDrops<'_, '_> {
+ // Emits the `LiveDrop` error (with no drop location attached) at `span` for a
+ // drop that may run a non-const destructor.
+ fn check_live_drop(&self, span: Span) {
+ ops::LiveDrop { dropped_at: None }.build_error(self.ccx, span).emit();
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
+ fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &mir::BasicBlockData<'tcx>) {
+ trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);
+
+ // Ignore drop terminators in cleanup blocks.
+ if block.is_cleanup {
+ return;
+ }
+
+ self.super_basic_block_data(bb, block);
+ }
+
+ fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+ trace!("visit_terminator: terminator={:?} location={:?}", terminator, location);
+
+ match &terminator.kind {
+ mir::TerminatorKind::Drop { place: dropped_place, .. }
+ | mir::TerminatorKind::DropAndReplace { place: dropped_place, .. } => {
+ let dropped_ty = dropped_place.ty(self.body, self.tcx).ty;
+ if !NeedsNonConstDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
+ // Instead of throwing a bug, we just return here. This is because we have to
+ // run custom `const Drop` impls.
+ return;
+ }
+
+ if dropped_place.is_indirect() {
+ self.check_live_drop(terminator.source_info.span);
+ return;
+ }
+
+ // Drop elaboration is not precise enough to accept code like
+ // `src/test/ui/consts/control-flow/drop-pass.rs`; e.g., when an `Option<Vec<T>>` is
+ // initialized with `None` and never changed, it still emits drop glue.
+ // Hence we additionally check the qualifs here to allow more code to pass.
+ if self.qualifs.needs_non_const_drop(self.ccx, dropped_place.local, location) {
+ // Use the span where the dropped local was declared for the error.
+ let span = self.body.local_decls[dropped_place.local].source_info.span;
+ self.check_live_drop(span);
+ }
+ }
+
+ mir::TerminatorKind::Abort
+ | mir::TerminatorKind::Call { .. }
+ | mir::TerminatorKind::Assert { .. }
+ | mir::TerminatorKind::FalseEdge { .. }
+ | mir::TerminatorKind::FalseUnwind { .. }
+ | mir::TerminatorKind::GeneratorDrop
+ | mir::TerminatorKind::Goto { .. }
+ | mir::TerminatorKind::InlineAsm { .. }
+ | mir::TerminatorKind::Resume
+ | mir::TerminatorKind::Return
+ | mir::TerminatorKind::SwitchInt { .. }
+ | mir::TerminatorKind::Unreachable
+ | mir::TerminatorKind::Yield { .. } => {}
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
new file mode 100644
index 000000000..c8a63c9c3
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -0,0 +1,384 @@
+//! Structural const qualification.
+//!
+//! See the `Qualif` trait for more info.
+
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::LangItem;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::TraitEngine;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, subst::SubstsRef, AdtDef, Ty};
+use rustc_span::DUMMY_SP;
+use rustc_trait_selection::traits::{
+ self, ImplSource, Obligation, ObligationCause, SelectionContext, TraitEngineExt,
+};
+
+use super::ConstCx;
+
+pub fn in_any_value_of_ty<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ ty: Ty<'tcx>,
+ tainted_by_errors: Option<ErrorGuaranteed>,
+) -> ConstQualifs {
+ ConstQualifs {
+ has_mut_interior: HasMutInterior::in_any_value_of_ty(cx, ty),
+ needs_drop: NeedsDrop::in_any_value_of_ty(cx, ty),
+ needs_non_const_drop: NeedsNonConstDrop::in_any_value_of_ty(cx, ty),
+ custom_eq: CustomEq::in_any_value_of_ty(cx, ty),
+ tainted_by_errors,
+ }
+}
+
+/// A "qualif"(-ication) is a way to look for something "bad" in the MIR that would disqualify some
+/// code for promotion or prevent it from evaluating at compile time.
+///
+/// Normally, we would determine what qualifications apply to each type and error when an illegal
+/// operation is performed on such a type. However, this was found to be too imprecise, especially
+/// in the presence of `enum`s. If only a single variant of an enum has a certain qualification, we
+/// needn't reject code unless it actually constructs and operates on the qualified variant.
+///
+/// To accomplish this, const-checking and promotion use a value-based analysis (as opposed to a
+/// type-based one). Qualifications propagate structurally across variables: If a local (or a
+/// projection of a local) is assigned a qualified value, that local itself becomes qualified.
+pub trait Qualif {
+ /// The name of the file used to debug the dataflow analysis that computes this qualif.
+ const ANALYSIS_NAME: &'static str;
+
+ /// Whether this `Qualif` is cleared when a local is moved from.
+ const IS_CLEARED_ON_MOVE: bool = false;
+
+ /// Whether this `Qualif` might be evaluated after the promotion and can encounter a promoted.
+ const ALLOW_PROMOTED: bool = false;
+
+ /// Extracts the field of `ConstQualifs` that corresponds to this `Qualif`.
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool;
+
+ /// Returns `true` if *any* value of the given type could possibly have this `Qualif`.
+ ///
+ /// This function determines `Qualif`s when we cannot do a value-based analysis. Since qualif
+ /// propagation is context-insensitive, this includes function arguments and values returned
+ /// from a call to another function.
+ ///
+ /// It also determines the `Qualif`s for primitive types.
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool;
+
+ /// Returns `true` if this `Qualif` is inherent to the given struct or enum.
+ ///
+ /// By default, `Qualif`s propagate into ADTs in a structural way: An ADT only becomes
+ /// qualified if part of it is assigned a value with that `Qualif`. However, some ADTs *always*
+ /// have a certain `Qualif`, regardless of whether their fields have it. For example, a type
+ /// with a custom `Drop` impl is inherently `NeedsDrop`.
+ ///
+ /// Returning `true` for `in_adt_inherently` but `false` for `in_any_value_of_ty` is unsound.
+ fn in_adt_inherently<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> bool;
+}
+
+/// Constant containing interior mutability (`UnsafeCell<T>`).
+/// This must be ruled out to make sure that evaluating the constant at compile-time
+/// and at *any point* during the run-time would produce the same result. In particular,
+/// promotion of temporaries must not change program behavior; if the promoted could be
+/// written to, that would be a problem.
+pub struct HasMutInterior;
+
+impl Qualif for HasMutInterior {
+ const ANALYSIS_NAME: &'static str = "flow_has_mut_interior";
+
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+ qualifs.has_mut_interior
+ }
+
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env)
+ }
+
+ fn in_adt_inherently<'tcx>(
+ _cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ _: SubstsRef<'tcx>,
+ ) -> bool {
+ // Exactly one type, `UnsafeCell`, has the `HasMutInterior` qualif inherently.
+ // It arises structurally for all other types.
+ adt.is_unsafe_cell()
+ }
+}
+
+/// Constant containing an ADT that implements `Drop`.
+/// This must be ruled out because implicit promotion would remove side-effects
+/// that occur as part of dropping that value. N.B., the implicit promotion has
+/// to reject const Drop implementations because even if side-effects are ruled
+/// out through other means, the execution of the drop could diverge.
+pub struct NeedsDrop;
+
+impl Qualif for NeedsDrop {
+ const ANALYSIS_NAME: &'static str = "flow_needs_drop";
+ const IS_CLEARED_ON_MOVE: bool = true;
+
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+ qualifs.needs_drop
+ }
+
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ ty.needs_drop(cx.tcx, cx.param_env)
+ }
+
+ fn in_adt_inherently<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ _: SubstsRef<'tcx>,
+ ) -> bool {
+ adt.has_dtor(cx.tcx)
+ }
+}
+
+/// Constant containing an ADT that implements non-const `Drop`.
+/// This must be ruled out because we cannot run `Drop` during compile-time.
+pub struct NeedsNonConstDrop;
+
+impl Qualif for NeedsNonConstDrop {
+ const ANALYSIS_NAME: &'static str = "flow_needs_nonconst_drop";
+ const IS_CLEARED_ON_MOVE: bool = true;
+ const ALLOW_PROMOTED: bool = true;
+
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+ qualifs.needs_non_const_drop
+ }
+
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ // Avoid selecting for simple cases, such as builtin types.
+ if ty::util::is_trivially_const_drop(ty) {
+ return false;
+ }
+
+ let destruct = cx.tcx.require_lang_item(LangItem::Destruct, None);
+
+ let obligation = Obligation::new(
+ ObligationCause::dummy(),
+ cx.param_env,
+ ty::Binder::dummy(ty::TraitPredicate {
+ trait_ref: ty::TraitRef {
+ def_id: destruct,
+ substs: cx.tcx.mk_substs_trait(ty, &[]),
+ },
+ constness: ty::BoundConstness::ConstIfConst,
+ polarity: ty::ImplPolarity::Positive,
+ }),
+ );
+
+ cx.tcx.infer_ctxt().enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+ let Some(impl_src) = selcx.select(&obligation).ok().flatten() else {
+ // If we couldn't select a const destruct candidate, then it's bad
+ return true;
+ };
+
+ if !matches!(
+ impl_src,
+ ImplSource::ConstDestruct(_)
+ | ImplSource::Param(_, ty::BoundConstness::ConstIfConst)
+ ) {
+ // If our const destruct candidate is not ConstDestruct or implied by the param env,
+ // then it's bad
+ return true;
+ }
+
+ if impl_src.borrow_nested_obligations().is_empty() {
+ return false;
+ }
+
+ // If we successfully found one, then select all of the predicates
+ // implied by our const drop impl.
+ let mut fcx = <dyn TraitEngine<'tcx>>::new(cx.tcx);
+ for nested in impl_src.nested_obligations() {
+ fcx.register_predicate_obligation(&infcx, nested);
+ }
+
+ // If we had any errors, then it's bad
+ !fcx.select_all_or_error(&infcx).is_empty()
+ })
+ }
+
+ fn in_adt_inherently<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ _: SubstsRef<'tcx>,
+ ) -> bool {
+ adt.has_non_const_dtor(cx.tcx)
+ }
+}
+
+/// A constant that cannot be used as part of a pattern in a `match` expression.
+pub struct CustomEq;
+
+impl Qualif for CustomEq {
+ const ANALYSIS_NAME: &'static str = "flow_custom_eq";
+
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+ qualifs.custom_eq
+ }
+
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ // If *any* component of a composite data type does not implement `Structural{Partial,}Eq`,
+ // we know that at least some values of that type are not structural-match. I say "some"
+ // because that component may be part of an enum variant (e.g.,
+ // `Option::<NonStructuralMatchTy>::Some`), in which case some values of this type may be
+ // structural-match (`Option::None`).
+ traits::search_for_structural_match_violation(cx.body.span, cx.tcx, ty).is_some()
+ }
+
+ fn in_adt_inherently<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> bool {
+ let ty = cx.tcx.mk_ty(ty::Adt(adt, substs));
+ !ty.is_structural_eq_shallow(cx.tcx)
+ }
+}
+
+// FIXME: Use `mir::visit::Visitor` for the `in_*` functions if/when it supports early return.
+
+/// Returns `true` if this `Rvalue` contains qualif `Q`.
+pub fn in_rvalue<'tcx, Q, F>(
+ cx: &ConstCx<'_, 'tcx>,
+ in_local: &mut F,
+ rvalue: &Rvalue<'tcx>,
+) -> bool
+where
+ Q: Qualif,
+ F: FnMut(Local) -> bool,
+{
+ match rvalue {
+ Rvalue::ThreadLocalRef(_) | Rvalue::NullaryOp(..) => {
+ Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx))
+ }
+
+ Rvalue::Discriminant(place) | Rvalue::Len(place) => {
+ in_place::<Q, _>(cx, in_local, place.as_ref())
+ }
+
+ Rvalue::CopyForDeref(place) => in_place::<Q, _>(cx, in_local, place.as_ref()),
+
+ Rvalue::Use(operand)
+ | Rvalue::Repeat(operand, _)
+ | Rvalue::UnaryOp(_, operand)
+ | Rvalue::Cast(_, operand, _)
+ | Rvalue::ShallowInitBox(operand, _) => in_operand::<Q, _>(cx, in_local, operand),
+
+ Rvalue::BinaryOp(_, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(_, box (lhs, rhs)) => {
+ in_operand::<Q, _>(cx, in_local, lhs) || in_operand::<Q, _>(cx, in_local, rhs)
+ }
+
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+ // Special-case reborrows to be more like a copy of the reference.
+ if let Some((place_base, ProjectionElem::Deref)) = place.as_ref().last_projection() {
+ let base_ty = place_base.ty(cx.body, cx.tcx).ty;
+ if let ty::Ref(..) = base_ty.kind() {
+ return in_place::<Q, _>(cx, in_local, place_base);
+ }
+ }
+
+ in_place::<Q, _>(cx, in_local, place.as_ref())
+ }
+
+ Rvalue::Aggregate(kind, operands) => {
+ // Return early if we know that the struct or enum being constructed is always
+ // qualified.
+ if let AggregateKind::Adt(adt_did, _, substs, ..) = **kind {
+ let def = cx.tcx.adt_def(adt_did);
+ if Q::in_adt_inherently(cx, def, substs) {
+ return true;
+ }
+ if def.is_union() && Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)) {
+ return true;
+ }
+ }
+
+ // Otherwise, proceed structurally...
+ operands.iter().any(|o| in_operand::<Q, _>(cx, in_local, o))
+ }
+ }
+}
+
+/// Returns `true` if this `Place` contains qualif `Q`.
+pub fn in_place<'tcx, Q, F>(cx: &ConstCx<'_, 'tcx>, in_local: &mut F, place: PlaceRef<'tcx>) -> bool
+where
+ Q: Qualif,
+ F: FnMut(Local) -> bool,
+{
+ // Peel projections off the place from the outside in, stopping early as soon
+ // as a projected type rules the qualif out.
+ let mut place = place;
+ while let Some((place_base, elem)) = place.last_projection() {
+ match elem {
+ // The index operand is itself a use of a local; if that local is
+ // qualified, conservatively treat the whole place as qualified.
+ ProjectionElem::Index(index) if in_local(index) => return true,
+
+ ProjectionElem::Deref
+ | ProjectionElem::Field(_, _)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. }
+ | ProjectionElem::Downcast(_, _)
+ | ProjectionElem::Index(_) => {}
+ }
+
+ let base_ty = place_base.ty(cx.body, cx.tcx);
+ let proj_ty = base_ty.projection_ty(cx.tcx, elem).ty;
+ if !Q::in_any_value_of_ty(cx, proj_ty) {
+ // No value of the projected type can carry the qualif, so neither can
+ // this place.
+ return false;
+ }
+
+ place = place_base;
+ }
+
+ // All projections consumed: the answer is whatever the dataflow state knows
+ // about the base local.
+ assert!(place.projection.is_empty());
+ in_local(place.local)
+}
+
+/// Returns `true` if this `Operand` contains qualif `Q`.
+pub fn in_operand<'tcx, Q, F>(
+ cx: &ConstCx<'_, 'tcx>,
+ in_local: &mut F,
+ operand: &Operand<'tcx>,
+) -> bool
+where
+ Q: Qualif,
+ F: FnMut(Local) -> bool,
+{
+ let constant = match operand {
+ Operand::Copy(place) | Operand::Move(place) => {
+ return in_place::<Q, _>(cx, in_local, place.as_ref());
+ }
+
+ Operand::Constant(c) => c,
+ };
+
+ // Check the qualifs of the value of `const` items.
+ if let Some(ct) = constant.literal.const_for_ty() {
+ if let ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs: _, promoted }) = ct.kind()
+ {
+ // Use qualifs of the type for the promoted. Promoteds in MIR body should be possible
+ // only for `NeedsNonConstDrop` with precise drop checking. This is the only const
+ // check performed after the promotion. Verify that with an assertion.
+ assert!(promoted.is_none() || Q::ALLOW_PROMOTED);
+ // Don't peek inside trait associated constants.
+ if promoted.is_none() && cx.tcx.trait_of_item(def.did).is_none() {
+ let qualifs = if let Some((did, param_did)) = def.as_const_arg() {
+ cx.tcx.at(constant.span).mir_const_qualif_const_arg((did, param_did))
+ } else {
+ cx.tcx.at(constant.span).mir_const_qualif(def.did)
+ };
+
+ if !Q::in_qualifs(&qualifs) {
+ return false;
+ }
+
+ // Just in case the type is more specific than
+ // the definition, e.g., impl associated const
+ // with type parameters, take it into account.
+ }
+ }
+ }
+ // Otherwise use the qualifs of the type.
+ Q::in_any_value_of_ty(cx, constant.literal.ty())
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
new file mode 100644
index 000000000..60c1e4950
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
@@ -0,0 +1,384 @@
+//! Propagate `Qualif`s between locals and query the results.
+//!
+//! This contains the dataflow analysis used to track `Qualif`s on complex control-flow graphs.
+
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{self, BasicBlock, Local, Location, Statement, StatementKind};
+use rustc_mir_dataflow::fmt::DebugWithContext;
+use rustc_mir_dataflow::JoinSemiLattice;
+use rustc_mir_dataflow::{Analysis, AnalysisDomain, CallReturnPlaces};
+use rustc_span::DUMMY_SP;
+
+use std::fmt;
+use std::marker::PhantomData;
+
+use super::{qualifs, ConstCx, Qualif};
+
+/// A `Visitor` that propagates qualifs between locals. This defines the transfer function of
+/// `FlowSensitiveAnalysis`.
+///
+/// To account for indirect assignments, data flow conservatively assumes that local becomes
+/// qualified immediately after it is borrowed or its address escapes. The borrow must allow for
+/// mutation, which includes shared borrows of places with interior mutability. The type of
+/// borrowed place must contain the qualif.
+struct TransferFunction<'a, 'mir, 'tcx, Q> {
+ ccx: &'a ConstCx<'mir, 'tcx>,
+ state: &'a mut State,
+ _qualif: PhantomData<Q>,
+}
+
+impl<'a, 'mir, 'tcx, Q> TransferFunction<'a, 'mir, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ fn new(ccx: &'a ConstCx<'mir, 'tcx>, state: &'a mut State) -> Self {
+ TransferFunction { ccx, state, _qualif: PhantomData }
+ }
+
+ fn initialize_state(&mut self) {
+ self.state.qualif.clear();
+ self.state.borrow.clear();
+
+ for arg in self.ccx.body.args_iter() {
+ let arg_ty = self.ccx.body.local_decls[arg].ty;
+ if Q::in_any_value_of_ty(self.ccx, arg_ty) {
+ self.state.qualif.insert(arg);
+ }
+ }
+ }
+
+ fn assign_qualif_direct(&mut self, place: &mir::Place<'tcx>, mut value: bool) {
+ debug_assert!(!place.is_indirect());
+
+ if !value {
+ for (base, _elem) in place.iter_projections() {
+ let base_ty = base.ty(self.ccx.body, self.ccx.tcx);
+ if base_ty.ty.is_union() && Q::in_any_value_of_ty(self.ccx, base_ty.ty) {
+ value = true;
+ break;
+ }
+ }
+ }
+
+ match (value, place.as_ref()) {
+ (true, mir::PlaceRef { local, .. }) => {
+ self.state.qualif.insert(local);
+ }
+
+ // For now, we do not clear the qualif if a local is overwritten in full by
+ // an unqualified rvalue (e.g. `y = 5`). This is to be consistent
+ // with aggregates where we overwrite all fields with assignments, which would not
+ // get this feature.
+ (false, mir::PlaceRef { local: _, projection: &[] }) => {
+ // self.state.qualif.remove(*local);
+ }
+
+ _ => {}
+ }
+ }
+
+ fn apply_call_return_effect(
+ &mut self,
+ _block: BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ return_places.for_each(|place| {
+ // We cannot reason about another function's internals, so use conservative type-based
+ // qualification for the result of a function call.
+ let return_ty = place.ty(self.ccx.body, self.ccx.tcx).ty;
+ let qualif = Q::in_any_value_of_ty(self.ccx, return_ty);
+
+ if !place.is_indirect() {
+ self.assign_qualif_direct(&place, qualif);
+ }
+ });
+ }
+
+ fn address_of_allows_mutation(&self, _mt: mir::Mutability, _place: mir::Place<'tcx>) -> bool {
+ // Exact set of permissions granted by AddressOf is undecided. Conservatively assume that
+ // it might allow mutation until resolution of #56604.
+ true
+ }
+
+ fn ref_allows_mutation(&self, kind: mir::BorrowKind, place: mir::Place<'tcx>) -> bool {
+ match kind {
+ mir::BorrowKind::Mut { .. } => true,
+ mir::BorrowKind::Shared | mir::BorrowKind::Shallow | mir::BorrowKind::Unique => {
+ self.shared_borrow_allows_mutation(place)
+ }
+ }
+ }
+
+ /// `&` only allow mutation if the borrowed place is `!Freeze`.
+ ///
+ /// This assumes that it is UB to take the address of a struct field whose type is
+ /// `Freeze`, then use pointer arithmetic to derive a pointer to a *different* field of
+ /// that same struct whose type is `!Freeze`. If we decide that this is not UB, we will
+ /// have to check the type of the borrowed **local** instead of the borrowed **place**
+ /// below. See [rust-lang/unsafe-code-guidelines#134].
+ ///
+ /// [rust-lang/unsafe-code-guidelines#134]: https://github.com/rust-lang/unsafe-code-guidelines/issues/134
+ fn shared_borrow_allows_mutation(&self, place: mir::Place<'tcx>) -> bool {
+ !place
+ .ty(self.ccx.body, self.ccx.tcx)
+ .ty
+ .is_freeze(self.ccx.tcx.at(DUMMY_SP), self.ccx.param_env)
+ }
+}
+
+impl<'tcx, Q> Visitor<'tcx> for TransferFunction<'_, '_, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) {
+ self.super_operand(operand, location);
+
+ if !Q::IS_CLEARED_ON_MOVE {
+ return;
+ }
+
+ // If a local with no projections is moved from (e.g. `x` in `y = x`), record that
+ // it no longer needs to be dropped.
+ if let mir::Operand::Move(place) = operand {
+ if let Some(local) = place.as_local() {
+ // For backward compatibility with the MaybeMutBorrowedLocals used in an earlier
+ // implementation we retain qualif if a local had been borrowed before. This might
+ // not be strictly necessary since the local is no longer initialized.
+ if !self.state.borrow.contains(local) {
+ self.state.qualif.remove(local);
+ }
+ }
+ }
+ }
+
+ fn visit_assign(
+ &mut self,
+ place: &mir::Place<'tcx>,
+ rvalue: &mir::Rvalue<'tcx>,
+ location: Location,
+ ) {
+ let qualif =
+ qualifs::in_rvalue::<Q, _>(self.ccx, &mut |l| self.state.qualif.contains(l), rvalue);
+ if !place.is_indirect() {
+ self.assign_qualif_direct(place, qualif);
+ }
+
+ // We need to assign qualifs to the left-hand side before visiting `rvalue` since
+ // qualifs can be cleared on move.
+ self.super_assign(place, rvalue, location);
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
+ self.super_rvalue(rvalue, location);
+
+ match rvalue {
+ mir::Rvalue::AddressOf(mt, borrowed_place) => {
+ if !borrowed_place.is_indirect()
+ && self.address_of_allows_mutation(*mt, *borrowed_place)
+ {
+ let place_ty = borrowed_place.ty(self.ccx.body, self.ccx.tcx).ty;
+ if Q::in_any_value_of_ty(self.ccx, place_ty) {
+ self.state.qualif.insert(borrowed_place.local);
+ self.state.borrow.insert(borrowed_place.local);
+ }
+ }
+ }
+
+ mir::Rvalue::Ref(_, kind, borrowed_place) => {
+ if !borrowed_place.is_indirect() && self.ref_allows_mutation(*kind, *borrowed_place)
+ {
+ let place_ty = borrowed_place.ty(self.ccx.body, self.ccx.tcx).ty;
+ if Q::in_any_value_of_ty(self.ccx, place_ty) {
+ self.state.qualif.insert(borrowed_place.local);
+ self.state.borrow.insert(borrowed_place.local);
+ }
+ }
+ }
+
+ mir::Rvalue::Cast(..)
+ | mir::Rvalue::ShallowInitBox(..)
+ | mir::Rvalue::Use(..)
+ | mir::Rvalue::CopyForDeref(..)
+ | mir::Rvalue::ThreadLocalRef(..)
+ | mir::Rvalue::Repeat(..)
+ | mir::Rvalue::Len(..)
+ | mir::Rvalue::BinaryOp(..)
+ | mir::Rvalue::CheckedBinaryOp(..)
+ | mir::Rvalue::NullaryOp(..)
+ | mir::Rvalue::UnaryOp(..)
+ | mir::Rvalue::Discriminant(..)
+ | mir::Rvalue::Aggregate(..) => {}
+ }
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ match statement.kind {
+ StatementKind::StorageDead(local) => {
+ self.state.qualif.remove(local);
+ self.state.borrow.remove(local);
+ }
+ _ => self.super_statement(statement, location),
+ }
+ }
+
+ fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+ // The effect of assignment to the return place in `TerminatorKind::Call` is not applied
+ // here; that occurs in `apply_call_return_effect`.
+
+ if let mir::TerminatorKind::DropAndReplace { value, place, .. } = &terminator.kind {
+ let qualif = qualifs::in_operand::<Q, _>(
+ self.ccx,
+ &mut |l| self.state.qualif.contains(l),
+ value,
+ );
+
+ if !place.is_indirect() {
+ self.assign_qualif_direct(place, qualif);
+ }
+ }
+
+ // We ignore borrow on drop because custom drop impls are not allowed in consts.
+ // FIXME: Reconsider if accounting for borrows in drops is necessary for const drop.
+
+ // We need to assign qualifs to the dropped location before visiting the operand that
+ // replaces it since qualifs can be cleared on move.
+ self.super_terminator(terminator, location);
+ }
+}
+
+/// The dataflow analysis used to propagate qualifs on arbitrary CFGs.
+pub(super) struct FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q> {
+ ccx: &'a ConstCx<'mir, 'tcx>,
+ _qualif: PhantomData<Q>,
+}
+
+impl<'a, 'mir, 'tcx, Q> FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ pub(super) fn new(_: Q, ccx: &'a ConstCx<'mir, 'tcx>) -> Self {
+ FlowSensitiveAnalysis { ccx, _qualif: PhantomData }
+ }
+
+ fn transfer_function(&self, state: &'a mut State) -> TransferFunction<'a, 'mir, 'tcx, Q> {
+ TransferFunction::<Q>::new(self.ccx, state)
+ }
+}
+
+/// Dataflow domain of `FlowSensitiveAnalysis`: one bit per local for the qualif
+/// itself and one for whether the local's address has escaped.
+#[derive(Debug, PartialEq, Eq)]
+pub(super) struct State {
+ /// Describes whether a local contains qualif.
+ pub qualif: BitSet<Local>,
+ /// Describes whether a local's address escaped and it might become qualified as a result
+ /// of an indirect mutation.
+ pub borrow: BitSet<Local>,
+}
+
+impl Clone for State {
+ fn clone(&self) -> Self {
+ State { qualif: self.qualif.clone(), borrow: self.borrow.clone() }
+ }
+
+ // Data flow engine when possible uses `clone_from` for domain values.
+ // Providing an implementation will avoid some intermediate memory allocations.
+ fn clone_from(&mut self, other: &Self) {
+ self.qualif.clone_from(&other.qualif);
+ self.borrow.clone_from(&other.borrow);
+ }
+}
+
+impl State {
+ /// Returns `true` if `local` currently carries the tracked qualif.
+ #[inline]
+ pub(super) fn contains(&self, local: Local) -> bool {
+ self.qualif.contains(local)
+ }
+}
+
+// Pretty-printing support for dataflow dumps (`-Zdump-mir` graphviz/text output).
+impl<C> DebugWithContext<C> for State {
+ fn fmt_with(&self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("qualif: ")?;
+ self.qualif.fmt_with(ctxt, f)?;
+ f.write_str(" borrow: ")?;
+ self.borrow.fmt_with(ctxt, f)?;
+ Ok(())
+ }
+
+ fn fmt_diff_with(&self, old: &Self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self == old {
+ return Ok(());
+ }
+
+ if self.qualif != old.qualif {
+ f.write_str("qualif: ")?;
+ self.qualif.fmt_diff_with(&old.qualif, ctxt, f)?;
+ f.write_str("\n")?;
+ }
+
+ if self.borrow != old.borrow {
+ f.write_str("borrow: ")?;
+ // Fixed copy-paste bug: diff the `borrow` set against `old.borrow`;
+ // the previous code diffed `self.qualif` here, printing nonsense.
+ self.borrow.fmt_diff_with(&old.borrow, ctxt, f)?;
+ f.write_str("\n")?;
+ }
+
+ Ok(())
+ }
+}
+
+impl JoinSemiLattice for State {
+ /// Joins `other` into `self`, returning whether `self` changed.
+ fn join(&mut self, other: &Self) -> bool {
+ // Both component joins must always execute. Writing
+ // `self.qualif.join(..) || self.borrow.join(..)` short-circuits: whenever
+ // the `qualif` join reports a change, the `borrow` union is skipped
+ // entirely, silently dropping escaped-borrow facts from this predecessor
+ // and leaving the dataflow state under-approximated.
+ let qualif_changed = self.qualif.join(&other.qualif);
+ let borrow_changed = self.borrow.join(&other.borrow);
+ qualif_changed || borrow_changed
+ }
+}
+
+impl<'tcx, Q> AnalysisDomain<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ type Domain = State;
+
+ const NAME: &'static str = Q::ANALYSIS_NAME;
+
+ fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ State {
+ qualif: BitSet::new_empty(body.local_decls.len()),
+ borrow: BitSet::new_empty(body.local_decls.len()),
+ }
+ }
+
+ fn initialize_start_block(&self, _body: &mir::Body<'tcx>, state: &mut Self::Domain) {
+ self.transfer_function(state).initialize_state();
+ }
+}
+
+impl<'tcx, Q> Analysis<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ fn apply_statement_effect(
+ &self,
+ state: &mut Self::Domain,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ self.transfer_function(state).visit_statement(statement, location);
+ }
+
+ fn apply_terminator_effect(
+ &self,
+ state: &mut Self::Domain,
+ terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ self.transfer_function(state).visit_terminator(terminator, location);
+ }
+
+ fn apply_call_return_effect(
+ &self,
+ state: &mut Self::Domain,
+ block: BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ self.transfer_function(state).apply_call_return_effect(block, return_places)
+ }
+}
diff --git a/compiler/rustc_const_eval/src/transform/mod.rs b/compiler/rustc_const_eval/src/transform/mod.rs
new file mode 100644
index 000000000..a2928bdf5
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/mod.rs
@@ -0,0 +1,3 @@
+pub mod check_consts;
+pub mod promote_consts;
+pub mod validate;
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
new file mode 100644
index 000000000..ed4d8c95d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -0,0 +1,1066 @@
+//! A pass that promotes borrows of constant rvalues.
+//!
+//! The rvalues considered constant are trees of temps,
+//! each with exactly one initialization, and holding
+//! a constant value with no interior mutability.
+//! They are placed into a new MIR constant body in
+//! `promoted` and the borrow rvalue is replaced with
+//! a `Literal::Promoted` using the index into `promoted`
+//! of that constant MIR.
+//!
+//! This pass assumes that every use is dominated by an
+//! initialization and can otherwise silence errors, if
+//! move analysis runs after promotion on broken MIR.
+
+use rustc_hir as hir;
+use rustc_middle::mir::traversal::ReversePostorderIter;
+use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::{self, List, TyCtxt, TypeVisitable};
+use rustc_span::Span;
+
+use rustc_index::vec::{Idx, IndexVec};
+
+use std::cell::Cell;
+use std::{cmp, iter, mem};
+
+use crate::transform::check_consts::{qualifs, ConstCx};
+
+/// A `MirPass` for promotion.
+///
+/// Promotion is the extraction of promotable temps into separate MIR bodies so they can have
+/// `'static` lifetime.
+///
+/// After this pass is run, `promoted_fragments` will hold the MIR body corresponding to each
+/// newly created `Constant`.
+#[derive(Default)]
+pub struct PromoteTemps<'tcx> {
+    // Filled in by `run_pass`; a `Cell` so the pass can write through `&self`.
+    pub promoted_fragments: Cell<IndexVec<Promoted, Body<'tcx>>>,
+}
+
+impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> {
+    /// Running this pass advances the MIR to the `ConstsPromoted` phase.
+    fn phase_change(&self) -> Option<MirPhase> {
+        Some(MirPhase::ConstsPromoted)
+    }
+
+    /// Collects candidate temporaries, validates them, and extracts the
+    /// promotable ones into fresh promoted bodies (stored in
+    /// `promoted_fragments`).
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        // There's not really any point in promoting errorful MIR.
+        //
+        // This does not include MIR that failed const-checking, which we still try to promote.
+        if body.return_ty().references_error() {
+            tcx.sess.delay_span_bug(body.span, "PromoteTemps: MIR had errors");
+            return;
+        }
+
+        // Already-promoted bodies are never promoted again.
+        if body.source.promoted.is_some() {
+            return;
+        }
+
+        let mut rpo = traversal::reverse_postorder(body);
+        let ccx = ConstCx::new(tcx, body);
+        let (mut temps, all_candidates) = collect_temps_and_candidates(&ccx, &mut rpo);
+
+        let promotable_candidates = validate_candidates(&ccx, &mut temps, &all_candidates);
+
+        let promoted = promote_candidates(body, tcx, temps, promotable_candidates);
+        self.promoted_fragments.set(promoted);
+    }
+}
+
+/// State of a temporary during collection and promotion.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum TempState {
+    /// No references to this temp.
+    Undefined,
+    /// One direct assignment and any number of direct uses.
+    /// A borrow of this temp is promotable if the assigned
+    /// value is qualified as constant.
+    /// `valid` caches the result of `Validator::validate_local`;
+    /// it starts out as `Err(())` ("not yet validated").
+    Defined { location: Location, uses: usize, valid: Result<(), ()> },
+    /// Any other combination of assignments/uses.
+    Unpromotable,
+    /// This temp was part of an rvalue which got extracted
+    /// during promotion and needs cleanup.
+    PromotedOut,
+}
+
+impl TempState {
+    /// Returns `true` for any temp with a single initialization (`Defined`),
+    /// regardless of whether validation has run on it yet.
+    pub fn is_promotable(&self) -> bool {
+        debug!("is_promotable: self={:?}", self);
+        matches!(self, TempState::Defined { .. })
+    }
+}
+
+/// A "root candidate" for promotion, which will become the
+/// returned value in a promoted MIR, unless it's a subset
+/// of a larger candidate.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct Candidate {
+    // Location of the `Rvalue::Ref` statement whose borrow may be promoted.
+    location: Location,
+}
+
+/// MIR visitor that classifies every local (`temps`) and records each
+/// `Rvalue::Ref` as a promotion candidate (`candidates`).
+struct Collector<'a, 'tcx> {
+    ccx: &'a ConstCx<'a, 'tcx>,
+    temps: IndexVec<Local, TempState>,
+    candidates: Vec<Candidate>,
+}
+
+impl<'tcx> Visitor<'tcx> for Collector<'_, 'tcx> {
+    /// Classifies `index` based on how it is used at `location`:
+    /// `Undefined` -> `Defined` on its first store/call-destination use,
+    /// `Defined` with a bumped use-count on allowed reads/borrows, and
+    /// `Unpromotable` on anything else (second write, mutation, ...).
+    fn visit_local(&mut self, index: Local, context: PlaceContext, location: Location) {
+        debug!("visit_local: index={:?} context={:?} location={:?}", index, context, location);
+        // We're only interested in temporaries and the return place
+        match self.ccx.body.local_kind(index) {
+            LocalKind::Temp | LocalKind::ReturnPointer => {}
+            LocalKind::Arg | LocalKind::Var => return,
+        }
+
+        // Ignore drops, if the temp gets promoted,
+        // then it's constant and thus drop is noop.
+        // Non-uses are also irrelevant.
+        if context.is_drop() || !context.is_use() {
+            debug!(
+                "visit_local: context.is_drop={:?} context.is_use={:?}",
+                context.is_drop(),
+                context.is_use(),
+            );
+            return;
+        }
+
+        let temp = &mut self.temps[index];
+        debug!("visit_local: temp={:?}", temp);
+        if *temp == TempState::Undefined {
+            match context {
+                PlaceContext::MutatingUse(MutatingUseContext::Store)
+                | PlaceContext::MutatingUse(MutatingUseContext::Call) => {
+                    // First (and only allowed) initialization; not validated yet.
+                    *temp = TempState::Defined { location, uses: 0, valid: Err(()) };
+                    return;
+                }
+                _ => { /* mark as unpromotable below */ }
+            }
+        } else if let TempState::Defined { ref mut uses, .. } = *temp {
+            // We always allow borrows, even mutable ones, as we need
+            // to promote mutable borrows of some ZSTs e.g., `&mut []`.
+            let allowed_use = match context {
+                PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+                | PlaceContext::NonMutatingUse(_) => true,
+                PlaceContext::MutatingUse(_) | PlaceContext::NonUse(_) => false,
+            };
+            debug!("visit_local: allowed_use={:?}", allowed_use);
+            if allowed_use {
+                *uses += 1;
+                return;
+            }
+            /* mark as unpromotable below */
+        }
+        *temp = TempState::Unpromotable;
+    }
+
+    /// Every `Rvalue::Ref` is recorded as a promotion candidate; validation
+    /// later decides whether it is actually promotable.
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        self.super_rvalue(rvalue, location);
+
+        match *rvalue {
+            Rvalue::Ref(..) => {
+                self.candidates.push(Candidate { location });
+            }
+            _ => {}
+        }
+    }
+}
+
+/// Walks the body in reverse postorder, classifying every local and
+/// gathering all `Rvalue::Ref` promotion candidates.
+pub fn collect_temps_and_candidates<'tcx>(
+    ccx: &ConstCx<'_, 'tcx>,
+    rpo: &mut ReversePostorderIter<'_, 'tcx>,
+) -> (IndexVec<Local, TempState>, Vec<Candidate>) {
+    // Every local starts out `Undefined`; the visitor refines this as it walks.
+    let temps = IndexVec::from_elem(TempState::Undefined, &ccx.body.local_decls);
+    let mut visitor = Collector { ccx, temps, candidates: Vec::new() };
+    for (block, data) in rpo {
+        visitor.visit_basic_block_data(block, data);
+    }
+    let Collector { temps, candidates, .. } = visitor;
+    (temps, candidates)
+}
+
+/// Checks whether locals that appear in a promotion context (`Candidate`) are actually promotable.
+///
+/// This wraps an `Item`, and has access to all fields of that `Item` via `Deref` coercion.
+struct Validator<'a, 'tcx> {
+    ccx: &'a ConstCx<'a, 'tcx>,
+    // Mutable so validation results can be cached in `TempState::Defined::valid`.
+    temps: &'a mut IndexVec<Local, TempState>,
+}
+
+impl<'a, 'tcx> std::ops::Deref for Validator<'a, 'tcx> {
+    type Target = ConstCx<'a, 'tcx>;
+
+    /// Forwards to the wrapped `ConstCx`, so `self.body`, `self.tcx`, etc.
+    /// resolve directly on the validator.
+    fn deref(&self) -> &Self::Target {
+        // `self.ccx` is already an `&ConstCx`; no extra `&` needed.
+        self.ccx
+    }
+}
+
+/// Zero-sized error type: "this candidate/operand/place is not promotable".
+struct Unpromotable;
+
+impl<'tcx> Validator<'_, 'tcx> {
+    /// Checks a single `Rvalue::Ref` candidate. The statement at the
+    /// candidate's location must be `_ = &(...)`; anything else is a bug.
+    fn validate_candidate(&mut self, candidate: Candidate) -> Result<(), Unpromotable> {
+        let loc = candidate.location;
+        let statement = &self.body[loc.block].statements[loc.statement_index];
+        match &statement.kind {
+            StatementKind::Assign(box (_, Rvalue::Ref(_, kind, place))) => {
+                // We can only promote interior borrows of promotable temps (non-temps
+                // don't get promoted anyway).
+                self.validate_local(place.local)?;
+
+                // The reference operation itself must be promotable.
+                // (Needs to come after `validate_local` to avoid ICEs.)
+                self.validate_ref(*kind, place)?;
+
+                // We do not check all the projections (they do not get promoted anyway),
+                // but we do stay away from promoting anything involving a dereference.
+                if place.projection.contains(&ProjectionElem::Deref) {
+                    return Err(Unpromotable);
+                }
+
+                // We cannot promote things that need dropping, since the promoted value
+                // would not get dropped.
+                if self.qualif_local::<qualifs::NeedsDrop>(place.local) {
+                    return Err(Unpromotable);
+                }
+
+                Ok(())
+            }
+            _ => bug!(),
+        }
+    }
+
+    // FIXME(eddyb) maybe cache this?
+    /// Computes qualif `Q` (e.g. `NeedsDrop`, `HasMutInterior`) for `local`,
+    /// by inspecting its single defining statement or call terminator.
+    /// Must only be called on `Defined` temps; anything else is a bug.
+    fn qualif_local<Q: qualifs::Qualif>(&mut self, local: Local) -> bool {
+        if let TempState::Defined { location: loc, .. } = self.temps[local] {
+            let num_stmts = self.body[loc.block].statements.len();
+
+            if loc.statement_index < num_stmts {
+                // Defined by a statement: it must be an assignment; recurse
+                // into the RHS, resolving nested locals via this same method.
+                let statement = &self.body[loc.block].statements[loc.statement_index];
+                match &statement.kind {
+                    StatementKind::Assign(box (_, rhs)) => qualifs::in_rvalue::<Q, _>(
+                        &self.ccx,
+                        &mut |l| self.qualif_local::<Q>(l),
+                        rhs,
+                    ),
+                    _ => {
+                        span_bug!(
+                            statement.source_info.span,
+                            "{:?} is not an assignment",
+                            statement
+                        );
+                    }
+                }
+            } else {
+                // Defined by the block's terminator: only calls qualify;
+                // conservatively use the qualif of the call's return type.
+                let terminator = self.body[loc.block].terminator();
+                match &terminator.kind {
+                    TerminatorKind::Call { .. } => {
+                        let return_ty = self.body.local_decls[local].ty;
+                        Q::in_any_value_of_ty(&self.ccx, return_ty)
+                    }
+                    kind => {
+                        span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+                    }
+                }
+            }
+        } else {
+            let span = self.body.local_decls[local].source_info.span;
+            span_bug!(span, "{:?} not promotable, qualif_local shouldn't have been called", local);
+        }
+    }
+
+    /// Checks whether `local` (which must be a `Defined` temp) has a
+    /// promotable initializer, caching the verdict in `self.temps` so each
+    /// temp is validated at most once. Non-`Defined` locals are rejected.
+    fn validate_local(&mut self, local: Local) -> Result<(), Unpromotable> {
+        if let TempState::Defined { location: loc, uses, valid } = self.temps[local] {
+            // `valid` is `Ok(())` once a previous call already validated this temp.
+            valid.or_else(|_| {
+                let ok = {
+                    let block = &self.body[loc.block];
+                    let num_stmts = block.statements.len();
+
+                    if loc.statement_index < num_stmts {
+                        // Initialized by a statement: validate the assigned rvalue.
+                        let statement = &block.statements[loc.statement_index];
+                        match &statement.kind {
+                            StatementKind::Assign(box (_, rhs)) => self.validate_rvalue(rhs),
+                            _ => {
+                                span_bug!(
+                                    statement.source_info.span,
+                                    "{:?} is not an assignment",
+                                    statement
+                                );
+                            }
+                        }
+                    } else {
+                        // Initialized by a terminator: only calls can be promotable.
+                        let terminator = block.terminator();
+                        match &terminator.kind {
+                            TerminatorKind::Call { func, args, .. } => {
+                                self.validate_call(func, args)
+                            }
+                            TerminatorKind::Yield { .. } => Err(Unpromotable),
+                            kind => {
+                                span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+                            }
+                        }
+                    }
+                };
+                // Cache the result: success keeps `Defined` with `valid: Ok`,
+                // failure demotes the temp to `Unpromotable`.
+                self.temps[local] = match ok {
+                    Ok(()) => TempState::Defined { location: loc, uses, valid: Ok(()) },
+                    Err(_) => TempState::Unpromotable,
+                };
+                ok
+            })
+        } else {
+            Err(Unpromotable)
+        }
+    }
+
+    /// Checks whether a place is promotable, validating its outermost
+    /// projection and then recursing into the base place. A bare local
+    /// delegates to `validate_local`.
+    fn validate_place(&mut self, place: PlaceRef<'tcx>) -> Result<(), Unpromotable> {
+        match place.last_projection() {
+            None => self.validate_local(place.local),
+            Some((place_base, elem)) => {
+                // Validate topmost projection, then recurse.
+                match elem {
+                    ProjectionElem::Deref => {
+                        let mut promotable = false;
+                        // We need to make sure this is a `Deref` of a local with no further projections.
+                        // Discussion can be found at
+                        // https://github.com/rust-lang/rust/pull/74945#discussion_r463063247
+                        if let Some(local) = place_base.as_local() {
+                            // This is a special treatment for cases like *&STATIC where STATIC is a
+                            // global static variable.
+                            // This pattern is generated only when global static variables are directly
+                            // accessed and is qualified for promotion safely.
+                            if let TempState::Defined { location, .. } = self.temps[local] {
+                                let def_stmt = self.body[location.block]
+                                    .statements
+                                    .get(location.statement_index);
+                                if let Some(Statement {
+                                    kind:
+                                        StatementKind::Assign(box (
+                                            _,
+                                            Rvalue::Use(Operand::Constant(c)),
+                                        )),
+                                    ..
+                                }) = def_stmt
+                                {
+                                    if let Some(did) = c.check_static_ptr(self.tcx) {
+                                        // Evaluating a promoted may not read statics except if it got
+                                        // promoted from a static (this is a CTFE check). So we
+                                        // can only promote static accesses inside statics.
+                                        if let Some(hir::ConstContext::Static(..)) = self.const_kind
+                                        {
+                                            if !self.tcx.is_thread_local_static(did) {
+                                                promotable = true;
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                        if !promotable {
+                            return Err(Unpromotable);
+                        }
+                    }
+                    ProjectionElem::Downcast(..) => {
+                        return Err(Unpromotable);
+                    }
+
+                    ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. } => {}
+
+                    ProjectionElem::Index(local) => {
+                        let mut promotable = false;
+                        // Only accept if we can predict the index and are indexing an array.
+                        let val =
+                            // Try to evaluate the index local's single constant initializer.
+                            if let TempState::Defined { location: loc, .. } = self.temps[local] {
+                                let block = &self.body[loc.block];
+                                if loc.statement_index < block.statements.len() {
+                                    let statement = &block.statements[loc.statement_index];
+                                    match &statement.kind {
+                                        StatementKind::Assign(box (
+                                            _,
+                                            Rvalue::Use(Operand::Constant(c)),
+                                        )) => c.literal.try_eval_usize(self.tcx, self.param_env),
+                                        _ => None,
+                                    }
+                                } else {
+                                    None
+                                }
+                            } else {
+                                None
+                            };
+                        if let Some(idx) = val {
+                            // Determine the type of the thing we are indexing.
+                            let ty = place_base.ty(self.body, self.tcx).ty;
+                            match ty.kind() {
+                                ty::Array(_, len) => {
+                                    // It's an array; determine its length.
+                                    if let Some(len) = len.try_eval_usize(self.tcx, self.param_env)
+                                    {
+                                        // If the index is in-bounds, go ahead.
+                                        if idx < len {
+                                            promotable = true;
+                                        }
+                                    }
+                                }
+                                _ => {}
+                            }
+                        }
+                        if !promotable {
+                            return Err(Unpromotable);
+                        }
+
+                        // The index local itself must also be promotable.
+                        self.validate_local(local)?;
+                    }
+
+                    ProjectionElem::Field(..) => {
+                        let base_ty = place_base.ty(self.body, self.tcx).ty;
+                        if base_ty.is_union() {
+                            // No promotion of union field accesses.
+                            return Err(Unpromotable);
+                        }
+                    }
+                }
+
+                self.validate_place(place_base)
+            }
+        }
+    }
+
+    /// Checks whether an operand is promotable: copies/moves validate the
+    /// underlying place; constants are fine except for restricted references
+    /// to statics.
+    fn validate_operand(&mut self, operand: &Operand<'tcx>) -> Result<(), Unpromotable> {
+        match operand {
+            Operand::Copy(place) | Operand::Move(place) => self.validate_place(place.as_ref()),
+
+            // The qualifs for a constant (e.g. `HasMutInterior`) are checked in
+            // `validate_rvalue` upon access.
+            Operand::Constant(c) => {
+                if let Some(def_id) = c.check_static_ptr(self.tcx) {
+                    // Only allow statics (not consts) to refer to other statics.
+                    // FIXME(eddyb) does this matter at all for promotion?
+                    // FIXME(RalfJung) it makes little sense to not promote this in `fn`/`const fn`,
+                    // and in `const` this cannot occur anyway. The only concern is that we might
+                    // promote even `let x = &STATIC` which would be useless, but this applies to
+                    // promotion inside statics as well.
+                    let is_static = matches!(self.const_kind, Some(hir::ConstContext::Static(_)));
+                    if !is_static {
+                        return Err(Unpromotable);
+                    }
+
+                    let is_thread_local = self.tcx.is_thread_local_static(def_id);
+                    if is_thread_local {
+                        return Err(Unpromotable);
+                    }
+                }
+
+                Ok(())
+            }
+        }
+    }
+
+    /// Checks whether the borrow itself is promotable: shared borrows must not
+    /// expose interior mutability, and mutable borrows are only allowed for
+    /// zero-length arrays (`&mut []`).
+    fn validate_ref(&mut self, kind: BorrowKind, place: &Place<'tcx>) -> Result<(), Unpromotable> {
+        match kind {
+            // Reject these borrow types just to be safe.
+            // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a usecase.
+            BorrowKind::Shallow | BorrowKind::Unique => return Err(Unpromotable),
+
+            BorrowKind::Shared => {
+                let has_mut_interior = self.qualif_local::<qualifs::HasMutInterior>(place.local);
+                if has_mut_interior {
+                    return Err(Unpromotable);
+                }
+            }
+
+            BorrowKind::Mut { .. } => {
+                let ty = place.ty(self.body, self.tcx).ty;
+
+                // In theory, any zero-sized value could be borrowed
+                // mutably without consequences. However, only &mut []
+                // is allowed right now.
+                if let ty::Array(_, len) = ty.kind() {
+                    match len.try_eval_usize(self.tcx, self.param_env) {
+                        Some(0) => {}
+                        _ => return Err(Unpromotable),
+                    }
+                } else {
+                    return Err(Unpromotable);
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Checks whether an rvalue is promotable: all contained operands/places
+    /// must be promotable, and the operation itself must be infallible at
+    /// CTFE time (e.g. no division by an unknown/zero value, no ptr-to-int).
+    fn validate_rvalue(&mut self, rvalue: &Rvalue<'tcx>) -> Result<(), Unpromotable> {
+        match rvalue {
+            Rvalue::Use(operand) | Rvalue::Repeat(operand, _) => {
+                self.validate_operand(operand)?;
+            }
+            Rvalue::CopyForDeref(place) => {
+                // Treated exactly like a copy of the place.
+                let op = &Operand::Copy(*place);
+                self.validate_operand(op)?
+            }
+
+            Rvalue::Discriminant(place) | Rvalue::Len(place) => {
+                self.validate_place(place.as_ref())?
+            }
+
+            Rvalue::ThreadLocalRef(_) => return Err(Unpromotable),
+
+            // ptr-to-int casts are not possible in consts and thus not promotable
+            Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => return Err(Unpromotable),
+
+            // all other casts including int-to-ptr casts are fine, they just use the integer value
+            // at pointer type.
+            Rvalue::Cast(_, operand, _) => {
+                self.validate_operand(operand)?;
+            }
+
+            Rvalue::NullaryOp(op, _) => match op {
+                NullOp::SizeOf => {}
+                NullOp::AlignOf => {}
+            },
+
+            Rvalue::ShallowInitBox(_, _) => return Err(Unpromotable),
+
+            Rvalue::UnaryOp(op, operand) => {
+                match op {
+                    // These operations can never fail.
+                    UnOp::Neg | UnOp::Not => {}
+                }
+
+                self.validate_operand(operand)?;
+            }
+
+            Rvalue::BinaryOp(op, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(op, box (lhs, rhs)) => {
+                let op = *op;
+                let lhs_ty = lhs.ty(self.body, self.tcx);
+
+                if let ty::RawPtr(_) | ty::FnPtr(..) = lhs_ty.kind() {
+                    // Raw and fn pointer operations are not allowed inside consts and thus not promotable.
+                    assert!(matches!(
+                        op,
+                        BinOp::Eq
+                            | BinOp::Ne
+                            | BinOp::Le
+                            | BinOp::Lt
+                            | BinOp::Ge
+                            | BinOp::Gt
+                            | BinOp::Offset
+                    ));
+                    return Err(Unpromotable);
+                }
+
+                match op {
+                    BinOp::Div | BinOp::Rem => {
+                        if lhs_ty.is_integral() {
+                            // Integer division: the RHS must be a non-zero const.
+                            let const_val = match rhs {
+                                Operand::Constant(c) => {
+                                    c.literal.try_eval_bits(self.tcx, self.param_env, lhs_ty)
+                                }
+                                _ => None,
+                            };
+                            match const_val {
+                                Some(x) if x != 0 => {}        // okay
+                                _ => return Err(Unpromotable), // value not known or 0 -- not okay
+                            }
+                        }
+                    }
+                    // The remaining operations can never fail.
+                    BinOp::Eq
+                    | BinOp::Ne
+                    | BinOp::Le
+                    | BinOp::Lt
+                    | BinOp::Ge
+                    | BinOp::Gt
+                    | BinOp::Offset
+                    | BinOp::Add
+                    | BinOp::Sub
+                    | BinOp::Mul
+                    | BinOp::BitXor
+                    | BinOp::BitAnd
+                    | BinOp::BitOr
+                    | BinOp::Shl
+                    | BinOp::Shr => {}
+                }
+
+                self.validate_operand(lhs)?;
+                self.validate_operand(rhs)?;
+            }
+
+            Rvalue::AddressOf(_, place) => {
+                // We accept `&raw *`, i.e., raw reborrows -- creating a raw pointer is
+                // no problem, only using it is.
+                if let Some((place_base, ProjectionElem::Deref)) = place.as_ref().last_projection()
+                {
+                    let base_ty = place_base.ty(self.body, self.tcx).ty;
+                    if let ty::Ref(..) = base_ty.kind() {
+                        return self.validate_place(place_base);
+                    }
+                }
+                return Err(Unpromotable);
+            }
+
+            Rvalue::Ref(_, kind, place) => {
+                // Special-case reborrows to be more like a copy of the reference.
+                let mut place_simplified = place.as_ref();
+                if let Some((place_base, ProjectionElem::Deref)) =
+                    place_simplified.last_projection()
+                {
+                    let base_ty = place_base.ty(self.body, self.tcx).ty;
+                    if let ty::Ref(..) = base_ty.kind() {
+                        place_simplified = place_base;
+                    }
+                }
+
+                self.validate_place(place_simplified)?;
+
+                // Check that the reference is fine (using the original place!).
+                // (Needs to come after `validate_place` to avoid ICEs.)
+                self.validate_ref(*kind, place)?;
+            }
+
+            Rvalue::Aggregate(_, operands) => {
+                for o in operands {
+                    self.validate_operand(o)?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Checks whether a function call is promotable: the callee must be a
+    /// `const fn`, and outside of `const`/`static` items it must additionally
+    /// be marked `#[rustc_promotable]`. All arguments must be promotable too.
+    fn validate_call(
+        &mut self,
+        callee: &Operand<'tcx>,
+        args: &[Operand<'tcx>],
+    ) -> Result<(), Unpromotable> {
+        let fn_ty = callee.ty(self.body, self.tcx);
+
+        // Inside const/static items, we promote all (eligible) function calls.
+        // Everywhere else, we require `#[rustc_promotable]` on the callee.
+        let promote_all_const_fn = matches!(
+            self.const_kind,
+            Some(hir::ConstContext::Static(_) | hir::ConstContext::Const)
+        );
+        if !promote_all_const_fn {
+            if let ty::FnDef(def_id, _) = *fn_ty.kind() {
+                // Never promote runtime `const fn` calls of
+                // functions without `#[rustc_promotable]`.
+                if !self.tcx.is_promotable_const_fn(def_id) {
+                    return Err(Unpromotable);
+                }
+            }
+        }
+
+        // Non-`FnDef` callees (fn pointers, closures) are never const fns here.
+        let is_const_fn = match *fn_ty.kind() {
+            ty::FnDef(def_id, _) => self.tcx.is_const_fn_raw(def_id),
+            _ => false,
+        };
+        if !is_const_fn {
+            return Err(Unpromotable);
+        }
+
+        self.validate_operand(callee)?;
+        for arg in args {
+            self.validate_operand(arg)?;
+        }
+
+        Ok(())
+    }
+}
+
+// FIXME(eddyb) remove the differences for promotability in `static`, `const`, `const fn`.
+/// Filters `candidates` down to the ones that pass validation, caching
+/// per-temp validity in `temps` as a side effect.
+pub fn validate_candidates(
+    ccx: &ConstCx<'_, '_>,
+    temps: &mut IndexVec<Local, TempState>,
+    candidates: &[Candidate],
+) -> Vec<Candidate> {
+    let mut validator = Validator { ccx, temps };
+
+    // Preserve the original candidate order; keep only the promotable ones.
+    let mut promotable = Vec::with_capacity(candidates.len());
+    for &candidate in candidates {
+        if validator.validate_candidate(candidate).is_ok() {
+            promotable.push(candidate);
+        }
+    }
+    promotable
+}
+
+/// Moves (or copies) the defining statements of promotable temps out of
+/// `source` and into the new `promoted` body.
+struct Promoter<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    // The body being promoted *from*.
+    source: &'a mut Body<'tcx>,
+    // The body being promoted *into*.
+    promoted: Body<'tcx>,
+    temps: &'a mut IndexVec<Local, TempState>,
+    // Statements to splice back into `source` after promotion finishes.
+    extra_statements: &'a mut Vec<(Location, Statement<'tcx>)>,
+
+    /// If true, all nested temps are also kept in the
+    /// source MIR, not moved to the promoted MIR.
+    keep_original: bool,
+}
+
+impl<'a, 'tcx> Promoter<'a, 'tcx> {
+    /// Appends a fresh, empty block ending in `Return` to the promoted body
+    /// and returns its index; statements are pushed into it later via `assign`.
+    fn new_block(&mut self) -> BasicBlock {
+        let source_info = SourceInfo::outermost(self.promoted.span);
+        let terminator = Terminator { source_info, kind: TerminatorKind::Return };
+        self.promoted.basic_blocks_mut().push(BasicBlockData {
+            statements: Vec::new(),
+            terminator: Some(terminator),
+            is_cleanup: false,
+        })
+    }
+
+    /// Appends `dest = rvalue` to the most recently created promoted block.
+    fn assign(&mut self, dest: Local, rvalue: Rvalue<'tcx>, span: Span) {
+        let last = self.promoted.basic_blocks().last().unwrap();
+        let statement = Statement {
+            source_info: SourceInfo::outermost(span),
+            kind: StatementKind::Assign(Box::new((Place::from(dest), rvalue))),
+        };
+        self.promoted[last].statements.push(statement);
+    }
+
+    /// Whether `local` is a compiler-introduced temporary in the source body.
+    fn is_temp_kind(&self, local: Local) -> bool {
+        matches!(self.source.local_kind(local), LocalKind::Temp)
+    }
+
+    /// Copies the initialization of this temp to the
+    /// promoted MIR, recursing through temps.
+    /// Returns the corresponding new local in the promoted body. If the temp
+    /// has multiple uses, its original initialization is kept (cloned);
+    /// otherwise it is moved out and the temp is marked `PromotedOut`.
+    fn promote_temp(&mut self, temp: Local) -> Local {
+        let old_keep_original = self.keep_original;
+        let loc = match self.temps[temp] {
+            TempState::Defined { location, uses, .. } if uses > 0 => {
+                if uses > 1 {
+                    // Still referenced elsewhere: keep (don't steal) the original.
+                    self.keep_original = true;
+                }
+                location
+            }
+            state => {
+                span_bug!(self.promoted.span, "{:?} not promotable: {:?}", temp, state);
+            }
+        };
+        if !self.keep_original {
+            self.temps[temp] = TempState::PromotedOut;
+        }
+
+        let num_stmts = self.source[loc.block].statements.len();
+        let new_temp = self.promoted.local_decls.push(LocalDecl::new(
+            self.source.local_decls[temp].ty,
+            self.source.local_decls[temp].source_info.span,
+        ));
+
+        debug!("promote({:?} @ {:?}/{:?}, {:?})", temp, loc, num_stmts, self.keep_original);
+
+        // First, take the Rvalue or Call out of the source MIR,
+        // or duplicate it, depending on keep_original.
+        if loc.statement_index < num_stmts {
+            let (mut rvalue, source_info) = {
+                let statement = &mut self.source[loc.block].statements[loc.statement_index];
+                let StatementKind::Assign(box (_, ref mut rhs)) = statement.kind else {
+                    span_bug!(
+                        statement.source_info.span,
+                        "{:?} is not an assignment",
+                        statement
+                    );
+                };
+
+                (
+                    if self.keep_original {
+                        rhs.clone()
+                    } else {
+                        // Replace the stolen RHS with a unit constant so the
+                        // source statement stays well-formed.
+                        let unit = Rvalue::Use(Operand::Constant(Box::new(Constant {
+                            span: statement.source_info.span,
+                            user_ty: None,
+                            literal: ConstantKind::zero_sized(self.tcx.types.unit),
+                        })));
+                        mem::replace(rhs, unit)
+                    },
+                    statement.source_info,
+                )
+            };
+
+            // Recursively promote any temps referenced inside the rvalue.
+            self.visit_rvalue(&mut rvalue, loc);
+            self.assign(new_temp, rvalue, source_info.span);
+        } else {
+            let terminator = if self.keep_original {
+                self.source[loc.block].terminator().clone()
+            } else {
+                // Steal the call terminator, leaving a plain `Goto` behind.
+                let terminator = self.source[loc.block].terminator_mut();
+                let target = match terminator.kind {
+                    TerminatorKind::Call { target: Some(target), .. } => target,
+                    ref kind => {
+                        span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+                    }
+                };
+                Terminator {
+                    source_info: terminator.source_info,
+                    kind: mem::replace(&mut terminator.kind, TerminatorKind::Goto { target }),
+                }
+            };
+
+            match terminator.kind {
+                TerminatorKind::Call { mut func, mut args, from_hir_call, fn_span, .. } => {
+                    self.visit_operand(&mut func, loc);
+                    for arg in &mut args {
+                        self.visit_operand(arg, loc);
+                    }
+
+                    let last = self.promoted.basic_blocks().last().unwrap();
+                    let new_target = self.new_block();
+
+                    // Re-emit the call in the promoted body, returning into `new_temp`.
+                    *self.promoted[last].terminator_mut() = Terminator {
+                        kind: TerminatorKind::Call {
+                            func,
+                            args,
+                            cleanup: None,
+                            destination: Place::from(new_temp),
+                            target: Some(new_target),
+                            from_hir_call,
+                            fn_span,
+                        },
+                        source_info: SourceInfo::outermost(terminator.source_info.span),
+                        ..terminator
+                    };
+                }
+                ref kind => {
+                    span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+                }
+            };
+        };
+
+        self.keep_original = old_keep_original;
+        new_temp
+    }
+
+    /// Extracts the candidate's borrowed rvalue into the promoted body and
+    /// rewrites the source statement to borrow an unevaluated-constant
+    /// reference to the new promoted item instead. Consumes `self` and
+    /// returns the finished promoted body.
+    fn promote_candidate(mut self, candidate: Candidate, next_promoted_id: usize) -> Body<'tcx> {
+        let def = self.source.source.with_opt_param();
+        let mut rvalue = {
+            let promoted = &mut self.promoted;
+            let promoted_id = Promoted::new(next_promoted_id);
+            let tcx = self.tcx;
+            // Builds the `Operand::Constant` referring to this promoted,
+            // parameterized over the parent item's generics (lifetimes erased).
+            let mut promoted_operand = |ty, span| {
+                promoted.span = span;
+                promoted.local_decls[RETURN_PLACE] = LocalDecl::new(ty, span);
+                let _const = tcx.mk_const(ty::ConstS {
+                    ty,
+                    kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+                        def,
+                        substs: InternalSubsts::for_item(tcx, def.did, |param, _| {
+                            if let ty::GenericParamDefKind::Lifetime = param.kind {
+                                tcx.lifetimes.re_erased.into()
+                            } else {
+                                tcx.mk_param_from_def(param)
+                            }
+                        }),
+                        promoted: Some(promoted_id),
+                    }),
+                });
+
+                Operand::Constant(Box::new(Constant {
+                    span,
+                    user_ty: None,
+                    literal: ConstantKind::from_const(_const, tcx),
+                }))
+            };
+            let blocks = self.source.basic_blocks.as_mut();
+            let local_decls = &mut self.source.local_decls;
+            let loc = candidate.location;
+            let statement = &mut blocks[loc.block].statements[loc.statement_index];
+            match statement.kind {
+                StatementKind::Assign(box (
+                    _,
+                    Rvalue::Ref(ref mut region, borrow_kind, ref mut place),
+                )) => {
+                    // Use the underlying local for this (necessarily interior) borrow.
+                    let ty = local_decls[place.local].ty;
+                    let span = statement.source_info.span;
+
+                    let ref_ty = tcx.mk_ref(
+                        tcx.lifetimes.re_erased,
+                        ty::TypeAndMut { ty, mutbl: borrow_kind.to_mutbl_lossy() },
+                    );
+
+                    *region = tcx.lifetimes.re_erased;
+
+                    // Rewrite `&P` into `&(*promoted_ref).P`.
+                    let mut projection = vec![PlaceElem::Deref];
+                    projection.extend(place.projection);
+                    place.projection = tcx.intern_place_elems(&projection);
+
+                    // Create a temp to hold the promoted reference.
+                    // This is because `*r` requires `r` to be a local,
+                    // otherwise we would use the `promoted` directly.
+                    let mut promoted_ref = LocalDecl::new(ref_ty, span);
+                    promoted_ref.source_info = statement.source_info;
+                    let promoted_ref = local_decls.push(promoted_ref);
+                    assert_eq!(self.temps.push(TempState::Unpromotable), promoted_ref);
+
+                    let promoted_ref_statement = Statement {
+                        source_info: statement.source_info,
+                        kind: StatementKind::Assign(Box::new((
+                            Place::from(promoted_ref),
+                            Rvalue::Use(promoted_operand(ref_ty, span)),
+                        ))),
+                    };
+                    self.extra_statements.push((loc, promoted_ref_statement));
+
+                    Rvalue::Ref(
+                        tcx.lifetimes.re_erased,
+                        borrow_kind,
+                        Place {
+                            local: mem::replace(&mut place.local, promoted_ref),
+                            projection: List::empty(),
+                        },
+                    )
+                }
+                _ => bug!(),
+            }
+        };
+
+        assert_eq!(self.new_block(), START_BLOCK);
+        // Pull the referenced temps (and their dependencies) into the promoted body.
+        self.visit_rvalue(
+            &mut rvalue,
+            Location { block: BasicBlock::new(0), statement_index: usize::MAX },
+        );
+
+        let span = self.promoted.span;
+        self.assign(RETURN_PLACE, rvalue, span);
+        self.promoted
+    }
+}
+
+/// Replaces all temporaries with their promoted counterparts.
+impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    /// Every temp encountered while copying an rvalue into the promoted body
+    /// is itself promoted (recursively) and rewritten to the new local.
+    fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
+        if self.is_temp_kind(*local) {
+            *local = self.promote_temp(*local);
+        }
+    }
+}
+
+/// Promotes each validated candidate into its own `Body`, rewrites `body` to
+/// reference the new promoteds, and cleans up now-dead assignments/drops.
+pub fn promote_candidates<'tcx>(
+    body: &mut Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    mut temps: IndexVec<Local, TempState>,
+    candidates: Vec<Candidate>,
+) -> IndexVec<Promoted, Body<'tcx>> {
+    // Visit candidates in reverse, in case they're nested.
+    debug!("promote_candidates({:?})", candidates);
+
+    let mut promotions = IndexVec::new();
+
+    let mut extra_statements = vec![];
+    for candidate in candidates.into_iter().rev() {
+        let Location { block, statement_index } = candidate.location;
+        if let StatementKind::Assign(box (place, _)) = &body[block].statements[statement_index].kind
+        {
+            if let Some(local) = place.as_local() {
+                if temps[local] == TempState::PromotedOut {
+                    // Already promoted.
+                    continue;
+                }
+            }
+        }
+
+        // Declare return place local so that `mir::Body::new` doesn't complain.
+        let initial_locals = iter::once(LocalDecl::new(tcx.types.never, body.span)).collect();
+
+        let mut scope = body.source_scopes[body.source_info(candidate.location).scope].clone();
+        scope.parent_scope = None;
+
+        let promoted = Body::new(
+            body.source, // `promoted` gets filled in below
+            IndexVec::new(),
+            IndexVec::from_elem_n(scope, 1),
+            initial_locals,
+            IndexVec::new(),
+            0,
+            vec![],
+            body.span,
+            body.generator_kind(),
+            body.tainted_by_errors,
+        );
+
+        let promoter = Promoter {
+            promoted,
+            tcx,
+            source: body,
+            temps: &mut temps,
+            extra_statements: &mut extra_statements,
+            keep_original: false,
+        };
+
+        let mut promoted = promoter.promote_candidate(candidate, promotions.len());
+        promoted.source.promoted = Some(promotions.next_index());
+        promotions.push(promoted);
+    }
+
+    // Insert each of `extra_statements` before its indicated location, which
+    // has to be done in reverse location order, to not invalidate the rest.
+    extra_statements.sort_by_key(|&(loc, _)| cmp::Reverse(loc));
+    for (loc, statement) in extra_statements {
+        body[loc.block].statements.insert(loc.statement_index, statement);
+    }
+
+    // Eliminate assignments to, and drops of promoted temps.
+    let promoted = |index: Local| temps[index] == TempState::PromotedOut;
+    for block in body.basic_blocks_mut() {
+        block.statements.retain(|statement| match &statement.kind {
+            StatementKind::Assign(box (place, _)) => {
+                if let Some(index) = place.as_local() {
+                    !promoted(index)
+                } else {
+                    true
+                }
+            }
+            StatementKind::StorageLive(index) | StatementKind::StorageDead(index) => {
+                !promoted(*index)
+            }
+            _ => true,
+        });
+        // A drop of a promoted-out temp is a no-op: turn it into a `Goto`.
+        let terminator = block.terminator_mut();
+        if let TerminatorKind::Drop { place, target, .. } = &terminator.kind {
+            if let Some(index) = place.as_local() {
+                if promoted(index) {
+                    terminator.kind = TerminatorKind::Goto { target: *target };
+                }
+            }
+        }
+    }
+
+    promotions
+}
+
+/// This function returns `true` if the function being called in the array
+/// repeat expression is a `const` function.
+pub fn is_const_fn_in_array_repeat_expression<'tcx>(
+    ccx: &ConstCx<'_, 'tcx>,
+    place: &Place<'tcx>,
+    body: &Body<'tcx>,
+) -> bool {
+    match place.as_local() {
+        // rule out cases such as: `let my_var = some_fn(); [my_var; N]`
+        Some(local) if body.local_decls[local].is_user_variable() => return false,
+        None => return false,
+        _ => {}
+    }
+
+    // Scan all call terminators for one whose destination is `place` and
+    // whose callee is a statically-known `const fn`.
+    for block in body.basic_blocks() {
+        if let Some(Terminator { kind: TerminatorKind::Call { func, destination, .. }, .. }) =
+            &block.terminator
+        {
+            if let Operand::Constant(box Constant { literal, .. }) = func {
+                if let ty::FnDef(def_id, _) = *literal.ty().kind() {
+                    if destination == place {
+                        if ccx.tcx.is_const_fn(def_id) {
+                            return true;
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    false
+}
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
new file mode 100644
index 000000000..15e820f2d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -0,0 +1,913 @@
+//! Validates the MIR to ensure that invariants are upheld.
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_index::bit_set::BitSet;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::mir::interpret::Scalar;
+use rustc_middle::mir::visit::NonUseContext::VarDebugInfo;
+use rustc_middle::mir::visit::{PlaceContext, Visitor};
+use rustc_middle::mir::{
+ traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, CastKind, Local, Location,
+ MirPass, MirPhase, Operand, Place, PlaceElem, PlaceRef, ProjectionElem, Rvalue, SourceScope,
+ Statement, StatementKind, Terminator, TerminatorKind, UnOp, START_BLOCK,
+};
+use rustc_middle::ty::fold::BottomUpFolder;
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeFoldable, TypeVisitable};
+use rustc_mir_dataflow::impls::MaybeStorageLive;
+use rustc_mir_dataflow::storage::always_storage_live_locals;
+use rustc_mir_dataflow::{Analysis, ResultsCursor};
+use rustc_target::abi::{Size, VariantIdx};
+
+/// The kind of control-flow edge being validated between two basic blocks.
+#[derive(Copy, Clone, Debug)]
+enum EdgeKind {
+    /// An edge taken only during unwinding (e.g. a `cleanup`/`unwind` target).
+    Unwind,
+    /// An ordinary (non-unwind) control-flow edge.
+    Normal,
+}
+
+/// A MIR pass that checks the invariants of the dialect given by `mir_phase`.
+pub struct Validator {
+    /// Describes at which point in the pipeline this validation is happening.
+    pub when: String,
+    /// The phase for which we are upholding the dialect. If the given phase forbids a specific
+    /// element, this validator will now emit errors if that specific element is encountered.
+    /// Note that phases that change the dialect cause all *following* phases to check the
+    /// invariants of the new dialect. A phase that changes dialects never checks the new invariants
+    /// itself.
+    pub mir_phase: MirPhase,
+}
+
+impl<'tcx> MirPass<'tcx> for Validator {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        // FIXME(JakobDegen): These bodies never instantiated in codegend anyway, so it's not
+        // terribly important that they pass the validator. However, I think other passes might
+        // still see them, in which case they might be surprised. It would probably be better if we
+        // didn't put this through the MIR pipeline at all.
+        if matches!(body.source.instance, InstanceDef::Intrinsic(..) | InstanceDef::Virtual(..)) {
+            return;
+        }
+        let def_id = body.source.def_id();
+        let param_env = tcx.param_env(def_id);
+        let mir_phase = self.mir_phase;
+
+        // Run the storage-liveness dataflow analysis up front; `visit_local` uses the
+        // resulting cursor to check that every use of a local happens while it has storage.
+        let always_live_locals = always_storage_live_locals(body);
+        let storage_liveness = MaybeStorageLive::new(always_live_locals)
+            .into_engine(tcx, body)
+            .iterate_to_fixpoint()
+            .into_results_cursor(body);
+
+        // All actual checking is done by the `Visitor` impl on `TypeChecker`.
+        TypeChecker {
+            when: &self.when,
+            body,
+            tcx,
+            param_env,
+            mir_phase,
+            reachable_blocks: traversal::reachable_as_bitset(body),
+            storage_liveness,
+            place_cache: Vec::new(),
+            value_cache: Vec::new(),
+        }
+        .visit_body(body);
+    }
+}
+
+/// Returns whether the two types are equal up to lifetimes.
+/// All lifetimes, including higher-ranked ones, get ignored for this comparison.
+/// (This is unlike the `erasing_regions` methods, which keep higher-ranked lifetimes for soundness reasons.)
+///
+/// The point of this function is to approximate "equal up to subtyping". However,
+/// the approximation is incorrect as variance is ignored.
+pub fn equal_up_to_regions<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    src: Ty<'tcx>,
+    dest: Ty<'tcx>,
+) -> bool {
+    // Fast path.
+    if src == dest {
+        return true;
+    }
+
+    // Normalize lifetimes away on both sides, then compare.
+    let normalize = |ty: Ty<'tcx>| {
+        tcx.normalize_erasing_regions(
+            param_env,
+            ty.fold_with(&mut BottomUpFolder {
+                tcx,
+                // FIXME: We erase all late-bound lifetimes, but this is not fully correct.
+                // If you have a type like `<for<'a> fn(&'a u32) as SomeTrait>::Assoc`,
+                // this is not necessarily equivalent to `<fn(&'static u32) as SomeTrait>::Assoc`,
+                // since one may have an `impl SomeTrait for fn(&32)` and
+                // `impl SomeTrait for fn(&'static u32)` at the same time which
+                // specify distinct values for Assoc. (See also #56105)
+                lt_op: |_| tcx.lifetimes.re_erased,
+                // Leave consts and types unchanged.
+                ct_op: |ct| ct,
+                ty_op: |ty| ty,
+            }),
+        )
+    };
+    // Ask the inference machinery whether the normalized types can be equated.
+    tcx.infer_ctxt().enter(|infcx| infcx.can_eq(param_env, normalize(src), normalize(dest)).is_ok())
+}
+
+/// The visitor that performs all MIR validation checks; see the `Visitor` impl below.
+struct TypeChecker<'a, 'tcx> {
+    /// Pipeline-stage description, included in every diagnostic.
+    when: &'a str,
+    /// The body being validated.
+    body: &'a Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    /// The dialect whose invariants are being enforced.
+    mir_phase: MirPhase,
+    /// Blocks reachable from `START_BLOCK`; liveness checks are limited to these.
+    reachable_blocks: BitSet<BasicBlock>,
+    /// Dataflow cursor used to check that locals are storage-live at each use.
+    storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive>,
+    /// Scratch buffer reused across `Call` terminators to detect overlapping places.
+    place_cache: Vec<PlaceRef<'tcx>>,
+    /// Scratch buffer reused across `SwitchInt` terminators to detect duplicate values.
+    value_cache: Vec<u128>,
+}
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+    /// Reports a validation failure at `location` as a delayed span bug.
+    fn fail(&self, location: Location, msg: impl AsRef<str>) {
+        let span = self.body.source_info(location).span;
+        // We use `delay_span_bug` as we might see broken MIR when other errors have already
+        // occurred.
+        self.tcx.sess.diagnostic().delay_span_bug(
+            span,
+            &format!(
+                "broken MIR in {:?} ({}) at {:?}:\n{}",
+                self.body.source.instance,
+                self.when,
+                location,
+                msg.as_ref()
+            ),
+        );
+    }
+
+    /// Checks that the edge from `location`'s block to `bb` is valid: the target exists,
+    /// it is not the start block, and cleanup/non-cleanup status is consistent with `edge_kind`.
+    fn check_edge(&self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
+        if bb == START_BLOCK {
+            self.fail(location, "start block must not have predecessors")
+        }
+        if let Some(bb) = self.body.basic_blocks().get(bb) {
+            let src = self.body.basic_blocks().get(location.block).unwrap();
+            match (src.is_cleanup, bb.is_cleanup, edge_kind) {
+                // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
+                (false, false, EdgeKind::Normal)
+                // Non-cleanup blocks can jump to cleanup blocks along unwind edges
+                | (false, true, EdgeKind::Unwind)
+                // Cleanup blocks can jump to cleanup blocks along non-unwind edges
+                | (true, true, EdgeKind::Normal) => {}
+                // All other jumps are invalid
+                _ => {
+                    self.fail(
+                        location,
+                        format!(
+                            "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
+                            edge_kind,
+                            bb,
+                            src.is_cleanup,
+                            bb.is_cleanup,
+                        )
+                    )
+                }
+            }
+        } else {
+            self.fail(location, format!("encountered jump to invalid basic block {:?}", bb))
+        }
+    }
+
+    /// Check if src can be assigned into dest.
+    /// This is not precise, it will accept some incorrect assignments.
+    fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
+        // Fast path before we normalize.
+        if src == dest {
+            // Equal types, all is good.
+            return true;
+        }
+        // Normalization reveals opaque types, but we may be validating MIR while computing
+        // said opaque types, causing cycles.
+        if (src, dest).has_opaque_types() {
+            return true;
+        }
+        // Normalize projections and things like that.
+        let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
+        let src = self.tcx.normalize_erasing_regions(param_env, src);
+        let dest = self.tcx.normalize_erasing_regions(param_env, dest);
+
+        // Type-changing assignments can happen when subtyping is used. While
+        // all normal lifetimes are erased, higher-ranked types with their
+        // late-bound lifetimes are still around and can lead to type
+        // differences. So we compare ignoring lifetimes.
+        equal_up_to_regions(self.tcx, param_env, src, dest)
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
+    // Checks that every mentioned local is declared, and that reachable uses
+    // only happen while the local is storage-live.
+    fn visit_local(&mut self, local: Local, context: PlaceContext, location: Location) {
+        if self.body.local_decls.get(local).is_none() {
+            self.fail(
+                location,
+                format!("local {:?} has no corresponding declaration in `body.local_decls`", local),
+            );
+        }
+
+        if self.reachable_blocks.contains(location.block) && context.is_use() {
+            // We check that the local is live whenever it is used. Technically, violating this
+            // restriction is only UB and not actually indicative of not well-formed MIR. This means
+            // that an optimization which turns MIR that already has UB into MIR that fails this
+            // check is not necessarily wrong. However, we have no such optimizations at the moment,
+            // and so we include this check anyway to help us catch bugs. If you happen to write an
+            // optimization that might cause this to incorrectly fire, feel free to remove this
+            // check.
+            self.storage_liveness.seek_after_primary_effect(location);
+            let locals_with_storage = self.storage_liveness.get();
+            if !locals_with_storage.contains(local) {
+                self.fail(location, format!("use of local {:?}, which has no storage here", local));
+            }
+        }
+    }
+
+    // Before drops are lowered, `Operand::Copy` must only be used on `Copy` types.
+    fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
+        // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
+        if self.tcx.sess.opts.unstable_opts.validate_mir && self.mir_phase < MirPhase::DropsLowered
+        {
+            // `Operand::Copy` is only supposed to be used with `Copy` types.
+            if let Operand::Copy(place) = operand {
+                let ty = place.ty(&self.body.local_decls, self.tcx).ty;
+                let span = self.body.source_info(location).span;
+
+                if !ty.is_copy_modulo_regions(self.tcx.at(span), self.param_env) {
+                    self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
+                }
+            }
+        }
+
+        self.super_operand(operand, location);
+    }
+
+    // Validates individual projection elements: index locals must be `usize`,
+    // boxes must not be dereferenced after `ElaborateBoxDerefs`, and field
+    // projections must record the actual field type of the base.
+    fn visit_projection_elem(
+        &mut self,
+        local: Local,
+        proj_base: &[PlaceElem<'tcx>],
+        elem: PlaceElem<'tcx>,
+        context: PlaceContext,
+        location: Location,
+    ) {
+        match elem {
+            ProjectionElem::Index(index) => {
+                let index_ty = self.body.local_decls[index].ty;
+                if index_ty != self.tcx.types.usize {
+                    self.fail(location, format!("bad index ({:?} != usize)", index_ty))
+                }
+            }
+            ProjectionElem::Deref if self.mir_phase >= MirPhase::GeneratorsLowered => {
+                let base_ty = Place::ty_from(local, proj_base, &self.body.local_decls, self.tcx).ty;
+
+                if base_ty.is_box() {
+                    self.fail(
+                        location,
+                        format!("{:?} dereferenced after ElaborateBoxDerefs", base_ty),
+                    )
+                }
+            }
+            ProjectionElem::Field(f, ty) => {
+                let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
+                let parent_ty = parent.ty(&self.body.local_decls, self.tcx);
+                let fail_out_of_bounds = |this: &Self, location| {
+                    this.fail(location, format!("Out of bounds field {:?} for {:?}", f, parent_ty));
+                };
+                // Compare the type recorded in the projection with the real field type,
+                // up to normalization and lifetimes.
+                let check_equal = |this: &Self, location, f_ty| {
+                    if !this.mir_assign_valid_types(ty, f_ty) {
+                        this.fail(
+                            location,
+                            format!(
+                                "Field projection `{:?}.{:?}` specified type `{:?}`, but actual type is {:?}",
+                                parent, f, ty, f_ty
+                            )
+                        )
+                    }
+                };
+
+                // Look through opaque types to their underlying type before matching.
+                let kind = match parent_ty.ty.kind() {
+                    &ty::Opaque(def_id, substs) => {
+                        self.tcx.bound_type_of(def_id).subst(self.tcx, substs).kind()
+                    }
+                    kind => kind,
+                };
+
+                match kind {
+                    ty::Tuple(fields) => {
+                        let Some(f_ty) = fields.get(f.as_usize()) else {
+                            fail_out_of_bounds(self, location);
+                            return;
+                        };
+                        check_equal(self, location, *f_ty);
+                    }
+                    ty::Adt(adt_def, substs) => {
+                        // Without a downcast, the variant defaults to 0 (structs/unions).
+                        let var = parent_ty.variant_index.unwrap_or(VariantIdx::from_u32(0));
+                        let Some(field) = adt_def.variant(var).fields.get(f.as_usize()) else {
+                            fail_out_of_bounds(self, location);
+                            return;
+                        };
+                        check_equal(self, location, field.ty(self.tcx, substs));
+                    }
+                    ty::Closure(_, substs) => {
+                        let substs = substs.as_closure();
+                        let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
+                            fail_out_of_bounds(self, location);
+                            return;
+                        };
+                        check_equal(self, location, f_ty);
+                    }
+                    &ty::Generator(def_id, substs, _) => {
+                        // With a variant index, look the field up in the generator layout;
+                        // without one, it is an upvar in the prefix.
+                        let f_ty = if let Some(var) = parent_ty.variant_index {
+                            let gen_body = if def_id == self.body.source.def_id() {
+                                self.body
+                            } else {
+                                self.tcx.optimized_mir(def_id)
+                            };
+
+                            let Some(layout) = gen_body.generator_layout() else {
+                                self.fail(location, format!("No generator layout for {:?}", parent_ty));
+                                return;
+                            };
+
+                            let Some(&local) = layout.variant_fields[var].get(f) else {
+                                fail_out_of_bounds(self, location);
+                                return;
+                            };
+
+                            let Some(&f_ty) = layout.field_tys.get(local) else {
+                                self.fail(location, format!("Out of bounds local {:?} for {:?}", local, parent_ty));
+                                return;
+                            };
+
+                            f_ty
+                        } else {
+                            let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
+                                fail_out_of_bounds(self, location);
+                                return;
+                            };
+
+                            f_ty
+                        };
+
+                        check_equal(self, location, f_ty);
+                    }
+                    _ => {
+                        self.fail(location, format!("{:?} does not have fields", parent_ty.ty));
+                    }
+                }
+            }
+            _ => {}
+        }
+        self.super_projection_elem(local, proj_base, elem, context, location);
+    }
+
+    // After the `Derefered` phase, a `Deref` may only appear as the first
+    // projection element (except in debug info, which is a non-use context).
+    fn visit_place(&mut self, place: &Place<'tcx>, cntxt: PlaceContext, location: Location) {
+        // Set off any `bug!`s in the type computation code
+        let _ = place.ty(&self.body.local_decls, self.tcx);
+
+        if self.mir_phase >= MirPhase::Derefered
+            && place.projection.len() > 1
+            && cntxt != PlaceContext::NonUse(VarDebugInfo)
+            && place.projection[1..].contains(&ProjectionElem::Deref)
+        {
+            self.fail(location, format!("{:?}, has deref at the wrong place", place));
+        }
+
+        self.super_place(place, cntxt, location);
+    }
+
+    // Validates the operand types of every rvalue kind, and that aggregate /
+    // shallow-borrow rvalues do not outlive the phase that lowers them.
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        // Asserts that `$t`'s kind matches `$patterns`, failing with `$text` otherwise.
+        macro_rules! check_kinds {
+            ($t:expr, $text:literal, $($patterns:tt)*) => {
+                if !matches!(($t).kind(), $($patterns)*) {
+                    self.fail(location, format!($text, $t));
+                }
+            };
+        }
+        match rvalue {
+            Rvalue::Use(_) | Rvalue::CopyForDeref(_) => {}
+            Rvalue::Aggregate(agg_kind, _) => {
+                // Arrays stay aggregates forever; generators are lowered in
+                // `GeneratorsLowered`; everything else in `Deaggregated`.
+                let disallowed = match **agg_kind {
+                    AggregateKind::Array(..) => false,
+                    AggregateKind::Generator(..) => self.mir_phase >= MirPhase::GeneratorsLowered,
+                    _ => self.mir_phase >= MirPhase::Deaggregated,
+                };
+                if disallowed {
+                    self.fail(
+                        location,
+                        format!("{:?} have been lowered to field assignments", rvalue),
+                    )
+                }
+            }
+            Rvalue::Ref(_, BorrowKind::Shallow, _) => {
+                if self.mir_phase >= MirPhase::DropsLowered {
+                    self.fail(
+                        location,
+                        "`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            Rvalue::Ref(..) => {}
+            Rvalue::Len(p) => {
+                let pty = p.ty(&self.body.local_decls, self.tcx).ty;
+                check_kinds!(
+                    pty,
+                    "Cannot compute length of non-array type {:?}",
+                    ty::Array(..) | ty::Slice(..)
+                );
+            }
+            Rvalue::BinaryOp(op, vals) => {
+                use BinOp::*;
+                let a = vals.0.ty(&self.body.local_decls, self.tcx);
+                let b = vals.1.ty(&self.body.local_decls, self.tcx);
+                match op {
+                    Offset => {
+                        check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..));
+                        // `Offset` accepts both `isize` and `usize` offsets.
+                        if b != self.tcx.types.isize && b != self.tcx.types.usize {
+                            self.fail(location, format!("Cannot offset by non-isize type {:?}", b));
+                        }
+                    }
+                    Eq | Lt | Le | Ne | Ge | Gt => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot compare type {:?}",
+                                ty::Bool
+                                    | ty::Char
+                                    | ty::Int(..)
+                                    | ty::Uint(..)
+                                    | ty::Float(..)
+                                    | ty::RawPtr(..)
+                                    | ty::FnPtr(..)
+                            )
+                        }
+                        // The function pointer types can have lifetimes
+                        if !self.mir_assign_valid_types(a, b) {
+                            self.fail(
+                                location,
+                                format!("Cannot compare unequal types {:?} and {:?}", a, b),
+                            );
+                        }
+                    }
+                    Shl | Shr => {
+                        // Shift amount may be a different integer type than the shifted value.
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot shift non-integer type {:?}",
+                                ty::Uint(..) | ty::Int(..)
+                            )
+                        }
+                    }
+                    BitAnd | BitOr | BitXor => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot perform bitwise op on type {:?}",
+                                ty::Uint(..) | ty::Int(..) | ty::Bool
+                            )
+                        }
+                        if a != b {
+                            self.fail(
+                                location,
+                                format!(
+                                    "Cannot perform bitwise op on unequal types {:?} and {:?}",
+                                    a, b
+                                ),
+                            );
+                        }
+                    }
+                    Add | Sub | Mul | Div | Rem => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot perform arithmetic on type {:?}",
+                                ty::Uint(..) | ty::Int(..) | ty::Float(..)
+                            )
+                        }
+                        if a != b {
+                            self.fail(
+                                location,
+                                format!(
+                                    "Cannot perform arithmetic on unequal types {:?} and {:?}",
+                                    a, b
+                                ),
+                            );
+                        }
+                    }
+                }
+            }
+            Rvalue::CheckedBinaryOp(op, vals) => {
+                use BinOp::*;
+                let a = vals.0.ty(&self.body.local_decls, self.tcx);
+                let b = vals.1.ty(&self.body.local_decls, self.tcx);
+                match op {
+                    Add | Sub | Mul => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot perform checked arithmetic on type {:?}",
+                                ty::Uint(..) | ty::Int(..)
+                            )
+                        }
+                        if a != b {
+                            self.fail(
+                                location,
+                                format!(
+                                    "Cannot perform checked arithmetic on unequal types {:?} and {:?}",
+                                    a, b
+                                ),
+                            );
+                        }
+                    }
+                    Shl | Shr => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot perform checked shift on non-integer type {:?}",
+                                ty::Uint(..) | ty::Int(..)
+                            )
+                        }
+                    }
+                    _ => self.fail(location, format!("There is no checked version of {:?}", op)),
+                }
+            }
+            Rvalue::UnaryOp(op, operand) => {
+                let a = operand.ty(&self.body.local_decls, self.tcx);
+                match op {
+                    UnOp::Neg => {
+                        check_kinds!(a, "Cannot negate type {:?}", ty::Int(..) | ty::Float(..))
+                    }
+                    UnOp::Not => {
+                        check_kinds!(
+                            a,
+                            "Cannot binary not type {:?}",
+                            ty::Int(..) | ty::Uint(..) | ty::Bool
+                        );
+                    }
+                }
+            }
+            Rvalue::ShallowInitBox(operand, _) => {
+                check_kinds!(
+                    {
+                        let a = operand.ty(&self.body.local_decls, self.tcx);
+                        a
+                    },
+                    "Cannot shallow init type {:?}",
+                    ty::RawPtr(..)
+                );
+            }
+            Rvalue::Cast(kind, operand, target_type) => {
+                match kind {
+                    CastKind::Misc => {
+                        let op_ty = operand.ty(self.body, self.tcx);
+                        // Enum-to-int conversions must use `Rvalue::Discriminant`, not a cast.
+                        if op_ty.is_enum() {
+                            self.fail(
+                                location,
+                                format!(
+                                    "enum -> int casts should go through `Rvalue::Discriminant`: {operand:?}:{op_ty} as {target_type}",
+                                ),
+                            );
+                        }
+                    }
+                    // Nothing to check here
+                    CastKind::PointerFromExposedAddress
+                    | CastKind::PointerExposeAddress
+                    | CastKind::Pointer(_) => {}
+                }
+            }
+            Rvalue::Repeat(_, _)
+            | Rvalue::ThreadLocalRef(_)
+            | Rvalue::AddressOf(_, _)
+            | Rvalue::NullaryOp(_, _)
+            | Rvalue::Discriminant(_) => {}
+        }
+        self.super_rvalue(rvalue, location);
+    }
+
+    // Validates each statement kind: assignment type compatibility and aliasing,
+    // phase restrictions (`AscribeUserType`/`FakeRead` gone after drop lowering,
+    // `SetDiscriminant`/`Deinit` only after deaggregation), and the pointer/count
+    // argument types of `CopyNonOverlapping`.
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        match &statement.kind {
+            StatementKind::Assign(box (dest, rvalue)) => {
+                // LHS and RHS of the assignment must have the same type.
+                let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
+                let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
+                if !self.mir_assign_valid_types(right_ty, left_ty) {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered `{:?}` with incompatible types:\n\
+                            left-hand side has type: {}\n\
+                            right-hand side has type: {}",
+                            statement.kind, left_ty, right_ty,
+                        ),
+                    );
+                }
+                if let Rvalue::CopyForDeref(place) = rvalue {
+                    // `CopyForDeref` must copy something that can itself be dereferenced.
+                    // (Idiom fix: `.is_none()` instead of `!….is_some()`.)
+                    if place.ty(&self.body.local_decls, self.tcx).ty.builtin_deref(true).is_none()
+                    {
+                        self.fail(
+                            location,
+                            "`CopyForDeref` should only be used for dereferenceable types",
+                        )
+                    }
+                }
+                // FIXME(JakobDegen): Check this for all rvalues, not just this one.
+                if let Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) = rvalue {
+                    // The sides of an assignment must not alias. Currently this just checks whether
+                    // the places are identical.
+                    if dest == src {
+                        self.fail(
+                            location,
+                            "encountered `Assign` statement with overlapping memory",
+                        );
+                    }
+                }
+            }
+            StatementKind::AscribeUserType(..) => {
+                if self.mir_phase >= MirPhase::DropsLowered {
+                    self.fail(
+                        location,
+                        "`AscribeUserType` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::FakeRead(..) => {
+                if self.mir_phase >= MirPhase::DropsLowered {
+                    self.fail(
+                        location,
+                        "`FakeRead` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+                ref src,
+                ref dst,
+                ref count,
+            }) => {
+                // Both `src` and `dst` must be raw pointers (dereferenceable),
+                // pointing to compatible pointee types, and `count` must be `usize`.
+                let src_ty = src.ty(&self.body.local_decls, self.tcx);
+                let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
+                    src_deref.ty
+                } else {
+                    self.fail(
+                        location,
+                        format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty),
+                    );
+                    return;
+                };
+                let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
+                let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
+                    dst_deref.ty
+                } else {
+                    self.fail(
+                        location,
+                        format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty),
+                    );
+                    return;
+                };
+                // since CopyNonOverlapping is parametrized by 1 type,
+                // we only need to check that they are equal and not keep an extra parameter.
+                if !self.mir_assign_valid_types(op_src_ty, op_dst_ty) {
+                    self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty));
+                }
+
+                let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
+                if op_cnt_ty != self.tcx.types.usize {
+                    self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
+                }
+            }
+            StatementKind::SetDiscriminant { place, .. } => {
+                if self.mir_phase < MirPhase::Deaggregated {
+                    // Message fix: was "`SetDiscriminant`is" (missing space).
+                    self.fail(location, "`SetDiscriminant` is not allowed until deaggregation");
+                }
+                let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind();
+                if !matches!(pty, ty::Adt(..) | ty::Generator(..) | ty::Opaque(..)) {
+                    self.fail(
+                        location,
+                        format!(
+                            "`SetDiscriminant` is only allowed on ADTs and generators, not {:?}",
+                            pty
+                        ),
+                    );
+                }
+            }
+            StatementKind::Deinit(..) => {
+                if self.mir_phase < MirPhase::Deaggregated {
+                    // Message fix: was "`Deinit`is" (missing space).
+                    self.fail(location, "`Deinit` is not allowed until deaggregation");
+                }
+            }
+            StatementKind::Retag(_, _) => {
+                // FIXME(JakobDegen) The validator should check that `self.mir_phase <
+                // DropsLowered`. However, this causes ICEs with generation of drop shims, which
+                // seem to fail to set their `MirPhase` correctly.
+            }
+            StatementKind::StorageLive(..)
+            | StatementKind::StorageDead(..)
+            | StatementKind::Coverage(_)
+            | StatementKind::Nop => {}
+        }
+
+        self.super_statement(statement, location);
+    }
+
+    // Validates every terminator: all outgoing edges are checked via `check_edge`,
+    // switch values must fit the discriminant type and be unique, `Call`
+    // destination/`Move` arguments must not overlap, and phase-restricted
+    // terminators (`DropAndReplace`, `Yield`, `FalseEdge`, …) must be gone.
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        match &terminator.kind {
+            TerminatorKind::Goto { target } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+            }
+            TerminatorKind::SwitchInt { targets, switch_ty, discr } => {
+                let ty = discr.ty(&self.body.local_decls, self.tcx);
+                if ty != *switch_ty {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered `SwitchInt` terminator with type mismatch: {:?} != {:?}",
+                            ty, switch_ty,
+                        ),
+                    );
+                }
+
+                let target_width = self.tcx.sess.target.pointer_width;
+
+                // Bit width of the switched-on type; used to check each value fits.
+                let size = Size::from_bits(match switch_ty.kind() {
+                    ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
+                    ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
+                    ty::Char => 32,
+                    ty::Bool => 1,
+                    other => bug!("unhandled type: {:?}", other),
+                });
+
+                for (value, target) in targets.iter() {
+                    if Scalar::<()>::try_from_uint(value, size).is_none() {
+                        self.fail(
+                            location,
+                            format!("the value {:#x} is not a proper {:?}", value, switch_ty),
+                        )
+                    }
+
+                    self.check_edge(location, target, EdgeKind::Normal);
+                }
+                self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
+
+                // Detect duplicated switch values via sort + dedup on the reused cache.
+                self.value_cache.clear();
+                self.value_cache.extend(targets.iter().map(|(value, _)| value));
+                let all_len = self.value_cache.len();
+                self.value_cache.sort_unstable();
+                self.value_cache.dedup();
+                let has_duplicates = all_len != self.value_cache.len();
+                if has_duplicates {
+                    self.fail(
+                        location,
+                        format!(
+                            "duplicated values in `SwitchInt` terminator: {:?}",
+                            terminator.kind,
+                        ),
+                    );
+                }
+            }
+            TerminatorKind::Drop { target, unwind, .. } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+                if let Some(unwind) = unwind {
+                    self.check_edge(location, *unwind, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::DropAndReplace { target, unwind, .. } => {
+                if self.mir_phase >= MirPhase::DropsLowered {
+                    self.fail(
+                        location,
+                        "`DropAndReplace` should have been removed during drop elaboration",
+                    );
+                }
+                self.check_edge(location, *target, EdgeKind::Normal);
+                if let Some(unwind) = unwind {
+                    self.check_edge(location, *unwind, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::Call { func, args, destination, target, cleanup, .. } => {
+                let func_ty = func.ty(&self.body.local_decls, self.tcx);
+                match func_ty.kind() {
+                    ty::FnPtr(..) | ty::FnDef(..) => {}
+                    _ => self.fail(
+                        location,
+                        format!("encountered non-callable type {} in `Call` terminator", func_ty),
+                    ),
+                }
+                if let Some(target) = target {
+                    self.check_edge(location, *target, EdgeKind::Normal);
+                }
+                if let Some(cleanup) = cleanup {
+                    self.check_edge(location, *cleanup, EdgeKind::Unwind);
+                }
+
+                // The call destination place and Operand::Move place used as an argument might be
+                // passed by a reference to the callee. Consequently they must be non-overlapping.
+                // Currently this simply checks for duplicate places.
+                self.place_cache.clear();
+                self.place_cache.push(destination.as_ref());
+                for arg in args {
+                    if let Operand::Move(place) = arg {
+                        self.place_cache.push(place.as_ref());
+                    }
+                }
+                let all_len = self.place_cache.len();
+                let mut dedup = FxHashSet::default();
+                self.place_cache.retain(|p| dedup.insert(*p));
+                let has_duplicates = all_len != self.place_cache.len();
+                if has_duplicates {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered overlapping memory in `Call` terminator: {:?}",
+                            terminator.kind,
+                        ),
+                    );
+                }
+            }
+            TerminatorKind::Assert { cond, target, cleanup, .. } => {
+                let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
+                if cond_ty != self.tcx.types.bool {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered non-boolean condition of type {} in `Assert` terminator",
+                            cond_ty
+                        ),
+                    );
+                }
+                self.check_edge(location, *target, EdgeKind::Normal);
+                if let Some(cleanup) = cleanup {
+                    self.check_edge(location, *cleanup, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::Yield { resume, drop, .. } => {
+                if self.body.generator.is_none() {
+                    self.fail(location, "`Yield` cannot appear outside generator bodies");
+                }
+                if self.mir_phase >= MirPhase::GeneratorsLowered {
+                    self.fail(location, "`Yield` should have been replaced by generator lowering");
+                }
+                self.check_edge(location, *resume, EdgeKind::Normal);
+                if let Some(drop) = drop {
+                    self.check_edge(location, *drop, EdgeKind::Normal);
+                }
+            }
+            TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+                if self.mir_phase >= MirPhase::DropsLowered {
+                    self.fail(
+                        location,
+                        "`FalseEdge` should have been removed after drop elaboration",
+                    );
+                }
+                self.check_edge(location, *real_target, EdgeKind::Normal);
+                self.check_edge(location, *imaginary_target, EdgeKind::Normal);
+            }
+            TerminatorKind::FalseUnwind { real_target, unwind } => {
+                if self.mir_phase >= MirPhase::DropsLowered {
+                    self.fail(
+                        location,
+                        "`FalseUnwind` should have been removed after drop elaboration",
+                    );
+                }
+                self.check_edge(location, *real_target, EdgeKind::Normal);
+                if let Some(unwind) = unwind {
+                    self.check_edge(location, *unwind, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::InlineAsm { destination, cleanup, .. } => {
+                if let Some(destination) = destination {
+                    self.check_edge(location, *destination, EdgeKind::Normal);
+                }
+                if let Some(cleanup) = cleanup {
+                    self.check_edge(location, *cleanup, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::GeneratorDrop => {
+                if self.body.generator.is_none() {
+                    self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
+                }
+                if self.mir_phase >= MirPhase::GeneratorsLowered {
+                    self.fail(
+                        location,
+                        "`GeneratorDrop` should have been replaced by generator lowering",
+                    );
+                }
+            }
+            TerminatorKind::Resume | TerminatorKind::Abort => {
+                let bb = location.block;
+                if !self.body.basic_blocks()[bb].is_cleanup {
+                    self.fail(location, "Cannot `Resume` or `Abort` from non-cleanup basic block")
+                }
+            }
+            TerminatorKind::Return => {
+                let bb = location.block;
+                if self.body.basic_blocks()[bb].is_cleanup {
+                    self.fail(location, "Cannot `Return` from cleanup basic block")
+                }
+            }
+            TerminatorKind::Unreachable => {}
+        }
+
+        self.super_terminator(terminator, location);
+    }
+
+    // Every referenced source scope must exist in `body.source_scopes`.
+    fn visit_source_scope(&mut self, scope: SourceScope) {
+        if self.body.source_scopes.get(scope).is_none() {
+            self.tcx.sess.diagnostic().delay_span_bug(
+                self.body.span,
+                &format!(
+                    "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
+                    self.body.source.instance, self.when, scope,
+                ),
+            );
+        }
+    }
+}
diff --git a/compiler/rustc_const_eval/src/util/aggregate.rs b/compiler/rustc_const_eval/src/util/aggregate.rs
new file mode 100644
index 000000000..180a40043
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/aggregate.rs
@@ -0,0 +1,77 @@
+use rustc_index::vec::Idx;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+
+use std::convert::TryFrom;
+use std::iter::TrustedLen;
+
+/// Expand `lhs = Rvalue::Aggregate(kind, operands)` into assignments to the fields.
+///
+/// Produces something like
+///
+/// (lhs as Variant).field0 = arg0; // We only have a downcast if this is an enum
+/// (lhs as Variant).field1 = arg1;
+/// discriminant(lhs) = variant_index; // If lhs is an enum or generator.
+/// Expand `lhs = Rvalue::Aggregate(kind, operands)` into assignments to the fields.
+///
+/// Produces something like
+///
+/// (lhs as Variant).field0 = arg0;     // We only have a downcast if this is an enum
+/// (lhs as Variant).field1 = arg1;
+/// discriminant(lhs) = variant_index;  // If lhs is an enum or generator.
+pub fn expand_aggregate<'tcx>(
+    orig_lhs: Place<'tcx>,
+    operands: impl Iterator<Item = (Operand<'tcx>, Ty<'tcx>)> + TrustedLen,
+    kind: AggregateKind<'tcx>,
+    source_info: SourceInfo,
+    tcx: TyCtxt<'tcx>,
+) -> impl Iterator<Item = Statement<'tcx>> + TrustedLen {
+    let mut lhs = orig_lhs;
+    // `SetDiscriminant` statement emitted last, if the type has a discriminant.
+    let mut set_discriminant = None;
+    let active_field_index = match kind {
+        AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
+            let adt_def = tcx.adt_def(adt_did);
+            if adt_def.is_enum() {
+                set_discriminant = Some(Statement {
+                    kind: StatementKind::SetDiscriminant {
+                        place: Box::new(orig_lhs),
+                        variant_index,
+                    },
+                    source_info,
+                });
+                // Field assignments for an enum go through the variant downcast.
+                lhs = tcx.mk_place_downcast(orig_lhs, adt_def, variant_index);
+            }
+            // `Some` only for unions, where exactly one field is initialized.
+            active_field_index
+        }
+        AggregateKind::Generator(..) => {
+            // Right now we only support initializing generators to
+            // variant 0 (Unresumed).
+            let variant_index = VariantIdx::new(0);
+            set_discriminant = Some(Statement {
+                kind: StatementKind::SetDiscriminant { place: Box::new(orig_lhs), variant_index },
+                source_info,
+            });
+
+            // Operands are upvars stored on the base place, so no
+            // downcast is necessary.
+
+            None
+        }
+        _ => None,
+    };
+
+    // One `Assign` per operand: constant-indexed for arrays, field projections otherwise.
+    let operands = operands.enumerate().map(move |(i, (op, ty))| {
+        let lhs_field = if let AggregateKind::Array(_) = kind {
+            let offset = u64::try_from(i).unwrap();
+            tcx.mk_place_elem(
+                lhs,
+                ProjectionElem::ConstantIndex { offset, min_length: offset + 1, from_end: false },
+            )
+        } else {
+            let field = Field::new(active_field_index.unwrap_or(i));
+            tcx.mk_place_field(lhs, field, ty)
+        };
+        Statement {
+            source_info,
+            kind: StatementKind::Assign(Box::new((lhs_field, Rvalue::Use(op)))),
+        }
+    });
+    // Emit: Deinit first, then the field assignments, then SetDiscriminant (if any).
+    [Statement { source_info, kind: StatementKind::Deinit(Box::new(orig_lhs)) }]
+        .into_iter()
+        .chain(operands)
+        .chain(set_discriminant)
+}
diff --git a/compiler/rustc_const_eval/src/util/alignment.rs b/compiler/rustc_const_eval/src/util/alignment.rs
new file mode 100644
index 000000000..4f39dad20
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/alignment.rs
@@ -0,0 +1,63 @@
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_target::abi::Align;
+
+/// Returns `true` if this place is allowed to be less aligned
+/// than its containing struct (because it is within a packed
+/// struct).
+pub fn is_disaligned<'tcx, L>(
+    tcx: TyCtxt<'tcx>,
+    local_decls: &L,
+    param_env: ty::ParamEnv<'tcx>,
+    place: Place<'tcx>,
+) -> bool
+where
+    L: HasLocalDecls<'tcx>,
+{
+    debug!("is_disaligned({:?})", place);
+    let Some(pack) = is_within_packed(tcx, local_decls, place) else {
+        debug!("is_disaligned({:?}) - not within packed", place);
+        return false;
+    };
+
+    let ty = place.ty(local_decls, tcx).ty;
+    match tcx.layout_of(param_env.and(ty)) {
+        Ok(layout) if layout.align.abi <= pack => {
+            // If the packed alignment is greater or equal to the field alignment, the type won't be
+            // further disaligned.
+            debug!(
+                "is_disaligned({:?}) - align = {}, packed = {}; not disaligned",
+                place,
+                layout.align.abi.bytes(),
+                pack.bytes()
+            );
+            false
+        }
+        // Layout errors are conservatively treated as disaligned.
+        _ => {
+            debug!("is_disaligned({:?}) - true", place);
+            true
+        }
+    }
+}
+
+/// If `place` projects through any `#[repr(packed)]` ADT (before hitting a
+/// `Deref`), returns the smallest packed alignment in effect; `None` otherwise.
+fn is_within_packed<'tcx, L>(
+    tcx: TyCtxt<'tcx>,
+    local_decls: &L,
+    place: Place<'tcx>,
+) -> Option<Align>
+where
+    L: HasLocalDecls<'tcx>,
+{
+    place
+        .iter_projections()
+        .rev()
+        // Stop at `Deref`; standard ABI alignment applies there.
+        .take_while(|(_base, elem)| !matches!(elem, ProjectionElem::Deref))
+        // Consider the packed alignments at play here...
+        .filter_map(|(base, _elem)| {
+            base.ty(local_decls, tcx).ty.ty_adt_def().and_then(|adt| adt.repr().pack)
+        })
+        // ... and compute their minimum.
+        // The overall smallest alignment is what matters.
+        .min()
+}
diff --git a/compiler/rustc_const_eval/src/util/call_kind.rs b/compiler/rustc_const_eval/src/util/call_kind.rs
new file mode 100644
index 000000000..af9d83f06
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/call_kind.rs
@@ -0,0 +1,146 @@
+//! Common logic for borrowck use-after-move errors when moved into a `fn(self)`,
+//! as well as errors when attempting to call a non-const function in a const
+//! context.
+
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItemGroup;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, AssocItemContainer, DefIdTree, Instance, ParamEnv, Ty, TyCtxt};
+use rustc_span::symbol::Ident;
+use rustc_span::{sym, DesugaringKind, Span};
+
/// The kind of compiler desugaring that introduced an implicit method call.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum CallDesugaringKind {
    /// `for _ in x {}` calls `x.into_iter()`
    ForLoopIntoIter,
    /// `x?` calls `x.branch()`
    QuestionBranch,
    /// `x?` calls `type_of(x)::from_residual()`
    QuestionFromResidual,
    /// `try { ..; x }` calls `type_of(x)::from_output(x)`
    TryBlockFromOutput,
}
+
impl CallDesugaringKind {
    /// Returns the `DefId` of the trait whose method this desugaring invokes:
    /// `IntoIterator` for `for` loops, the `Try` lang trait for `?`-branch and
    /// `try`-block output, and `FromResidual` for `?` error conversion.
    pub fn trait_def_id(self, tcx: TyCtxt<'_>) -> DefId {
        match self {
            Self::ForLoopIntoIter => tcx.get_diagnostic_item(sym::IntoIterator).unwrap(),
            Self::QuestionBranch | Self::TryBlockFromOutput => {
                tcx.lang_items().try_trait().unwrap()
            }
            Self::QuestionFromResidual => tcx.get_diagnostic_item(sym::FromResidual).unwrap(),
        }
    }
}
+
/// Classification of a method call for diagnostics purposes.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum CallKind<'tcx> {
    /// A normal method call of the form `receiver.foo(a, b, c)`
    Normal {
        /// The `self` argument's identifier, when one is available.
        self_arg: Option<Ident>,
        /// The desugaring that produced this call (and the relevant type),
        /// if it was compiler-generated rather than written by the user.
        desugaring: Option<(CallDesugaringKind, Ty<'tcx>)>,
        /// Whether the self type of the method call has an `.as_ref()` method.
        /// Used for better diagnostics.
        is_option_or_result: bool,
    },
    /// A call to `Fn(..)::call(..)`, desugared from `my_closure(a, b, c)`
    FnCall { fn_trait_id: DefId, self_ty: Ty<'tcx> },
    /// A call to an operator trait, desugared from operator syntax (e.g. `a << b`)
    Operator { self_arg: Option<Ident>, trait_id: DefId, self_ty: Ty<'tcx> },
    /// An implicit call to `deref()`, inserted by a deref coercion.
    DerefCoercion {
        /// The `Span` of the `Target` associated type
        /// in the `Deref` impl we are using.
        deref_target: Span,
        /// The type `T::Deref` we are dereferencing to
        deref_target_ty: Ty<'tcx>,
        self_ty: Ty<'tcx>,
    },
}
+
/// Classifies the call to `method_did` (with `method_substs`) at
/// `fn_call_span` for diagnostics: a closure call via an `Fn*` trait, an
/// operator desugaring, a deref coercion, or a normal method call
/// (possibly introduced by a `for`/`?`/`try` desugaring).
///
/// `from_hir_call` is `false` when the call was compiler-generated rather
/// than written by the user; only such calls are considered as operators
/// or deref coercions.
pub fn call_kind<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ParamEnv<'tcx>,
    method_did: DefId,
    method_substs: SubstsRef<'tcx>,
    fn_call_span: Span,
    from_hir_call: bool,
    self_arg: Option<Ident>,
) -> CallKind<'tcx> {
    // The trait this method belongs to, if any: trait methods report their
    // trait directly, impl methods are resolved back to the implemented trait.
    let parent = tcx.opt_associated_item(method_did).and_then(|assoc| {
        let container_id = assoc.container_id(tcx);
        match assoc.container {
            AssocItemContainer::ImplContainer => tcx.trait_id_of_impl(container_id),
            AssocItemContainer::TraitContainer => Some(container_id),
        }
    });

    // Is the parent one of the `Fn`/`FnMut`/`FnOnce` lang traits?
    let fn_call = parent
        .and_then(|p| tcx.lang_items().group(LangItemGroup::Fn).iter().find(|did| **did == p));

    // Is the parent an operator lang trait? Only considered for calls the
    // user did not write explicitly.
    let operator = (!from_hir_call)
        .then(|| parent)
        .flatten()
        .and_then(|p| tcx.lang_items().group(LangItemGroup::Op).iter().find(|did| **did == p));

    let is_deref = !from_hir_call && tcx.is_diagnostic_item(sym::deref_method, method_did);

    // Check for a 'special' use of 'self' -
    // an FnOnce call, an operator (e.g. `<<`), or a
    // deref coercion.
    let kind = if let Some(&trait_id) = fn_call {
        Some(CallKind::FnCall { fn_trait_id: trait_id, self_ty: method_substs.type_at(0) })
    } else if let Some(&trait_id) = operator {
        Some(CallKind::Operator { self_arg, trait_id, self_ty: method_substs.type_at(0) })
    } else if is_deref {
        // Resolve `Deref::Target` for the receiver so the diagnostic can
        // point at the `Target` associated type in the `Deref` impl used.
        let deref_target = tcx.get_diagnostic_item(sym::deref_target).and_then(|deref_target| {
            Instance::resolve(tcx, param_env, deref_target, method_substs).transpose()
        });
        if let Some(Ok(instance)) = deref_target {
            let deref_target_ty = instance.ty(tcx, param_env);
            Some(CallKind::DerefCoercion {
                deref_target: tcx.def_span(instance.def_id()),
                deref_target_ty,
                self_ty: method_substs.type_at(0),
            })
        } else {
            None
        }
    } else {
        None
    };

    kind.unwrap_or_else(|| {
        // This isn't a 'special' use of `self`
        debug!(?method_did, ?fn_call_span);
        // Detect desugarings that introduce implicit method calls: `for`
        // loops (`into_iter`), `?` (`branch`/`from_residual`), and `try`
        // blocks (`from_output`). Both the method identity and the span's
        // desugaring kind must match.
        let desugaring = if Some(method_did) == tcx.lang_items().into_iter_fn()
            && fn_call_span.desugaring_kind() == Some(DesugaringKind::ForLoop)
        {
            Some((CallDesugaringKind::ForLoopIntoIter, method_substs.type_at(0)))
        } else if fn_call_span.desugaring_kind() == Some(DesugaringKind::QuestionMark) {
            if Some(method_did) == tcx.lang_items().branch_fn() {
                Some((CallDesugaringKind::QuestionBranch, method_substs.type_at(0)))
            } else if Some(method_did) == tcx.lang_items().from_residual_fn() {
                Some((CallDesugaringKind::QuestionFromResidual, method_substs.type_at(0)))
            } else {
                None
            }
        } else if Some(method_did) == tcx.lang_items().from_output_fn()
            && fn_call_span.desugaring_kind() == Some(DesugaringKind::TryBlock)
        {
            Some((CallDesugaringKind::TryBlockFromOutput, method_substs.type_at(0)))
        } else {
            None
        };
        // If the method is defined inside an impl block, look up the ADT it
        // is implemented for; `Option`/`Result` receivers get an `.as_ref()`
        // suggestion downstream.
        let parent_did = tcx.parent(method_did);
        let parent_self_ty = (tcx.def_kind(parent_did) == rustc_hir::def::DefKind::Impl)
            .then_some(parent_did)
            .and_then(|did| match tcx.type_of(did).kind() {
                ty::Adt(def, ..) => Some(def.did()),
                _ => None,
            });
        let is_option_or_result = parent_self_ty.map_or(false, |def_id| {
            matches!(tcx.get_diagnostic_name(def_id), Some(sym::Option | sym::Result))
        });
        CallKind::Normal { self_arg, desugaring, is_option_or_result }
    })
}
diff --git a/compiler/rustc_const_eval/src/util/collect_writes.rs b/compiler/rustc_const_eval/src/util/collect_writes.rs
new file mode 100644
index 000000000..8d92bb359
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/collect_writes.rs
@@ -0,0 +1,36 @@
+use rustc_middle::mir::visit::PlaceContext;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{Body, Local, Location};
+
pub trait FindAssignments {
    /// Finds all statements that assign directly to `local` (i.e., `X = ...`)
    /// and returns their locations.
    fn find_assignments(&self, local: Local) -> Vec<Location>;
}
+
+impl<'tcx> FindAssignments for Body<'tcx> {
+ fn find_assignments(&self, local: Local) -> Vec<Location> {
+ let mut visitor = FindLocalAssignmentVisitor { needle: local, locations: vec![] };
+ visitor.visit_body(self);
+ visitor.locations
+ }
+}
+
/// The Visitor walks the MIR to return the assignment statements corresponding
/// to a `Local`.
struct FindLocalAssignmentVisitor {
    // The local whose assignments we are collecting.
    needle: Local,
    // Locations of the statements that assign to `needle`.
    locations: Vec<Location>,
}
+
+impl<'tcx> Visitor<'tcx> for FindLocalAssignmentVisitor {
+ fn visit_local(&mut self, local: Local, place_context: PlaceContext, location: Location) {
+ if self.needle != local {
+ return;
+ }
+
+ if place_context.is_place_assignment() {
+ self.locations.push(location);
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/util/find_self_call.rs b/compiler/rustc_const_eval/src/util/find_self_call.rs
new file mode 100644
index 000000000..33ad128ee
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/find_self_call.rs
@@ -0,0 +1,36 @@
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::def_id::DefId;
+
/// Checks if the specified `local` is used as the `self` parameter of a method call
/// in the provided `BasicBlock`. If it is, then the `DefId` of the called method is
/// returned.
pub fn find_self_call<'tcx>(
    tcx: TyCtxt<'tcx>,
    body: &Body<'tcx>,
    local: Local,
    block: BasicBlock,
) -> Option<(DefId, SubstsRef<'tcx>)> {
    debug!("find_self_call(local={:?}): terminator={:?}", local, &body[block].terminator);
    // A call appears in MIR as a `Call` terminator of the block...
    if let Some(Terminator { kind: TerminatorKind::Call { func, args, .. }, .. }) =
        &body[block].terminator
    {
        debug!("find_self_call: func={:?}", func);
        // ...whose callee is a constant of `FnDef` type (a direct call).
        if let Operand::Constant(box Constant { literal, .. }) = func {
            if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
                // Only associated functions with a `self` parameter count as
                // method calls.
                if let Some(ty::AssocItem { fn_has_self_parameter: true, .. }) =
                    tcx.opt_associated_item(def_id)
                {
                    debug!("find_self_call: args={:?}", args);
                    // `self` is the first argument; it may be moved or copied
                    // into the call.
                    if let [Operand::Move(self_place) | Operand::Copy(self_place), ..] = **args {
                        if self_place.as_local() == Some(local) {
                            return Some((def_id, substs));
                        }
                    }
                }
            }
        }
    }
    None
}
diff --git a/compiler/rustc_const_eval/src/util/mod.rs b/compiler/rustc_const_eval/src/util/mod.rs
new file mode 100644
index 000000000..a1876bed8
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/mod.rs
@@ -0,0 +1,10 @@
+pub mod aggregate;
+mod alignment;
+mod call_kind;
+pub mod collect_writes;
+mod find_self_call;
+
+pub use self::aggregate::expand_aggregate;
+pub use self::alignment::is_disaligned;
+pub use self::call_kind::{call_kind, CallDesugaringKind, CallKind};
+pub use self::find_self_call::find_self_call;