Diffstat (limited to 'compiler/rustc_const_eval/src/const_eval')
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/error.rs         252
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/eval_queries.rs  395
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/fn_queries.rs     82
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/machine.rs       527
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/mod.rs           163
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/valtrees.rs      475
6 files changed, 1894 insertions, 0 deletions
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
new file mode 100644
index 000000000..322bfd5ce
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -0,0 +1,252 @@
+use std::error::Error;
+use std::fmt;
+
+use rustc_errors::Diagnostic;
+use rustc_hir as hir;
+use rustc_middle::mir::AssertKind;
+use rustc_middle::ty::{layout::LayoutError, query::TyCtxtAt, ConstInt};
+use rustc_span::{Span, Symbol};
+
+use super::InterpCx;
+use crate::interpret::{
+ struct_error, ErrorHandled, FrameInfo, InterpError, InterpErrorInfo, Machine, MachineStopType, UnsupportedOpInfo,
+};
+
+/// The CTFE machine has some custom error kinds.
+#[derive(Clone, Debug)]
+pub enum ConstEvalErrKind {
+ NeedsRfc(String),
+ ConstAccessesStatic,
+ ModifiedGlobal,
+ AssertFailure(AssertKind<ConstInt>),
+ Panic { msg: Symbol, line: u32, col: u32, file: Symbol },
+ Abort(String),
+}
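+// Illustrative, user-side sketch (not part of this module): a constant such as
+//
+//     const C: () = panic!("boom");
+//
+// is intercepted during CTFE and surfaces as `ConstEvalErrKind::Panic`, carrying the
+// panic message together with the file, line and column of the panic site.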
+
+impl MachineStopType for ConstEvalErrKind {
+ fn is_hard_err(&self) -> bool {
+ matches!(self, Self::Panic { .. })
+ }
+}
+
+// The errors become `MachineStop` with plain strings when being raised.
+// `ConstEvalErr` (defined below in this module) knows how to
+// handle these.
+impl<'tcx> Into<InterpErrorInfo<'tcx>> for ConstEvalErrKind {
+ fn into(self) -> InterpErrorInfo<'tcx> {
+ err_machine_stop!(self).into()
+ }
+}
+
+impl fmt::Display for ConstEvalErrKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use self::ConstEvalErrKind::*;
+ match *self {
+ NeedsRfc(ref msg) => {
+ write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
+ }
+ ConstAccessesStatic => write!(f, "constant accesses static"),
+ ModifiedGlobal => {
+ write!(f, "modifying a static's initial value from another static's initializer")
+ }
+ AssertFailure(ref msg) => write!(f, "{:?}", msg),
+ Panic { msg, line, col, file } => {
+ write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col)
+ }
+ Abort(ref msg) => write!(f, "{}", msg),
+ }
+ }
+}
+
+impl Error for ConstEvalErrKind {}
+
+/// When const-evaluation errors, this type is constructed with the resulting information,
+/// and then used to emit the error as a lint or hard error.
+#[derive(Debug)]
+pub struct ConstEvalErr<'tcx> {
+ pub span: Span,
+ pub error: InterpError<'tcx>,
+ pub stacktrace: Vec<FrameInfo<'tcx>>,
+}
+
+impl<'tcx> ConstEvalErr<'tcx> {
+ /// Turn an interpreter error into something to report to the user.
+ /// As a side-effect, if RUSTC_CTFE_BACKTRACE is set, this prints the backtrace.
+ /// Should be called only if the error is actually going to be reported!
+ pub fn new<'mir, M: Machine<'mir, 'tcx>>(
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ error: InterpErrorInfo<'tcx>,
+ span: Option<Span>,
+ ) -> ConstEvalErr<'tcx>
+ where
+ 'tcx: 'mir,
+ {
+ error.print_backtrace();
+ let mut stacktrace = ecx.generate_stacktrace();
+ // Filter out `requires_caller_location` frames.
+ stacktrace.retain(|frame| !frame.instance.def.requires_caller_location(*ecx.tcx));
+ // If `span` is missing, use topmost remaining frame, or else the "root" span from `ecx.tcx`.
+ let span = span.or_else(|| stacktrace.first().map(|f| f.span)).unwrap_or(ecx.tcx.span);
+ ConstEvalErr { error: error.into_kind(), stacktrace, span }
+ }
+
+ pub fn struct_error(
+ &self,
+ tcx: TyCtxtAt<'tcx>,
+ message: &str,
+ decorate: impl FnOnce(&mut Diagnostic),
+ ) -> ErrorHandled {
+ self.struct_generic(tcx, message, decorate, None)
+ }
+
+ pub fn report_as_error(&self, tcx: TyCtxtAt<'tcx>, message: &str) -> ErrorHandled {
+ self.struct_error(tcx, message, |_| {})
+ }
+
+ pub fn report_as_lint(
+ &self,
+ tcx: TyCtxtAt<'tcx>,
+ message: &str,
+ lint_root: hir::HirId,
+ span: Option<Span>,
+ ) -> ErrorHandled {
+ self.struct_generic(
+ tcx,
+ message,
+ |lint: &mut Diagnostic| {
+ // Apply the span.
+ if let Some(span) = span {
+ let primary_spans = lint.span.primary_spans().to_vec();
+ // point at the actual error as the primary span
+ lint.replace_span_with(span);
+ // point to the `const` item as a secondary span,
+ // without attaching any label to it
+ for sp in primary_spans {
+ if sp != span {
+ lint.span_label(sp, "");
+ }
+ }
+ }
+ },
+ Some(lint_root),
+ )
+ }
+
+ /// Create a diagnostic for this const eval error.
+ ///
+ /// Sets the message passed in via `message` and adds span labels with detailed error
+ /// information before handing control back to `decorate` to do any final annotations,
+ /// after which the diagnostic is emitted.
+ ///
+ /// If `lint_root.is_some()` report it as a lint, else report it as a hard error.
+ /// (Except that for some errors, we ignore all that -- see the special cases at the start of this function.)
+ #[instrument(skip(self, tcx, decorate, lint_root), level = "debug")]
+ fn struct_generic(
+ &self,
+ tcx: TyCtxtAt<'tcx>,
+ message: &str,
+ decorate: impl FnOnce(&mut Diagnostic),
+ lint_root: Option<hir::HirId>,
+ ) -> ErrorHandled {
+ let finish = |err: &mut Diagnostic, span_msg: Option<String>| {
+ trace!("reporting const eval failure at {:?}", self.span);
+ if let Some(span_msg) = span_msg {
+ err.span_label(self.span, span_msg);
+ }
+ // Add some more context for select error types.
+ match self.error {
+ InterpError::Unsupported(
+ UnsupportedOpInfo::ReadPointerAsBytes
+ | UnsupportedOpInfo::PartialPointerOverwrite(_)
+ ) => {
+ err.help("this code performed an operation that depends on the underlying bytes representing a pointer");
+ err.help("the absolute address of a pointer is not known at compile-time, so such operations are not supported");
+ }
+ _ => {}
+ }
+ // Add spans for the stacktrace. Don't print a single-line backtrace though.
+ if self.stacktrace.len() > 1 {
+ // Helper closure to print duplicated lines.
+ let mut flush_last_line = |last_frame, times| {
+ if let Some((line, span)) = last_frame {
+ err.span_label(span, &line);
+ // Don't print [... additional calls ...] if the number of lines is small
+ if times < 3 {
+ for _ in 0..times {
+ err.span_label(span, &line);
+ }
+ } else {
+ err.span_label(
+ span,
+ format!("[... {} additional calls {} ...]", times, &line),
+ );
+ }
+ }
+ };
+
+ let mut last_frame = None;
+ let mut times = 0;
+ for frame_info in &self.stacktrace {
+ let frame = (frame_info.to_string(), frame_info.span);
+ if last_frame.as_ref() == Some(&frame) {
+ times += 1;
+ } else {
+ flush_last_line(last_frame, times);
+ last_frame = Some(frame);
+ times = 0;
+ }
+ }
+ flush_last_line(last_frame, times);
+ }
+ // Let the caller attach any additional information it wants.
+ decorate(err);
+ };
+
+ debug!("self.error: {:?}", self.error);
+ // Special handling for certain errors
+ match &self.error {
+ // Don't emit a new diagnostic for these errors
+ err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
+ return ErrorHandled::TooGeneric;
+ }
+ err_inval!(AlreadyReported(error_reported)) => {
+ return ErrorHandled::Reported(*error_reported);
+ }
+ err_inval!(Layout(LayoutError::SizeOverflow(_))) => {
+ // We must *always* hard error on these, even if the caller wants just a lint.
+ // The `message` makes little sense here, this is a more serious error than the
+ // caller thinks anyway.
+ // See <https://github.com/rust-lang/rust/pull/63152>.
+ let mut err = struct_error(tcx, &self.error.to_string());
+ finish(&mut err, None);
+ return ErrorHandled::Reported(err.emit());
+ }
+ _ => {}
+ };
+
+ let err_msg = self.error.to_string();
+
+ // Regular case - emit a lint.
+ if let Some(lint_root) = lint_root {
+ // Report as lint.
+ let hir_id =
+ self.stacktrace.iter().rev().find_map(|frame| frame.lint_root).unwrap_or(lint_root);
+ tcx.struct_span_lint_hir(
+ rustc_session::lint::builtin::CONST_ERR,
+ hir_id,
+ tcx.span,
+ |lint| {
+ let mut lint = lint.build(message);
+ finish(&mut lint, Some(err_msg));
+ lint.emit();
+ },
+ );
+ ErrorHandled::Linted
+ } else {
+ // Report as hard error.
+ let mut err = struct_error(tcx, message);
+ finish(&mut err, Some(err_msg));
+ ErrorHandled::Reported(err.emit())
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
new file mode 100644
index 000000000..975fb4b22
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -0,0 +1,395 @@
+use super::{CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr};
+use crate::interpret::eval_nullary_intrinsic;
+use crate::interpret::{
+ intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId,
+ Immediate, InternKind, InterpCx, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking,
+ ScalarMaybeUninit, StackPopCleanup, InterpError,
+};
+
+use rustc_hir::def::DefKind;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::mir::pretty::display_allocation;
+use rustc_middle::traits::Reveal;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, subst::Subst, TyCtxt};
+use rustc_span::source_map::Span;
+use rustc_target::abi::{self, Abi};
+use std::borrow::Cow;
+use std::convert::TryInto;
+
+const NOTE_ON_UNDEFINED_BEHAVIOR_ERROR: &str = "The rules on what exactly is undefined behavior aren't clear, \
+ so this check might be overzealous. Please open an issue on the rustc \
+ repository if you believe it should not be considered undefined behavior.";
+
+// Returns a pointer to where the result lives
+fn eval_body_using_ecx<'mir, 'tcx>(
+ ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
+ cid: GlobalId<'tcx>,
+ body: &'mir mir::Body<'tcx>,
+) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
+ debug!("eval_body_using_ecx: {:?}, {:?}", cid, ecx.param_env);
+ let tcx = *ecx.tcx;
+ assert!(
+ cid.promoted.is_some()
+ || matches!(
+ ecx.tcx.def_kind(cid.instance.def_id()),
+ DefKind::Const
+ | DefKind::Static(_)
+ | DefKind::ConstParam
+ | DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::AssocConst
+ ),
+ "Unexpected DefKind: {:?}",
+ ecx.tcx.def_kind(cid.instance.def_id())
+ );
+ let layout = ecx.layout_of(body.bound_return_ty().subst(tcx, cid.instance.substs))?;
+ assert!(!layout.is_unsized());
+ let ret = ecx.allocate(layout, MemoryKind::Stack)?;
+
+ trace!(
+ "eval_body_using_ecx: pushing stack frame for global: {}{}",
+ with_no_trimmed_paths!(ty::tls::with(|tcx| tcx.def_path_str(cid.instance.def_id()))),
+ cid.promoted.map_or_else(String::new, |p| format!("::promoted[{:?}]", p))
+ );
+
+ ecx.push_stack_frame(
+ cid.instance,
+ body,
+ &ret.into(),
+ StackPopCleanup::Root { cleanup: false },
+ )?;
+
+ // The main interpreter loop.
+ ecx.run()?;
+
+ // Intern the result
+ let intern_kind = if cid.promoted.is_some() {
+ InternKind::Promoted
+ } else {
+ match tcx.static_mutability(cid.instance.def_id()) {
+ Some(m) => InternKind::Static(m),
+ None => InternKind::Constant,
+ }
+ };
+ intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
+
+ debug!("eval_body_using_ecx done: {:?}", *ret);
+ Ok(ret)
+}
+
+/// The `InterpCx` is only meant to be used to do field and index projections into constants for
+/// `simd_shuffle` and const patterns in match arms.
+///
+/// The function containing the `match` that is currently being analyzed may have generic bounds
+/// that inform us about the generic bounds of the constant. E.g., using an associated constant
+/// of a function's generic parameter will require knowledge about the bounds on the generic
+/// parameter. These bounds are passed to `mk_eval_cx` via the `ParamEnv` argument.
+pub(super) fn mk_eval_cx<'mir, 'tcx>(
+ tcx: TyCtxt<'tcx>,
+ root_span: Span,
+ param_env: ty::ParamEnv<'tcx>,
+ can_access_statics: bool,
+) -> CompileTimeEvalContext<'mir, 'tcx> {
+ debug!("mk_eval_cx: {:?}", param_env);
+ InterpCx::new(
+ tcx,
+ root_span,
+ param_env,
+ CompileTimeInterpreter::new(tcx.const_eval_limit(), can_access_statics),
+ )
+}
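+// A hedged illustration of the kind of user code that is evaluated through such an
+// eval context: a named constant used as a match pattern has to be projected into and
+// compared structurally at compile time.
+//
+//     const ANSWER: u32 = 42;
+//
+//     fn classify(x: u32) -> &'static str {
+//         match x {
+//             ANSWER => "the answer",
+//             _ => "something else",
+//         }
+//     }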
+
+/// This function converts an interpreter value into a constant that is meant for use in the
+/// type system.
+#[instrument(skip(ecx), level = "debug")]
+pub(super) fn op_to_const<'tcx>(
+ ecx: &CompileTimeEvalContext<'_, 'tcx>,
+ op: &OpTy<'tcx>,
+) -> ConstValue<'tcx> {
+ // We do not have value optimizations for everything.
+ // Only scalars and slices, since they are very common.
+ // Note that further down we turn scalars of uninitialized bits back to `ByRef`. These can result
+ // from scalar unions that are initialized with one of their zero sized variants. We could
+ // instead allow `ConstValue::Scalar` to store `ScalarMaybeUninit`, but that would affect all
+ // the usual cases of extracting e.g. a `usize`, without there being a real use case for the
+ // `Undef` situation.
+ let try_as_immediate = match op.layout.abi {
+ Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
+ Abi::ScalarPair(..) => match op.layout.ty.kind() {
+ ty::Ref(_, inner, _) => match *inner.kind() {
+ ty::Slice(elem) => elem == ecx.tcx.types.u8,
+ ty::Str => true,
+ _ => false,
+ },
+ _ => false,
+ },
+ _ => false,
+ };
+ let immediate = if try_as_immediate {
+ Err(ecx.read_immediate(op).expect("normalization works on validated constants"))
+ } else {
+ // It is guaranteed that any non-slice scalar pair is actually ByRef here.
+ // When we come back from raw const eval, we are always by-ref. The only way our op here is
+ // by-val is if we are in destructure_mir_constant, i.e., if this is (a field of) something that we
+ // "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
+ // structs containing such.
+ op.try_as_mplace()
+ };
+
+ debug!(?immediate);
+
+ // We know `offset` is relative to the allocation, so we can use `into_parts`.
+ let to_const_value = |mplace: &MPlaceTy<'_>| {
+ debug!("to_const_value(mplace: {:?})", mplace);
+ match mplace.ptr.into_parts() {
+ (Some(alloc_id), offset) => {
+ let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
+ ConstValue::ByRef { alloc, offset }
+ }
+ (None, offset) => {
+ assert!(mplace.layout.is_zst());
+ assert_eq!(
+ offset.bytes() % mplace.layout.align.abi.bytes(),
+ 0,
+ "this MPlaceTy must come from a validated constant, thus we can assume the \
+ alignment is correct",
+ );
+ ConstValue::ZeroSized
+ }
+ }
+ };
+ match immediate {
+ Ok(ref mplace) => to_const_value(mplace),
+ // see comment on `let try_as_immediate` above
+ Err(imm) => match *imm {
+ _ if imm.layout.is_zst() => ConstValue::ZeroSized,
+ Immediate::Scalar(x) => match x {
+ ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s),
+ ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()),
+ },
+ Immediate::ScalarPair(a, b) => {
+ debug!("ScalarPair(a: {:?}, b: {:?})", a, b);
+ // We know `offset` is relative to the allocation, so we can use `into_parts`.
+ let (data, start) = match a.to_pointer(ecx).unwrap().into_parts() {
+ (Some(alloc_id), offset) => {
+ (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
+ }
+ (None, _offset) => (
+ ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
+ b"" as &[u8],
+ )),
+ 0,
+ ),
+ };
+ let len = b.to_machine_usize(ecx).unwrap();
+ let start = start.try_into().unwrap();
+ let len: usize = len.try_into().unwrap();
+ ConstValue::Slice { data, start, end: start + len }
+ }
+ Immediate::Uninit => to_const_value(&op.assert_mem_place()),
+ },
+ }
+}
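+// Rough examples of the resulting representations (assuming the usual layouts):
+//
+//     const A: usize = 1;           // becomes `ConstValue::Scalar`
+//     const B: &str = "hi";         // becomes `ConstValue::Slice`
+//     const C: (u64, u64) = (1, 2); // stays `ConstValue::ByRef`; no value optimization
+//
+// Only scalars and `u8`/`str` slices take the "immediate" fast path above.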
+
+#[instrument(skip(tcx), level = "debug")]
+pub(crate) fn turn_into_const_value<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ constant: ConstAlloc<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ConstValue<'tcx> {
+ let cid = key.value;
+ let def_id = cid.instance.def.def_id();
+ let is_static = tcx.is_static(def_id);
+ let ecx = mk_eval_cx(tcx, tcx.def_span(key.value.instance.def_id()), key.param_env, is_static);
+
+ let mplace = ecx.raw_const_to_mplace(constant).expect(
+ "can only fail if layout computation failed, \
+ which should have given a good error before ever invoking this function",
+ );
+ assert!(
+ !is_static || cid.promoted.is_some(),
+ "the `eval_to_const_value_raw` query should not be used for statics, use `eval_to_allocation` instead"
+ );
+
+ // Turn this into a proper constant.
+ let const_val = op_to_const(&ecx, &mplace.into());
+ debug!(?const_val);
+
+ const_val
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub fn eval_to_const_value_raw_provider<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ::rustc_middle::mir::interpret::EvalToConstValueResult<'tcx> {
+ assert!(key.param_env.is_const());
+ // see comment in eval_to_allocation_raw_provider for what we're doing here
+ if key.param_env.reveal() == Reveal::All {
+ let mut key = key;
+ key.param_env = key.param_env.with_user_facing();
+ match tcx.eval_to_const_value_raw(key) {
+ // try again with reveal all as requested
+ Err(ErrorHandled::TooGeneric) => {}
+ // deduplicate calls
+ other => return other,
+ }
+ }
+
+ // We call `const_eval` for zero arg intrinsics, too, in order to cache their value.
+ // Catch such calls and evaluate them instead of trying to load a constant's MIR.
+ if let ty::InstanceDef::Intrinsic(def_id) = key.value.instance.def {
+ let ty = key.value.instance.ty(tcx, key.param_env);
+ let ty::FnDef(_, substs) = ty.kind() else {
+ bug!("intrinsic with type {:?}", ty);
+ };
+ return eval_nullary_intrinsic(tcx, key.param_env, def_id, substs).map_err(|error| {
+ let span = tcx.def_span(def_id);
+ let error = ConstEvalErr { error: error.into_kind(), stacktrace: vec![], span };
+ error.report_as_error(tcx.at(span), "could not evaluate nullary intrinsic")
+ });
+ }
+
+ tcx.eval_to_allocation_raw(key).map(|val| turn_into_const_value(tcx, val, key))
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub fn eval_to_allocation_raw_provider<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> {
+ assert!(key.param_env.is_const());
+ // Because the constant is computed twice (once per value of `Reveal`), we are at risk of
+ // reporting the same error twice here. To resolve this, we check whether we can evaluate the
+ // constant in the more restrictive `Reveal::UserFacing`, which most likely already was
+ // computed. For a large percentage of constants that will already have succeeded. Only
+ // associated constants of generic functions will fail due to not enough monomorphization
+ // information being available.
+
+ // In case we fail in the `UserFacing` variant, we just do the real computation.
+ if key.param_env.reveal() == Reveal::All {
+ let mut key = key;
+ key.param_env = key.param_env.with_user_facing();
+ match tcx.eval_to_allocation_raw(key) {
+ // try again with reveal all as requested
+ Err(ErrorHandled::TooGeneric) => {}
+ // deduplicate calls
+ other => return other,
+ }
+ }
+ if cfg!(debug_assertions) {
+ // Make sure we format the instance even if we do not print it.
+ // This serves as a regression test against an ICE on printing.
+ // The next two lines concatenated contain some discussion:
+ // https://rust-lang.zulipchat.com/#narrow/stream/146212-t-compiler.2Fconst-eval/
+ // subject/anon_const_instance_printing/near/135980032
+ let instance = with_no_trimmed_paths!(key.value.instance.to_string());
+ trace!("const eval: {:?} ({})", key, instance);
+ }
+
+ let cid = key.value;
+ let def = cid.instance.def.with_opt_param();
+ let is_static = tcx.is_static(def.did);
+
+ let mut ecx = InterpCx::new(
+ tcx,
+ tcx.def_span(def.did),
+ key.param_env,
+ // Statics (and promoteds inside statics) may access other statics, because unlike consts
+ // they do not have to behave "as if" they were evaluated at runtime.
+ CompileTimeInterpreter::new(tcx.const_eval_limit(), /*can_access_statics:*/ is_static),
+ );
+
+ let res = ecx.load_mir(cid.instance.def, cid.promoted);
+ match res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, &body)) {
+ Err(error) => {
+ let err = ConstEvalErr::new(&ecx, error, None);
+ // Some CTFE errors raise just a lint, not a hard error; see
+ // <https://github.com/rust-lang/rust/issues/71800>.
+ let is_hard_err = if let Some(def) = def.as_local() {
+ // (Associated) consts only emit a lint, since they might be unused.
+ !matches!(tcx.def_kind(def.did.to_def_id()), DefKind::Const | DefKind::AssocConst)
+ // check if the inner InterpError is hard
+ || err.error.is_hard_err()
+ } else {
+ // use of broken constant from other crate: always an error
+ true
+ };
+
+ if is_hard_err {
+ let msg = if is_static {
+ Cow::from("could not evaluate static initializer")
+ } else {
+ // If the current item has generics, we'd like to enrich the message with the
+ // instance and its substs, so that the actual compile-time values are shown in
+ // addition to the expression that led to the const eval error.
+ let instance = &key.value.instance;
+ if !instance.substs.is_empty() {
+ let instance = with_no_trimmed_paths!(instance.to_string());
+ let msg = format!("evaluation of `{}` failed", instance);
+ Cow::from(msg)
+ } else {
+ Cow::from("evaluation of constant value failed")
+ }
+ };
+
+ Err(err.report_as_error(ecx.tcx.at(err.span), &msg))
+ } else {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def.as_local().unwrap().did);
+ Err(err.report_as_lint(
+ tcx.at(tcx.def_span(def.did)),
+ "any use of this value will cause an error",
+ hir_id,
+ Some(err.span),
+ ))
+ }
+ }
+ Ok(mplace) => {
+ // Since evaluation had no errors, validate the resulting constant.
+ // This is a separate `try` block to provide more targeted error reporting.
+ let validation = try {
+ let mut ref_tracking = RefTracking::new(mplace);
+ let mut inner = false;
+ while let Some((mplace, path)) = ref_tracking.todo.pop() {
+ let mode = match tcx.static_mutability(cid.instance.def_id()) {
+ Some(_) if cid.promoted.is_some() => {
+ // Promoteds in statics are allowed to point to statics.
+ CtfeValidationMode::Const { inner, allow_static_ptrs: true }
+ }
+ Some(_) => CtfeValidationMode::Regular, // a `static`
+ None => CtfeValidationMode::Const { inner, allow_static_ptrs: false },
+ };
+ ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)?;
+ inner = true;
+ }
+ };
+ let alloc_id = mplace.ptr.provenance.unwrap();
+ if let Err(error) = validation {
+ // Validation failed, report an error. This is always a hard error.
+ let err = ConstEvalErr::new(&ecx, error, None);
+ Err(err.struct_error(
+ ecx.tcx,
+ "it is undefined behavior to use this value",
+ |diag| {
+ if matches!(err.error, InterpError::UndefinedBehavior(_)) {
+ diag.note(NOTE_ON_UNDEFINED_BEHAVIOR_ERROR);
+ }
+ diag.note(&format!(
+ "the raw bytes of the constant ({}",
+ display_allocation(
+ *ecx.tcx,
+ ecx.tcx.global_alloc(alloc_id).unwrap_memory().inner()
+ )
+ ));
+ },
+ ))
+ } else {
+ // Convert to raw constant
+ Ok(ConstAlloc { alloc_id, ty: mplace.layout.ty })
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
new file mode 100644
index 000000000..f1674d04f
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -0,0 +1,82 @@
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{DefIdTree, TyCtxt};
+use rustc_span::symbol::Symbol;
+
+/// Whether the `def_id` is an unstable const fn and what feature gate is necessary to enable it
+pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> {
+ if tcx.is_const_fn_raw(def_id) {
+ let const_stab = tcx.lookup_const_stability(def_id)?;
+ if const_stab.is_const_unstable() { Some(const_stab.feature) } else { None }
+ } else {
+ None
+ }
+}
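+// For reference, an unstable const fn in the standard library is marked roughly like
+// this (a sketch; the exact attribute arguments vary per item):
+//
+//     #[stable(feature = "example", since = "1.0.0")]
+//     #[rustc_const_unstable(feature = "const_example", issue = "none")]
+//     pub const fn example() {}
+//
+// For such an item, `is_unstable_const_fn` returns `Some` of the `const_example`
+// feature name.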
+
+pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+ let parent_id = tcx.local_parent(def_id);
+ tcx.def_kind(parent_id) == DefKind::Impl && tcx.constness(parent_id) == hir::Constness::Const
+}
+
+/// Checks whether an item is considered to be `const`. If it is a constructor, it is const. If
+/// it is a trait impl/function, return whether it has a `const` modifier. If it is an intrinsic,
+/// report whether said intrinsic has a `rustc_const_{un,}stable` attribute. Otherwise, return
+/// `Constness::NotConst`.
+fn constness(tcx: TyCtxt<'_>, def_id: DefId) -> hir::Constness {
+ let def_id = def_id.expect_local();
+ let node = tcx.hir().get_by_def_id(def_id);
+
+ match node {
+ hir::Node::Ctor(_) => hir::Constness::Const,
+ hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(impl_), .. }) => impl_.constness,
+ hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) => {
+ // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
+ // foreign items cannot be evaluated at compile-time.
+ let is_const = if tcx.is_intrinsic(def_id) {
+ tcx.lookup_const_stability(def_id).is_some()
+ } else {
+ false
+ };
+ if is_const { hir::Constness::Const } else { hir::Constness::NotConst }
+ }
+ _ => {
+ if let Some(fn_kind) = node.fn_kind() {
+ if fn_kind.constness() == hir::Constness::Const {
+ return hir::Constness::Const;
+ }
+
+ // If the function itself is not annotated with `const`, it may still be a `const fn`
+ // if it resides in a const trait impl.
+ let is_const = is_parent_const_impl_raw(tcx, def_id);
+ if is_const { hir::Constness::Const } else { hir::Constness::NotConst }
+ } else {
+ hir::Constness::NotConst
+ }
+ }
+ }
+}
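+// Hedged sketch of the "const because of the parent impl" case above (the exact
+// feature gating of `const_trait_impl` has shifted between nightlies, so this may
+// need adjusting to compile):
+//
+//     #![feature(const_trait_impl)]
+//
+//     trait Answer {
+//         fn answer() -> u32;
+//     }
+//
+//     struct S;
+//
+//     impl const Answer for S {
+//         // No `const` keyword on the fn itself; `constness` still returns
+//         // `hir::Constness::Const` for it because its parent is an `impl const`.
+//         fn answer() -> u32 {
+//             42
+//         }
+//     }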
+
+fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ tcx.is_const_fn(def_id)
+ && match tcx.lookup_const_stability(def_id) {
+ Some(stab) => {
+ if cfg!(debug_assertions) && stab.promotable {
+ let sig = tcx.fn_sig(def_id);
+ assert_eq!(
+ sig.unsafety(),
+ hir::Unsafety::Normal,
+ "don't mark const unsafe fns as promotable",
+ // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682
+ );
+ }
+ stab.promotable
+ }
+ None => false,
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { constness, is_promotable_const_fn, ..*providers };
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
new file mode 100644
index 000000000..fc2e6652a
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -0,0 +1,527 @@
+use rustc_hir::def::DefKind;
+use rustc_middle::mir;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use std::borrow::Borrow;
+use std::collections::hash_map::Entry;
+use std::hash::Hash;
+
+use rustc_data_structures::fx::FxHashMap;
+use std::fmt;
+
+use rustc_ast::Mutability;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::AssertMessage;
+use rustc_session::Limit;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_target::abi::{Align, Size};
+use rustc_target::spec::abi::Abi as CallAbi;
+
+use crate::interpret::{
+ self, compile_time_machine, AllocId, ConstAllocation, Frame, ImmTy, InterpCx, InterpResult,
+ OpTy, PlaceTy, Pointer, Scalar, StackPopUnwind,
+};
+
+use super::error::*;
+
+impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> {
+ /// "Intercept" a function call, because we have something special to do for it
+ /// (e.g. panic machinery or `const_eval_select`).
+ /// If this returns `Ok(Some(instance))`, that instance should be called instead of
+ /// the original one; `Ok(None)` means the call should proceed normally.
+ fn hook_special_const_fn(
+ &mut self,
+ instance: ty::Instance<'tcx>,
+ args: &[OpTy<'tcx>],
+ ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
+ // All `#[rustc_do_not_const_check]` functions should be hooked here.
+ let def_id = instance.def_id();
+
+ if Some(def_id) == self.tcx.lang_items().const_eval_select() {
+ // redirect to const_eval_select_ct
+ if let Some(const_eval_select) = self.tcx.lang_items().const_eval_select_ct() {
+ return Ok(Some(
+ ty::Instance::resolve(
+ *self.tcx,
+ ty::ParamEnv::reveal_all(),
+ const_eval_select,
+ instance.substs,
+ )
+ .unwrap()
+ .unwrap(),
+ ));
+ }
+ } else if Some(def_id) == self.tcx.lang_items().panic_display()
+ || Some(def_id) == self.tcx.lang_items().begin_panic_fn()
+ {
+ // &str or &&str
+ assert!(args.len() == 1);
+
+ let mut msg_place = self.deref_operand(&args[0])?;
+ while msg_place.layout.ty.is_ref() {
+ msg_place = self.deref_operand(&msg_place.into())?;
+ }
+
+ let msg = Symbol::intern(self.read_str(&msg_place)?);
+ let span = self.find_closest_untracked_caller_location();
+ let (file, line, col) = self.location_triple_for_span(span);
+ return Err(ConstEvalErrKind::Panic { msg, file, line, col }.into());
+ } else if Some(def_id) == self.tcx.lang_items().panic_fmt() {
+ // For panic_fmt, call const_panic_fmt instead.
+ if let Some(const_panic_fmt) = self.tcx.lang_items().const_panic_fmt() {
+ return Ok(Some(
+ ty::Instance::resolve(
+ *self.tcx,
+ ty::ParamEnv::reveal_all(),
+ const_panic_fmt,
+ self.tcx.intern_substs(&[]),
+ )
+ .unwrap()
+ .unwrap(),
+ ));
+ }
+ }
+ Ok(None)
+ }
+}
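+// A rough sketch of the library-side pattern that the `const_eval_select` redirection
+// above exists for (the signature and safety of this unstable intrinsic have changed
+// over time, so treat the details as illustrative):
+//
+//     #![feature(core_intrinsics, const_eval_select)]
+//     use std::intrinsics::const_eval_select;
+//
+//     const fn ct(x: u32) -> u32 { x }
+//     fn rt(x: u32) -> u32 { x.wrapping_mul(2) }
+//
+//     const fn pick(x: u32) -> u32 {
+//         // During CTFE the machine swaps this call for `const_eval_select_ct`,
+//         // which ends up running `ct`; in runtime code, `rt` is called instead.
+//         unsafe { const_eval_select((x,), ct, rt) }
+//     }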
+
+/// Extra machine state for CTFE, and the Machine instance
+pub struct CompileTimeInterpreter<'mir, 'tcx> {
+ /// For now, the number of terminators that can be evaluated before we throw a resource
+ /// exhaustion error.
+ ///
+ /// Setting this to `0` disables the limit and allows the interpreter to run forever.
+ pub steps_remaining: usize,
+
+ /// The virtual call stack.
+ pub(crate) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
+
+ /// We need to make sure consts never point to anything mutable, even recursively. That is
+ /// relied on for pattern matching on consts with references.
+ /// To achieve this, two pieces have to work together:
+ /// * Interning makes everything outside of statics immutable.
+ /// * Pointers to allocations inside of statics can never leak outside, to a non-static global.
+ /// This boolean here controls the second part.
+ pub(super) can_access_statics: bool,
+}
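+// What `can_access_statics` guards against, sketched from the user's point of view:
+//
+//     static S: i32 = 5;
+//     const C: &i32 = &S; // rejected: constants cannot refer to statics
+//
+// Most such code is already rejected by the static const checker; this flag is the
+// interpreter-level backstop that also catches reads reached indirectly.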
+
+impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> {
+ pub(crate) fn new(const_eval_limit: Limit, can_access_statics: bool) -> Self {
+ CompileTimeInterpreter {
+ steps_remaining: const_eval_limit.0,
+ stack: Vec::new(),
+ can_access_statics,
+ }
+ }
+}
+
+impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxHashMap<K, V> {
+ #[inline(always)]
+ fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
+ where
+ K: Borrow<Q>,
+ {
+ FxHashMap::contains_key(self, k)
+ }
+
+ #[inline(always)]
+ fn insert(&mut self, k: K, v: V) -> Option<V> {
+ FxHashMap::insert(self, k, v)
+ }
+
+ #[inline(always)]
+ fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
+ where
+ K: Borrow<Q>,
+ {
+ FxHashMap::remove(self, k)
+ }
+
+ #[inline(always)]
+ fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
+ self.iter().filter_map(move |(k, v)| f(k, &*v)).collect()
+ }
+
+ #[inline(always)]
+ fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
+ match self.get(&k) {
+ Some(v) => Ok(v),
+ None => {
+ vacant()?;
+ bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading")
+ }
+ }
+ }
+
+ #[inline(always)]
+ fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
+ match self.entry(k) {
+ Entry::Occupied(e) => Ok(e.into_mut()),
+ Entry::Vacant(e) => {
+ let v = vacant()?;
+ Ok(e.insert(v))
+ }
+ }
+ }
+}
+
+pub(crate) type CompileTimeEvalContext<'mir, 'tcx> =
+ InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>;
+
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub enum MemoryKind {
+ Heap,
+}
+
+impl fmt::Display for MemoryKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ MemoryKind::Heap => write!(f, "heap allocation"),
+ }
+ }
+}
+
+impl interpret::MayLeak for MemoryKind {
+ #[inline(always)]
+ fn may_leak(self) -> bool {
+ match self {
+ MemoryKind::Heap => false,
+ }
+ }
+}
+
+impl interpret::MayLeak for ! {
+ #[inline(always)]
+ fn may_leak(self) -> bool {
+ // `self` is uninhabited
+ self
+ }
+}
+
+impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
+ fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
+ Ok(match (a, b) {
+ // Comparisons between integers are always known.
+ (Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
+ // Equality with integers can never be known for sure.
+ (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => false,
+ // FIXME: return `true` for when both sides are the same pointer, *except* that
+ // some things (like functions and vtables) do not have stable addresses
+ // so we need to be careful around them (see e.g. #73722).
+ (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
+ })
+ }
+
+ fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
+ Ok(match (a, b) {
+ // Comparisons between integers are always known.
+ (Scalar::Int(_), Scalar::Int(_)) => a != b,
+ // Comparisons of abstract pointers with null pointers are known if the pointer
+ // is in bounds, because if they are in bounds, the pointer can't be null.
+ // Inequality with integers other than null can never be known for sure.
+ (Scalar::Int(int), ptr @ Scalar::Ptr(..))
+ | (ptr @ Scalar::Ptr(..), Scalar::Int(int)) => {
+ int.is_null() && !self.scalar_may_be_null(ptr)?
+ }
+ // FIXME: return `true` for at least some comparisons where we can reliably
+ // determine the result of runtime inequality tests at compile-time.
+ // Examples include comparison of addresses in different static items.
+ (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
+ })
+ }
+}
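+// These helpers back the `ptr_guaranteed_eq`/`ptr_guaranteed_ne` intrinsics, reachable
+// from user code through unstable wrappers such as `<*const T>::guaranteed_eq` (the
+// exact gating and return types have varied, so this is only a conceptual sketch):
+//
+//     const EQ: bool = {
+//         let a = &1 as *const i32;
+//         // A real pointer compared against an integer address is never *guaranteed*
+//         // equal at compile time, so this conservatively evaluates to `false`.
+//         a.guaranteed_eq(8 as *const i32)
+//     };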
+
+impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, 'tcx> {
+ compile_time_machine!(<'mir, 'tcx>);
+
+ type MemoryKind = MemoryKind;
+
+ const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error
+
+ fn load_mir(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ instance: ty::InstanceDef<'tcx>,
+ ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+ match instance {
+ ty::InstanceDef::Item(def) => {
+ if ecx.tcx.is_ctfe_mir_available(def.did) {
+ Ok(ecx.tcx.mir_for_ctfe_opt_const_arg(def))
+ } else if ecx.tcx.def_kind(def.did) == DefKind::AssocConst {
+ let guar = ecx.tcx.sess.delay_span_bug(
+ rustc_span::DUMMY_SP,
+ "This is likely a const item that is missing from its impl",
+ );
+ throw_inval!(AlreadyReported(guar));
+ } else {
+ let path = ecx.tcx.def_path_str(def.did);
+ Err(ConstEvalErrKind::NeedsRfc(format!("calling extern function `{}`", path))
+ .into())
+ }
+ }
+ _ => Ok(ecx.tcx.instance_mir(instance)),
+ }
+ }
+
+ fn find_mir_or_eval_fn(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ instance: ty::Instance<'tcx>,
+ _abi: CallAbi,
+ args: &[OpTy<'tcx>],
+ _dest: &PlaceTy<'tcx>,
+ _ret: Option<mir::BasicBlock>,
+ _unwind: StackPopUnwind, // unwinding is not supported in consts
+ ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>> {
+ debug!("find_mir_or_eval_fn: {:?}", instance);
+
+ // Only check non-glue functions
+ if let ty::InstanceDef::Item(def) = instance.def {
+ // Execution might have wandered off into other crates, so we cannot do a stability-
+ // sensitive check here. But we can at least rule out functions that are not const
+ // at all.
+ if !ecx.tcx.is_const_fn_raw(def.did) {
+ // allow calling functions inside a trait marked with #[const_trait].
+ if !ecx.tcx.is_const_default_method(def.did) {
+ // We certainly do *not* want to actually call the fn
+ // though, so be sure we return here.
+ throw_unsup_format!("calling non-const function `{}`", instance)
+ }
+ }
+
+ if let Some(new_instance) = ecx.hook_special_const_fn(instance, args)? {
+ // We call another const fn instead.
+ // However, we return the *original* instance to make backtraces work out
+ // (and we hope this does not confuse the FnAbi checks too much).
+ return Ok(Self::find_mir_or_eval_fn(
+ ecx,
+ new_instance,
+ _abi,
+ args,
+ _dest,
+ _ret,
+ _unwind,
+ )?
+ .map(|(body, _instance)| (body, instance)));
+ }
+ }
+ // This is a const fn. Call it.
+ Ok(Some((ecx.load_mir(instance.def, None)?, instance)))
+ }
+
+ fn call_intrinsic(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ instance: ty::Instance<'tcx>,
+ args: &[OpTy<'tcx>],
+ dest: &PlaceTy<'tcx, Self::Provenance>,
+ target: Option<mir::BasicBlock>,
+ _unwind: StackPopUnwind,
+ ) -> InterpResult<'tcx> {
+ // Shared intrinsics.
+ if ecx.emulate_intrinsic(instance, args, dest, target)? {
+ return Ok(());
+ }
+ let intrinsic_name = ecx.tcx.item_name(instance.def_id());
+
+ // CTFE-specific intrinsics.
+ let Some(ret) = target else {
+ return Err(ConstEvalErrKind::NeedsRfc(format!(
+ "calling intrinsic `{}`",
+ intrinsic_name
+ ))
+ .into());
+ };
+ match intrinsic_name {
+ sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
+ let a = ecx.read_immediate(&args[0])?.to_scalar()?;
+ let b = ecx.read_immediate(&args[1])?.to_scalar()?;
+ let cmp = if intrinsic_name == sym::ptr_guaranteed_eq {
+ ecx.guaranteed_eq(a, b)?
+ } else {
+ ecx.guaranteed_ne(a, b)?
+ };
+ ecx.write_scalar(Scalar::from_bool(cmp), dest)?;
+ }
+ sym::const_allocate => {
+ let size = ecx.read_scalar(&args[0])?.to_machine_usize(ecx)?;
+ let align = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;
+
+ let align = match Align::from_bytes(align) {
+ Ok(a) => a,
+ Err(err) => throw_ub_format!("align has to be a power of 2, {}", err),
+ };
+
+ let ptr = ecx.allocate_ptr(
+ Size::from_bytes(size as u64),
+ align,
+ interpret::MemoryKind::Machine(MemoryKind::Heap),
+ )?;
+ ecx.write_pointer(ptr, dest)?;
+ }
+ sym::const_deallocate => {
+ let ptr = ecx.read_pointer(&args[0])?;
+ let size = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;
+ let align = ecx.read_scalar(&args[2])?.to_machine_usize(ecx)?;
+
+ let size = Size::from_bytes(size);
+ let align = match Align::from_bytes(align) {
+ Ok(a) => a,
+ Err(err) => throw_ub_format!("align has to be a power of 2, {}", err),
+ };
+
+ // If the allocation was created in another const,
+ // we don't deallocate it.
+ let (alloc_id, _, _) = ecx.ptr_get_alloc_id(ptr)?;
+ let is_allocated_in_another_const = matches!(
+ ecx.tcx.try_get_global_alloc(alloc_id),
+ Some(interpret::GlobalAlloc::Memory(_))
+ );
+
+ if !is_allocated_in_another_const {
+ ecx.deallocate_ptr(
+ ptr,
+ Some((size, align)),
+ interpret::MemoryKind::Machine(MemoryKind::Heap),
+ )?;
+ }
+ }
+ _ => {
+ return Err(ConstEvalErrKind::NeedsRfc(format!(
+ "calling intrinsic `{}`",
+ intrinsic_name
+ ))
+ .into());
+ }
+ }
+
+ ecx.go_to_block(ret);
+ Ok(())
+ }
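+ // A hedged sketch of the user-facing side of the `const_allocate`/`const_deallocate`
+ // arms above, assuming the `core_intrinsics` and `const_heap` gates of this era:
+ //
+ //     #![feature(core_intrinsics, const_heap)]
+ //     use std::intrinsics::{const_allocate, const_deallocate};
+ //
+ //     const _: () = unsafe {
+ //         // 8 bytes with alignment 4; the alignment is checked to be a power of two.
+ //         let p = const_allocate(8, 4);
+ //         // Freeing an allocation made in the same constant is fine; allocations
+ //         // interned by *other* constants are silently left alone (see above).
+ //         const_deallocate(p, 8, 4);
+ //     };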
+
+ fn assert_panic(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ msg: &AssertMessage<'tcx>,
+ _unwind: Option<mir::BasicBlock>,
+ ) -> InterpResult<'tcx> {
+ use rustc_middle::mir::AssertKind::*;
+ // Convert `AssertKind<Operand>` to `AssertKind<Scalar>`.
+ let eval_to_int =
+ |op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
+ let err = match msg {
+ BoundsCheck { ref len, ref index } => {
+ let len = eval_to_int(len)?;
+ let index = eval_to_int(index)?;
+ BoundsCheck { len, index }
+ }
+ Overflow(op, l, r) => Overflow(*op, eval_to_int(l)?, eval_to_int(r)?),
+ OverflowNeg(op) => OverflowNeg(eval_to_int(op)?),
+ DivisionByZero(op) => DivisionByZero(eval_to_int(op)?),
+ RemainderByZero(op) => RemainderByZero(eval_to_int(op)?),
+ ResumedAfterReturn(generator_kind) => ResumedAfterReturn(*generator_kind),
+ ResumedAfterPanic(generator_kind) => ResumedAfterPanic(*generator_kind),
+ };
+ Err(ConstEvalErrKind::AssertFailure(err).into())
+ }
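+ // For instance, an out-of-bounds index in a constant is reported through the
+ // `BoundsCheck` arm above:
+ //
+ //     const OOB: i32 = [1, 2, 3][4];
+ //     // error[E0080]: evaluation of constant value failed (index out of bounds)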
+
+ fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: String) -> InterpResult<'tcx, !> {
+ Err(ConstEvalErrKind::Abort(msg).into())
+ }
+
+ fn binary_ptr_op(
+ _ecx: &InterpCx<'mir, 'tcx, Self>,
+ _bin_op: mir::BinOp,
+ _left: &ImmTy<'tcx>,
+ _right: &ImmTy<'tcx>,
+ ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
+ Err(ConstEvalErrKind::NeedsRfc("pointer arithmetic or comparison".to_string()).into())
+ }
+
+ fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+ // Either the step limit is disabled (it started out as 0), or it has already been
+ // hit in a previous call to `before_terminator`.
+ if ecx.machine.steps_remaining == 0 {
+ return Ok(());
+ }
+
+ ecx.machine.steps_remaining -= 1;
+ if ecx.machine.steps_remaining == 0 {
+ throw_exhaust!(StepLimitReached)
+ }
+
+ Ok(())
+ }
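+ // The starting value of `steps_remaining` comes from `tcx.const_eval_limit()`, which
+ // a crate can raise, or disable entirely with `0`, via the unstable attribute:
+ //
+ //     #![feature(const_eval_limit)]
+ //     #![const_eval_limit = "4000000"]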
+
+ #[inline(always)]
+ fn expose_ptr(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _ptr: Pointer<AllocId>,
+ ) -> InterpResult<'tcx> {
+ Err(ConstEvalErrKind::NeedsRfc("exposing pointers".to_string()).into())
+ }
+
+ #[inline(always)]
+ fn init_frame_extra(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ frame: Frame<'mir, 'tcx>,
+ ) -> InterpResult<'tcx, Frame<'mir, 'tcx>> {
+ // Enforce stack size limit. Add 1 because this is run before the new frame is pushed.
+ if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) {
+ throw_exhaust!(StackFrameLimitReached)
+ } else {
+ Ok(frame)
+ }
+ }
+
+ #[inline(always)]
+ fn stack<'a>(
+ ecx: &'a InterpCx<'mir, 'tcx, Self>,
+ ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] {
+ &ecx.machine.stack
+ }
+
+ #[inline(always)]
+ fn stack_mut<'a>(
+ ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+ ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> {
+ &mut ecx.machine.stack
+ }
+
+ fn before_access_global(
+ _tcx: TyCtxt<'tcx>,
+ machine: &Self,
+ alloc_id: AllocId,
+ alloc: ConstAllocation<'tcx>,
+ static_def_id: Option<DefId>,
+ is_write: bool,
+ ) -> InterpResult<'tcx> {
+ let alloc = alloc.inner();
+ if is_write {
+ // Write access. These are never allowed, but we give a targeted error message.
+ if alloc.mutability == Mutability::Not {
+ Err(err_ub!(WriteToReadOnly(alloc_id)).into())
+ } else {
+ Err(ConstEvalErrKind::ModifiedGlobal.into())
+ }
+ } else {
+ // Read access. These are usually allowed, with some exceptions.
+ if machine.can_access_statics {
+ // Machine configuration allows us read from anything (e.g., `static` initializer).
+ Ok(())
+ } else if static_def_id.is_some() {
+ // Machine configuration does not allow us to read statics
+ // (e.g., `const` initializer).
+ // See `CompileTimeInterpreter::can_access_statics` above for why
+ // this check is so important: if we could read statics, we could read pointers
+ // to mutable allocations *inside* statics. These allocations are not themselves
+ // statics, so pointers to them can get around the check in `validity.rs`.
+ Err(ConstEvalErrKind::ConstAccessesStatic.into())
+ } else {
+ // Immutable global, this read is fine.
+ // But make sure we never accept a read from something mutable, as that would be
+ // unsound. The content of such an allocation may differ between compile-time and
+ // run-time, so permitting the read now could return the wrong value later.
+ assert_eq!(alloc.mutability, Mutability::Not);
+ Ok(())
+ }
+ }
+ }
+}
+
+// Please do not add any code below the above `Machine` trait impl. I (oli-obk) plan more cleanups
+// so we can end up having a file with just that impl, but for now, let's keep the impl discoverable
+// at the bottom of this file.
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
new file mode 100644
index 000000000..948c33494
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -0,0 +1,163 @@
+// Not in interpret to make sure we do not use private implementation details
+
+use rustc_hir::Mutability;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::{source_map::DUMMY_SP, symbol::Symbol};
+
+use crate::interpret::{
+ intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, InterpResult, MemPlaceMeta,
+ Scalar,
+};
+
+mod error;
+mod eval_queries;
+mod fn_queries;
+mod machine;
+mod valtrees;
+
+pub use error::*;
+pub use eval_queries::*;
+pub use fn_queries::*;
+pub use machine::*;
+pub(crate) use valtrees::{const_to_valtree_inner, valtree_to_const_value};
+
+pub(crate) fn const_caller_location(
+ tcx: TyCtxt<'_>,
+ (file, line, col): (Symbol, u32, u32),
+) -> ConstValue<'_> {
+ trace!("const_caller_location: {}:{}:{}", file, line, col);
+ let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), false);
+
+ let loc_place = ecx.alloc_caller_location(file, line, col);
+ if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
+ bug!("intern_const_alloc_recursive should not error in this case")
+ }
+ ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr, &tcx))
+}
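+// Hedged example of what this backs on the user side: `Location::caller()` in a const
+// context (gated, around this point, behind `const_caller_location`):
+//
+//     #![feature(const_caller_location)]
+//
+//     const WHERE: &core::panic::Location<'static> = core::panic::Location::caller();
+//     // WHERE.file(), WHERE.line() and WHERE.column() describe the call site inside
+//     // this constant; the backing allocation is built by `const_caller_location`.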
+
+// We forbid type-level constants that contain more than `VALTREE_MAX_NODES` nodes.
+const VALTREE_MAX_NODES: usize = 100000;
+
+pub(crate) enum ValTreeCreationError {
+ NodesOverflow,
+ NonSupportedType,
+ Other,
+}
+pub(crate) type ValTreeCreationResult<'tcx> = Result<ty::ValTree<'tcx>, ValTreeCreationError>;
+
+/// Evaluates a constant and turns it into a type-level constant value.
+pub(crate) fn eval_to_valtree<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cid: GlobalId<'tcx>,
+) -> EvalToValTreeResult<'tcx> {
+ let const_alloc = tcx.eval_to_allocation_raw(param_env.and(cid))?;
+
+ // FIXME Need to provide a span to `eval_to_valtree`
+ let ecx = mk_eval_cx(
+ tcx, DUMMY_SP, param_env,
+ // It is absolutely crucial for soundness that
+ // we do not read from static items or other mutable memory.
+ false,
+ );
+ let place = ecx.raw_const_to_mplace(const_alloc).unwrap();
+ debug!(?place);
+
+ let mut num_nodes = 0;
+ let valtree_result = const_to_valtree_inner(&ecx, &place, &mut num_nodes);
+
+ match valtree_result {
+ Ok(valtree) => Ok(Some(valtree)),
+ Err(err) => {
+ let did = cid.instance.def_id();
+ let s = cid.display(tcx);
+ match err {
+ ValTreeCreationError::NodesOverflow => {
+ let msg = format!("maximum number of nodes exceeded in constant {}", &s);
+ let mut diag = match tcx.hir().span_if_local(did) {
+ Some(span) => tcx.sess.struct_span_err(span, &msg),
+ None => tcx.sess.struct_err(&msg),
+ };
+ diag.emit();
+
+ Ok(None)
+ }
+ ValTreeCreationError::NonSupportedType | ValTreeCreationError::Other => Ok(None),
+ }
+ }
+ }
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub(crate) fn try_destructure_mir_constant<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ val: mir::ConstantKind<'tcx>,
+) -> InterpResult<'tcx, mir::DestructuredMirConstant<'tcx>> {
+ trace!("destructure_mir_constant: {:?}", val);
+ let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
+ let op = ecx.mir_const_to_op(&val, None)?;
+
+ // We go to `usize` as we cannot allocate anything bigger anyway.
+ let (field_count, variant, down) = match val.ty().kind() {
+ ty::Array(_, len) => (len.eval_usize(tcx, param_env) as usize, None, op),
+ ty::Adt(def, _) if def.variants().is_empty() => {
+ throw_ub!(Unreachable)
+ }
+ ty::Adt(def, _) => {
+ let variant = ecx.read_discriminant(&op)?.1;
+ let down = ecx.operand_downcast(&op, variant)?;
+ (def.variants()[variant].fields.len(), Some(variant), down)
+ }
+ ty::Tuple(substs) => (substs.len(), None, op),
+ _ => bug!("cannot destructure mir constant {:?}", val),
+ };
+
+ let fields_iter = (0..field_count)
+ .map(|i| {
+ let field_op = ecx.operand_field(&down, i)?;
+ let val = op_to_const(&ecx, &field_op);
+ Ok(mir::ConstantKind::Val(val, field_op.layout.ty))
+ })
+ .collect::<InterpResult<'tcx, Vec<_>>>()?;
+ let fields = tcx.arena.alloc_from_iter(fields_iter);
+
+ Ok(mir::DestructuredMirConstant { variant, fields })
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub(crate) fn deref_mir_constant<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ val: mir::ConstantKind<'tcx>,
+) -> mir::ConstantKind<'tcx> {
+ let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
+ let op = ecx.mir_const_to_op(&val, None).unwrap();
+ let mplace = ecx.deref_operand(&op).unwrap();
+ if let Some(alloc_id) = mplace.ptr.provenance {
+ assert_eq!(
+ tcx.global_alloc(alloc_id).unwrap_memory().0.0.mutability,
+ Mutability::Not,
+ "deref_mir_constant cannot be used with mutable allocations as \
+ that could allow pattern matching to observe mutable statics",
+ );
+ }
+
+ let ty = match mplace.meta {
+ MemPlaceMeta::None => mplace.layout.ty,
+ // In case of unsized types, figure out the real type behind the metadata.
+ MemPlaceMeta::Meta(scalar) => match mplace.layout.ty.kind() {
+ ty::Str => bug!("there's no sized equivalent of a `str`"),
+ ty::Slice(elem_ty) => tcx.mk_array(*elem_ty, scalar.to_machine_usize(&tcx).unwrap()),
+ _ => bug!(
+ "type {} should not have metadata, but had {:?}",
+ mplace.layout.ty,
+ mplace.meta
+ ),
+ },
+ };
+
+ mir::ConstantKind::Val(op_to_const(&ecx, &mplace.into()), ty)
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
new file mode 100644
index 000000000..8fff4571d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -0,0 +1,475 @@
+use super::eval_queries::{mk_eval_cx, op_to_const};
+use super::machine::CompileTimeEvalContext;
+use super::{ValTreeCreationError, ValTreeCreationResult, VALTREE_MAX_NODES};
+use crate::interpret::{
+ intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta,
+ MemoryKind, PlaceTy, Scalar, ScalarMaybeUninit,
+};
+use crate::interpret::{MPlaceTy, Value};
+use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
+use rustc_span::source_map::DUMMY_SP;
+use rustc_target::abi::{Align, VariantIdx};
+
+#[instrument(skip(ecx), level = "debug")]
+fn branches<'tcx>(
+ ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+ place: &MPlaceTy<'tcx>,
+ n: usize,
+ variant: Option<VariantIdx>,
+ num_nodes: &mut usize,
+) -> ValTreeCreationResult<'tcx> {
+ let place = match variant {
+ Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(),
+ None => *place,
+ };
+ let variant = variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32()))));
+ debug!(?place, ?variant);
+
+ let mut fields = Vec::with_capacity(n);
+ for i in 0..n {
+ let field = ecx.mplace_field(&place, i).unwrap();
+ let valtree = const_to_valtree_inner(ecx, &field, num_nodes)?;
+ fields.push(Some(valtree));
+ }
+
+ // For enums, we prepend their variant index before the variant's fields so we can figure out
+ // the variant again when just seeing a valtree.
+ let branches = variant
+ .into_iter()
+ .chain(fields.into_iter())
+ .collect::<Option<Vec<_>>>()
+ .expect("should have already checked for errors in ValTree creation");
+
+ // Have to account for ZSTs here
+ if branches.len() == 0 {
+ *num_nodes += 1;
+ }
+
+ Ok(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(branches)))
+}
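+// Rough sketch of the resulting shape: for a value `Some(3u8)` of type `Option<u8>`,
+// `branches` produces (in pseudo-notation)
+//
+//     Branch([Leaf(1), Leaf(3)])
+//
+// where the leading `Leaf(1)` is the variant index of `Some`; a fieldless `None`
+// becomes `Branch([Leaf(0)])`.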
+
+#[instrument(skip(ecx), level = "debug")]
+fn slice_branches<'tcx>(
+ ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+ place: &MPlaceTy<'tcx>,
+ num_nodes: &mut usize,
+) -> ValTreeCreationResult<'tcx> {
+ let n = place
+ .len(&ecx.tcx.tcx)
+ .unwrap_or_else(|_| panic!("expected to use len of place {:?}", place));
+
+ let mut elems = Vec::with_capacity(n as usize);
+ for i in 0..n {
+ let place_elem = ecx.mplace_index(place, i).unwrap();
+ let valtree = const_to_valtree_inner(ecx, &place_elem, num_nodes)?;
+ elems.push(valtree);
+ }
+
+ Ok(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(elems)))
+}
+
+#[instrument(skip(ecx), level = "debug")]
+pub(crate) fn const_to_valtree_inner<'tcx>(
+ ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+ place: &MPlaceTy<'tcx>,
+ num_nodes: &mut usize,
+) -> ValTreeCreationResult<'tcx> {
+ let ty = place.layout.ty;
+ debug!("ty kind: {:?}", ty.kind());
+
+ if *num_nodes >= VALTREE_MAX_NODES {
+ return Err(ValTreeCreationError::NodesOverflow);
+ }
+
+ match ty.kind() {
+ ty::FnDef(..) => {
+ *num_nodes += 1;
+ Ok(ty::ValTree::zst())
+ }
+ ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
+ let Ok(val) = ecx.read_immediate(&place.into()) else {
+ return Err(ValTreeCreationError::Other);
+ };
+ let val = val.to_scalar().unwrap();
+ *num_nodes += 1;
+
+ Ok(ty::ValTree::Leaf(val.assert_int()))
+ }
+
+ // Raw pointers are not allowed in type level constants, as we cannot properly test them for
+ // equality at compile-time (see `ptr_guaranteed_eq`/`_ne`).
+ // Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to
+ // agree with runtime equality tests.
+ ty::FnPtr(_) | ty::RawPtr(_) => Err(ValTreeCreationError::NonSupportedType),
+
+ ty::Ref(_, _, _) => {
+ let Ok(derefd_place)= ecx.deref_operand(&place.into()) else {
+ return Err(ValTreeCreationError::Other);
+ };
+ debug!(?derefd_place);
+
+ const_to_valtree_inner(ecx, &derefd_place, num_nodes)
+ }
+
+ ty::Str | ty::Slice(_) | ty::Array(_, _) => {
+ slice_branches(ecx, place, num_nodes)
+ }
+ // Trait objects are not allowed in type level constants, as we have no concept for
+ // resolving their backing type, even if we can do that at const eval time. We may
+ // hypothetically be able to allow `dyn StructuralEq` trait objects in the future,
+ // but it is unclear if this is useful.
+ ty::Dynamic(..) => Err(ValTreeCreationError::NonSupportedType),
+
+ ty::Tuple(elem_tys) => {
+ branches(ecx, place, elem_tys.len(), None, num_nodes)
+ }
+
+ ty::Adt(def, _) => {
+ if def.is_union() {
+ return Err(ValTreeCreationError::NonSupportedType);
+ } else if def.variants().is_empty() {
+ bug!("uninhabited types should have errored and never gotten converted to valtree")
+ }
+
+ let Ok((_, variant)) = ecx.read_discriminant(&place.into()) else {
+ return Err(ValTreeCreationError::Other);
+ };
+ branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant), num_nodes)
+ }
+
+ ty::Never
+ | ty::Error(_)
+ | ty::Foreign(..)
+ | ty::Infer(ty::FreshIntTy(_))
+ | ty::Infer(ty::FreshFloatTy(_))
+ | ty::Projection(..)
+ | ty::Param(_)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ // FIXME(oli-obk): we could look behind opaque types
+ | ty::Opaque(..)
+ | ty::Infer(_)
+ // FIXME(oli-obk): we can probably encode closures just like structs
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..) => Err(ValTreeCreationError::NonSupportedType),
+ }
+}
+
+#[instrument(skip(ecx), level = "debug")]
+fn create_mplace_from_layout<'tcx>(
+ ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+ ty: Ty<'tcx>,
+) -> MPlaceTy<'tcx> {
+ let tcx = ecx.tcx;
+ let param_env = ecx.param_env;
+ let layout = tcx.layout_of(param_env.and(ty)).unwrap();
+ debug!(?layout);
+
+ ecx.allocate(layout, MemoryKind::Stack).unwrap()
+}
+
+// Walks custom DSTs and gets the type of the unsized field and the number of elements
+// in the unsized field.
+fn get_info_on_unsized_field<'tcx>(
+ ty: Ty<'tcx>,
+ valtree: ty::ValTree<'tcx>,
+ tcx: TyCtxt<'tcx>,
+) -> (Ty<'tcx>, usize) {
+ let mut last_valtree = valtree;
+ let tail = tcx.struct_tail_with_normalize(
+ ty,
+ |ty| ty,
+ || {
+ let branches = last_valtree.unwrap_branch();
+ last_valtree = branches[branches.len() - 1];
+ debug!(?branches, ?last_valtree);
+ },
+ );
+ let unsized_inner_ty = match tail.kind() {
+ ty::Slice(t) => *t,
+ ty::Str => tail,
+ _ => bug!("expected Slice or Str"),
+ };
+
+ // Have to adjust type for ty::Str
+ let unsized_inner_ty = match unsized_inner_ty.kind() {
+ ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)),
+ _ => unsized_inner_ty,
+ };
+
+ // Get the number of elements in the unsized field
+ let num_elems = last_valtree.unwrap_branch().len();
+
+ (unsized_inner_ty, num_elems)
+}
+
+#[instrument(skip(ecx), level = "debug")]
+fn create_pointee_place<'tcx>(
+ ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+ ty: Ty<'tcx>,
+ valtree: ty::ValTree<'tcx>,
+) -> MPlaceTy<'tcx> {
+ let tcx = ecx.tcx.tcx;
+
+ if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty()) {
+ // We need to create `Allocation`s for custom DSTs
+
+ let (unsized_inner_ty, num_elems) = get_info_on_unsized_field(ty, valtree, tcx);
+ let unsized_inner_ty = match unsized_inner_ty.kind() {
+ ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)),
+ _ => unsized_inner_ty,
+ };
+ let unsized_inner_ty_size =
+ tcx.layout_of(ty::ParamEnv::empty().and(unsized_inner_ty)).unwrap().layout.size();
+ debug!(?unsized_inner_ty, ?unsized_inner_ty_size, ?num_elems);
+
+ // for custom DSTs only the last field/element is unsized, but we need to also allocate
+ // space for the other fields/elements
+ let layout = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap();
+ let size_of_sized_part = layout.layout.size();
+
+ // Get the size of the memory behind the DST
+ let dst_size = unsized_inner_ty_size.checked_mul(num_elems as u64, &tcx).unwrap();
+
+ let size = size_of_sized_part.checked_add(dst_size, &tcx).unwrap();
+ let align = Align::from_bytes(size.bytes().next_power_of_two()).unwrap();
+ let ptr = ecx.allocate_ptr(size, align, MemoryKind::Stack).unwrap();
+ debug!(?ptr);
+
+ let place = MPlaceTy::from_aligned_ptr_with_meta(
+ ptr.into(),
+ layout,
+ MemPlaceMeta::Meta(Scalar::from_machine_usize(num_elems as u64, &tcx)),
+ );
+ debug!(?place);
+
+ place
+ } else {
+ create_mplace_from_layout(ecx, ty)
+ }
+}
+
+/// Converts a `ValTree` to a `ConstValue`, which is needed after MIR
+/// construction has finished.
+// FIXME Merge `valtree_to_const_value` and `valtree_into_mplace` into one function
+#[instrument(skip(tcx), level = "debug")]
+pub fn valtree_to_const_value<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+ valtree: ty::ValTree<'tcx>,
+) -> ConstValue<'tcx> {
+ // Basic idea: We directly construct `Scalar` values from trivial `ValTree`s
+ // (those for constants with type bool, int, uint, float or char).
+ // For all other types we create an `MPlace` and fill that by walking
+ // the `ValTree` and using `place_projection` and `place_field` to
+ // create inner `MPlace`s which are filled recursively.
+ // FIXME Does this need an example?
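+    //
+    // A sketch of the second case, using a hypothetical constant of type `(u8, bool)`:
+    // its valtree is a branch of two leaves; we allocate an `MPlace` for the tuple,
+    // write each leaf into the corresponding field place, and finally turn the filled
+    // place back into a `ConstValue` via `op_to_const`.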
+
+ let (param_env, ty) = param_env_ty.into_parts();
+ let mut ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
+
+ match ty.kind() {
+ ty::FnDef(..) => {
+ assert!(valtree.unwrap_branch().is_empty());
+ ConstValue::ZeroSized
+ }
+ ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => match valtree {
+ ty::ValTree::Leaf(scalar_int) => ConstValue::Scalar(Scalar::Int(scalar_int)),
+ ty::ValTree::Branch(_) => bug!(
+ "ValTrees for Bool, Int, Uint, Float or Char should have the form ValTree::Leaf"
+ ),
+ },
+ ty::Ref(_, _, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
+ let mut place = match ty.kind() {
+ ty::Ref(_, inner_ty, _) => {
+ // Need to create a place for the pointee to fill for Refs
+ create_pointee_place(&mut ecx, *inner_ty, valtree)
+ }
+ _ => create_mplace_from_layout(&mut ecx, ty),
+ };
+ debug!(?place);
+
+ valtree_into_mplace(&mut ecx, &mut place, valtree);
+ dump_place(&ecx, place.into());
+ intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();
+
+ let const_val = match ty.kind() {
+ ty::Ref(_, _, _) => {
+ let ref_place = place.to_ref(&tcx);
+ let imm =
+ ImmTy::from_immediate(ref_place, tcx.layout_of(param_env_ty).unwrap());
+
+ op_to_const(&ecx, &imm.into())
+ }
+ _ => op_to_const(&ecx, &place.into()),
+ };
+ debug!(?const_val);
+
+ const_val
+ }
+ ty::Never
+ | ty::Error(_)
+ | ty::Foreign(..)
+ | ty::Infer(ty::FreshIntTy(_))
+ | ty::Infer(ty::FreshFloatTy(_))
+ | ty::Projection(..)
+ | ty::Param(_)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Opaque(..)
+ | ty::Infer(_)
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::FnPtr(_)
+ | ty::RawPtr(_)
+ | ty::Str
+ | ty::Slice(_)
+ | ty::Dynamic(..) => bug!("no ValTree should have been created for type {:?}", ty.kind()),
+ }
+}
+
+#[instrument(skip(ecx), level = "debug")]
+fn valtree_into_mplace<'tcx>(
+ ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+ place: &mut MPlaceTy<'tcx>,
+ valtree: ty::ValTree<'tcx>,
+) {
+    // This matches on the `ValTree` and recursively writes the corresponding value(s)
+    // into the given place.
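+    //
+    // For example, a valtree for something like `Some(3u8)` is a branch whose first
+    // leaf encodes the variant index and whose remaining leaves are the fields of
+    // that variant; the `Adt` arm below downcasts the place accordingly and finally
+    // writes the discriminant.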
+
+ let tcx = ecx.tcx.tcx;
+ let ty = place.layout.ty;
+
+ match ty.kind() {
+ ty::FnDef(_, _) => {
+ ecx.write_immediate(Immediate::Uninit, &place.into()).unwrap();
+ }
+ ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
+ let scalar_int = valtree.unwrap_leaf();
+ debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place);
+ ecx.write_immediate(
+ Immediate::Scalar(ScalarMaybeUninit::Scalar(scalar_int.into())),
+ &place.into(),
+ )
+ .unwrap();
+ }
+ ty::Ref(_, inner_ty, _) => {
+ let mut pointee_place = create_pointee_place(ecx, *inner_ty, valtree);
+ debug!(?pointee_place);
+
+ valtree_into_mplace(ecx, &mut pointee_place, valtree);
+ dump_place(ecx, pointee_place.into());
+ intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place).unwrap();
+
+ let imm = match inner_ty.kind() {
+ ty::Slice(_) | ty::Str => {
+ let len = valtree.unwrap_branch().len();
+ let len_scalar =
+ ScalarMaybeUninit::Scalar(Scalar::from_machine_usize(len as u64, &tcx));
+
+ Immediate::ScalarPair(
+ ScalarMaybeUninit::from_maybe_pointer((*pointee_place).ptr, &tcx),
+ len_scalar,
+ )
+ }
+ _ => pointee_place.to_ref(&tcx),
+ };
+ debug!(?imm);
+
+ ecx.write_immediate(imm, &place.into()).unwrap();
+ }
+ ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str | ty::Slice(_) => {
+ let branches = valtree.unwrap_branch();
+
+ // Need to downcast place for enums
+ let (place_adjusted, branches, variant_idx) = match ty.kind() {
+ ty::Adt(def, _) if def.is_enum() => {
+ // First element of valtree corresponds to variant
+ let scalar_int = branches[0].unwrap_leaf();
+ let variant_idx = VariantIdx::from_u32(scalar_int.try_to_u32().unwrap());
+ let variant = def.variant(variant_idx);
+ debug!(?variant);
+
+ (
+ place.project_downcast(ecx, variant_idx).unwrap(),
+ &branches[1..],
+ Some(variant_idx),
+ )
+ }
+ _ => (*place, branches, None),
+ };
+ debug!(?place_adjusted, ?branches);
+
+ // Create the places (by indexing into `place`) for the fields and fill
+ // them recursively
+ for (i, inner_valtree) in branches.iter().enumerate() {
+ debug!(?i, ?inner_valtree);
+
+ let mut place_inner = match ty.kind() {
+ ty::Str | ty::Slice(_) => ecx.mplace_index(&place, i as u64).unwrap(),
+ _ if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty())
+ && i == branches.len() - 1 =>
+ {
+                        // Note: For custom DSTs we need to manually process the last, unsized field.
+                        // `create_pointee_place` created a `Pointer` to an `Allocation` covering the
+                        // complete (sized plus unsized) Adt, and here we fill that `Allocation` with
+                        // the values from the ValTree. For the unsized field we additionally have to
+                        // attach the metadata (the element count).
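+                        //
+                        // E.g. for a hypothetical `struct MyDst { header: u32, tail: [u16] }`,
+                        // the `tail` field is placed via `offset_with_meta` with the element
+                        // count as metadata, rather than going through `mplace_field`.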
+
+ let (unsized_inner_ty, num_elems) =
+ get_info_on_unsized_field(ty, valtree, tcx);
+ debug!(?unsized_inner_ty);
+
+ let inner_ty = match ty.kind() {
+ ty::Adt(def, substs) => {
+ def.variant(VariantIdx::from_u32(0)).fields[i].ty(tcx, substs)
+ }
+ ty::Tuple(inner_tys) => inner_tys[i],
+ _ => bug!("unexpected unsized type {:?}", ty),
+ };
+
+ let inner_layout =
+ tcx.layout_of(ty::ParamEnv::empty().and(inner_ty)).unwrap();
+ debug!(?inner_layout);
+
+ let offset = place_adjusted.layout.fields.offset(i);
+ place
+ .offset_with_meta(
+ offset,
+ MemPlaceMeta::Meta(Scalar::from_machine_usize(
+ num_elems as u64,
+ &tcx,
+ )),
+ inner_layout,
+ &tcx,
+ )
+ .unwrap()
+ }
+ _ => ecx.mplace_field(&place_adjusted, i).unwrap(),
+ };
+
+ debug!(?place_inner);
+ valtree_into_mplace(ecx, &mut place_inner, *inner_valtree);
+ dump_place(&ecx, place_inner.into());
+ }
+
+ debug!("dump of place_adjusted:");
+ dump_place(ecx, place_adjusted.into());
+
+ if let Some(variant_idx) = variant_idx {
+                // Don't forget to write the enum's discriminant into the place.
+ ecx.write_discriminant(variant_idx, &place.into()).unwrap();
+ }
+
+ debug!("dump of place after writing discriminant:");
+ dump_place(ecx, place.into());
+ }
+ _ => bug!("shouldn't have created a ValTree for {:?}", ty),
+ }
+}
+
+fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: PlaceTy<'tcx>) {
+ trace!("{:?}", ecx.dump_place(*place));
+}