author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:06:37 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:06:37 +0000
commit    246f239d9f40f633160f0c18f87a20922d4e77bb (patch)
tree      5a88572663584b3d4d28e5a20e10abab1be40884 /compiler/rustc_const_eval/src
parent    Releasing progress-linux version 1.64.0+dfsg1-1~progress7.99u1. (diff)
Merging debian version 1.65.0+dfsg1-2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_const_eval/src')
29 files changed, 861 insertions, 776 deletions
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
index 322bfd5ce..09d53331b 100644
--- a/compiler/rustc_const_eval/src/const_eval/error.rs
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -9,13 +9,13 @@ use rustc_span::{Span, Symbol};
 
 use super::InterpCx;
 use crate::interpret::{
-    struct_error, ErrorHandled, FrameInfo, InterpError, InterpErrorInfo, Machine, MachineStopType, UnsupportedOpInfo,
+    struct_error, ErrorHandled, FrameInfo, InterpError, InterpErrorInfo, Machine, MachineStopType,
+    UnsupportedOpInfo,
 };
 
 /// The CTFE machine has some custom error kinds.
 #[derive(Clone, Debug)]
 pub enum ConstEvalErrKind {
-    NeedsRfc(String),
     ConstAccessesStatic,
     ModifiedGlobal,
     AssertFailure(AssertKind<ConstInt>),
@@ -42,9 +42,6 @@ impl fmt::Display for ConstEvalErrKind {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         use self::ConstEvalErrKind::*;
         match *self {
-            NeedsRfc(ref msg) => {
-                write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
-            }
             ConstAccessesStatic => write!(f, "constant accesses static"),
             ModifiedGlobal => {
                 write!(f, "modifying a static's initial value from another static's initializer")
@@ -158,6 +155,7 @@ impl<'tcx> ConstEvalErr<'tcx> {
             InterpError::Unsupported(
                 UnsupportedOpInfo::ReadPointerAsBytes
                 | UnsupportedOpInfo::PartialPointerOverwrite(_)
+                | UnsupportedOpInfo::PartialPointerCopy(_),
             ) => {
                 err.help("this code performed an operation that depends on the underlying bytes representing a pointer");
                 err.help("the absolute address of a pointer is not known at compile-time, so such operations are not supported");
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 975fb4b22..a2f14e753 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -2,8 +2,8 @@ use super::{CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr};
 use crate::interpret::eval_nullary_intrinsic;
 use crate::interpret::{
     intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId,
-    Immediate, InternKind, InterpCx, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking,
-    ScalarMaybeUninit, StackPopCleanup, InterpError,
+    Immediate, InternKind, InterpCx, InterpError, InterpResult, MPlaceTy, MemoryKind, OpTy,
+    RefTracking, StackPopCleanup,
 };
 
 use rustc_hir::def::DefKind;
@@ -74,14 +74,16 @@ fn eval_body_using_ecx<'mir, 'tcx>(
             None => InternKind::Constant,
         }
     };
+    ecx.machine.check_alignment = false; // interning doesn't need to respect alignment
    intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
+    // we leave alignment checks off, since this `ecx` will not be used for further evaluation anyway
 
     debug!("eval_body_using_ecx done: {:?}", *ret);
     Ok(ret)
 }
 
 /// The `InterpCx` is only meant to be used to do field and index projections into constants for
-/// `simd_shuffle` and const patterns in match arms.
+/// `simd_shuffle` and const patterns in match arms. It never performs alignment checks.
 ///
 /// The function containing the `match` that is currently being analyzed may have generic bounds
 /// that inform us about the generic bounds of the constant. E.g., using an associated constant
@@ -98,7 +100,11 @@ pub(super) fn mk_eval_cx<'mir, 'tcx>(
         tcx,
         root_span,
         param_env,
-        CompileTimeInterpreter::new(tcx.const_eval_limit(), can_access_statics),
+        CompileTimeInterpreter::new(
+            tcx.const_eval_limit(),
+            can_access_statics,
+            /*check_alignment:*/ false,
+        ),
     )
 }
 
@@ -166,10 +172,7 @@ pub(super) fn op_to_const<'tcx>(
         // see comment on `let try_as_immediate` above
         Err(imm) => match *imm {
             _ if imm.layout.is_zst() => ConstValue::ZeroSized,
-            Immediate::Scalar(x) => match x {
-                ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s),
-                ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()),
-            },
+            Immediate::Scalar(x) => ConstValue::Scalar(x),
             Immediate::ScalarPair(a, b) => {
                 debug!("ScalarPair(a: {:?}, b: {:?})", a, b);
                 // We know `offset` is relative to the allocation, so we can use `into_parts`.
@@ -194,7 +197,7 @@ pub(super) fn op_to_const<'tcx>(
     }
 }
 
-#[instrument(skip(tcx), level = "debug")]
+#[instrument(skip(tcx), level = "debug", ret)]
 pub(crate) fn turn_into_const_value<'tcx>(
     tcx: TyCtxt<'tcx>,
     constant: ConstAlloc<'tcx>,
@@ -203,7 +206,13 @@ pub(crate) fn turn_into_const_value<'tcx>(
     let cid = key.value;
     let def_id = cid.instance.def.def_id();
     let is_static = tcx.is_static(def_id);
-    let ecx = mk_eval_cx(tcx, tcx.def_span(key.value.instance.def_id()), key.param_env, is_static);
+    // This is just accessing an already computed constant, so no need to check alignment here.
+    let ecx = mk_eval_cx(
+        tcx,
+        tcx.def_span(key.value.instance.def_id()),
+        key.param_env,
+        /*can_access_statics:*/ is_static,
+    );
 
     let mplace = ecx.raw_const_to_mplace(constant).expect(
         "can only fail if layout computation failed, \
@@ -215,10 +224,7 @@ pub(crate) fn turn_into_const_value<'tcx>(
     );
 
     // Turn this into a proper constant.
-    let const_val = op_to_const(&ecx, &mplace.into());
-    debug!(?const_val);
-
-    const_val
+    op_to_const(&ecx, &mplace.into())
 }
 
 #[instrument(skip(tcx), level = "debug")]
@@ -300,7 +306,11 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
         key.param_env,
         // Statics (and promoteds inside statics) may access other statics, because unlike consts
         // they do not have to behave "as if" they were evaluated at runtime.
-        CompileTimeInterpreter::new(tcx.const_eval_limit(), /*can_access_statics:*/ is_static),
+        CompileTimeInterpreter::new(
+            tcx.const_eval_limit(),
+            /*can_access_statics:*/ is_static,
+            /*check_alignment:*/ tcx.sess.opts.unstable_opts.extra_const_ub_checks,
+        ),
     );
 
     let res = ecx.load_mir(cid.instance.def, cid.promoted);
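The thread running through eval_queries.rs is a new `check_alignment` flag on the CTFE machine: `mk_eval_cx` turns it off (such contexts only project into already-computed constants), `eval_body_using_ecx` switches it off before interning, and full evaluation enables it only under `-Zextra-const-ub-checks`. A minimal sketch of the three-argument constructor shape, with simplified stand-in types (the real `Limit` and session flag live in `rustc_session`):

```rust
// Sketch only: mirrors the constructor introduced by this commit.
struct CompileTimeInterpreter {
    steps_remaining: usize,
    can_access_statics: bool,
    check_alignment: bool,
}

impl CompileTimeInterpreter {
    fn new(const_eval_limit: usize, can_access_statics: bool, check_alignment: bool) -> Self {
        CompileTimeInterpreter {
            steps_remaining: const_eval_limit,
            can_access_statics,
            check_alignment,
        }
    }
}

fn main() {
    let extra_const_ub_checks = false; // stand-in for -Zextra-const-ub-checks
    // Projection-only contexts (`mk_eval_cx`): alignment checks stay off.
    let _cx = CompileTimeInterpreter::new(2_000_000, false, false);
    // Full evaluation (`eval_to_allocation_raw_provider`): gated on the flag.
    let _eval = CompileTimeInterpreter::new(2_000_000, true, extra_const_ub_checks);
}
```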
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index fc2e6652a..e5acacd91 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -35,21 +35,7 @@ impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> {
         // All `#[rustc_do_not_const_check]` functions should be hooked here.
         let def_id = instance.def_id();
 
-        if Some(def_id) == self.tcx.lang_items().const_eval_select() {
-            // redirect to const_eval_select_ct
-            if let Some(const_eval_select) = self.tcx.lang_items().const_eval_select_ct() {
-                return Ok(Some(
-                    ty::Instance::resolve(
-                        *self.tcx,
-                        ty::ParamEnv::reveal_all(),
-                        const_eval_select,
-                        instance.substs,
-                    )
-                    .unwrap()
-                    .unwrap(),
-                ));
-            }
-        } else if Some(def_id) == self.tcx.lang_items().panic_display()
+        if Some(def_id) == self.tcx.lang_items().panic_display()
            || Some(def_id) == self.tcx.lang_items().begin_panic_fn()
         {
             // &str or &&str
@@ -89,10 +75,10 @@ pub struct CompileTimeInterpreter<'mir, 'tcx> {
     /// exhaustion error.
     ///
     /// Setting this to `0` disables the limit and allows the interpreter to run forever.
-    pub steps_remaining: usize,
+    pub(super) steps_remaining: usize,
 
     /// The virtual call stack.
-    pub(crate) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
+    pub(super) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
 
     /// We need to make sure consts never point to anything mutable, even recursively. That is
     /// relied on for pattern matching on consts with references.
@@ -101,14 +87,22 @@ pub struct CompileTimeInterpreter<'mir, 'tcx> {
     /// * Pointers to allocations inside of statics can never leak outside, to a non-static global.
     /// This boolean here controls the second part.
     pub(super) can_access_statics: bool,
+
+    /// Whether to check alignment during evaluation.
+    pub(super) check_alignment: bool,
 }
 
 impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> {
-    pub(crate) fn new(const_eval_limit: Limit, can_access_statics: bool) -> Self {
+    pub(crate) fn new(
+        const_eval_limit: Limit,
+        can_access_statics: bool,
+        check_alignment: bool,
+    ) -> Self {
         CompileTimeInterpreter {
             steps_remaining: const_eval_limit.0,
             stack: Vec::new(),
             can_access_statics,
+            check_alignment,
         }
     }
 }
@@ -197,34 +191,35 @@ impl interpret::MayLeak for ! {
 }
 
 impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
-    fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
+    /// See documentation on the `ptr_guaranteed_cmp` intrinsic.
+    fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> {
         Ok(match (a, b) {
             // Comparisons between integers are always known.
-            (Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
-            // Equality with integers can never be known for sure.
-            (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => false,
-            // FIXME: return `true` for when both sides are the same pointer, *except* that
-            // some things (like functions and vtables) do not have stable addresses
-            // so we need to be careful around them (see e.g. #73722).
-            (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
-        })
-    }
-
-    fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
-        Ok(match (a, b) {
-            // Comparisons between integers are always known.
-            (Scalar::Int(_), Scalar::Int(_)) => a != b,
+            (Scalar::Int { .. }, Scalar::Int { .. }) => {
+                if a == b {
+                    1
+                } else {
+                    0
+                }
+            }
             // Comparisons of abstract pointers with null pointers are known if the pointer
             // is in bounds, because if they are in bounds, the pointer can't be null.
             // Inequality with integers other than null can never be known for sure.
             (Scalar::Int(int), ptr @ Scalar::Ptr(..))
-            | (ptr @ Scalar::Ptr(..), Scalar::Int(int)) => {
-                int.is_null() && !self.scalar_may_be_null(ptr)?
+            | (ptr @ Scalar::Ptr(..), Scalar::Int(int))
+                if int.is_null() && !self.scalar_may_be_null(ptr)? =>
+            {
+                0
             }
-            // FIXME: return `true` for at least some comparisons where we can reliably
+            // Equality with integers can never be known for sure.
+            (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => 2,
+            // FIXME: return a `1` for when both sides are the same pointer, *except* that
+            // some things (like functions and vtables) do not have stable addresses
+            // so we need to be careful around them (see e.g. #73722).
+            // FIXME: return `0` for at least some comparisons where we can reliably
             // determine the result of runtime inequality tests at compile-time.
             // Examples include comparison of addresses in different static items.
-            (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
+            (Scalar::Ptr(..), Scalar::Ptr(..)) => 2,
         })
     }
 }
@@ -236,6 +231,16 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
 
     const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error
 
+    #[inline(always)]
+    fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+        ecx.machine.check_alignment
+    }
+
+    #[inline(always)]
+    fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+        ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks
+    }
+
     fn load_mir(
         ecx: &InterpCx<'mir, 'tcx, Self>,
         instance: ty::InstanceDef<'tcx>,
@@ -251,9 +256,10 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
                     );
                     throw_inval!(AlreadyReported(guar));
                 } else {
+                    // `find_mir_or_eval_fn` checks that this is a const fn before even calling us,
+                    // so this should be unreachable.
                     let path = ecx.tcx.def_path_str(def.did);
-                    Err(ConstEvalErrKind::NeedsRfc(format!("calling extern function `{}`", path))
-                        .into())
+                    bug!("trying to call extern function `{path}` at compile-time");
                 }
             }
             _ => Ok(ecx.tcx.instance_mir(instance)),
@@ -321,22 +327,14 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
 
         // CTFE-specific intrinsics.
         let Some(ret) = target else {
-            return Err(ConstEvalErrKind::NeedsRfc(format!(
-                "calling intrinsic `{}`",
-                intrinsic_name
-            ))
-            .into());
+            throw_unsup_format!("intrinsic `{intrinsic_name}` is not supported at compile-time");
         };
         match intrinsic_name {
-            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
-                let a = ecx.read_immediate(&args[0])?.to_scalar()?;
-                let b = ecx.read_immediate(&args[1])?.to_scalar()?;
-                let cmp = if intrinsic_name == sym::ptr_guaranteed_eq {
-                    ecx.guaranteed_eq(a, b)?
-                } else {
-                    ecx.guaranteed_ne(a, b)?
-                };
-                ecx.write_scalar(Scalar::from_bool(cmp), dest)?;
+            sym::ptr_guaranteed_cmp => {
+                let a = ecx.read_scalar(&args[0])?;
+                let b = ecx.read_scalar(&args[1])?;
+                let cmp = ecx.guaranteed_cmp(a, b)?;
+                ecx.write_scalar(Scalar::from_u8(cmp), dest)?;
             }
             sym::const_allocate => {
                 let size = ecx.read_scalar(&args[0])?.to_machine_usize(ecx)?;
@@ -382,11 +380,9 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
                 }
             }
             _ => {
-                return Err(ConstEvalErrKind::NeedsRfc(format!(
-                    "calling intrinsic `{}`",
-                    intrinsic_name
-                ))
-                .into());
+                throw_unsup_format!(
+                    "intrinsic `{intrinsic_name}` is not supported at compile-time"
+                );
             }
         }
 
@@ -429,7 +425,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         _left: &ImmTy<'tcx>,
         _right: &ImmTy<'tcx>,
     ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
-        Err(ConstEvalErrKind::NeedsRfc("pointer arithmetic or comparison".to_string()).into())
+        throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
     }
 
     fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
@@ -451,7 +447,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         _ecx: &mut InterpCx<'mir, 'tcx, Self>,
         _ptr: Pointer<AllocId>,
     ) -> InterpResult<'tcx> {
-        Err(ConstEvalErrKind::NeedsRfc("exposing pointers".to_string()).into())
+        // This is only reachable with -Zunleash-the-miri-inside-of-you.
+        throw_unsup_format!("exposing pointers is not possible at compile-time")
     }
 
     #[inline(always)]
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index 948c33494..d9c4ae4d5 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -1,16 +1,16 @@
 // Not in interpret to make sure we do not use private implementation details
 
+use crate::errors::MaxNumNodesInConstErr;
+use crate::interpret::{
+    intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, InterpResult, MemPlaceMeta,
+    Scalar,
+};
 use rustc_hir::Mutability;
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_span::{source_map::DUMMY_SP, symbol::Symbol};
 
-use crate::interpret::{
-    intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, InterpResult, MemPlaceMeta,
-    Scalar,
-};
-
 mod error;
 mod eval_queries;
 mod fn_queries;
@@ -72,12 +72,17 @@ pub(crate) fn eval_to_valtree<'tcx>(
         Ok(valtree) => Ok(Some(valtree)),
         Err(err) => {
             let did = cid.instance.def_id();
-            let s = cid.display(tcx);
+            let global_const_id = cid.display(tcx);
             match err {
                 ValTreeCreationError::NodesOverflow => {
-                    let msg = format!("maximum number of nodes exceeded in constant {}", &s);
+                    let msg = format!(
+                        "maximum number of nodes exceeded in constant {}",
+                        &global_const_id
+                    );
                     let mut diag = match tcx.hir().span_if_local(did) {
-                        Some(span) => tcx.sess.struct_span_err(span, &msg),
+                        Some(span) => {
+                            tcx.sess.create_err(MaxNumNodesInConstErr { span, global_const_id })
+                        }
                         None => tcx.sess.struct_err(&msg),
                     };
                     diag.emit();
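With `NeedsRfc` gone, unsupported operations in machine.rs above now raise plain `throw_unsup_format!` errors, and the paired boolean intrinsics `ptr_guaranteed_eq`/`ptr_guaranteed_ne` collapse into one tri-state `ptr_guaranteed_cmp`: `0` means the operands are guaranteed unequal, `1` guaranteed equal, and `2` that CTFE cannot decide. A small model of that encoding (illustrative only; the real intrinsic traffics in raw `u8` values):

```rust
// Decodes the u8 returned by `guaranteed_cmp` above into a readable form.
#[derive(Debug, PartialEq)]
enum PtrCmp {
    NotEqual, // 0: definitely different values
    Equal,    // 1: definitely the same value
    Unknown,  // 2: undecidable at compile time (e.g. two abstract pointers)
}

fn decode(raw: u8) -> PtrCmp {
    match raw {
        0 => PtrCmp::NotEqual,
        1 => PtrCmp::Equal,
        _ => PtrCmp::Unknown,
    }
}

fn main() {
    // Integer addresses compare exactly, like the (Int, Int) arm above.
    assert_eq!(decode((3 == 3) as u8), PtrCmp::Equal);
    // A null integer against an in-bounds pointer is the guaranteed-unequal arm.
    assert_eq!(decode(0), PtrCmp::NotEqual);
    // Pointer/pointer comparisons stay conservative.
    assert_eq!(decode(2), PtrCmp::Unknown);
}
```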
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index 8fff4571d..a964fe846 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -3,7 +3,7 @@ use super::machine::CompileTimeEvalContext;
 use super::{ValTreeCreationError, ValTreeCreationResult, VALTREE_MAX_NODES};
 use crate::interpret::{
     intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta,
-    MemoryKind, PlaceTy, Scalar, ScalarMaybeUninit,
+    MemoryKind, PlaceTy, Scalar,
 };
 use crate::interpret::{MPlaceTy, Value};
 use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
@@ -90,14 +90,14 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
             let Ok(val) = ecx.read_immediate(&place.into()) else {
                 return Err(ValTreeCreationError::Other);
             };
-            let val = val.to_scalar().unwrap();
+            let val = val.to_scalar();
             *num_nodes += 1;
 
             Ok(ty::ValTree::Leaf(val.assert_int()))
         }
 
         // Raw pointers are not allowed in type level constants, as we cannot properly test them for
-        // equality at compile-time (see `ptr_guaranteed_eq`/`_ne`).
+        // equality at compile-time (see `ptr_guaranteed_cmp`).
         // Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to
         // agree with runtime equality tests.
         ty::FnPtr(_) | ty::RawPtr(_) => Err(ValTreeCreationError::NonSupportedType),
@@ -204,7 +204,7 @@ fn get_info_on_unsized_field<'tcx>(
     (unsized_inner_ty, num_elems)
 }
 
-#[instrument(skip(ecx), level = "debug")]
+#[instrument(skip(ecx), level = "debug", ret)]
 fn create_pointee_place<'tcx>(
     ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
     ty: Ty<'tcx>,
@@ -237,14 +237,11 @@ fn create_pointee_place<'tcx>(
         let ptr = ecx.allocate_ptr(size, align, MemoryKind::Stack).unwrap();
         debug!(?ptr);
 
-        let place = MPlaceTy::from_aligned_ptr_with_meta(
+        MPlaceTy::from_aligned_ptr_with_meta(
             ptr.into(),
             layout,
             MemPlaceMeta::Meta(Scalar::from_machine_usize(num_elems as u64, &tcx)),
-        );
-        debug!(?place);
-
-        place
+        )
     } else {
         create_mplace_from_layout(ecx, ty)
     }
@@ -253,7 +250,7 @@ fn create_pointee_place<'tcx>(
 /// Converts a `ValTree` to a `ConstValue`, which is needed after mir
 /// construction has finished.
 // FIXME Merge `valtree_to_const_value` and `valtree_into_mplace` into one function
-#[instrument(skip(tcx), level = "debug")]
+#[instrument(skip(tcx), level = "debug", ret)]
 pub fn valtree_to_const_value<'tcx>(
     tcx: TyCtxt<'tcx>,
     param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
@@ -294,7 +291,7 @@ pub fn valtree_to_const_value<'tcx>(
             dump_place(&ecx, place.into());
             intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();
 
-            let const_val = match ty.kind() {
+            match ty.kind() {
                 ty::Ref(_, _, _) => {
                     let ref_place = place.to_ref(&tcx);
                     let imm =
@@ -303,10 +300,7 @@ pub fn valtree_to_const_value<'tcx>(
                     op_to_const(&ecx, &imm.into())
                 }
                 _ => op_to_const(&ecx, &place.into()),
-            };
-            debug!(?const_val);
-
-            const_val
+            }
         }
         ty::Never
         | ty::Error(_)
@@ -349,11 +343,7 @@ fn valtree_into_mplace<'tcx>(
         ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
             let scalar_int = valtree.unwrap_leaf();
             debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place);
-            ecx.write_immediate(
-                Immediate::Scalar(ScalarMaybeUninit::Scalar(scalar_int.into())),
-                &place.into(),
-            )
-            .unwrap();
+            ecx.write_immediate(Immediate::Scalar(scalar_int.into()), &place.into()).unwrap();
         }
         ty::Ref(_, inner_ty, _) => {
             let mut pointee_place = create_pointee_place(ecx, *inner_ty, valtree);
@@ -366,11 +356,10 @@ fn valtree_into_mplace<'tcx>(
             let imm = match inner_ty.kind() {
                 ty::Slice(_) | ty::Str => {
                     let len = valtree.unwrap_branch().len();
-                    let len_scalar =
-                        ScalarMaybeUninit::Scalar(Scalar::from_machine_usize(len as u64, &tcx));
+                    let len_scalar = Scalar::from_machine_usize(len as u64, &tcx);
 
                     Immediate::ScalarPair(
-                        ScalarMaybeUninit::from_maybe_pointer((*pointee_place).ptr, &tcx),
+                        Scalar::from_maybe_pointer((*pointee_place).ptr, &tcx),
                         len_scalar,
                     )
                 }
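The `ScalarMaybeUninit` wrapper disappears from this file (and from every other file in this commit): `Immediate::Scalar` now carries an initialized `Scalar` directly, with uninitialized data represented by a dedicated `Immediate::Uninit` variant instead, so call sites drop the `.unwrap()`/`check_init()` step. A simplified before/after model (stand-in types, not the interpreter's own):

```rust
#[derive(Copy, Clone, Debug)]
struct Scalar(u128); // stand-in for the interpreter's scalar

// Before: each scalar slot carried its own "maybe uninit" state.
#[allow(dead_code)]
enum ScalarMaybeUninit {
    Scalar(Scalar),
    Uninit,
}

// After: the uninit case moves up to the `Immediate` level, so the
// scalar variants are always initialized and need no unwrapping.
#[allow(dead_code)]
enum Immediate {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Uninit,
}

fn main() {
    let imm = Immediate::Scalar(Scalar(42));
    if let Immediate::Scalar(s) = imm {
        println!("initialized by construction: {s:?}");
    }
}
```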
"E0658")] pub(crate) struct MutDerefErr { #[primary_span] pub span: Span, @@ -73,7 +73,7 @@ pub(crate) struct MutDerefErr { } #[derive(SessionDiagnostic)] -#[error(const_eval::transient_mut_borrow, code = "E0658")] +#[diag(const_eval::transient_mut_borrow, code = "E0658")] pub(crate) struct TransientMutBorrowErr { #[primary_span] pub span: Span, @@ -81,9 +81,116 @@ pub(crate) struct TransientMutBorrowErr { } #[derive(SessionDiagnostic)] -#[error(const_eval::transient_mut_borrow_raw, code = "E0658")] +#[diag(const_eval::transient_mut_borrow_raw, code = "E0658")] pub(crate) struct TransientMutBorrowErrRaw { #[primary_span] pub span: Span, pub kind: ConstContext, } + +#[derive(SessionDiagnostic)] +#[diag(const_eval::max_num_nodes_in_const)] +pub(crate) struct MaxNumNodesInConstErr { + #[primary_span] + pub span: Span, + pub global_const_id: String, +} + +#[derive(SessionDiagnostic)] +#[diag(const_eval::unallowed_fn_pointer_call)] +pub(crate) struct UnallowedFnPointerCall { + #[primary_span] + pub span: Span, + pub kind: ConstContext, +} + +#[derive(SessionDiagnostic)] +#[diag(const_eval::unstable_const_fn)] +pub(crate) struct UnstableConstFn { + #[primary_span] + pub span: Span, + pub def_path: String, +} + +#[derive(SessionDiagnostic)] +#[diag(const_eval::unallowed_mutable_refs, code = "E0764")] +pub(crate) struct UnallowedMutableRefs { + #[primary_span] + pub span: Span, + pub kind: ConstContext, + #[note(const_eval::teach_note)] + pub teach: Option<()>, +} + +#[derive(SessionDiagnostic)] +#[diag(const_eval::unallowed_mutable_refs_raw, code = "E0764")] +pub(crate) struct UnallowedMutableRefsRaw { + #[primary_span] + pub span: Span, + pub kind: ConstContext, + #[note(const_eval::teach_note)] + pub teach: Option<()>, +} +#[derive(SessionDiagnostic)] +#[diag(const_eval::non_const_fmt_macro_call, code = "E0015")] +pub(crate) struct NonConstFmtMacroCall { + #[primary_span] + pub span: Span, + pub kind: ConstContext, +} + +#[derive(SessionDiagnostic)] +#[diag(const_eval::non_const_fn_call, code = "E0015")] +pub(crate) struct NonConstFnCall { + #[primary_span] + pub span: Span, + pub def_path_str: String, + pub kind: ConstContext, +} + +#[derive(SessionDiagnostic)] +#[diag(const_eval::unallowed_op_in_const_context)] +pub(crate) struct UnallowedOpInConstContext { + #[primary_span] + pub span: Span, + pub msg: String, +} + +#[derive(SessionDiagnostic)] +#[diag(const_eval::unallowed_heap_allocations, code = "E0010")] +pub(crate) struct UnallowedHeapAllocations { + #[primary_span] + #[label] + pub span: Span, + pub kind: ConstContext, + #[note(const_eval::teach_note)] + pub teach: Option<()>, +} + +#[derive(SessionDiagnostic)] +#[diag(const_eval::unallowed_inline_asm, code = "E0015")] +pub(crate) struct UnallowedInlineAsm { + #[primary_span] + pub span: Span, + pub kind: ConstContext, +} + +#[derive(SessionDiagnostic)] +#[diag(const_eval::interior_mutable_data_refer, code = "E0492")] +pub(crate) struct InteriorMutableDataRefer { + #[primary_span] + #[label] + pub span: Span, + #[help] + pub opt_help: Option<()>, + pub kind: ConstContext, + #[note(const_eval::teach_note)] + pub teach: Option<()>, +} + +#[derive(SessionDiagnostic)] +#[diag(const_eval::interior_mutability_borrow)] +pub(crate) struct InteriorMutabilityBorrow { + #[primary_span] + pub span: Span, +} diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs index c97c31eb9..cbe985480 100644 --- a/compiler/rustc_const_eval/src/interpret/cast.rs +++ 
b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -108,6 +108,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     _ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
                 }
             }
+
+            DynStar => {
+                if let ty::Dynamic(data, _, ty::DynStar) = cast_ty.kind() {
+                    // Initial cast from sized to dyn trait
+                    let vtable = self.get_vtable_ptr(src.layout.ty, data.principal())?;
+                    let vtable = Scalar::from_maybe_pointer(vtable, self);
+                    let data = self.read_immediate(src)?.to_scalar();
+                    let _assert_pointer_sized = data.to_pointer(self)?;
+                    let val = Immediate::ScalarPair(data, vtable);
+                    self.write_immediate(val, dest)?;
+                } else {
+                    bug!()
+                }
+            }
         }
         Ok(())
     }
@@ -123,10 +137,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         match src.layout.ty.kind() {
             // Floating point
             Float(FloatTy::F32) => {
-                return Ok(self.cast_from_float(src.to_scalar()?.to_f32()?, cast_ty).into());
+                return Ok(self.cast_from_float(src.to_scalar().to_f32()?, cast_ty).into());
             }
             Float(FloatTy::F64) => {
-                return Ok(self.cast_from_float(src.to_scalar()?.to_f64()?, cast_ty).into());
+                return Ok(self.cast_from_float(src.to_scalar().to_f64()?, cast_ty).into());
             }
             // The rest is integer/pointer-"like", including fn ptr casts
             _ => assert!(
@@ -153,7 +167,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             assert_eq!(dest_layout.size, self.pointer_size());
             assert!(src.layout.ty.is_unsafe_ptr());
             return match **src {
-                Immediate::ScalarPair(data, _) => Ok(data.check_init()?.into()),
+                Immediate::ScalarPair(data, _) => Ok(data.into()),
                 Immediate::Scalar(..) => span_bug!(
                     self.cur_span(),
                     "{:?} input to a fat-to-thin cast ({:?} -> {:?})",
@@ -167,7 +181,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
 
         // # The remaining source values are scalar and "int-like".
-        let scalar = src.to_scalar()?;
+        let scalar = src.to_scalar();
         Ok(self.cast_from_int_like(scalar, src.layout, cast_ty)?.into())
     }
 
@@ -179,7 +193,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         assert_matches!(src.layout.ty.kind(), ty::RawPtr(_) | ty::FnPtr(_));
         assert!(cast_ty.is_integral());
 
-        let scalar = src.to_scalar()?;
+        let scalar = src.to_scalar();
         let ptr = scalar.to_pointer(self)?;
         match ptr.into_pointer_or_addr() {
             Ok(ptr) => M::expose_ptr(self, ptr)?,
@@ -197,7 +211,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         assert_matches!(cast_ty.kind(), ty::RawPtr(_));
 
         // First cast to usize.
-        let scalar = src.to_scalar()?;
+        let scalar = src.to_scalar();
         let addr = self.cast_from_int_like(scalar, src.layout, self.tcx.types.usize)?;
         let addr = addr.to_machine_usize(self)?;
 
@@ -291,14 +305,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         match (&src_pointee_ty.kind(), &dest_pointee_ty.kind()) {
             (&ty::Array(_, length), &ty::Slice(_)) => {
-                let ptr = self.read_immediate(src)?.to_scalar()?;
+                let ptr = self.read_scalar(src)?;
                 // u64 cast is from usize to u64, which is always good
                 let val =
                     Immediate::new_slice(ptr, length.eval_usize(*self.tcx, self.param_env), self);
                 self.write_immediate(val, dest)
             }
             (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
-                let (old_data, old_vptr) = self.read_immediate(src)?.to_scalar_pair()?;
+                let val = self.read_immediate(src)?;
+                if data_a.principal() == data_b.principal() {
+                    // A NOP cast that doesn't actually change anything, should be allowed even with mismatching vtables.
+                    return self.write_immediate(*val, dest);
+                }
+                let (old_data, old_vptr) = val.to_scalar_pair();
                 let old_vptr = old_vptr.to_pointer(self)?;
                 let (ty, old_trait) = self.get_ptr_vtable(old_vptr)?;
                 if old_trait != data_a.principal() {
@@ -307,10 +326,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let new_vptr = self.get_vtable_ptr(ty, data_b.principal())?;
                 self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
             }
-            (_, &ty::Dynamic(ref data, _)) => {
+            (_, &ty::Dynamic(ref data, _, ty::Dyn)) => {
                 // Initial cast from sized to dyn trait
                 let vtable = self.get_vtable_ptr(src_pointee_ty, data.principal())?;
-                let ptr = self.read_immediate(src)?.to_scalar()?;
+                let ptr = self.read_scalar(src)?;
                 let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
                 self.write_immediate(val, dest)
             }
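The new `DynStar` arm models a `dyn*` value (introduced in this release behind the `dyn_star` feature gate) the same way a fat pointer is modeled: an `Immediate::ScalarPair` of a pointer-sized data word and a vtable pointer, except the data word is the value itself rather than a pointer to it. A rough layout model in plain Rust (conceptual stand-ins only):

```rust
// Conceptual picture of the ScalarPair built in the DynStar arm above.
struct DynStarRepr {
    data: usize,   // the value itself, squeezed into one machine word
    vtable: usize, // stand-in for the vtable pointer of the erased type
}

fn main() {
    let repr = DynStarRepr { data: 7, vtable: 0x1000 };
    // `data.to_pointer(self)?` in the diff enforces exactly this invariant:
    // the payload of a `dyn*` must be pointer-sized.
    assert_eq!(std::mem::size_of_val(&repr.data), std::mem::size_of::<usize>());
    let _ = repr.vtable;
}
```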
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 150d6589b..d37eaeed0 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -21,7 +21,7 @@ use rustc_target::abi::{call::FnAbi, Align, HasDataLayout, Size, TargetDataLayou
 use super::{
     AllocId, GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace,
     MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, PointerArithmetic, Provenance,
-    Scalar, ScalarMaybeUninit, StackPopJump,
+    Scalar, StackPopJump,
 };
 use crate::transform::validate::equal_up_to_regions;
 
@@ -187,9 +187,6 @@ pub enum LocalValue<Prov: Provenance = AllocId> {
 impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
     /// Read the local's value or error if the local is not yet live or not live anymore.
-    ///
-    /// Note: This may only be invoked from the `Machine::access_local` hook and not from
-    /// anywhere else. You may be invalidating machine invariants if you do!
     #[inline]
     pub fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
         match &self.value {
@@ -782,7 +779,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         assert_eq!(
             unwinding,
             match self.frame().loc {
-                Ok(loc) => self.body().basic_blocks()[loc.block].is_cleanup,
+                Ok(loc) => self.body().basic_blocks[loc.block].is_cleanup,
                 Err(_) => true,
             }
         );
@@ -991,16 +988,16 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
             }
             LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
                 write!(fmt, " {:?}", val)?;
-                if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val {
+                if let Scalar::Ptr(ptr, _size) = val {
                     allocs.push(ptr.provenance.get_alloc_id());
                 }
             }
             LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
                 write!(fmt, " ({:?}, {:?})", val1, val2)?;
-                if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val1 {
+                if let Scalar::Ptr(ptr, _size) = val1 {
                     allocs.push(ptr.provenance.get_alloc_id());
                 }
-                if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val2 {
+                if let Scalar::Ptr(ptr, _size) = val2 {
                     allocs.push(ptr.provenance.get_alloc_id());
                 }
             }
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 376b8872c..24dbc7695 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -134,7 +134,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval:
         alloc.mutability = Mutability::Not;
     };
     // link the alloc id to the actual allocation
-    leftover_allocations.extend(alloc.relocations().iter().map(|&(_, alloc_id)| alloc_id));
+    leftover_allocations.extend(alloc.provenance().iter().map(|&(_, alloc_id)| alloc_id));
     let alloc = tcx.intern_const_alloc(alloc);
     tcx.set_alloc_id_memory(alloc_id, alloc);
     None
@@ -191,10 +191,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
             return Ok(true);
         };
 
-        // If there are no relocations in this allocation, it does not contain references
+        // If there is no provenance in this allocation, it does not contain references
         // that point to another allocation, and we can avoid the interning walk.
         if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
-            if !alloc.has_relocations() {
+            if !alloc.has_provenance() {
                 return Ok(false);
             }
         } else {
@@ -233,8 +233,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
     }
 
     fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
-        // Handle Reference types, as these are the only relocations supported by const eval.
-        // Raw pointers (and boxes) are handled by the `leftover_relocations` logic.
+        // Handle Reference types, as these are the only types with provenance supported by const eval.
+        // Raw pointers (and boxes) are handled by the `leftover_allocations` logic.
         let tcx = self.ecx.tcx;
         let ty = mplace.layout.ty;
         if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() {
@@ -334,7 +334,7 @@ pub enum InternKind {
 /// tracks where in the value we are and thus can show much better error messages.
 /// Any errors here would anyway be turned into `const_err` lints, whereas validation failures
 /// are hard errors.
-#[tracing::instrument(level = "debug", skip(ecx))]
+#[instrument(level = "debug", skip(ecx))]
 pub fn intern_const_alloc_recursive<
     'mir,
     'tcx: 'mir,
@@ -410,7 +410,7 @@ pub fn intern_const_alloc_recursive<
         // references and a `leftover_allocations` set (where we only have a todo-list here).
         // So we hand-roll the interning logic here again.
         match intern_kind {
-            // Statics may contain mutable allocations even behind relocations.
+            // Statics may point to mutable allocations.
             // Even for immutable statics it would be ok to have mutable allocations behind
             // raw pointers, e.g. for `static FOO: *const AtomicUsize = &AtomicUsize::new(42)`.
             InternKind::Static(_) => {}
@@ -441,7 +441,7 @@ pub fn intern_const_alloc_recursive<
             }
             let alloc = tcx.intern_const_alloc(alloc);
             tcx.set_alloc_id_memory(alloc_id, alloc);
-            for &(_, alloc_id) in alloc.inner().relocations().iter() {
+            for &(_, alloc_id) in alloc.inner().provenance().iter() {
                 if leftover_allocations.insert(alloc_id) {
                     todo.push(alloc_id);
                 }
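From here on the commit carries out a mechanical vocabulary change: what the interpreter used to call "relocations" is now called "provenance". The `Allocation` accessors are renamed accordingly (`relocations()` → `provenance()`, `has_relocations()` → `has_provenance()`/`range_has_provenance()`, and `prepare_relocation_copy`/`mark_relocation_range` → `prepare_provenance_copy`/`mark_provenance_range`), and the same renames reappear in intrinsics.rs and memory.rs below.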
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 08209eb79..8637d6a77 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -8,7 +8,7 @@ use rustc_hir::def_id::DefId;
 use rustc_middle::mir::{
     self,
     interpret::{ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar},
-    BinOp,
+    BinOp, NonDivergingIntrinsic,
 };
 use rustc_middle::ty;
 use rustc_middle::ty::layout::LayoutOf as _;
@@ -79,9 +79,9 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
         ty::Projection(_)
         | ty::Opaque(_, _)
         | ty::Param(_)
-        | ty::Bound(_, _)
         | ty::Placeholder(_)
         | ty::Infer(_) => throw_inval!(TooGeneric),
+        ty::Bound(_, _) => bug!("bound ty during ctfe"),
         ty::Bool
         | ty::Char
         | ty::Int(_)
@@ -95,7 +95,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
         | ty::Ref(_, _, _)
         | ty::FnDef(_, _)
         | ty::FnPtr(_)
-        | ty::Dynamic(_, _)
+        | ty::Dynamic(_, _, _)
         | ty::Closure(_, _)
         | ty::Generator(_, _, _)
         | ty::GeneratorWitness(_)
@@ -184,7 +184,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             | sym::bitreverse => {
                 let ty = substs.type_at(0);
                 let layout_of = self.layout_of(ty)?;
-                let val = self.read_scalar(&args[0])?.check_init()?;
+                let val = self.read_scalar(&args[0])?;
                 let bits = val.to_bits(layout_of.size)?;
                 let kind = match layout_of.abi {
                     Abi::Scalar(scalar) => scalar.primitive(),
@@ -256,7 +256,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
                 if overflowed {
                     let layout = self.layout_of(substs.type_at(0))?;
-                    let r_val = r.to_scalar()?.to_bits(layout.size)?;
+                    let r_val = r.to_scalar().to_bits(layout.size)?;
                     if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
                         throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
                     } else {
@@ -269,9 +269,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                 // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
                 let layout = self.layout_of(substs.type_at(0))?;
-                let val = self.read_scalar(&args[0])?.check_init()?;
+                let val = self.read_scalar(&args[0])?;
                 let val_bits = val.to_bits(layout.size)?;
-                let raw_shift = self.read_scalar(&args[1])?.check_init()?;
+                let raw_shift = self.read_scalar(&args[1])?;
                 let raw_shift_bits = raw_shift.to_bits(layout.size)?;
                 let width_bits = u128::from(layout.size.bits());
                 let shift_bits = raw_shift_bits % width_bits;
@@ -320,7 +320,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let (a_offset, b_offset) =
                     match (self.ptr_try_get_alloc_id(a), self.ptr_try_get_alloc_id(b)) {
                         (Err(a), Err(b)) => {
-                            // Neither poiner points to an allocation.
+                            // Neither pointer points to an allocation.
                            // If these are inequal or null, this *will* fail the deref check below.
                             (a, b)
                         }
@@ -506,12 +506,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // These just return their argument
                 self.copy_op(&args[0], dest, /*allow_transmute*/ false)?;
             }
-            sym::assume => {
-                let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
-                if !cond {
-                    throw_ub_format!("`assume` intrinsic called with `false`");
-                }
-            }
             sym::raw_eq => {
                 let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
                 self.write_scalar(result, dest)?;
@@ -536,6 +530,32 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok(true)
     }
 
+    pub(super) fn emulate_nondiverging_intrinsic(
+        &mut self,
+        intrinsic: &NonDivergingIntrinsic<'tcx>,
+    ) -> InterpResult<'tcx> {
+        match intrinsic {
+            NonDivergingIntrinsic::Assume(op) => {
+                let op = self.eval_operand(op, None)?;
+                let cond = self.read_scalar(&op)?.to_bool()?;
+                if !cond {
+                    throw_ub_format!("`assume` called with `false`");
+                }
+                Ok(())
+            }
+            NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
+                count,
+                src,
+                dst,
+            }) => {
+                let src = self.eval_operand(src, None)?;
+                let dst = self.eval_operand(dst, None)?;
+                let count = self.eval_operand(count, None)?;
+                self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)
+            }
+        }
+    }
+
     pub fn exact_div(
         &mut self,
         a: &ImmTy<'tcx, M::Provenance>,
@@ -570,7 +590,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // term since the sign of the second term can be inferred from this and
         // the fact that the operation has overflowed (if either is 0 no
         // overflow can occur)
-        let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
+        let first_term: u128 = l.to_scalar().to_bits(l.layout.size)?;
         let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
         if first_term_positive {
             // Negative overflow not possible since the positive first term
@@ -687,10 +707,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
         assert!(!layout.is_unsized());
 
-        let lhs = self.read_pointer(lhs)?;
-        let rhs = self.read_pointer(rhs)?;
-        let lhs_bytes = self.read_bytes_ptr(lhs, layout.size)?;
-        let rhs_bytes = self.read_bytes_ptr(rhs, layout.size)?;
+        let get_bytes = |this: &InterpCx<'mir, 'tcx, M>,
+                         op: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+                         size|
+         -> InterpResult<'tcx, &[u8]> {
+            let ptr = this.read_pointer(op)?;
+            let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
+                // zero-sized access
+                return Ok(&[]);
+            };
+            if alloc_ref.has_provenance() {
+                throw_ub_format!("`raw_eq` on bytes with provenance");
+            }
+            alloc_ref.get_bytes_strip_provenance()
+        };
+
+        let lhs_bytes = get_bytes(self, lhs, layout.size)?;
+        let rhs_bytes = get_bytes(self, rhs, layout.size)?;
         Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
     }
 }
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
index 5864b9215..91f4f0425 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
@@ -28,7 +28,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             let mut source_info = *frame.body.source_info(loc);
 
             // If this is a `Call` terminator, use the `fn_span` instead.
-            let block = &frame.body.basic_blocks()[loc.block];
+            let block = &frame.body.basic_blocks[loc.block];
             if loc.statement_index == block.statements.len() {
                 debug!(
                     "find_closest_untracked_caller_location: got terminator {:?} ({:?})",
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
index f9847742f..7e4c5fcb0 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
@@ -48,7 +48,7 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
             | ty::FnPtr(_)
             | ty::Never
             | ty::Tuple(_)
-            | ty::Dynamic(_, _) => self.pretty_print_type(ty),
+            | ty::Dynamic(_, _, _) => self.pretty_print_type(ty),
 
             // Placeholders (all printed as `_` to uniformize them).
             ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error(_) => {
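`assume` is no longer dispatched here as an ordinary intrinsic call: together with `copy_nonoverlapping` it is now carried in MIR as a statement-level `NonDivergingIntrinsic`, handled by the new `emulate_nondiverging_intrinsic`, which is why the `sym::assume` arm above is deleted rather than rewritten. A toy model of that statement-level dispatch (illustrative only, not rustc's types):

```rust
// Mirrors the match structure of `emulate_nondiverging_intrinsic` above.
enum NonDivergingIntrinsic {
    Assume(bool),
    CopyNonOverlapping { src: usize, dst: usize, count: usize },
}

fn emulate(i: NonDivergingIntrinsic) -> Result<(), String> {
    match i {
        // `assume` returns nothing; a false operand is immediate UB,
        // surfaced as an error just like `throw_ub_format!` does.
        NonDivergingIntrinsic::Assume(cond) => {
            if cond { Ok(()) } else { Err("`assume` called with `false`".into()) }
        }
        NonDivergingIntrinsic::CopyNonOverlapping { src, dst, count } => {
            // The real interpreter delegates to its copy routine with
            // nonoverlapping = true; nothing to model here.
            let _ = (src, dst, count);
            Ok(())
        }
    }
}

fn main() {
    assert!(emulate(NonDivergingIntrinsic::Assume(true)).is_ok());
    assert!(emulate(NonDivergingIntrinsic::Assume(false)).is_err());
}
```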
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index 71ccd1799..530e252b7 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -6,6 +6,7 @@ use std::borrow::{Borrow, Cow};
 use std::fmt::Debug;
 use std::hash::Hash;
 
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_middle::mir;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::def_id::DefId;
@@ -123,18 +124,15 @@ pub trait Machine<'mir, 'tcx>: Sized {
     /// Whether memory accesses should be alignment-checked.
     fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
 
-    /// Whether, when checking alignment, we should `force_int` and thus support
+    /// Whether, when checking alignment, we should look at the actual address and thus support
     /// custom alignment logic based on whatever the integer address happens to be.
     ///
-    /// Requires Provenance::OFFSET_IS_ADDR to be true.
-    fn force_int_for_alignment_check(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+    /// If this returns true, Provenance::OFFSET_IS_ADDR must be true.
+    fn use_addr_for_alignment_check(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
 
     /// Whether to enforce the validity invariant
     fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
 
-    /// Whether to enforce integers and floats being initialized.
-    fn enforce_number_init(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
-
     /// Whether function calls should be [ABI](CallAbi)-checked.
     fn enforce_abi(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
         true
@@ -218,23 +216,12 @@ pub trait Machine<'mir, 'tcx>: Sized {
         right: &ImmTy<'tcx, Self::Provenance>,
     ) -> InterpResult<'tcx, (Scalar<Self::Provenance>, bool, Ty<'tcx>)>;
 
-    /// Called to read the specified `local` from the `frame`.
-    /// Since reading a ZST is not actually accessing memory or locals, this is never invoked
-    /// for ZST reads.
-    #[inline]
-    fn access_local<'a>(
-        frame: &'a Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
-        local: mir::Local,
-    ) -> InterpResult<'tcx, &'a Operand<Self::Provenance>>
-    where
-        'tcx: 'mir,
-    {
-        frame.locals[local].access()
-    }
-
     /// Called to write the specified `local` from the `frame`.
     /// Since writing a ZST is not actually accessing memory or locals, this is never invoked
     /// for ZST reads.
+    ///
+    /// Due to borrow checker trouble, we indicate the `frame` as an index rather than an `&mut
+    /// Frame`.
     #[inline]
     fn access_local_mut<'a>(
         ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
@@ -329,7 +316,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
     /// cache the result. (This relies on `AllocMap::get_or` being able to add the
     /// owned allocation to the map even when the map is shared.)
     ///
-    /// This must only fail if `alloc` contains relocations.
+    /// This must only fail if `alloc` contains provenance.
     fn adjust_allocation<'b>(
         ecx: &InterpCx<'mir, 'tcx, Self>,
         id: AllocId,
@@ -337,13 +324,22 @@ pub trait Machine<'mir, 'tcx>: Sized {
         kind: Option<MemoryKind<Self::MemoryKind>>,
     ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra>>>;
 
+    fn eval_inline_asm(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _template: &'tcx [InlineAsmTemplatePiece],
+        _operands: &[mir::InlineAsmOperand<'tcx>],
+        _options: InlineAsmOptions,
+    ) -> InterpResult<'tcx> {
+        throw_unsup_format!("inline assembly is not supported")
+    }
+
     /// Hook for performing extra checks on a memory read access.
     ///
     /// Takes read-only access to the allocation so we can keep all the memory read
     /// operations take `&self`. Use a `RefCell` in `AllocExtra` if you
     /// need to mutate.
     #[inline(always)]
-    fn memory_read(
+    fn before_memory_read(
         _tcx: TyCtxt<'tcx>,
         _machine: &Self,
         _alloc_extra: &Self::AllocExtra,
@@ -355,7 +351,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
 
     /// Hook for performing extra checks on a memory write access.
     #[inline(always)]
-    fn memory_written(
+    fn before_memory_write(
         _tcx: TyCtxt<'tcx>,
         _machine: &mut Self,
         _alloc_extra: &mut Self::AllocExtra,
@@ -367,7 +363,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
 
     /// Hook for performing extra operations on a memory deallocation.
     #[inline(always)]
-    fn memory_deallocated(
+    fn before_memory_deallocation(
         _tcx: TyCtxt<'tcx>,
         _machine: &mut Self,
         _alloc_extra: &mut Self::AllocExtra,
@@ -437,29 +433,12 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
     type FrameExtra = ();
 
     #[inline(always)]
-    fn enforce_alignment(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
-        // We do not check for alignment to avoid having to carry an `Align`
-        // in `ConstValue::ByRef`.
-        false
-    }
-
-    #[inline(always)]
-    fn force_int_for_alignment_check(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
-        // We do not support `force_int`.
+    fn use_addr_for_alignment_check(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+        // We do not support `use_addr`.
         false
     }
 
     #[inline(always)]
-    fn enforce_validity(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
-        false // for now, we don't enforce validity
-    }
-
-    #[inline(always)]
-    fn enforce_number_init(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
-        true
-    }
-
-    #[inline(always)]
     fn checked_binop_checks_overflow(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
         true
     }
@@ -510,6 +489,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
     ) -> InterpResult<$tcx, Pointer<Option<AllocId>>> {
         // Allow these casts, but make the pointer not dereferenceable.
         // (I.e., they behave like transmutation.)
+        // This is correct because no pointers can ever be exposed in compile-time evaluation.
         Ok(Pointer::from_addr(addr))
     }
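Three `Machine` hooks gain a `before_` prefix (`memory_read` → `before_memory_read`, `memory_written` → `before_memory_write`, `memory_deallocated` → `before_memory_deallocation`), making explicit that they run before the access happens; memory.rs below updates every caller. `access_local` is dropped because reads no longer need a machine hook, `enforce_alignment`/`enforce_validity` lose their macro defaults so each machine must decide (the CTFE machine wires them to `check_alignment` and `-Zextra-const-ub-checks` above), and the new defaulted `eval_inline_asm` rejects `asm!` during interpretation unless a machine overrides it.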
fn get_alloc_raw( &self, id: AllocId, @@ -573,7 +574,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { )?; if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc { let range = alloc_range(offset, size); - M::memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?; + M::before_memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?; Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id })) } else { // Even in this branch we have to be sure that we actually access the allocation, in @@ -589,6 +590,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Ok(&self.get_alloc_raw(id)?.extra) } + /// Return the `mutability` field of the given allocation. + pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> { + Ok(self.get_alloc_raw(id)?.mutability) + } + /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks. /// The caller is responsible for calling the access hooks! /// @@ -634,7 +640,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`. let (alloc, machine) = self.get_alloc_raw_mut(alloc_id)?; let range = alloc_range(offset, size); - M::memory_written(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?; + M::before_memory_write(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?; Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id })) } else { Ok(None) @@ -788,10 +794,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { todo.extend(static_roots); while let Some(id) = todo.pop() { if reachable.insert(id) { - // This is a new allocation, add its relocations to `todo`. + // This is a new allocation, add the allocation it points to to `todo`. if let Some((_, alloc)) = self.memory.alloc_map.get(id) { todo.extend( - alloc.relocations().values().filter_map(|prov| prov.get_alloc_id()), + alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()), ); } } @@ -827,7 +833,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, allocs_to_print: &mut VecDeque<AllocId>, alloc: &Allocation<Prov, Extra>, ) -> std::fmt::Result { - for alloc_id in alloc.relocations().values().filter_map(|prov| prov.get_alloc_id()) { + for alloc_id in alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()) { allocs_to_print.push_back(alloc_id); } write!(fmt, "{}", display_allocation(tcx, alloc)) @@ -894,11 +900,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, /// Reading and writing. impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> { /// `range` is relative to this allocation reference, not the base of the allocation. - pub fn write_scalar( - &mut self, - range: AllocRange, - val: ScalarMaybeUninit<Prov>, - ) -> InterpResult<'tcx> { + pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> { let range = self.range.subrange(range); debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id); Ok(self @@ -908,15 +910,11 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> { } /// `offset` is relative to this allocation reference, not the base of the allocation. 
- pub fn write_ptr_sized( - &mut self, - offset: Size, - val: ScalarMaybeUninit<Prov>, - ) -> InterpResult<'tcx> { + pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> { self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val) } - /// Mark the entire referenced range as uninitalized + /// Mark the entire referenced range as uninitialized pub fn write_uninit(&mut self) -> InterpResult<'tcx> { Ok(self .alloc @@ -931,7 +929,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> { &self, range: AllocRange, read_provenance: bool, - ) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> { + ) -> InterpResult<'tcx, Scalar<Prov>> { let range = self.range.subrange(range); let res = self .alloc @@ -942,12 +940,12 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> { } /// `range` is relative to this allocation reference, not the base of the allocation. - pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> { + pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, Scalar<Prov>> { self.read_scalar(range, /*read_provenance*/ false) } /// `offset` is relative to this allocation reference, not the base of the allocation. - pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> { + pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> { self.read_scalar( alloc_range(offset, self.tcx.data_layout().pointer_size), /*read_provenance*/ true, @@ -955,29 +953,25 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> { } /// `range` is relative to this allocation reference, not the base of the allocation. - pub fn check_bytes( - &self, - range: AllocRange, - allow_uninit: bool, - allow_ptr: bool, - ) -> InterpResult<'tcx> { + pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> { Ok(self .alloc - .check_bytes(&self.tcx, self.range.subrange(range), allow_uninit, allow_ptr) + .get_bytes_strip_provenance(&self.tcx, self.range) .map_err(|e| e.to_interp_error(self.alloc_id))?) } - /// Returns whether the allocation has relocations for the entire range of the `AllocRef`. - pub(crate) fn has_relocations(&self) -> bool { - self.alloc.has_relocations(&self.tcx, self.range) + /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`. + pub(crate) fn has_provenance(&self) -> bool { + self.alloc.range_has_provenance(&self.tcx, self.range) } } impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { - /// Reads the given number of bytes from memory. Returns them as a slice. + /// Reads the given number of bytes from memory, and strips their provenance if possible. + /// Returns them as a slice. /// /// Performs appropriate bounds checks. - pub fn read_bytes_ptr( + pub fn read_bytes_ptr_strip_provenance( &self, ptr: Pointer<Option<M::Provenance>>, size: Size, @@ -990,7 +984,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // (We are staying inside the bounds here so all is good.) Ok(alloc_ref .alloc - .get_bytes(&alloc_ref.tcx, alloc_ref.range) + .get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range) .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?) 
} @@ -1071,7 +1065,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { }; let src_alloc = self.get_alloc_raw(src_alloc_id)?; let src_range = alloc_range(src_offset, size); - M::memory_read(*tcx, &self.machine, &src_alloc.extra, (src_alloc_id, src_prov), src_range)?; + M::before_memory_read( + *tcx, + &self.machine, + &src_alloc.extra, + (src_alloc_id, src_prov), + src_range, + )?; // We need the `dest` ptr for the next operation, so we get it now. // We already did the source checks and called the hooks so we are good to return early. let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else { @@ -1079,24 +1079,27 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { return Ok(()); }; - // This checks relocation edges on the src, which needs to happen before - // `prepare_relocation_copy`. - let src_bytes = src_alloc - .get_bytes_with_uninit_and_ptr(&tcx, src_range) - .map_err(|e| e.to_interp_error(src_alloc_id))? - .as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation - // first copy the relocations to a temporary buffer, because - // `get_bytes_mut` will clear the relocations, which is correct, - // since we don't want to keep any relocations at the target. - let relocations = - src_alloc.prepare_relocation_copy(self, src_range, dest_offset, num_copies); + // Checks provenance edges on the src, which needs to happen before + // `prepare_provenance_copy`. + if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.start, Size::ZERO)) { + throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.start))); + } + if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.end(), Size::ZERO)) { + throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.end()))); + } + let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation + // first copy the provenance to a temporary buffer, because + // `get_bytes_mut` will clear the provenance, which is correct, + // since we don't want to keep any provenance at the target. + let provenance = + src_alloc.prepare_provenance_copy(self, src_range, dest_offset, num_copies); // Prepare a copy of the initialization mask. let compressed = src_alloc.compress_uninit_range(src_range); // Destination alloc preparations and access hooks. let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?; let dest_range = alloc_range(dest_offset, size * num_copies); - M::memory_written( + M::before_memory_write( *tcx, extra, &mut dest_alloc.extra, @@ -1118,7 +1121,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { dest_alloc .write_uninit(&tcx, dest_range) .map_err(|e| e.to_interp_error(dest_alloc_id))?; - // We can forget about the relocations, this is all not initialized anyway. + // We can forget about the provenance, this is all not initialized anyway. 
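[Editor's note] The copy path above replaces the old relocation plumbing: provenance is copied wholesale, and a copy whose endpoints slice through a stored pointer is rejected with `PartialPointerCopy` rather than silently dropping provenance. A sketch of the kind of program that trips this check (my own user-level example, not from the diff; under Miri, which shares this interpreter core, it is reported as an unsupported partial pointer copy):

```rust
use std::mem::{size_of, MaybeUninit};

fn main() {
    let x: usize = 42;
    let p: *const usize = &x;
    let mut dst = MaybeUninit::<*const usize>::uninit();
    unsafe {
        // An untyped copy of only *half* of `p`'s bytes: provenance cannot
        // be preserved for a fraction of a pointer, so the interpreter
        // refuses the copy instead of guessing.
        std::ptr::copy_nonoverlapping(
            (&p as *const *const usize).cast::<u8>(),
            dst.as_mut_ptr().cast::<u8>(),
            size_of::<*const usize>() / 2,
        );
    }
}
```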
return Ok(()); } @@ -1162,8 +1165,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`) num_copies, ); - // copy the relocations to the destination - dest_alloc.mark_relocation_range(relocations); + // copy the provenance to the destination + dest_alloc.mark_provenance_range(provenance); Ok(()) } diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs index 94ba62c16..0e3959b61 100644 --- a/compiler/rustc_const_eval/src/interpret/operand.rs +++ b/compiler/rustc_const_eval/src/interpret/operand.rs @@ -1,11 +1,9 @@ //! Functions concerning immediate values and operands, and reading from operands. //! All high-level functions to read from memory work on operands as sources. -use std::fmt::Write; - use rustc_hir::def::Namespace; use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout}; -use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer}; +use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter}; use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty}; use rustc_middle::{mir, ty}; use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding}; @@ -14,7 +12,7 @@ use rustc_target::abi::{VariantIdx, Variants}; use super::{ alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId, InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Place, PlaceTy, Pointer, - Provenance, Scalar, ScalarMaybeUninit, + Provenance, Scalar, }; /// An `Immediate` represents a single immediate self-contained Rust value. @@ -27,23 +25,14 @@ use super::{ #[derive(Copy, Clone, Debug)] pub enum Immediate<Prov: Provenance = AllocId> { /// A single scalar value (must have *initialized* `Scalar` ABI). - /// FIXME: we also currently often use this for ZST. - /// `ScalarMaybeUninit` should reject ZST, and we should use `Uninit` for them instead. - Scalar(ScalarMaybeUninit<Prov>), + Scalar(Scalar<Prov>), /// A pair of two scalar values (must have `ScalarPair` ABI where both fields are /// `Scalar::Initialized`). - ScalarPair(ScalarMaybeUninit<Prov>, ScalarMaybeUninit<Prov>), + ScalarPair(Scalar<Prov>, Scalar<Prov>), /// A value of fully uninitialized memory. Can have any size and layout.
Uninit, } -impl<Prov: Provenance> From<ScalarMaybeUninit<Prov>> for Immediate<Prov> { - #[inline(always)] - fn from(val: ScalarMaybeUninit<Prov>) -> Self { - Immediate::Scalar(val) - } -} - impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> { #[inline(always)] fn from(val: Scalar<Prov>) -> Self { @@ -51,13 +40,13 @@ impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> { } } -impl<'tcx, Prov: Provenance> Immediate<Prov> { +impl<Prov: Provenance> Immediate<Prov> { pub fn from_pointer(p: Pointer<Prov>, cx: &impl HasDataLayout) -> Self { - Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx)) + Immediate::Scalar(Scalar::from_pointer(p, cx)) } pub fn from_maybe_pointer(p: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self { - Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx)) + Immediate::Scalar(Scalar::from_maybe_pointer(p, cx)) } pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self { @@ -69,41 +58,28 @@ impl<'tcx, Prov: Provenance> Immediate<Prov> { vtable: Pointer<Option<Prov>>, cx: &impl HasDataLayout, ) -> Self { - Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_maybe_pointer(vtable, cx)) + Immediate::ScalarPair(val.into(), Scalar::from_maybe_pointer(vtable, cx)) } #[inline] #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) - pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Prov> { + pub fn to_scalar(self) -> Scalar<Prov> { match self { Immediate::Scalar(val) => val, Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"), - Immediate::Uninit => ScalarMaybeUninit::Uninit, + Immediate::Uninit => bug!("Got uninit where a scalar was expected"), } } #[inline] #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) - pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Prov>> { - self.to_scalar_or_uninit().check_init() - } - - #[inline] - #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) - pub fn to_scalar_or_uninit_pair(self) -> (ScalarMaybeUninit<Prov>, ScalarMaybeUninit<Prov>) { + pub fn to_scalar_pair(self) -> (Scalar<Prov>, Scalar<Prov>) { match self { Immediate::ScalarPair(val1, val2) => (val1, val2), Immediate::Scalar(..) => bug!("Got a scalar where a scalar pair was expected"), - Immediate::Uninit => (ScalarMaybeUninit::Uninit, ScalarMaybeUninit::Uninit), + Immediate::Uninit => bug!("Got uninit where a scalar pair was expected"), } } - - #[inline] - #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) - pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Prov>, Scalar<Prov>)> { - let (val1, val2) = self.to_scalar_or_uninit_pair(); - Ok((val1.check_init()?, val2.check_init()?)) - } } // ScalarPair needs a type to interpret, so we often have an immediate and a type together @@ -119,27 +95,17 @@ impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> { /// Helper function for printing a scalar to a FmtPrinter fn p<'a, 'tcx, Prov: Provenance>( cx: FmtPrinter<'a, 'tcx>, - s: ScalarMaybeUninit<Prov>, + s: Scalar<Prov>, ty: Ty<'tcx>, ) -> Result<FmtPrinter<'a, 'tcx>, std::fmt::Error> { match s { - ScalarMaybeUninit::Scalar(Scalar::Int(int)) => { - cx.pretty_print_const_scalar_int(int, ty, true) - } - ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => { + Scalar::Int(int) => cx.pretty_print_const_scalar_int(int, ty, true), + Scalar::Ptr(ptr, _sz) => { // Just print the ptr value. 
`pretty_print_const_scalar_ptr` would also try to // print what it points to, which would fail since it has no access to the local // memory. cx.pretty_print_const_pointer(ptr, ty, true) } - ScalarMaybeUninit::Uninit => cx.typed_value( - |mut this| { - this.write_str("uninit ")?; - Ok(this) - }, - |this| this.print_type(ty), - " ", - ), } } ty::tls::with(|tcx| { @@ -269,7 +235,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { #[inline] pub fn to_const_int(self) -> ConstInt { assert!(self.layout.ty.is_integral()); - let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int(); + let int = self.to_scalar().assert_int(); ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral()) } } @@ -327,7 +293,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { fn read_immediate_from_mplace_raw( &self, mplace: &MPlaceTy<'tcx, M::Provenance>, - force: bool, ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::Provenance>>> { if mplace.layout.is_unsized() { // Don't touch unsized @@ -345,47 +310,44 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // case where some of the bytes are initialized and others are not. So, we need an extra // check that walks over the type of `mplace` to make sure it is truly correct to treat this // like a `Scalar` (or `ScalarPair`). - let scalar_layout = match mplace.layout.abi { - // `if` does not work nested inside patterns, making this a bit awkward to express. - Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => Some(s), - Abi::Scalar(s) if force => Some(s.primitive()), - _ => None, - }; - if let Some(s) = scalar_layout { - let size = s.size(self); - assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size"); - let scalar = alloc - .read_scalar(alloc_range(Size::ZERO, size), /*read_provenance*/ s.is_ptr())?; - return Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout })); - } - let scalar_pair_layout = match mplace.layout.abi { + Ok(match mplace.layout.abi { + Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => { + let size = s.size(self); + assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size"); + let scalar = alloc.read_scalar( + alloc_range(Size::ZERO, size), + /*read_provenance*/ s.is_ptr(), + )?; + Some(ImmTy { imm: scalar.into(), layout: mplace.layout }) + } Abi::ScalarPair( abi::Scalar::Initialized { value: a, .. }, abi::Scalar::Initialized { value: b, .. }, - ) => Some((a, b)), - Abi::ScalarPair(a, b) if force => Some((a.primitive(), b.primitive())), - _ => None, - }; - if let Some((a, b)) = scalar_pair_layout { - // We checked `ptr_align` above, so all fields will have the alignment they need. - // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`, - // which `ptr.offset(b_offset)` cannot possibly fail to satisfy. - let (a_size, b_size) = (a.size(self), b.size(self)); - let b_offset = a_size.align_to(b.align(self).abi); - assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields - let a_val = alloc.read_scalar( - alloc_range(Size::ZERO, a_size), - /*read_provenance*/ a.is_ptr(), - )?; - let b_val = alloc - .read_scalar(alloc_range(b_offset, b_size), /*read_provenance*/ b.is_ptr())?; - return Ok(Some(ImmTy { - imm: Immediate::ScalarPair(a_val, b_val), - layout: mplace.layout, - })); - } - // Neither a scalar nor scalar pair.
- return Ok(None); + ) => { + // We checked `ptr_align` above, so all fields will have the alignment they need. + // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`, + // which `ptr.offset(b_offset)` cannot possibly fail to satisfy. + let (a_size, b_size) = (a.size(self), b.size(self)); + let b_offset = a_size.align_to(b.align(self).abi); + assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields + let a_val = alloc.read_scalar( + alloc_range(Size::ZERO, a_size), + /*read_provenance*/ a.is_ptr(), + )?; + let b_val = alloc.read_scalar( + alloc_range(b_offset, b_size), + /*read_provenance*/ b.is_ptr(), + )?; + Some(ImmTy { + imm: Immediate::ScalarPair(a_val.into(), b_val.into()), + layout: mplace.layout, + }) + } + _ => { + // Neither a scalar nor scalar pair. + None + } + }) } /// Try returning an immediate for the operand. If the layout does not permit loading this as an @@ -394,20 +356,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// succeed! Whether it succeeds depends on whether the layout can be represented /// in an `Immediate`, not on which data is stored there currently. /// - /// If `force` is `true`, then even scalars with fields that can be uninit will be - /// read. This means the load is lossy and should not be written back! - /// This flag exists only for validity checking. - /// /// This is an internal function that should not usually be used; call `read_immediate` instead. /// ConstProp needs it, though. pub fn read_immediate_raw( &self, src: &OpTy<'tcx, M::Provenance>, - force: bool, ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::Provenance>, MPlaceTy<'tcx, M::Provenance>>> { Ok(match src.try_as_mplace() { Ok(ref mplace) => { - if let Some(val) = self.read_immediate_from_mplace_raw(mplace, force)? { + if let Some(val) = self.read_immediate_from_mplace_raw(mplace)? { Ok(val) } else { Err(*mplace) @@ -418,24 +375,33 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } /// Read an immediate from a place, asserting that that is possible with the given layout. + /// + /// If this succeeds, the `ImmTy` is never `Uninit`. #[inline(always)] pub fn read_immediate( &self, op: &OpTy<'tcx, M::Provenance>, ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> { - if let Ok(imm) = self.read_immediate_raw(op, /*force*/ false)? { - Ok(imm) - } else { - span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty); + if !matches!( + op.layout.abi, + Abi::Scalar(abi::Scalar::Initialized { .. }) + | Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. }) + ) { + span_bug!(self.cur_span(), "primitive read not possible for type: {:?}", op.layout.ty); } + let imm = self.read_immediate_raw(op)?.unwrap(); + if matches!(*imm, Immediate::Uninit) { + throw_ub!(InvalidUninitBytes(None)); + } + Ok(imm) } /// Read a scalar from a place pub fn read_scalar( &self, op: &OpTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, ScalarMaybeUninit<M::Provenance>> { - Ok(self.read_immediate(op)?.to_scalar_or_uninit()) + ) -> InterpResult<'tcx, Scalar<M::Provenance>> { + Ok(self.read_immediate(op)?.to_scalar()) } /// Read a pointer from a place. @@ -449,7 +415,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Turn the wide MPlace into a string (must already be dereferenced!)
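[Editor's note] With `ScalarMaybeUninit` gone, `read_immediate` fails eagerly with `InvalidUninitBytes` and `read_scalar` hands callers an initialized `Scalar` directly. A stand-in sketch of that control-flow change (simplified types of my own, not rustc's):

```rust
#[derive(Clone, Copy, Debug)]
pub enum Scalar { Int(u128) }

#[derive(Clone, Copy, Debug)]
pub enum Immediate { Scalar(Scalar), Uninit }

/// Before: callers received a "maybe uninit" scalar and each had to call
/// `check_init()`. After: the uninit check happens inside the read itself.
pub fn read_scalar(imm: Immediate) -> Result<Scalar, &'static str> {
    match imm {
        Immediate::Uninit => Err("InvalidUninitBytes"),
        Immediate::Scalar(s) => Ok(s),
    }
}

fn main() {
    assert!(read_scalar(Immediate::Scalar(Scalar::Int(7))).is_ok());
    assert!(read_scalar(Immediate::Uninit).is_err());
}
```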
pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> { let len = mplace.len(self)?; - let bytes = self.read_bytes_ptr(mplace.ptr, Size::from_bytes(len))?; + let bytes = self.read_bytes_ptr_strip_provenance(mplace.ptr, Size::from_bytes(len))?; let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?; Ok(str) } @@ -478,7 +444,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } } - /// Read from a local. Will not actually access the local if reading from a ZST. + /// Read from a local. /// Will not access memory, instead an indirect `Operand` is returned. /// /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an @@ -490,12 +456,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { layout: Option<TyAndLayout<'tcx>>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { let layout = self.layout_of_local(frame, local, layout)?; - let op = if layout.is_zst() { - // Bypass `access_local` (helps in ConstProp) - Operand::Immediate(Immediate::Uninit) - } else { - *M::access_local(frame, local)? - }; + let op = *frame.locals[local].access()?; Ok(OpTy { op, layout, align: Some(layout.align.abi) }) } @@ -598,15 +559,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { layout: Option<TyAndLayout<'tcx>>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { match c.kind() { - ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric), + ty::ConstKind::Param(_) | ty::ConstKind::Placeholder(..) => throw_inval!(TooGeneric), ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => { throw_inval!(AlreadyReported(reported)) } ty::ConstKind::Unevaluated(uv) => { + // NOTE: We evaluate to a `ValTree` here as a check to ensure + // we're working with valid constants, even though we never need it. let instance = self.resolve(uv.def, uv.substs)?; - Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into()) + let cid = GlobalId { instance, promoted: None }; + let _valtree = self + .tcx + .eval_to_valtree(self.param_env.and(cid))? + .unwrap_or_else(|| bug!("unable to create ValTree for {:?}", uv)); + + Ok(self.eval_to_allocation(cid)?.into()) } - ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => { + ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => { span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", c) } ty::ConstKind::Value(valtree) => { @@ -622,9 +591,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { val: &mir::ConstantKind<'tcx>, layout: Option<TyAndLayout<'tcx>>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { + // FIXME(const_prop): normalization needed b/c const prop lint in + // `mir_drops_elaborated_and_const_checked`, which happens before + // optimized MIR. Only after optimizing the MIR can we guarantee + // that the `RevealAll` pass has happened and that the body's consts + // are normalized, so any call to resolve before that needs to be + // manually normalized. 
+ let val = self.tcx.normalize_erasing_regions(self.param_env, *val); match val { - mir::ConstantKind::Ty(ct) => self.const_to_op(*ct, layout), - mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, *ty, layout), + mir::ConstantKind::Ty(ct) => self.const_to_op(ct, layout), + mir::ConstantKind::Val(val, ty) => self.const_val_to_op(val, ty, layout), + mir::ConstantKind::Unevaluated(uv, _) => { + let instance = self.resolve(uv.def, uv.substs)?; + Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into()) + } } } @@ -727,7 +707,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Figure out which discriminant and variant this corresponds to. Ok(match *tag_encoding { TagEncoding::Direct => { - let scalar = tag_val.to_scalar()?; + let scalar = tag_val.to_scalar(); // Generate a specific error if `tag_val` is not an integer. // (`tag_bits` itself is only used for error messages below.) let tag_bits = scalar @@ -757,8 +737,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Return the cast value, and the index. (discr_val, index.0) } - TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => { - let tag_val = tag_val.to_scalar()?; + TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => { + let tag_val = tag_val.to_scalar(); // Compute the variant this niche value/"tag" corresponds to. With niche layout, // discriminant (encoded in niche/tag) and variant index are the same. let variants_start = niche_variants.start().as_u32(); @@ -775,7 +755,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { if !ptr_valid { throw_ub!(InvalidTag(dbg_val)) } - dataful_variant + untagged_variant } Ok(tag_bits) => { let tag_bits = tag_bits.assert_bits(tag_layout.size); @@ -785,9 +765,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let niche_start_val = ImmTy::from_uint(niche_start, tag_layout); let variant_index_relative_val = self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?; - let variant_index_relative = variant_index_relative_val - .to_scalar()? - .assert_bits(tag_val.layout.size); + let variant_index_relative = + variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size); // Check if this is in the range that indicates an actual discriminant. if variant_index_relative <= u128::from(variants_end - variants_start) { let variant_index_relative = u32::try_from(variant_index_relative) @@ -806,7 +785,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { assert!(usize::try_from(variant_index).unwrap() < variants_len); VariantIdx::from_u32(variant_index) } else { - dataful_variant + untagged_variant } } }; @@ -820,12 +799,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } // Some nodes are used a lot. Make sure they don't unintentionally get bigger. -#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] +#[cfg(all(target_arch = "x86_64", target_pointer_width = "64", not(bootstrap)))] mod size_asserts { use super::*; + use rustc_data_structures::static_assert_size; // These are in alphabetical order, which is easy to maintain. 
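[Editor's note] The niche-decoding arithmetic above (now phrased in terms of `untagged_variant`) is easier to see in isolation. A hedged sketch in plain integer arithmetic, ignoring the truncation to the tag's size and pointer-valued tags:

```rust
/// Decode a niche-encoded discriminant: the tag stores
/// `niche_start + (variant_index - variants_start)`, so decoding subtracts
/// `niche_start` and checks the result against the niche range. Anything
/// outside that range decodes to the variant without a niche.
fn decode_niche(
    tag: u128,
    niche_start: u128,
    variants_start: u32,
    variants_end: u32,
    untagged_variant: u32,
) -> u32 {
    let relative = tag.wrapping_sub(niche_start);
    if relative <= u128::from(variants_end - variants_start) {
        variants_start + u32::try_from(relative).unwrap()
    } else {
        untagged_variant
    }
}

fn main() {
    // `Option<&T>`-style layout: `None` (variant 0) lives in the null niche,
    // `Some` is the untagged variant (1).
    assert_eq!(decode_niche(0, 0, 0, 0, 1), 0); // tag 0 => None
    assert_eq!(decode_niche(0x1000, 0, 0, 0, 1), 1); // anything else => Some
}
```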
- rustc_data_structures::static_assert_size!(Immediate, 56); - rustc_data_structures::static_assert_size!(ImmTy<'_>, 72); - rustc_data_structures::static_assert_size!(Operand, 64); - rustc_data_structures::static_assert_size!(OpTy<'_>, 88); + static_assert_size!(Immediate, 48); + static_assert_size!(ImmTy<'_>, 64); + static_assert_size!(Operand, 56); + static_assert_size!(OpTy<'_>, 80); } diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs index f9912d706..1f1d06651 100644 --- a/compiler/rustc_const_eval/src/interpret/operator.rs +++ b/compiler/rustc_const_eval/src/interpret/operator.rs @@ -329,21 +329,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { match left.layout.ty.kind() { ty::Char => { assert_eq!(left.layout.ty, right.layout.ty); - let left = left.to_scalar()?; - let right = right.to_scalar()?; + let left = left.to_scalar(); + let right = right.to_scalar(); Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?)) } ty::Bool => { assert_eq!(left.layout.ty, right.layout.ty); - let left = left.to_scalar()?; - let right = right.to_scalar()?; + let left = left.to_scalar(); + let right = right.to_scalar(); Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?)) } ty::Float(fty) => { assert_eq!(left.layout.ty, right.layout.ty); let ty = left.layout.ty; - let left = left.to_scalar()?; - let right = right.to_scalar()?; + let left = left.to_scalar(); + let right = right.to_scalar(); Ok(match fty { FloatTy::F32 => { self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?) @@ -363,8 +363,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { right.layout.ty ); - let l = left.to_scalar()?.to_bits(left.layout.size)?; - let r = right.to_scalar()?.to_bits(right.layout.size)?; + let l = left.to_scalar().to_bits(left.layout.size)?; + let r = right.to_scalar().to_bits(right.layout.size)?; self.binary_int_op(bin_op, l, left.layout, r, right.layout) } _ if left.layout.ty.is_any_ptr() => { @@ -410,7 +410,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { use rustc_middle::mir::UnOp::*; let layout = val.layout; - let val = val.to_scalar()?; + let val = val.to_scalar(); trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty); match layout.ty.kind() { diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs index f4571a1ca..b32889290 100644 --- a/compiler/rustc_const_eval/src/interpret/place.rs +++ b/compiler/rustc_const_eval/src/interpret/place.rs @@ -2,8 +2,6 @@ //! into a place. //! All high-level functions to write to memory work on places as destinations. -use std::hash::Hash; - use rustc_ast::Mutability; use rustc_middle::mir; use rustc_middle::ty; @@ -13,7 +11,7 @@ use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding, Vari use super::{ alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg, ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand, - Pointer, Provenance, Scalar, ScalarMaybeUninit, + Pointer, Provenance, Scalar, }; #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] @@ -254,8 +252,6 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> { // These are defined here because they produce a place. impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> { #[inline(always)] - /// Note: do not call `as_ref` on the resulting place. 
This function should only be used to - /// read from the resulting mplace, not to get its address back. pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> { match **self { Operand::Indirect(mplace) => { @@ -267,8 +263,6 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> { #[inline(always)] #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) - /// Note: do not call `as_ref` on the resulting place. This function should only be used to - /// read from the resulting mplace, not to get its address back. pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> { self.try_as_mplace().unwrap() } @@ -294,7 +288,7 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> { // FIXME: Working around https://github.com/rust-lang/rust/issues/54385 impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M> where - Prov: Provenance + Eq + Hash + 'static, + Prov: Provenance + 'static, M: Machine<'mir, 'tcx, Provenance = Prov>, { /// Take a value, which represents a (thin or wide) reference, and make it a place. @@ -312,7 +306,7 @@ where let layout = self.layout_of(pointee_type)?; let (ptr, meta) = match **val { Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None), - Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta.check_init()?)), + Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta)), Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)), }; @@ -467,7 +461,7 @@ where #[inline(always)] pub fn write_scalar( &mut self, - val: impl Into<ScalarMaybeUninit<M::Provenance>>, + val: impl Into<Scalar<M::Provenance>>, dest: &PlaceTy<'tcx, M::Provenance>, ) -> InterpResult<'tcx> { self.write_immediate(Immediate::Scalar(val.into()), dest) @@ -644,9 +638,9 @@ where // Let us see if the layout is simple so we take a shortcut, // avoid force_allocation. - let src = match self.read_immediate_raw(src, /*force*/ false)? { + let src = match self.read_immediate_raw(src)? { Ok(src_val) => { - assert!(!src.layout.is_unsized(), "cannot have unsized immediates"); + assert!(!src.layout.is_unsized(), "cannot copy unsized immediates"); assert!( !dest.layout.is_unsized(), "the src is sized, so the dest must also be sized" @@ -823,7 +817,7 @@ where } abi::Variants::Multiple { tag_encoding: - TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start }, + TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start }, tag: tag_layout, tag_field, .. @@ -831,7 +825,7 @@ where // No need to validate the discriminant here because the // `TyAndLayout::for_variant()` call earlier already checks the variant is valid. - if variant_index != dataful_variant { + if variant_index != untagged_variant { let variants_start = niche_variants.start().as_u32(); let variant_index_relative = variant_index .as_u32() @@ -891,10 +885,13 @@ where #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] mod size_asserts { use super::*; + use rustc_data_structures::static_assert_size; // These are in alphabetical order, which is easy to maintain.
- rustc_data_structures::static_assert_size!(MemPlaceMeta, 24); - rustc_data_structures::static_assert_size!(MemPlace, 40); - rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 64); - rustc_data_structures::static_assert_size!(Place, 48); - rustc_data_structures::static_assert_size!(PlaceTy<'_>, 72); + static_assert_size!(MemPlaceMeta, 24); + static_assert_size!(MemPlace, 40); + static_assert_size!(MPlaceTy<'_>, 64); + #[cfg(not(bootstrap))] + static_assert_size!(Place, 40); + #[cfg(not(bootstrap))] + static_assert_size!(PlaceTy<'_>, 64); } diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs index 742339f2b..77da8f104 100644 --- a/compiler/rustc_const_eval/src/interpret/projection.rs +++ b/compiler/rustc_const_eval/src/interpret/projection.rs @@ -1,14 +1,12 @@ //! This file implements "place projections"; basically a symmetric API for 3 types: MPlaceTy, OpTy, PlaceTy. //! -//! OpTy and PlaceTy genrally work by "let's see if we are actually an MPlaceTy, and do something custom if not". +//! OpTy and PlaceTy generally work by "let's see if we are actually an MPlaceTy, and do something custom if not". //! For PlaceTy, the custom thing is basically always to call `force_allocation` and then use the MPlaceTy logic anyway. //! For OpTy, the custom thing on field projections has to be pretty clever (since `Operand::Immediate` can have fields), //! but for array/slice operations it only has to worry about `Operand::Uninit`. That makes the value part trivial, //! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually //! implement the logic on OpTy, and MPlaceTy calls that. -use std::hash::Hash; - use rustc_middle::mir; use rustc_middle::ty; use rustc_middle::ty::layout::LayoutOf; @@ -22,7 +20,7 @@ use super::{ // FIXME: Working around https://github.com/rust-lang/rust/issues/54385 impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M> where - Prov: Provenance + Eq + Hash + 'static, + Prov: Provenance + 'static, M: Machine<'mir, 'tcx, Provenance = Prov>, { //# Field access @@ -100,6 +98,8 @@ where // This makes several assumptions about what layouts we will encounter; we match what // codegen does as well as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
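[Editor's note] The match that follows implements field projection on immediates; a stand-in sketch of its shape (simplified types of my own, not rustc's): a fully-uninit base projects to an uninit field, ZST fields carry no data, and a scalar pair projects to one of its halves.

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Imm { Uninit, Scalar(u64), Pair(u64, u64) }

fn project_field(base: Imm, field: usize, field_is_zst: bool) -> Imm {
    match base {
        // If the entire value is uninit, so is every field (ConstProp case).
        Imm::Uninit => Imm::Uninit,
        // A ZST field contains no information.
        _ if field_is_zst => Imm::Uninit,
        // A pair projects to one of its halves.
        Imm::Pair(a, b) => Imm::Scalar(if field == 0 { a } else { b }),
        // Otherwise the single field covers the entire value.
        Imm::Scalar(_) => base,
    }
}

fn main() {
    assert_eq!(project_field(Imm::Pair(1, 2), 1, false), Imm::Scalar(2));
    assert_eq!(project_field(Imm::Uninit, 0, false), Imm::Uninit);
}
```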
let field_val: Immediate<_> = match (*base, base.layout.abi) { + // if the entire value is uninit, then so is the field (can happen in ConstProp) + (Immediate::Uninit, _) => Immediate::Uninit, // the field contains no information, can be left uninit _ if field_layout.is_zst() => Immediate::Uninit, // the field covers the entire type @@ -124,6 +124,7 @@ where b_val }) } + // everything else is a bug _ => span_bug!( self.cur_span(), "invalid field access on immediate {}, layout {:#?}", diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs index fea158a9f..c6e04cbfb 100644 --- a/compiler/rustc_const_eval/src/interpret/step.rs +++ b/compiler/rustc_const_eval/src/interpret/step.rs @@ -53,7 +53,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { self.pop_stack_frame(/* unwinding */ true)?; return Ok(true); }; - let basic_block = &self.body().basic_blocks()[loc.block]; + let basic_block = &self.body().basic_blocks[loc.block]; if let Some(stmt) = basic_block.statements.get(loc.statement_index) { let old_frames = self.frame_idx(); @@ -114,13 +114,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { M::retag(self, *kind, &dest)?; } - // Call CopyNonOverlapping - CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping { src, dst, count }) => { - let src = self.eval_operand(src, None)?; - let dst = self.eval_operand(dst, None)?; - let count = self.eval_operand(count, None)?; - self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)?; - } + Intrinsic(box ref intrinsic) => self.emulate_nondiverging_intrinsic(intrinsic)?, // Statements we do not track. AscribeUserType(..) => {} @@ -251,8 +245,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Len(place) => { let src = self.eval_place(place)?; - let mplace = self.force_allocation(&src)?; - let len = mplace.len(self)?; + let op = self.place_to_op(&src)?; + let len = op.len(self)?; self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?; } diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs index d563e35f9..50a82aa0e 100644 --- a/compiler/rustc_const_eval/src/interpret/terminator.rs +++ b/compiler/rustc_const_eval/src/interpret/terminator.rs @@ -1,5 +1,6 @@ use std::borrow::Cow; +use rustc_ast::ast::InlineAsmOptions; use rustc_middle::ty::layout::{FnAbiOf, LayoutOf}; use rustc_middle::ty::Instance; use rustc_middle::{ @@ -129,8 +130,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } Assert { ref cond, expected, ref msg, target, cleanup } => { - let cond_val = - self.read_immediate(&self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?; + let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?; if expected == cond_val { self.go_to_block(target); } else { @@ -167,8 +167,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { terminator.kind ), - // Inline assembly can't be interpreted. - InlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"), + InlineAsm { template, ref operands, options, destination, .. 
} => { + M::eval_inline_asm(self, template, operands, options)?; + if options.contains(InlineAsmOptions::NORETURN) { + throw_ub_format!("returned from noreturn inline assembly"); + } + self.go_to_block( + destination + .expect("InlineAsm terminators without noreturn must have a destination"), + ) + } } Ok(()) @@ -215,12 +223,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { _ => false, } }; - // Padding must be fully equal. - let pad_compat = || caller_abi.pad == callee_abi.pad; // When comparing the PassMode, we have to be smart about comparing the attributes. - let arg_attr_compat = |a1: ArgAttributes, a2: ArgAttributes| { + let arg_attr_compat = |a1: &ArgAttributes, a2: &ArgAttributes| { // There's only one regular attribute that matters for the call ABI: InReg. - // Everything else is things like noalias, dereferencable, nonnull, ... + // Everything else is things like noalias, dereferenceable, nonnull, ... // (This also applies to pointee_size, pointee_align.) if a1.regular.contains(ArgAttribute::InReg) != a2.regular.contains(ArgAttribute::InReg) { @@ -233,13 +239,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } return true; }; - let mode_compat = || match (caller_abi.mode, callee_abi.mode) { + let mode_compat = || match (&caller_abi.mode, &callee_abi.mode) { (PassMode::Ignore, PassMode::Ignore) => true, (PassMode::Direct(a1), PassMode::Direct(a2)) => arg_attr_compat(a1, a2), (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => { arg_attr_compat(a1, a2) && arg_attr_compat(b1, b2) } - (PassMode::Cast(c1), PassMode::Cast(c2)) => c1 == c2, + (PassMode::Cast(c1, pad1), PassMode::Cast(c2, pad2)) => c1 == c2 && pad1 == pad2, ( PassMode::Indirect { attrs: a1, extra_attrs: None, on_stack: s1 }, PassMode::Indirect { attrs: a2, extra_attrs: None, on_stack: s2 }, @@ -251,7 +257,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { _ => false, }; - if layout_compat() && pad_compat() && mode_compat() { + if layout_compat() && mode_compat() { return true; } trace!( @@ -534,7 +540,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let mut non_zst_field = None; for i in 0..receiver.layout.fields.count() { let field = self.operand_field(&receiver, i)?; - if !field.layout.is_zst() { + let zst = + field.layout.is_zst() && field.layout.align.abi.bytes() == 1; + if !zst { assert!( non_zst_field.is_none(), "multiple non-ZST fields in dyn receiver type {}", @@ -557,7 +565,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { .tcx .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env); let ty::Dynamic(data, ..) = receiver_tail.kind() else { - span_bug!(self.cur_span(), "dyanmic call on non-`dyn` type {}", receiver_tail) + span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail) }; // Get the required information from the vtable. diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs index b3a511d5a..cab23b724 100644 --- a/compiler/rustc_const_eval/src/interpret/traits.rs +++ b/compiler/rustc_const_eval/src/interpret/traits.rs @@ -32,7 +32,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Ok(vtable_ptr.into()) } - /// Returns a high-level representation of the entires of the given vtable. + /// Returns a high-level representation of the entries of the given vtable. 
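[Editor's note] One subtle fix above: when unwrapping a `dyn` receiver, a field is now only skippable if it is a "1-ZST", i.e. zero-sized *and* align 1, since a higher alignment still affects the ABI. A small runnable illustration (my example types, not from the diff):

```rust
use std::mem::{align_of, size_of};

/// A "1-ZST": zero-sized and alignment 1, hence ABI-transparent.
fn is_one_zst<T>() -> bool {
    size_of::<T>() == 0 && align_of::<T>() == 1
}

fn main() {
    assert!(is_one_zst::<()>());        // size 0, align 1: skippable
    assert!(!is_one_zst::<[u64; 0]>()); // size 0 but align 8: not skippable
    assert!(!is_one_zst::<u8>());       // not zero-sized at all
}
```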
pub fn get_vtable_entries( &self, vtable: Pointer<Option<M::Provenance>>, diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index 0e50d1ed4..14aaee6ac 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -5,9 +5,10 @@ //! to be const-safe. use std::convert::TryFrom; -use std::fmt::Write; +use std::fmt::{Display, Write}; use std::num::NonZeroUsize; +use rustc_ast::Mutability; use rustc_data_structures::fx::FxHashSet; use rustc_hir as hir; use rustc_middle::mir::interpret::InterpError; @@ -19,9 +20,11 @@ use rustc_target::abi::{Abi, Scalar as ScalarAbi, Size, VariantIdx, Variants, Wr use std::hash::Hash; +// for the validation errors +use super::UndefinedBehaviorInfo::*; use super::{ - alloc_range, CheckInAllocMsg, GlobalAlloc, Immediate, InterpCx, InterpResult, MPlaceTy, - Machine, MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor, + CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, + MemPlaceMeta, OpTy, Scalar, ValueVisitor, }; macro_rules! throw_validation_failure { @@ -59,6 +62,7 @@ macro_rules! throw_validation_failure { /// }); /// ``` /// +/// The patterns must be of type `UndefinedBehaviorInfo`. /// An additional expected parameter can also be added to the failure message: /// /// ``` @@ -86,7 +90,7 @@ macro_rules! try_validation { // allocation here as this can only slow down builds that fail anyway. Err(e) => match e.kind() { $( - $($p)|+ => + InterpError::UndefinedBehavior($($p)|+) => throw_validation_failure!( $where, { $( $what_fmt ),+ } $( expected { $( $expected_fmt ),+ } )? @@ -304,6 +308,26 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' Ok(r) } + fn read_immediate( + &self, + op: &OpTy<'tcx, M::Provenance>, + expected: impl Display, + ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> { + Ok(try_validation!( + self.ecx.read_immediate(op), + self.path, + InvalidUninitBytes(None) => { "uninitialized memory" } expected { "{expected}" } + )) + } + + fn read_scalar( + &self, + op: &OpTy<'tcx, M::Provenance>, + expected: impl Display, + ) -> InterpResult<'tcx, Scalar<M::Provenance>> { + Ok(self.read_immediate(op, expected)?.to_scalar()) + } + fn check_wide_ptr_meta( &mut self, meta: MemPlaceMeta<M::Provenance>, @@ -317,8 +341,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' let (_ty, _trait) = try_validation!( self.ecx.get_ptr_vtable(vtable), self.path, - err_ub!(DanglingIntPointer(..)) | - err_ub!(InvalidVTablePointer(..)) => + DanglingIntPointer(..) | + InvalidVTablePointer(..) => { "{vtable}" } expected { "a vtable pointer" }, ); // FIXME: check if the type/trait match what ty::Dynamic says? @@ -344,14 +368,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' value: &OpTy<'tcx, M::Provenance>, kind: &str, ) -> InterpResult<'tcx> { - let value = self.ecx.read_immediate(value)?; + let place = + self.ecx.ref_to_mplace(&self.read_immediate(value, format_args!("a {kind}"))?)?; // Handle wide pointers. 
// Check metadata early, for better diagnostics - let place = try_validation!( - self.ecx.ref_to_mplace(&value), - self.path, - err_ub!(InvalidUninitBytes(None)) => { "uninitialized {}", kind }, - ); if place.layout.is_unsized() { self.check_wide_ptr_meta(place.meta, place.layout)?; } @@ -359,7 +379,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' let size_and_align = try_validation!( self.ecx.size_and_align_of_mplace(&place), self.path, - err_ub!(InvalidMeta(msg)) => { "invalid {} metadata: {}", kind, msg }, + InvalidMeta(msg) => { "invalid {} metadata: {}", kind, msg }, ); let (size, align) = size_and_align // for the purpose of validity, consider foreign types to have @@ -375,21 +395,21 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message ), self.path, - err_ub!(AlignmentCheckFailed { required, has }) => + AlignmentCheckFailed { required, has } => { "an unaligned {kind} (required {} byte alignment but found {})", required.bytes(), has.bytes() }, - err_ub!(DanglingIntPointer(0, _)) => + DanglingIntPointer(0, _) => { "a null {kind}" }, - err_ub!(DanglingIntPointer(i, _)) => + DanglingIntPointer(i, _) => { "a dangling {kind} (address {i:#x} is unallocated)" }, - err_ub!(PointerOutOfBounds { .. }) => + PointerOutOfBounds { .. } => { "a dangling {kind} (going beyond the bounds of its allocation)" }, // This cannot happen during const-eval (because interning already detects // dangling pointers), but it can happen in Miri. - err_ub!(PointerUseAfterFree(..)) => + PointerUseAfterFree(..) => { "a dangling {kind} (use-after-free)" }, ); // Do not allow pointers to uninhabited types. @@ -403,34 +423,51 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' // Proceed recursively even for ZST, no reason to skip them! // `!` is a ZST and we want to validate it. if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr) { - // Special handling for pointers to statics (irrespective of their type). + // Let's see what kind of memory this points to. let alloc_kind = self.ecx.tcx.try_get_global_alloc(alloc_id); - if let Some(GlobalAlloc::Static(did)) = alloc_kind { - assert!(!self.ecx.tcx.is_thread_local_static(did)); - assert!(self.ecx.tcx.is_static(did)); - if matches!( - self.ctfe_mode, - Some(CtfeValidationMode::Const { allow_static_ptrs: false, .. }) - ) { - // See const_eval::machine::MemoryExtra::can_access_statics for why - // this check is so important. - // This check is reachable when the const just referenced the static, - // but never read it (so we never entered `before_access_global`). - throw_validation_failure!(self.path, - { "a {} pointing to a static variable", kind } - ); + match alloc_kind { + Some(GlobalAlloc::Static(did)) => { + // Special handling for pointers to statics (irrespective of their type). + assert!(!self.ecx.tcx.is_thread_local_static(did)); + assert!(self.ecx.tcx.is_static(did)); + if matches!( + self.ctfe_mode, + Some(CtfeValidationMode::Const { allow_static_ptrs: false, .. }) + ) { + // See const_eval::machine::MemoryExtra::can_access_statics for why + // this check is so important. + // This check is reachable when the const just referenced the static, + // but never read it (so we never entered `before_access_global`). 
+ throw_validation_failure!(self.path, + { "a {} pointing to a static variable in a constant", kind } + ); + } + // We skip recursively checking other statics. These statics must be sound by + // themselves, and the only way to get broken statics here is by using + // unsafe code. + // The reason we don't check other statics is twofold. For one, in all + // sound cases, the static was already validated on its own, and second, we + // trigger cycle errors if we try to compute the value of the other static + // and that static refers back to us. + // We might miss const-invalid data, + // but things are still sound otherwise (in particular re: consts + // referring to statics). + return Ok(()); } - // We skip checking other statics. These statics must be sound by - // themselves, and the only way to get broken statics here is by using - // unsafe code. - // The reason we don't check other statics is twofold. For one, in all - // sound cases, the static was already validated on its own, and second, we - // trigger cycle errors if we try to compute the value of the other static - // and that static refers back to us. - // We might miss const-invalid data, - // but things are still sound otherwise (in particular re: consts - // referring to statics). - return Ok(()); + Some(GlobalAlloc::Memory(alloc)) => { + if alloc.inner().mutability == Mutability::Mut + && matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. })) + { + // This should be unreachable, but if someone manages to copy a pointer + // out of a `static`, then that pointer might point to mutable memory, + // and we would catch that here. + throw_validation_failure!(self.path, + { "a {} pointing to mutable memory in a constant", kind } + ); + } + } + // Nothing to check for these. + None | Some(GlobalAlloc::Function(..) | GlobalAlloc::VTable(..)) => {} } } let path = &self.path; @@ -446,20 +483,6 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' Ok(()) } - fn read_scalar( - &self, - op: &OpTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, ScalarMaybeUninit<M::Provenance>> { - self.ecx.read_scalar(op) - } - - fn read_immediate_forced( - &self, - op: &OpTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, Immediate<M::Provenance>> { - Ok(*self.ecx.read_immediate_raw(op, /*force*/ true)?.unwrap()) - } - /// Check if this is a value of primitive type, and if yes check the validity of the value /// at that type. Return `true` if the type is indeed primitive. fn try_visit_primitive( @@ -470,41 +493,39 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' let ty = value.layout.ty; match ty.kind() { ty::Bool => { - let value = self.read_scalar(value)?; + let value = self.read_scalar(value, "a boolean")?; try_validation!( value.to_bool(), self.path, - err_ub!(InvalidBool(..)) | err_ub!(InvalidUninitBytes(None)) => + InvalidBool(..) => { "{:x}", value } expected { "a boolean" }, ); Ok(true) } ty::Char => { - let value = self.read_scalar(value)?; + let value = self.read_scalar(value, "a unicode scalar value")?; try_validation!( value.to_char(), self.path, - err_ub!(InvalidChar(..)) | err_ub!(InvalidUninitBytes(None)) => + InvalidChar(..) => { "{:x}", value } expected { "a valid unicode scalar value (in `0..=0x10FFFF` but not in `0xD800..=0xDFFF`)" }, ); Ok(true) } ty::Float(_) | ty::Int(_) | ty::Uint(_) => { - let value = self.read_scalar(value)?; // NOTE: Keep this in sync with the array optimization for int/float // types below!
- if M::enforce_number_init(self.ecx) { - try_validation!( - value.check_init(), - self.path, - err_ub!(InvalidUninitBytes(..)) => - { "{:x}", value } expected { "initialized bytes" } - ); - } + let value = self.read_scalar( + value, + if matches!(ty.kind(), ty::Float(..)) { + "a floating point number" + } else { + "an integer" + }, + )?; // As a special exception we *do* match on a `Scalar` here, since we truly want // to know its underlying representation (and *not* cast it to an integer). - let is_ptr = value.check_init().map_or(false, |v| matches!(v, Scalar::Ptr(..))); - if is_ptr { + if matches!(value, Scalar::Ptr(..)) { throw_validation_failure!(self.path, { "{:x}", value } expected { "plain (non-pointer) bytes" } ) @@ -515,11 +536,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' // We are conservative with uninit for integers, but try to // actually enforce the strict rules for raw pointers (mostly because // that lets us re-use `ref_to_mplace`). - let place = try_validation!( - self.ecx.read_immediate(value).and_then(|ref i| self.ecx.ref_to_mplace(i)), - self.path, - err_ub!(InvalidUninitBytes(None)) => { "uninitialized raw pointer" }, - ); + let place = + self.ecx.ref_to_mplace(&self.read_immediate(value, "a raw pointer")?)?; if place.layout.is_unsized() { self.check_wide_ptr_meta(place.meta, place.layout)?; } @@ -527,7 +545,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' } ty::Ref(_, ty, mutbl) => { if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. })) - && *mutbl == hir::Mutability::Mut + && *mutbl == Mutability::Mut { // A mutable reference inside a const? That does not seem right (except if it is // a ZST). @@ -540,11 +558,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' Ok(true) } ty::FnPtr(_sig) => { - let value = try_validation!( - self.ecx.read_scalar(value).and_then(|v| v.check_init()), - self.path, - err_ub!(InvalidUninitBytes(None)) => { "uninitialized bytes" } expected { "a proper pointer or integer value" }, - ); + let value = self.read_scalar(value, "a function pointer")?; // If we check references recursively, also check that this points to a function. if let Some(_) = self.ref_tracking { @@ -552,8 +566,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' let _fn = try_validation!( self.ecx.get_ptr_fn(ptr), self.path, - err_ub!(DanglingIntPointer(..)) | - err_ub!(InvalidFunctionPointer(..)) => + DanglingIntPointer(..) | + InvalidFunctionPointer(..) => { "{ptr}" } expected { "a function pointer" }, ); // FIXME: Check if the signature matches @@ -595,40 +609,15 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' fn visit_scalar( &mut self, - scalar: ScalarMaybeUninit<M::Provenance>, + scalar: Scalar<M::Provenance>, scalar_layout: ScalarAbi, ) -> InterpResult<'tcx> { - // We check `is_full_range` in a slightly complicated way because *if* we are checking - // number validity, then we want to ensure that `Scalar::Initialized` is indeed initialized, - // i.e. that we go over the `check_init` below. let size = scalar_layout.size(self.ecx); - let is_full_range = match scalar_layout { - ScalarAbi::Initialized { .. } => { - if M::enforce_number_init(self.ecx) { - false // not "full" since uninit is not accepted - } else { - scalar_layout.is_always_valid(self.ecx) - } - } - ScalarAbi::Union { .. } => true, - }; - if is_full_range { - // Nothing to check. 
Crucially we don't even `read_scalar` until here, since that would - fail for `Union` scalars! - return Ok(()); - } - // We have something to check: it must at least be initialized. let valid_range = scalar_layout.valid_range(self.ecx); let WrappingRange { start, end } = valid_range; let max_value = size.unsigned_int_max(); assert!(end <= max_value); - let value = try_validation!( - scalar.check_init(), - self.path, - err_ub!(InvalidUninitBytes(None)) => { "{:x}", scalar } - expected { "something {}", wrapping_range_format(valid_range, max_value) }, - ); - let bits = match value.try_to_int() { + let bits = match scalar.try_to_int() { Ok(int) => int.assert_bits(size), Err(_) => { // So this is a pointer then, and casting to an int failed. @@ -636,7 +625,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' // We support 2 kinds of ranges here: full range, and excluding zero. if start == 1 && end == max_value { // Only null is the niche. So make sure the ptr is NOT null. - if self.ecx.scalar_may_be_null(value)? { + if self.ecx.scalar_may_be_null(scalar)? { throw_validation_failure!(self.path, { "a potentially null pointer" } expected { @@ -693,9 +682,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> Ok(try_validation!( this.ecx.read_discriminant(op), this.path, - err_ub!(InvalidTag(val)) => + InvalidTag(val) => { "{:x}", val } expected { "a valid enum tag" }, - err_ub!(InvalidUninitBytes(None)) => + InvalidUninitBytes(None) => { "uninitialized bytes" } expected { "a valid enum tag" }, ) .1) @@ -788,10 +777,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> ); } Abi::Scalar(scalar_layout) => { - // We use a 'forced' read because we always need a `Immediate` here - // and treating "partially uninit" as "fully uninit" is fine for us. - let scalar = self.read_immediate_forced(op)?.to_scalar_or_uninit(); - self.visit_scalar(scalar, scalar_layout)?; + if !scalar_layout.is_uninit_valid() { + // There is something to check here. + let scalar = self.read_scalar(op, "initialized scalar value")?; + self.visit_scalar(scalar, scalar_layout)?; + } } Abi::ScalarPair(a_layout, b_layout) => { // There is no `rustc_layout_scalar_valid_range_start` for pairs, so // we would have to handle that, e.g., by using two `visit_scalar`s // instead, but that can miss bugs in layout computation. Layout computation // is subtle due to enums having ScalarPair layout, where one field // is the discriminant. - if cfg!(debug_assertions) { - // We use a 'forced' read because we always need a `Immediate` here - // and treating "partially uninit" as "fully uninit" is fine for us. - let (a, b) = self.read_immediate_forced(op)?.to_scalar_or_uninit_pair(); + if cfg!(debug_assertions) + && !a_layout.is_uninit_valid() + && !b_layout.is_uninit_valid() + { + // We can only proceed if *both* scalars need to be initialized. + // FIXME: find a way to also check ScalarPair when one side can be uninit but + // the other must be init.
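[Editor's note] `visit_scalar` above checks a value against the layout's `WrappingRange`, which may wrap around the end of the integer type. A sketch of the membership test as I read it (rustc's real type lives in `rustc_target`):

```rust
/// Membership in a possibly-wrapping valid range `start..=end` (mod 2^N).
/// If `start <= end` this is an ordinary range; otherwise validity "wraps",
/// accepting values at either end of the type's domain.
fn in_wrapping_range(bits: u128, start: u128, end: u128) -> bool {
    if start <= end {
        start <= bits && bits <= end
    } else {
        bits >= start || bits <= end
    }
}

fn main() {
    // NonZeroU8-style range 1..=255: rejects only 0.
    assert!(in_wrapping_range(1, 1, 255));
    assert!(!in_wrapping_range(0, 1, 255));
    // A wrapped range 254..=1 accepts 255 and 0 but rejects 2.
    assert!(in_wrapping_range(255, 254, 1));
    assert!(!in_wrapping_range(2, 254, 1));
}
```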
+ let (a, b) = + self.read_immediate(op, "initialized scalar value")?.to_scalar_pair(); self.visit_scalar(a, a_layout)?; self.visit_scalar(b, b_layout)?; } @@ -830,9 +825,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate let len = mplace.len(self.ecx)?; try_validation!( - self.ecx.read_bytes_ptr(mplace.ptr, Size::from_bytes(len)), + self.ecx.read_bytes_ptr_strip_provenance(mplace.ptr, Size::from_bytes(len)), self.path, - err_ub!(InvalidUninitBytes(..)) => { "uninitialized data in `str`" }, + InvalidUninitBytes(..) => { "uninitialized data in `str`" }, ); } ty::Array(tys, ..) | ty::Slice(tys) @@ -880,13 +875,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> // We also accept uninit, for consistency with the slow path. let alloc = self.ecx.get_ptr_alloc(mplace.ptr, size, mplace.align)?.expect("we already excluded size 0"); - match alloc.check_bytes( - alloc_range(Size::ZERO, size), - /*allow_uninit*/ !M::enforce_number_init(self.ecx), - /*allow_ptr*/ false, - ) { + match alloc.get_bytes_strip_provenance() { // In the happy case, we needn't check anything else. - Ok(()) => {} + Ok(_) => {} // Some error happened, try to provide a more detailed description. Err(err) => { // For some errors we might be able to provide extra information. @@ -981,6 +972,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// It will error if the bits at the destination do not match the ones described by the layout. #[inline(always)] pub fn validate_operand(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> { + // Note that we *could* actually be in CTFE here with `-Zextra-const-ub-checks`, but it's + // still correct to not use `ctfe_mode`: that mode is for validation of the final constant + // value, it rules out things like `UnsafeCell` in awkward places. It also can make checking + // recurse through references which, for now, we don't want here, either. self.validate_operand_internal(op, vec![], None, None) } } diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs index 72ac6af68..9f47d302a 100644 --- a/compiler/rustc_const_eval/src/lib.rs +++ b/compiler/rustc_const_eval/src/lib.rs @@ -10,7 +10,7 @@ Rust MIR: a lowered representation of Rust.
#![feature(decl_macro)] #![feature(exact_size_is_empty)] #![feature(let_chains)] -#![feature(let_else)] +#![cfg_attr(bootstrap, feature(let_else))] #![feature(map_try_insert)] #![feature(min_specialization)] #![feature(slice_ptr_get)] diff --git a/compiler/rustc_const_eval/src/might_permit_raw_init.rs b/compiler/rustc_const_eval/src/might_permit_raw_init.rs index f971c2238..37ffa19cc 100644 --- a/compiler/rustc_const_eval/src/might_permit_raw_init.rs +++ b/compiler/rustc_const_eval/src/might_permit_raw_init.rs @@ -13,7 +13,11 @@ pub fn might_permit_raw_init<'tcx>( let strict = tcx.sess.opts.unstable_opts.strict_init_checks; if strict { - let machine = CompileTimeInterpreter::new(Limit::new(0), false); + let machine = CompileTimeInterpreter::new( + Limit::new(0), + /*can_access_statics:*/ false, + /*check_alignment:*/ true, + ); let mut cx = InterpCx::new(tcx, rustc_span::DUMMY_SP, ParamEnv::reveal_all(), machine); diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs index 0adb88a18..7e15858c8 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs @@ -135,7 +135,7 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> { // qualifs for the return type. let return_block = ccx .body - .basic_blocks() + .basic_blocks .iter_enumerated() .find(|(_, block)| matches!(block.terminator().kind, TerminatorKind::Return)) .map(|(bb, _)| bb); @@ -546,6 +546,10 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { // Since no pointer can ever get exposed (rejected above), this is easy to support. } + Rvalue::Cast(CastKind::DynStar, _, _) => { + unimplemented!() + } + Rvalue::Cast(CastKind::Misc, _, _) => {} Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf, _) => {} @@ -678,7 +682,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { | StatementKind::Retag { .. } | StatementKind::AscribeUserType(..) | StatementKind::Coverage(..) - | StatementKind::CopyNonOverlapping(..) + | StatementKind::Intrinsic(..) | StatementKind::Nop => {} } } diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs index 338022616..5fb4bf638 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs @@ -1,6 +1,7 @@ //! Concrete error types for all operations which may be invalid in a certain const context. 
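[Editor's note] `might_permit_raw_init` (now constructed with alignment checks on) is what decides whether a type tolerates being conjured from zeroed or uninitialized bytes. Its user-visible effect is easiest to see through `mem::zeroed`, sketched here (runtime behavior shown; the strict mode evaluates the same question with this interpreter):

```rust
use std::mem;

fn main() {
    // An all-zero bit pattern is valid for integers...
    let x: u32 = unsafe { mem::zeroed() };
    assert_eq!(x, 0);
    // ...but not for references: `&u8` may never be null, so zero-initializing
    // one is rejected at runtime with an "invalid value" panic.
    // let r: &u8 = unsafe { mem::zeroed() }; // would abort if uncommented
}
```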
use hir::def_id::LocalDefId; +use hir::ConstContext; use rustc_errors::{ error_code, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed, }; @@ -23,8 +24,11 @@ use rustc_trait_selection::traits::SelectionContext; use super::ConstCx; use crate::errors::{ - MutDerefErr, NonConstOpErr, PanicNonStrErr, RawPtrToIntErr, StaticAccessErr, - TransientMutBorrowErr, TransientMutBorrowErrRaw, + InteriorMutabilityBorrow, InteriorMutableDataRefer, MutDerefErr, NonConstFmtMacroCall, + NonConstFnCall, NonConstOpErr, PanicNonStrErr, RawPtrToIntErr, StaticAccessErr, + TransientMutBorrowErr, TransientMutBorrowErrRaw, UnallowedFnPointerCall, + UnallowedHeapAllocations, UnallowedInlineAsm, UnallowedMutableRefs, UnallowedMutableRefsRaw, + UnallowedOpInConstContext, UnstableConstFn, }; use crate::util::{call_kind, CallDesugaringKind, CallKind}; @@ -96,10 +100,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallIndirect { ccx: &ConstCx<'_, 'tcx>, span: Span, ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { - ccx.tcx.sess.struct_span_err( - span, - &format!("function pointer calls are not allowed in {}s", ccx.const_kind()), - ) + ccx.tcx.sess.create_err(UnallowedFnPointerCall { span, kind: ccx.const_kind() }) } } @@ -307,22 +308,13 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> { err } _ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentV1Methods) => { - struct_span_err!( - ccx.tcx.sess, - span, - E0015, - "cannot call non-const formatting macro in {}s", - ccx.const_kind(), - ) + ccx.tcx.sess.create_err(NonConstFmtMacroCall { span, kind: ccx.const_kind() }) } - _ => struct_span_err!( - ccx.tcx.sess, + _ => ccx.tcx.sess.create_err(NonConstFnCall { span, - E0015, - "cannot call non-const fn `{}` in {}s", - ccx.tcx.def_path_str_with_substs(callee, substs), - ccx.const_kind(), - ), + def_path_str: ccx.tcx.def_path_str_with_substs(callee, substs), + kind: ccx.const_kind(), + }), }; err.note(&format!( @@ -331,6 +323,10 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> { ccx.const_kind(), )); + if let ConstContext::Static(_) = ccx.const_kind() { + err.note("consider wrapping this expression in `Lazy::new(|| ...)` from the `once_cell` crate: https://crates.io/crates/once_cell"); + } + err } } @@ -349,10 +345,10 @@ impl<'tcx> NonConstOp<'tcx> for FnCallUnstable { ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { let FnCallUnstable(def_id, feature) = *self; - let mut err = ccx.tcx.sess.struct_span_err( - span, - &format!("`{}` is not yet stable as a const fn", ccx.tcx.def_path_str(def_id)), - ); + let mut err = ccx + .tcx + .sess + .create_err(UnstableConstFn { span, def_path: ccx.tcx.def_path_str(def_id) }); if ccx.is_const_stable_const_fn() { err.help("const-stable functions can only call other const-stable functions"); @@ -387,9 +383,12 @@ impl<'tcx> NonConstOp<'tcx> for Generator { ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { let msg = format!("{}s are not allowed in {}s", self.0, ccx.const_kind()); if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 { - feature_err(&ccx.tcx.sess.parse_sess, sym::const_async_blocks, span, &msg) + ccx.tcx.sess.create_feature_err( + UnallowedOpInConstContext { span, msg }, + sym::const_async_blocks, + ) } else { - ccx.tcx.sess.struct_span_err(span, &msg) + ccx.tcx.sess.create_err(UnallowedOpInConstContext { span, msg }) } } } @@ -402,23 +401,11 @@ impl<'tcx> NonConstOp<'tcx> for HeapAllocation { ccx: &ConstCx<'_, 'tcx>, span: Span, ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { - let mut err = struct_span_err!( - ccx.tcx.sess, + 
ccx.tcx.sess.create_err(UnallowedHeapAllocations { span, - E0010, - "allocations are not allowed in {}s", - ccx.const_kind() - ); - err.span_label(span, format!("allocation not allowed in {}s", ccx.const_kind())); - if ccx.tcx.sess.teach(&err.get_code().unwrap()) { - err.note( - "The value of statics and constants must be known at compile time, \ - and they live for the entire lifetime of a program. Creating a boxed \ - value allocates memory on the heap at runtime, and therefore cannot \ - be done at compile time.", - ); - } - err + kind: ccx.const_kind(), + teach: ccx.tcx.sess.teach(&error_code!(E0010)).then_some(()), + }) } } @@ -430,13 +417,7 @@ impl<'tcx> NonConstOp<'tcx> for InlineAsm { ccx: &ConstCx<'_, 'tcx>, span: Span, ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { - struct_span_err!( - ccx.tcx.sess, - span, - E0015, - "inline assembly is not allowed in {}s", - ccx.const_kind() - ) + ccx.tcx.sess.create_err(UnallowedInlineAsm { span, kind: ccx.const_kind() }) } } @@ -482,12 +463,7 @@ impl<'tcx> NonConstOp<'tcx> for TransientCellBorrow { ccx: &ConstCx<'_, 'tcx>, span: Span, ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { - feature_err( - &ccx.tcx.sess.parse_sess, - sym::const_refs_to_cell, - span, - "cannot borrow here, since the borrowed element may contain interior mutability", - ) + ccx.tcx.sess.create_feature_err(InteriorMutabilityBorrow { span }, sym::const_refs_to_cell) } } @@ -502,32 +478,22 @@ impl<'tcx> NonConstOp<'tcx> for CellBorrow { ccx: &ConstCx<'_, 'tcx>, span: Span, ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { - let mut err = struct_span_err!( - ccx.tcx.sess, - span, - E0492, - "{}s cannot refer to interior mutable data", - ccx.const_kind(), - ); - err.span_label( - span, - "this borrow of an interior mutable value may end up in the final value", - ); + // FIXME: Maybe a more elegant solution to this if else case if let hir::ConstContext::Static(_) = ccx.const_kind() { - err.help( - "to fix this, the value can be extracted to a separate \ - `static` item and then referenced", - ); - } - if ccx.tcx.sess.teach(&err.get_code().unwrap()) { - err.note( - "A constant containing interior mutable data behind a reference can allow you - to modify that data. 
This would make multiple uses of a constant to be able to - see different values and allow circumventing the `Send` and `Sync` requirements - for shared mutable data, which is unsound.", - ); + ccx.tcx.sess.create_err(InteriorMutableDataRefer { + span, + opt_help: Some(()), + kind: ccx.const_kind(), + teach: ccx.tcx.sess.teach(&error_code!(E0492)).then_some(()), + }) + } else { + ccx.tcx.sess.create_err(InteriorMutableDataRefer { + span, + opt_help: None, + kind: ccx.const_kind(), + teach: ccx.tcx.sess.teach(&error_code!(E0492)).then_some(()), + }) } - err } } @@ -553,33 +519,18 @@ impl<'tcx> NonConstOp<'tcx> for MutBorrow { ccx: &ConstCx<'_, 'tcx>, span: Span, ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { - let raw = match self.0 { - hir::BorrowKind::Raw => "raw ", - hir::BorrowKind::Ref => "", - }; - - let mut err = struct_span_err!( - ccx.tcx.sess, - span, - E0764, - "{}mutable references are not allowed in the final value of {}s", - raw, - ccx.const_kind(), - ); - - if ccx.tcx.sess.teach(&err.get_code().unwrap()) { - err.note( - "References in statics and constants may only refer \ - to immutable values.\n\n\ - Statics are shared everywhere, and if they refer to \ - mutable data one might violate memory safety since \ - holding multiple mutable references to shared data \ - is not allowed.\n\n\ - If you really want global mutable state, try using \ - static mut or a global UnsafeCell.", - ); + match self.0 { + hir::BorrowKind::Raw => ccx.tcx.sess.create_err(UnallowedMutableRefsRaw { + span, + kind: ccx.const_kind(), + teach: ccx.tcx.sess.teach(&error_code!(E0764)).then_some(()), + }), + hir::BorrowKind::Ref => ccx.tcx.sess.create_err(UnallowedMutableRefs { + span, + kind: ccx.const_kind(), + teach: ccx.tcx.sess.teach(&error_code!(E0764)).then_some(()), + }), } - err } } diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs index c8a63c9c3..6c73ef5a8 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs @@ -5,12 +5,11 @@ use rustc_errors::ErrorGuaranteed; use rustc_hir::LangItem; use rustc_infer::infer::TyCtxtInferExt; -use rustc_infer::traits::TraitEngine; use rustc_middle::mir::*; use rustc_middle::ty::{self, subst::SubstsRef, AdtDef, Ty}; use rustc_span::DUMMY_SP; use rustc_trait_selection::traits::{ - self, ImplSource, Obligation, ObligationCause, SelectionContext, TraitEngineExt, + self, ImplSource, Obligation, ObligationCause, SelectionContext, }; use super::ConstCx; @@ -189,15 +188,8 @@ impl Qualif for NeedsNonConstDrop { return false; } - // If we successfully found one, then select all of the predicates - // implied by our const drop impl. - let mut fcx = <dyn TraitEngine<'tcx>>::new(cx.tcx); - for nested in impl_src.nested_obligations() { - fcx.register_predicate_obligation(&infcx, nested); - } - // If we had any errors, then it's bad - !fcx.select_all_or_error(&infcx).is_empty() + !traits::fully_solve_obligations(&infcx, impl_src.nested_obligations()).is_empty() }) } @@ -354,31 +346,43 @@ where }; // Check the qualifs of the value of `const` items. - if let Some(ct) = constant.literal.const_for_ty() { - if let ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs: _, promoted }) = ct.kind() - { - // Use qualifs of the type for the promoted. Promoteds in MIR body should be possible - // only for `NeedsNonConstDrop` with precise drop checking. 
This is the only const - // check performed after the promotion. Verify that with an assertion. - assert!(promoted.is_none() || Q::ALLOW_PROMOTED); - // Don't peek inside trait associated constants. - if promoted.is_none() && cx.tcx.trait_of_item(def.did).is_none() { - let qualifs = if let Some((did, param_did)) = def.as_const_arg() { - cx.tcx.at(constant.span).mir_const_qualif_const_arg((did, param_did)) - } else { - cx.tcx.at(constant.span).mir_const_qualif(def.did) - }; - - if !Q::in_qualifs(&qualifs) { - return false; - } + // FIXME(valtrees): check whether const qualifs should behave the same + // way for type and mir constants. + let uneval = match constant.literal { + ConstantKind::Ty(ct) if matches!(ct.kind(), ty::ConstKind::Unevaluated(_)) => { + let ty::ConstKind::Unevaluated(uv) = ct.kind() else { unreachable!() }; + + Some(uv.expand()) + } + ConstantKind::Ty(_) => None, + ConstantKind::Unevaluated(uv, _) => Some(uv), + ConstantKind::Val(..) => None, + }; - // Just in case the type is more specific than - // the definition, e.g., impl associated const - // with type parameters, take it into account. + if let Some(ty::Unevaluated { def, substs: _, promoted }) = uneval { + // Use qualifs of the type for the promoted. Promoteds in MIR body should be possible + // only for `NeedsNonConstDrop` with precise drop checking. This is the only const + // check performed after the promotion. Verify that with an assertion. + assert!(promoted.is_none() || Q::ALLOW_PROMOTED); + + // Don't peek inside trait associated constants. + if promoted.is_none() && cx.tcx.trait_of_item(def.did).is_none() { + let qualifs = if let Some((did, param_did)) = def.as_const_arg() { + cx.tcx.at(constant.span).mir_const_qualif_const_arg((did, param_did)) + } else { + cx.tcx.at(constant.span).mir_const_qualif(def.did) + }; + + if !Q::in_qualifs(&qualifs) { + return false; } + + // Just in case the type is more specific than + // the definition, e.g., impl associated const + // with type parameters, take it into account. } } + // Otherwise use the qualifs of the type. 
Q::in_any_value_of_ty(cx, constant.literal.ty()) } diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs index ed4d8c95d..f7a7cc88a 100644 --- a/compiler/rustc_const_eval/src/transform/promote_consts.rs +++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs @@ -41,7 +41,7 @@ pub struct PromoteTemps<'tcx> { impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> { fn phase_change(&self) -> Option<MirPhase> { - Some(MirPhase::ConstsPromoted) + Some(MirPhase::Analysis(AnalysisPhase::Initial)) } fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { @@ -710,7 +710,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { } fn assign(&mut self, dest: Local, rvalue: Rvalue<'tcx>, span: Span) { - let last = self.promoted.basic_blocks().last().unwrap(); + let last = self.promoted.basic_blocks.last().unwrap(); let data = &mut self.promoted[last]; data.statements.push(Statement { source_info: SourceInfo::outermost(span), @@ -803,7 +803,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { self.visit_operand(arg, loc); } - let last = self.promoted.basic_blocks().last().unwrap(); + let last = self.promoted.basic_blocks.last().unwrap(); let new_target = self.new_block(); *self.promoted[last].terminator_mut() = Terminator { @@ -839,27 +839,16 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { let mut promoted_operand = |ty, span| { promoted.span = span; promoted.local_decls[RETURN_PLACE] = LocalDecl::new(ty, span); - let _const = tcx.mk_const(ty::ConstS { - ty, - kind: ty::ConstKind::Unevaluated(ty::Unevaluated { - def, - substs: InternalSubsts::for_item(tcx, def.did, |param, _| { - if let ty::GenericParamDefKind::Lifetime = param.kind { - tcx.lifetimes.re_erased.into() - } else { - tcx.mk_param_from_def(param) - } - }), - promoted: Some(promoted_id), - }), - }); + let substs = tcx.erase_regions(InternalSubsts::identity_for_item(tcx, def.did)); + let uneval = ty::Unevaluated { def, substs, promoted: Some(promoted_id) }; Operand::Constant(Box::new(Constant { span, user_ty: None, - literal: ConstantKind::from_const(_const, tcx), + literal: ConstantKind::Unevaluated(uneval, ty), })) }; + let blocks = self.source.basic_blocks.as_mut(); let local_decls = &mut self.source.local_decls; let loc = candidate.location; @@ -969,7 +958,7 @@ pub fn promote_candidates<'tcx>( let mut scope = body.source_scopes[body.source_info(candidate.location).scope].clone(); scope.parent_scope = None; - let promoted = Body::new( + let mut promoted = Body::new( body.source, // `promoted` gets filled in below IndexVec::new(), IndexVec::from_elem_n(scope, 1), @@ -981,6 +970,7 @@ pub fn promote_candidates<'tcx>( body.generator_kind(), body.tainted_by_errors, ); + promoted.phase = MirPhase::Analysis(AnalysisPhase::Initial); let promoter = Promoter { promoted, @@ -1046,7 +1036,7 @@ pub fn is_const_fn_in_array_repeat_expression<'tcx>( _ => {} } - for block in body.basic_blocks() { + for block in body.basic_blocks.iter() { if let Some(Terminator { kind: TerminatorKind::Call { func, destination, .. }, .. 
}) = &block.terminator { diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs index 15e820f2d..4aa98cb13 100644 --- a/compiler/rustc_const_eval/src/transform/validate.rs +++ b/compiler/rustc_const_eval/src/transform/validate.rs @@ -7,9 +7,10 @@ use rustc_middle::mir::interpret::Scalar; use rustc_middle::mir::visit::NonUseContext::VarDebugInfo; use rustc_middle::mir::visit::{PlaceContext, Visitor}; use rustc_middle::mir::{ - traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, CastKind, Local, Location, - MirPass, MirPhase, Operand, Place, PlaceElem, PlaceRef, ProjectionElem, Rvalue, SourceScope, - Statement, StatementKind, Terminator, TerminatorKind, UnOp, START_BLOCK, + traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, CastKind, CopyNonOverlapping, + Local, Location, MirPass, MirPhase, NonDivergingIntrinsic, Operand, Place, PlaceElem, PlaceRef, + ProjectionElem, RuntimePhase, Rvalue, SourceScope, Statement, StatementKind, Terminator, + TerminatorKind, UnOp, START_BLOCK, }; use rustc_middle::ty::fold::BottomUpFolder; use rustc_middle::ty::subst::Subst; @@ -89,9 +90,8 @@ pub fn equal_up_to_regions<'tcx>( // Normalize lifetimes away on both sides, then compare. let normalize = |ty: Ty<'tcx>| { - tcx.normalize_erasing_regions( - param_env, - ty.fold_with(&mut BottomUpFolder { + tcx.try_normalize_erasing_regions(param_env, ty).unwrap_or(ty).fold_with( + &mut BottomUpFolder { tcx, // FIXME: We erase all late-bound lifetimes, but this is not fully correct. // If you have a type like `<for<'a> fn(&'a u32) as SomeTrait>::Assoc`, @@ -103,7 +103,7 @@ pub fn equal_up_to_regions<'tcx>( // Leave consts and types unchanged. ct_op: |ct| ct, ty_op: |ty| ty, - }), + }, ) }; tcx.infer_ctxt().enter(|infcx| infcx.can_eq(param_env, normalize(src), normalize(dest)).is_ok()) @@ -142,8 +142,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { if bb == START_BLOCK { self.fail(location, "start block must not have predecessors") } - if let Some(bb) = self.body.basic_blocks().get(bb) { - let src = self.body.basic_blocks().get(location.block).unwrap(); + if let Some(bb) = self.body.basic_blocks.get(bb) { + let src = self.body.basic_blocks.get(location.block).unwrap(); match (src.is_cleanup, bb.is_cleanup, edge_kind) { // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges (false, false, EdgeKind::Normal) @@ -183,16 +183,23 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { if (src, dest).has_opaque_types() { return true; } - // Normalize projections and things like that. - let param_env = self.param_env.with_reveal_all_normalized(self.tcx); - let src = self.tcx.normalize_erasing_regions(param_env, src); - let dest = self.tcx.normalize_erasing_regions(param_env, dest); + // Normalize projections and things like that. // Type-changing assignments can happen when subtyping is used. While // all normal lifetimes are erased, higher-ranked types with their // late-bound lifetimes are still around and can lead to type // differences. So we compare ignoring lifetimes. - equal_up_to_regions(self.tcx, param_env, src, dest) + + // First, try with reveal_all. This might not work in some cases, as the predicates + // can be cleared in reveal_all mode. We try the reveal first anyways as it is used + // by some other passes like inlining as well. 
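The lifetime-insensitive comparison used here exists because subtyping can legitimately change a type in its lifetimes only. A hypothetical, self-contained illustration of such a type-changing assignment (not from the compiler source):

    fn id(x: &u32) -> &u32 {
        x
    }

    fn main() {
        // `f` has a higher-ranked function type, quantified over 'a.
        let f: for<'a> fn(&'a u32) -> &'a u32 = id;
        // Subtyping instantiates 'a = 'static: same value, different type.
        let g: fn(&'static u32) -> &'static u32 = f;
        static ONE: u32 = 1;
        assert_eq!(*g(&ONE), 1);
    }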
+ let param_env = self.param_env.with_reveal_all_normalized(self.tcx); + if equal_up_to_regions(self.tcx, param_env, src, dest) { + return true; + } + + // If this fails, we can try it without the reveal. + equal_up_to_regions(self.tcx, self.param_env, src, dest) } } @@ -223,7 +230,8 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) { // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed. - if self.tcx.sess.opts.unstable_opts.validate_mir && self.mir_phase < MirPhase::DropsLowered + if self.tcx.sess.opts.unstable_opts.validate_mir + && self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) { // `Operand::Copy` is only supposed to be used with `Copy` types. if let Operand::Copy(place) = operand { @@ -254,7 +262,9 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { self.fail(location, format!("bad index ({:?} != usize)", index_ty)) } } - ProjectionElem::Deref if self.mir_phase >= MirPhase::GeneratorsLowered => { + ProjectionElem::Deref + if self.mir_phase >= MirPhase::Runtime(RuntimePhase::PostCleanup) => + { let base_ty = Place::ty_from(local, proj_base, &self.body.local_decls, self.tcx).ty; if base_ty.is_box() { @@ -362,7 +372,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { // Set off any `bug!`s in the type computation code let _ = place.ty(&self.body.local_decls, self.tcx); - if self.mir_phase >= MirPhase::Derefered + if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) && place.projection.len() > 1 && cntxt != PlaceContext::NonUse(VarDebugInfo) && place.projection[1..].contains(&ProjectionElem::Deref) @@ -386,8 +396,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { Rvalue::Aggregate(agg_kind, _) => { let disallowed = match **agg_kind { AggregateKind::Array(..) => false, - AggregateKind::Generator(..) => self.mir_phase >= MirPhase::GeneratorsLowered, - _ => self.mir_phase >= MirPhase::Deaggregated, + _ => self.mir_phase >= MirPhase::Runtime(RuntimePhase::PostCleanup), }; if disallowed { self.fail( @@ -397,10 +406,10 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { } } Rvalue::Ref(_, BorrowKind::Shallow, _) => { - if self.mir_phase >= MirPhase::DropsLowered { + if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) { self.fail( location, - "`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase", + "`Assign` statement with a `Shallow` borrow should have been removed in runtime MIR", ); } } @@ -560,6 +569,9 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { ); } } + CastKind::DynStar => { + // FIXME(dyn-star): make sure nothing needs to be done here. + } // Nothing to check here CastKind::PointerFromExposedAddress | CastKind::PointerExposeAddress @@ -614,7 +626,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { } } StatementKind::AscribeUserType(..) => { - if self.mir_phase >= MirPhase::DropsLowered { + if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) { self.fail( location, "`AscribeUserType` should have been removed after drop lowering phase", @@ -622,18 +634,25 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { } } StatementKind::FakeRead(..) 
             => {
-                if self.mir_phase >= MirPhase::DropsLowered {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
                     self.fail(
                         location,
                         "`FakeRead` should have been removed after drop lowering phase",
                     );
                 }
             }
-            StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
-                ref src,
-                ref dst,
-                ref count,
-            }) => {
+            StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(op)) => {
+                let ty = op.ty(&self.body.local_decls, self.tcx);
+                if !ty.is_bool() {
+                    self.fail(
+                        location,
+                        format!("`assume` argument must be `bool`, but got: `{}`", ty),
+                    );
+                }
+            }
+            StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
+                CopyNonOverlapping { src, dst, count },
+            )) => {
                 let src_ty = src.ty(&self.body.local_decls, self.tcx);
                 let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
                     src_deref.ty
@@ -666,7 +685,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
             }
             StatementKind::SetDiscriminant { place, .. } => {
-                if self.mir_phase < MirPhase::Deaggregated {
+                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
                     self.fail(location, "`SetDiscriminant` is not allowed until deaggregation");
                 }
                 let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind();
@@ -681,7 +700,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
             }
             StatementKind::Deinit(..) => {
-                if self.mir_phase < MirPhase::Deaggregated {
+                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
                     self.fail(location, "`Deinit` is not allowed until deaggregation");
                 }
             }
@@ -761,7 +780,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
             }
             TerminatorKind::DropAndReplace { target, unwind, .. } => {
-                if self.mir_phase >= MirPhase::DropsLowered {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
                     self.fail(
                         location,
                         "`DropAndReplace` should have been removed during drop elaboration",
@@ -832,7 +851,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 if self.body.generator.is_none() {
                     self.fail(location, "`Yield` cannot appear outside generator bodies");
                 }
-                if self.mir_phase >= MirPhase::GeneratorsLowered {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
                     self.fail(location, "`Yield` should have been replaced by generator lowering");
                 }
                 self.check_edge(location, *resume, EdgeKind::Normal);
@@ -841,7 +860,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
             }
             TerminatorKind::FalseEdge { real_target, imaginary_target } => {
-                if self.mir_phase >= MirPhase::DropsLowered {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
                     self.fail(
                         location,
                         "`FalseEdge` should have been removed after drop elaboration",
@@ -851,7 +870,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 self.check_edge(location, *imaginary_target, EdgeKind::Normal);
             }
             TerminatorKind::FalseUnwind { real_target, unwind } => {
-                if self.mir_phase >= MirPhase::DropsLowered {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
                     self.fail(
                         location,
                         "`FalseUnwind` should have been removed after drop elaboration",
@@ -874,7 +893,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 if self.body.generator.is_none() {
                     self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
                 }
-                if self.mir_phase >= MirPhase::GeneratorsLowered {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
                     self.fail(
                         location,
                         "`GeneratorDrop` should have been replaced by generator lowering",
@@ -883,13 +902,13 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
             }
             TerminatorKind::Resume | TerminatorKind::Abort => {
                 let bb = location.block;
-                if !self.body.basic_blocks()[bb].is_cleanup {
+                if !self.body.basic_blocks[bb].is_cleanup {
                     self.fail(location, "Cannot `Resume` or `Abort` from non-cleanup basic block")
                 }
             }
             TerminatorKind::Return => {
                 let bb = location.block;
-                if self.body.basic_blocks()[bb].is_cleanup {
+                if self.body.basic_blocks[bb].is_cleanup {
                     self.fail(location, "Cannot `Return` from cleanup basic block")
                 }
             }
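Finally, the new `Assume` arm added to the statement validator above checks that the operand is a `bool`. At the source level, such statements come from the `assume` intrinsic; a minimal nightly-only sketch (requires the unstable `core_intrinsics` feature, so this is illustrative rather than stable API):

    #![feature(core_intrinsics)]

    fn div_by_nonzero(x: u32, d: u32) -> u32 {
        // `assume` takes a `bool`, the same invariant the validator re-checks
        // once the call has been lowered to a MIR `Assume` statement.
        unsafe { std::intrinsics::assume(d != 0) };
        x / d
    }

    fn main() {
        assert_eq!(div_by_nonzero(10, 2), 5);
    }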