Diffstat (limited to 'compiler/rustc_const_eval/src/interpret')
16 files changed, 150 insertions, 125 deletions
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 269ae15d4..b2c847d3f 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -1,5 +1,4 @@
 use std::assert_matches::assert_matches;
-use std::convert::TryFrom;
 
 use rustc_apfloat::ieee::{Double, Single};
 use rustc_apfloat::{Float, FloatConvert};
@@ -333,7 +332,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     Immediate::new_slice(ptr, length.eval_usize(*self.tcx, self.param_env), self);
                 self.write_immediate(val, dest)
             }
-            (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+            (ty::Dynamic(data_a, ..), ty::Dynamic(data_b, ..)) => {
                 let val = self.read_immediate(src)?;
                 if data_a.principal() == data_b.principal() {
                     // A NOP cast that doesn't actually change anything, should be allowed even with mismatching vtables.
@@ -348,7 +347,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let new_vptr = self.get_vtable_ptr(ty, data_b.principal())?;
                 self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
             }
-            (_, &ty::Dynamic(ref data, _, ty::Dyn)) => {
+            (_, &ty::Dynamic(data, _, ty::Dyn)) => {
                 // Initial cast from sized to dyn trait
                 let vtable = self.get_vtable_ptr(src_pointee_ty, data.principal())?;
                 let ptr = self.read_scalar(src)?;
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 0b2809f1d..d13fed7a9 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -196,7 +196,7 @@ impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
         }
     }
 
-    /// Overwrite the local.  If the local can be overwritten in place, return a reference
+    /// Overwrite the local. If the local can be overwritten in place, return a reference
     /// to do so; otherwise return the `MemPlace` to consult instead.
     ///
     /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
@@ -248,6 +248,15 @@ impl<'mir, 'tcx, Prov: Provenance, Extra> Frame<'mir, 'tcx, Prov, Extra> {
             Right(span) => span,
         }
     }
+
+    pub fn lint_root(&self) -> Option<hir::HirId> {
+        self.current_source_info().and_then(|source_info| {
+            match &self.body.source_scopes[source_info.scope].local_data {
+                mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
+                mir::ClearCrossCrate::Clear => None,
+            }
+        })
+    }
 }
 
 impl<'tcx> fmt::Display for FrameInfo<'tcx> {
@@ -583,7 +592,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 );
 
                 // Recurse to get the size of the dynamically sized field (must be
-                // the last field).  Can't have foreign types here, how would we
+                // the last field). Can't have foreign types here, how would we
                 // adjust alignment and size for them?
                 let field = layout.field(self, layout.fields.count() - 1);
                 let Some((unsized_size, mut unsized_align)) = self.size_and_align_of(metadata, &field)? else {
@@ -954,12 +963,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // This deliberately does *not* honor `requires_caller_location` since it is used for much
         // more than just panics.
         for frame in stack.iter().rev() {
-            let lint_root = frame.current_source_info().and_then(|source_info| {
-                match &frame.body.source_scopes[source_info.scope].local_data {
-                    mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
-                    mir::ClearCrossCrate::Clear => None,
-                }
-            });
+            let lint_root = frame.lint_root();
             let span = frame.current_span();
 
             frames.push(FrameInfo { span, instance: frame.instance, lint_root });
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 458cc6180..54528b1db 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -59,7 +59,7 @@ struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_ev
 
 #[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
 enum InternMode {
-    /// A static and its current mutability.  Below shared references inside a `static mut`,
+    /// A static and its current mutability. Below shared references inside a `static mut`,
     /// this is *immutable*, and below mutable references inside an `UnsafeCell`, this
     /// is *mutable*.
     Static(hir::Mutability),
@@ -296,7 +296,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
                 }
             }
             InternMode::Const => {
-                // Ignore `UnsafeCell`, everything is immutable.  Validity does some sanity
+                // Ignore `UnsafeCell`, everything is immutable. Validity does some sanity
                 // checking for mutable references that we encounter -- they must all be
                 // ZST.
                 InternMode::Const
@@ -330,7 +330,7 @@ pub enum InternKind {
 
 /// Intern `ret` and everything it references.
 ///
-/// This *cannot raise an interpreter error*.  Doing so is left to validation, which
+/// This *cannot raise an interpreter error*. Doing so is left to validation, which
 /// tracks where in the value we are and thus can show much better error messages.
 #[instrument(level = "debug", skip(ecx))]
 pub fn intern_const_alloc_recursive<
@@ -379,7 +379,7 @@ pub fn intern_const_alloc_recursive<
         inside_unsafe_cell: false,
     }
     .visit_value(&mplace);
-    // We deliberately *ignore* interpreter errors here.  When there is a problem, the remaining
+    // We deliberately *ignore* interpreter errors here. When there is a problem, the remaining
     // references are "leftover"-interned, and later validation will show a proper error
    // and point at the right part of the value causing the problem.
     match res {
@@ -454,7 +454,7 @@ pub fn intern_const_alloc_recursive<
             return Err(reported);
         } else if ecx.tcx.try_get_global_alloc(alloc_id).is_none() {
             // We have hit an `AllocId` that is neither in local or global memory and isn't
-            // marked as dangling by local memory.  That should be impossible.
+            // marked as dangling by local memory. That should be impossible.
             span_bug!(ecx.tcx.span, "encountered unknown alloc id {:?}", alloc_id);
         }
     }
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 7940efcd2..cc7b6c91b 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -2,8 +2,6 @@
 //! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
 //! and miri.
 
-use std::convert::TryFrom;
-
 use rustc_hir::def_id::DefId;
 use rustc_middle::mir::{
     self,
@@ -81,14 +79,10 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
         }
         sym::variant_count => match tp_ty.kind() {
             // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
-            ty::Adt(ref adt, _) => {
-                ConstValue::from_machine_usize(adt.variants().len() as u64, &tcx)
+            ty::Adt(adt, _) => ConstValue::from_machine_usize(adt.variants().len() as u64, &tcx),
+            ty::Alias(..) | ty::Param(_) | ty::Placeholder(_) | ty::Infer(_) => {
+                throw_inval!(TooGeneric)
             }
-            ty::Projection(_)
-            | ty::Opaque(_, _)
-            | ty::Param(_)
-            | ty::Placeholder(_)
-            | ty::Infer(_) => throw_inval!(TooGeneric),
             ty::Bound(_, _) => bug!("bound ty during ctfe"),
             ty::Bool
             | ty::Char
@@ -307,7 +301,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
             sym::offset => {
                 let ptr = self.read_pointer(&args[0])?;
-                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
+                let offset_count = self.read_machine_isize(&args[1])?;
                 let pointee_ty = substs.type_at(0);
 
                 let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
@@ -315,7 +309,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
             sym::arith_offset => {
                 let ptr = self.read_pointer(&args[0])?;
-                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
+                let offset_count = self.read_machine_isize(&args[1])?;
                 let pointee_ty = substs.type_at(0);
 
                 let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
@@ -432,7 +426,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             sym::transmute => {
                 self.copy_op(&args[0], dest, /*allow_transmute*/ true)?;
             }
-            sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
+            sym::assert_inhabited
+            | sym::assert_zero_valid
+            | sym::assert_mem_uninitialized_valid => {
                 let ty = instance.substs.type_at(0);
                 let layout = self.layout_of(ty)?;
 
@@ -464,7 +460,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     }
                 }
 
-                if intrinsic_name == sym::assert_uninit_valid {
+                if intrinsic_name == sym::assert_mem_uninitialized_valid {
                     let should_panic = !self.tcx.permits_uninit_init(layout);
 
                     if should_panic {
@@ -672,7 +668,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
-        let count = self.read_scalar(&count)?.to_machine_usize(self)?;
+        let count = self.read_machine_usize(&count)?;
         let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
         let (size, align) = (layout.size, layout.align.abi);
         // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
@@ -700,7 +696,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
         let dst = self.read_pointer(&dst)?;
         let byte = self.read_scalar(&byte)?.to_u8()?;
-        let count = self.read_scalar(&count)?.to_machine_usize(self)?;
+        let count = self.read_machine_usize(&count)?;
 
         // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
         // but no actual allocation can be big enough for the difference to be noticeable.
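Note: the `read_machine_usize`/`read_machine_isize` helpers these call sites switch to are introduced by this same diff in operand.rs (further down); they bundle a `read_scalar` with the pointer-sized integer conversion, which needs `self` only for the target's pointer width. As a sketch, the mechanical rewrite at any such call site is:

    // Before: read a scalar, then convert by hand; `self` supplies the
    // target pointer size via `HasDataLayout`.
    let count = self.read_scalar(&count)?.to_machine_usize(self)?;
    // After: one convenience call with identical behavior.
    let count = self.read_machine_usize(&count)?;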
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
index 7d94a22c4..77c7b4bac 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
@@ -1,5 +1,3 @@
-use std::convert::TryFrom;
-
 use rustc_ast::Mutability;
 use rustc_hir::lang_items::LangItem;
 use rustc_middle::mir::TerminatorKind;
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index 0604d5ee6..248953de8 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -10,9 +10,11 @@ use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_middle::mir;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::def_id::DefId;
-use rustc_target::abi::Size;
+use rustc_target::abi::{Align, Size};
 use rustc_target::spec::abi::Abi as CallAbi;
 
+use crate::const_eval::CheckAlignment;
+
 use super::{
     AllocId, AllocRange, Allocation, ConstAllocation, Frame, ImmTy, InterpCx, InterpResult,
     MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
@@ -122,7 +124,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
     const PANIC_ON_ALLOC_FAIL: bool;
 
     /// Whether memory accesses should be alignment-checked.
-    fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+    fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment;
 
     /// Whether, when checking alignment, we should look at the actual address and thus support
     /// custom alignment logic based on whatever the integer address happens to be.
@@ -130,6 +132,13 @@ pub trait Machine<'mir, 'tcx>: Sized {
     /// If this returns true, Provenance::OFFSET_IS_ADDR must be true.
     fn use_addr_for_alignment_check(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
 
+    fn alignment_check_failed(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        has: Align,
+        required: Align,
+        check: CheckAlignment,
+    ) -> InterpResult<'tcx, ()>;
+
     /// Whether to enforce the validity invariant
     fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
 
@@ -171,7 +180,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
         unwind: StackPopUnwind,
     ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>>;
 
-    /// Execute `fn_val`.  It is the hook's responsibility to advance the instruction
+    /// Execute `fn_val`. It is the hook's responsibility to advance the instruction
     /// pointer as appropriate.
     fn call_extra_fn(
         ecx: &mut InterpCx<'mir, 'tcx, Self>,
@@ -430,7 +439,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
 }
 
 /// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
-/// (CTFE and ConstProp) use the same instance.  Here, we share that code.
+/// (CTFE and ConstProp) use the same instance. Here, we share that code.
 pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
     type Provenance = AllocId;
     type ProvenanceExtra = ();
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 528c1cb06..291bfb2b5 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -18,6 +18,8 @@ use rustc_middle::mir::display_allocation;
 use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
 use rustc_target::abi::{Align, HasDataLayout, Size};
 
+use crate::const_eval::CheckAlignment;
+
 use super::{
     alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc, InterpCx,
     InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
@@ -144,7 +146,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
 
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
-    /// the machine pointer to the allocation.  Must never be used
+    /// the machine pointer to the allocation. Must never be used
     /// for any other pointers, nor for TLS statics.
     ///
     /// Using the resulting pointer represents a *direct* access to that memory
@@ -349,11 +351,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         size: Size,
         align: Align,
     ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
-        let align = M::enforce_alignment(&self).then_some(align);
         self.check_and_deref_ptr(
             ptr,
             size,
             align,
+            M::enforce_alignment(self),
             CheckInAllocMsg::MemoryAccessTest,
             |alloc_id, offset, prov| {
                 let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
@@ -373,10 +375,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         align: Align,
         msg: CheckInAllocMsg,
     ) -> InterpResult<'tcx> {
-        self.check_and_deref_ptr(ptr, size, Some(align), msg, |alloc_id, _, _| {
-            let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
-            Ok((size, align, ()))
-        })?;
+        self.check_and_deref_ptr(
+            ptr,
+            size,
+            align,
+            CheckAlignment::Error,
+            msg,
+            |alloc_id, _, _| {
+                let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
+                Ok((size, align, ()))
+            },
+        )?;
         Ok(())
     }
 
@@ -388,7 +397,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         &self,
         ptr: Pointer<Option<M::Provenance>>,
         size: Size,
-        align: Option<Align>,
+        align: Align,
+        check: CheckAlignment,
         msg: CheckInAllocMsg,
         alloc_size: impl FnOnce(
             AllocId,
@@ -396,19 +406,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             M::ProvenanceExtra,
         ) -> InterpResult<'tcx, (Size, Align, T)>,
     ) -> InterpResult<'tcx, Option<T>> {
-        fn check_offset_align<'tcx>(offset: u64, align: Align) -> InterpResult<'tcx> {
-            if offset % align.bytes() == 0 {
-                Ok(())
-            } else {
-                // The biggest power of two through which `offset` is divisible.
-                let offset_pow2 = 1 << offset.trailing_zeros();
-                throw_ub!(AlignmentCheckFailed {
-                    has: Align::from_bytes(offset_pow2).unwrap(),
-                    required: align,
-                })
-            }
-        }
-
         Ok(match self.ptr_try_get_alloc_id(ptr) {
             Err(addr) => {
                 // We couldn't get a proper allocation. This is only okay if the access size is 0,
@@ -417,8 +414,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     throw_ub!(DanglingIntPointer(addr, msg));
                 }
                 // Must be aligned.
-                if let Some(align) = align {
-                    check_offset_align(addr, align)?;
+                if check.should_check() {
+                    self.check_offset_align(addr, align, check)?;
                 }
                 None
             }
@@ -441,16 +438,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
                 // Test align. Check this last; if both bounds and alignment are violated
                 // we want the error to be about the bounds.
-                if let Some(align) = align {
+                if check.should_check() {
                     if M::use_addr_for_alignment_check(self) {
                         // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
-                        check_offset_align(ptr.addr().bytes(), align)?;
+                        self.check_offset_align(ptr.addr().bytes(), align, check)?;
                     } else {
                         // Check allocation alignment and offset alignment.
                         if alloc_align.bytes() < align.bytes() {
-                            throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
+                            M::alignment_check_failed(self, alloc_align, align, check)?;
                         }
-                        check_offset_align(offset.bytes(), align)?;
+                        self.check_offset_align(offset.bytes(), align, check)?;
                     }
                 }
 
@@ -460,6 +457,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
         })
     }
+
+    fn check_offset_align(
+        &self,
+        offset: u64,
+        align: Align,
+        check: CheckAlignment,
+    ) -> InterpResult<'tcx> {
+        if offset % align.bytes() == 0 {
+            Ok(())
+        } else {
+            // The biggest power of two through which `offset` is divisible.
+            let offset_pow2 = 1 << offset.trailing_zeros();
+            M::alignment_check_failed(self, Align::from_bytes(offset_pow2).unwrap(), align, check)
+        }
+    }
 }
 
 /// Allocation accessors
@@ -524,7 +536,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         &self,
         id: AllocId,
     ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra>> {
-        // The error type of the inner closure here is somewhat funny.  We have two
+        // The error type of the inner closure here is somewhat funny. We have two
         // ways of "erroring": An actual error, or because we got a reference from
         // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
         // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
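Note: the nested `check_offset_align` function removed above could only report a hard error via `throw_ub!`; re-homing it as an `InterpCx` method routes every failure through the new `M::alignment_check_failed` hook, so each machine decides what a failed check means. A minimal sketch of a strict machine implementation, reusing the error the old inline code threw (a lenient machine could instead emit a lint and return `Ok(())`; see the `CheckAlignment` sketch at the end of this page):

    // Sketch only: a machine that always hard-errors, like the old inline code.
    fn alignment_check_failed(
        _ecx: &InterpCx<'mir, 'tcx, Self>,
        has: Align,
        required: Align,
        _check: CheckAlignment,
    ) -> InterpResult<'tcx, ()> {
        throw_ub!(AlignmentCheckFailed { has, required })
    }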
@@ -560,11 +572,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         size: Size,
         align: Align,
     ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
-        let align = M::enforce_alignment(self).then_some(align);
         let ptr_and_alloc = self.check_and_deref_ptr(
             ptr,
             size,
             align,
+            M::enforce_alignment(self),
             CheckInAllocMsg::MemoryAccessTest,
             |alloc_id, offset, prov| {
                 let alloc = self.get_alloc_raw(alloc_id)?;
@@ -851,7 +863,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
 
             write!(fmt, "{id:?}")?;
             match self.ecx.memory.alloc_map.get(id) {
-                Some(&(kind, ref alloc)) => {
+                Some((kind, alloc)) => {
                     // normal alloc
                     write!(fmt, " ({}, ", kind)?;
                     write_allocation_track_relocs(
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 221e359d2..befc0928f 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -39,7 +39,7 @@ pub enum Immediate<Prov: Provenance = AllocId> {
 impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
     #[inline(always)]
     fn from(val: Scalar<Prov>) -> Self {
-        Immediate::Scalar(val.into())
+        Immediate::Scalar(val)
     }
 }
 
@@ -53,7 +53,7 @@ impl<Prov: Provenance> Immediate<Prov> {
     }
 
     pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
-        Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
+        Immediate::ScalarPair(val, Scalar::from_machine_usize(len, cx))
     }
 
     pub fn new_dyn_trait(
@@ -61,7 +61,7 @@ impl<Prov: Provenance> Immediate<Prov> {
         vtable: Pointer<Option<Prov>>,
         cx: &impl HasDataLayout,
     ) -> Self {
-        Immediate::ScalarPair(val.into(), Scalar::from_maybe_pointer(vtable, cx))
+        Immediate::ScalarPair(val, Scalar::from_maybe_pointer(vtable, cx))
     }
 
     #[inline]
@@ -341,10 +341,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     alloc_range(b_offset, b_size),
                     /*read_provenance*/ b.is_ptr(),
                 )?;
-                Some(ImmTy {
-                    imm: Immediate::ScalarPair(a_val.into(), b_val.into()),
-                    layout: mplace.layout,
-                })
+                Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout })
             }
             _ => {
                 // Neither a scalar nor scalar pair.
@@ -407,6 +404,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok(self.read_immediate(op)?.to_scalar())
     }
 
+    // Pointer-sized reads are fairly common and need target layout access, so we wrap them in
+    // convenience functions.
+
     /// Read a pointer from a place.
     pub fn read_pointer(
         &self,
@@ -414,6 +414,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
         self.read_scalar(op)?.to_pointer(self)
     }
+    /// Read a pointer-sized unsigned integer from a place.
+    pub fn read_machine_usize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u64> {
+        self.read_scalar(op)?.to_machine_usize(self)
+    }
+    /// Read a pointer-sized signed integer from a place.
+    pub fn read_machine_isize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, i64> {
+        self.read_scalar(op)?.to_machine_isize(self)
+    }
 
     /// Turn the wide MPlace into a string (must already be dereferenced!)
     pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> {
@@ -480,7 +488,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok(OpTy { op, layout: place.layout, align: Some(place.align) })
     }
 
-    /// Evaluate a place with the goal of reading from it.  This lets us sometimes
+    /// Evaluate a place with the goal of reading from it. This lets us sometimes
     /// avoid allocations.
     pub fn eval_place_to_op(
         &self,
@@ -525,11 +533,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         layout: Option<TyAndLayout<'tcx>>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
         use rustc_middle::mir::Operand::*;
-        let op = match *mir_op {
+        let op = match mir_op {
             // FIXME: do some more logic on `move` to invalidate the old location
-            Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,
+            &Copy(place) | &Move(place) => self.eval_place_to_op(place, layout)?,
 
-            Constant(ref constant) => {
+            Constant(constant) => {
                 let c =
                     self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal)?;
 
@@ -569,8 +577,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             ty::ConstKind::Unevaluated(uv) => {
                 let instance = self.resolve(uv.def, uv.substs)?;
                 let cid = GlobalId { instance, promoted: None };
-                self.ctfe_query(span, |tcx| tcx.eval_to_valtree(self.param_env.and(cid)))?
-                    .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
+                self.ctfe_query(span, |tcx| {
+                    tcx.eval_to_valtree(self.param_env.with_const().and(cid))
+                })?
+                .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
             }
             ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
                 span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {val:?}")
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index 1f1d06651..e8ff70e3a 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -1,5 +1,3 @@
-use std::convert::TryFrom;
-
 use rustc_apfloat::Float;
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::{InterpResult, Scalar};
@@ -38,7 +36,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         if let Abi::ScalarPair(..) = dest.layout.abi {
             // We can use the optimized path and avoid `place_field` (which might do
             // `force_allocation`).
-            let pair = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
+            let pair = Immediate::ScalarPair(val, Scalar::from_bool(overflowed));
             self.write_immediate(pair, dest)?;
         } else {
             assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index c47cfe8bb..274af61ee 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -141,7 +141,7 @@ impl<Prov: Provenance> MemPlace<Prov> {
         match self.meta {
             MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
             MemPlaceMeta::Meta(meta) => {
-                Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx).into(), meta.into())
+                Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx), meta)
             }
         }
     }
@@ -233,7 +233,7 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
                 _ => bug!("len not supported on unsized type {:?}", self.layout.ty),
             }
         } else {
-            // Go through the layout.  There are lots of types that support a length,
+            // Go through the layout. There are lots of types that support a length,
             // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
             match self.layout.fields {
                 abi::FieldsShape::Array { count, .. } => Ok(count),
@@ -294,7 +294,7 @@ where
     M: Machine<'mir, 'tcx, Provenance = Prov>,
 {
     /// Take a value, which represents a (thin or wide) reference, and make it a place.
-    /// Alignment is just based on the type.  This is the inverse of `MemPlace::to_ref()`.
+    /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
     ///
     /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
     /// want to ever use the place for memory access!
@@ -364,13 +364,8 @@ where
             .size_and_align_of_mplace(&mplace)?
             .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
         assert!(mplace.align <= align, "dynamic alignment less strict than static one?");
-        let align = M::enforce_alignment(self).then_some(align);
-        self.check_ptr_access_align(
-            mplace.ptr,
-            size,
-            align.unwrap_or(Align::ONE),
-            CheckInAllocMsg::DerefTest,
-        )?;
+        let align = if M::enforce_alignment(self).should_check() { align } else { Align::ONE };
+        self.check_ptr_access_align(mplace.ptr, size, align, CheckInAllocMsg::DerefTest)?;
         Ok(())
     }
 
@@ -708,7 +703,7 @@ where
             &mut Operand::Immediate(local_val) => {
                 // We need to make an allocation.
 
-                // We need the layout of the local.  We can NOT use the layout we got,
+                // We need the layout of the local. We can NOT use the layout we got,
                 // that might e.g., be an inner field of a struct with `Scalar` layout,
                 // that has different alignment than the outer field.
                 let local_layout =
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 2ffd73eef..291464ab5 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -363,7 +363,7 @@ where
             Index(local) => {
                 let layout = self.layout_of(self.tcx.types.usize)?;
                 let n = self.local_to_op(self.frame(), local, Some(layout))?;
-                let n = self.read_scalar(&n)?.to_machine_usize(self)?;
+                let n = self.read_machine_usize(&n)?;
                 self.place_index(base, n)?
             }
             ConstantIndex { offset, min_length, from_end } => {
@@ -392,7 +392,7 @@ where
             Index(local) => {
                 let layout = self.layout_of(self.tcx.types.usize)?;
                 let n = self.local_to_op(self.frame(), local, Some(layout))?;
-                let n = self.read_scalar(&n)?.to_machine_usize(self)?;
+                let n = self.read_machine_usize(&n)?;
                 self.operand_index(base, n)?
             }
             ConstantIndex { offset, min_length, from_end } => {
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 81b44a494..fad4cb06c 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -111,7 +111,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 M::retag_place_contents(self, *kind, &dest)?;
             }
 
-            Intrinsic(box ref intrinsic) => self.emulate_nondiverging_intrinsic(intrinsic)?,
+            Intrinsic(box intrinsic) => self.emulate_nondiverging_intrinsic(intrinsic)?,
 
             // Statements we do not track.
             AscribeUserType(..) => {}
@@ -163,8 +163,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 self.copy_op(&op, &dest, /*allow_transmute*/ false)?;
             }
 
-            CopyForDeref(ref place) => {
-                let op = self.eval_place_to_op(*place, Some(dest.layout))?;
+            CopyForDeref(place) => {
+                let op = self.eval_place_to_op(place, Some(dest.layout))?;
                 self.copy_op(&op, &dest, /* allow_transmute*/ false)?;
             }
 
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 57e40e168..da320cd1c 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -29,10 +29,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
             Goto { target } => self.go_to_block(target),
 
-            SwitchInt { ref discr, ref targets, switch_ty } => {
+            SwitchInt { ref discr, ref targets } => {
                 let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
                 trace!("SwitchInt({:?})", *discr);
-                assert_eq!(discr.layout.ty, switch_ty);
 
                 // Branch to the `otherwise` case by default, if no match is found.
                 let mut target_block = targets.otherwise();
@@ -120,11 +119,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
 
             Drop { place, target, unwind } => {
+                let frame = self.frame();
+                let ty = place.ty(&frame.body.local_decls, *self.tcx).ty;
+                let ty = self.subst_from_frame_and_normalize_erasing_regions(frame, ty)?;
+                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
+                if let ty::InstanceDef::DropGlue(_, None) = instance.def {
+                    // This is the branch we enter if and only if the dropped type has no drop glue
+                    // whatsoever. This can happen as a result of monomorphizing a drop of a
+                    // generic. In order to make sure that generic and non-generic code behaves
+                    // roughly the same (and in keeping with Mir semantics) we do nothing here.
+                    self.go_to_block(target);
+                    return Ok(());
+                }
                 let place = self.eval_place(place)?;
-                let ty = place.layout.ty;
                 trace!("TerminatorKind::drop: {:?}, type {}", place, ty);
-
-                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
                 self.drop_in_place(&place, instance, target, unwind)?;
             }
 
@@ -438,7 +446,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // they go to.
 
         // For where they come from: If the ABI is RustCall, we untuple the
-        // last incoming argument.  These two iterators do not have the same type,
+        // last incoming argument. These two iterators do not have the same type,
         // so to keep the code paths uniform we accept an allocation
         // (for RustCall ABI only).
         let caller_args: Cow<'_, [OpTy<'tcx, M::Provenance>]> =
@@ -473,7 +481,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore));
 
         // Now we have to spread them out across the callee's locals,
-        // taking into account the `spread_arg`.  If we could write
+        // taking into account the `spread_arg`. If we could write
         // this is a single iterator (that handles `spread_arg`), then
         // `pass_argument` would be the loop body. It takes care to
         // not advance `caller_iter` for ZSTs.
@@ -640,8 +648,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
         trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
-        // We take the address of the object.  This may well be unaligned, which is fine
-        // for us here.  However, unaligned accesses will probably make the actual drop
+        // We take the address of the object. This may well be unaligned, which is fine
+        // for us here. However, unaligned accesses will probably make the actual drop
         // implementation fail -- a problem shared by rustc.
         let place = self.force_allocation(place)?;
 
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index 2bc521d5b..cabc65e2c 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -1,6 +1,5 @@
 use rustc_middle::mir::interpret::InterpResult;
 use rustc_middle::ty::{self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor};
-use std::convert::TryInto;
 use std::ops::ControlFlow;
 
 /// Checks whether a type contains generic parameters which require substitution.
@@ -27,7 +26,7 @@ where
 
         fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
             if !ty.needs_subst() {
-                return ControlFlow::CONTINUE;
+                return ControlFlow::Continue(());
             }
 
             match *ty.kind() {
@@ -41,16 +40,15 @@ where
                         let index = index
                             .try_into()
                             .expect("more generic parameters than can fit into a `u32`");
-                        let is_used = unused_params.contains(index).map_or(true, |unused| !unused);
                         // Only recurse when generic parameters in fns, closures and generators
                         // are used and require substitution.
                         // Just in case there are closures or generators within this subst,
                         // recurse.
-                        if is_used && subst.needs_subst() {
+                        if unused_params.is_used(index) && subst.needs_subst() {
                             return subst.visit_with(self);
                         }
                     }
-                    ControlFlow::CONTINUE
+                    ControlFlow::Continue(())
                 }
                 _ => ty.super_visit_with(self),
             }
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 0e85c7d11..19e359986 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -4,7 +4,6 @@
 //! That's useful because it means other passes (e.g. promotion) can rely on `const`s
 //! to be const-safe.
 
-use std::convert::TryFrom;
 use std::fmt::{Display, Write};
 use std::num::NonZeroUsize;
 
@@ -176,7 +175,7 @@ fn write_path(out: &mut String, path: &[PathElem]) {
             TupleElem(idx) => write!(out, ".{}", idx),
             ArrayElem(idx) => write!(out, "[{}]", idx),
             // `.<deref>` does not match Rust syntax, but it is more readable for long paths -- and
-            // some of the other items here also are not Rust syntax.  Actually we can't
+            // some of the other items here also are not Rust syntax. Actually we can't
             // even use the usual syntax because we are just showing the projections,
             // not the root.
             Deref => write!(out, ".<deref>"),
@@ -420,7 +419,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                     )
                 }
                 // Recursive checking
-                if let Some(ref mut ref_tracking) = self.ref_tracking {
+                if let Some(ref_tracking) = self.ref_tracking.as_deref_mut() {
                     // Proceed recursively even for ZST, no reason to skip them!
                     // `!` is a ZST and we want to validate it.
                     if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr) {
@@ -485,7 +484,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
     }
 
     /// Check if this is a value of primitive type, and if yes check the validity of the value
-    /// at that type.  Return `true` if the type is indeed primitive.
+    /// at that type. Return `true` if the type is indeed primitive.
     fn try_visit_primitive(
         &mut self,
         value: &OpTy<'tcx, M::Provenance>,
@@ -602,8 +601,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
             | ty::Placeholder(..)
             | ty::Bound(..)
             | ty::Param(..)
-            | ty::Opaque(..)
-            | ty::Projection(..)
+            | ty::Alias(..)
             | ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty),
         }
     }
@@ -625,7 +623,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                 // Can only happen during CTFE.
                 // We support 2 kinds of ranges here: full range, and excluding zero.
                 if start == 1 && end == max_value {
-                    // Only null is the niche.  So make sure the ptr is NOT null.
+                    // Only null is the niche. So make sure the ptr is NOT null.
                     if self.ecx.scalar_may_be_null(scalar)? {
                         throw_validation_failure!(self.path,
                             { "a potentially null pointer" }
@@ -761,7 +759,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
         // Recursively walk the value at its type.
         self.walk_value(op)?;
 
-        // *After* all of this, check the ABI.  We need to check the ABI to handle
+        // *After* all of this, check the ABI. We need to check the ABI to handle
         // types like `NonNull` where the `Scalar` info is more restrictive than what
         // the fields say (`rustc_layout_scalar_valid_range_start`).
         // But in most cases, this will just propagate what the fields say,
@@ -859,10 +857,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 // Optimization: we just check the entire range at once.
                 // NOTE: Keep this in sync with the handling of integer and float
                 // types above, in `visit_primitive`.
-                // In run-time mode, we accept pointers in here.  This is actually more
+                // In run-time mode, we accept pointers in here. This is actually more
                 // permissive than a per-element check would be, e.g., we accept
                 // a &[u8] that contains a pointer even though bytewise checking would
-                // reject it.  However, that's good: We don't inherently want
+                // reject it. However, that's good: We don't inherently want
                 // to reject those pointers, we just do not have the machinery to
                 // talk about parts of a pointer.
                 // We also accept uninit, for consistency with the slow path.
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index 1a10851a9..f9efc2418 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -481,12 +481,12 @@ macro_rules! make_value_visitor {
             };
 
             // Visit the fields of this value.
-            match v.layout().fields {
+            match &v.layout().fields {
                 FieldsShape::Primitive => {}
-                FieldsShape::Union(fields) => {
+                &FieldsShape::Union(fields) => {
                     self.visit_union(v, fields)?;
                 }
-                FieldsShape::Arbitrary { ref offsets, .. } => {
+                FieldsShape::Arbitrary { offsets, .. } => {
                     // FIXME: We collect in a vec because otherwise there are lifetime
                     // errors: Projecting to a field needs access to `ecx`.
                     let fields: Vec<InterpResult<'tcx, Self::V>> =
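Note: `CheckAlignment` is imported from `crate::const_eval`, whose definition lies outside this diff. A minimal sketch of the shape implied by the call sites above (`M::enforce_alignment` returning it, `check.should_check()` gating the checks, and `CheckAlignment::Error` on the strict path); the lint-emitting variant and its name are assumptions:

    #[derive(Debug, Copy, Clone, PartialEq, Eq)]
    pub enum CheckAlignment {
        /// Skip alignment checks entirely.
        No,
        /// Check alignment and raise UB on failure (the `CheckAlignment::Error`
        /// passed by `check_ptr_access_align` above).
        Error,
        /// Check alignment, but let the machine downgrade a failure to a
        /// future-incompatibility lint (assumed variant for const-prop).
        FutureIncompat,
    }

    impl CheckAlignment {
        pub fn should_check(&self) -> bool {
            !matches!(self, CheckAlignment::No)
        }
    }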