author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-17 12:19:43 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-17 12:19:43 +0000
commit | 3e3e70d529d8c7d7c4d7bc4fefc9f109393b9245 (patch)
tree | daf049b282ab10e8c3d03e409b3cd84ff3f7690c /compiler/rustc_const_eval/src/interpret
parent | Adding debian version 1.68.2+dfsg1-1. (diff)
Merging upstream version 1.69.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_const_eval/src/interpret')
18 files changed, 638 insertions, 497 deletions
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index b2c847d3f..2be5ed896 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -126,7 +126,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     let vtable = self.get_vtable_ptr(src.layout.ty, data.principal())?;
                     let vtable = Scalar::from_maybe_pointer(vtable, self);
                     let data = self.read_immediate(src)?.to_scalar();
-                    let _assert_pointer_sized = data.to_pointer(self)?;
+                    let _assert_pointer_like = data.to_pointer(self)?;
                     let val = Immediate::ScalarPair(data, vtable);
                     self.write_immediate(val, dest)?;
                 } else {
@@ -231,7 +231,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // First cast to usize.
         let scalar = src.to_scalar();
         let addr = self.cast_from_int_like(scalar, src.layout, self.tcx.types.usize)?;
-        let addr = addr.to_machine_usize(self)?;
+        let addr = addr.to_target_usize(self)?;
 
         // Then turn address into pointer.
         let ptr = M::ptr_from_addr_cast(&self, addr)?;
@@ -312,6 +312,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
     }
 
+    /// `src` is a *pointer to* a `source_ty`, and in `dest` we should store a pointer to th same
+    /// data at type `cast_ty`.
     fn unsize_into_ptr(
         &mut self,
         src: &OpTy<'tcx, M::Provenance>,
@@ -328,11 +330,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             (&ty::Array(_, length), &ty::Slice(_)) => {
                 let ptr = self.read_scalar(src)?;
                 // u64 cast is from usize to u64, which is always good
-                let val =
-                    Immediate::new_slice(ptr, length.eval_usize(*self.tcx, self.param_env), self);
+                let val = Immediate::new_slice(
+                    ptr,
+                    length.eval_target_usize(*self.tcx, self.param_env),
+                    self,
+                );
                 self.write_immediate(val, dest)
             }
-            (ty::Dynamic(data_a, ..), ty::Dynamic(data_b, ..)) => {
+            (ty::Dynamic(data_a, _, ty::Dyn), ty::Dynamic(data_b, _, ty::Dyn)) => {
                 let val = self.read_immediate(src)?;
                 if data_a.principal() == data_b.principal() {
                     // A NOP cast that doesn't actually change anything, should be allowed even with mismatching vtables.
@@ -356,7 +361,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
 
             _ => {
-                span_bug!(self.cur_span(), "invalid unsizing {:?} -> {:?}", src.layout.ty, cast_ty)
+                span_bug!(
+                    self.cur_span(),
+                    "invalid pointer unsizing {:?} -> {:?}",
+                    src.layout.ty,
+                    cast_ty
+                )
             }
         }
     }
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
new file mode 100644
index 000000000..557e72124
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -0,0 +1,238 @@
+//! Functions for reading and writing discriminants of multi-variant layouts (enums and generators).
+
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
+use rustc_middle::{mir, ty};
+use rustc_target::abi::{self, TagEncoding};
+use rustc_target::abi::{VariantIdx, Variants};
+
+use super::{ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Scalar};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Writes the discriminant of the given variant.
+ #[instrument(skip(self), level = "trace")] + pub fn write_discriminant( + &mut self, + variant_index: VariantIdx, + dest: &PlaceTy<'tcx, M::Provenance>, + ) -> InterpResult<'tcx> { + // Layout computation excludes uninhabited variants from consideration + // therefore there's no way to represent those variants in the given layout. + // Essentially, uninhabited variants do not have a tag that corresponds to their + // discriminant, so we cannot do anything here. + // When evaluating we will always error before even getting here, but ConstProp 'executes' + // dead code, so we cannot ICE here. + if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() { + throw_ub!(UninhabitedEnumVariantWritten) + } + + match dest.layout.variants { + abi::Variants::Single { index } => { + assert_eq!(index, variant_index); + } + abi::Variants::Multiple { + tag_encoding: TagEncoding::Direct, + tag: tag_layout, + tag_field, + .. + } => { + // No need to validate that the discriminant here because the + // `TyAndLayout::for_variant()` call earlier already checks the variant is valid. + + let discr_val = + dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val; + + // raw discriminants for enums are isize or bigger during + // their computation, but the in-memory tag is the smallest possible + // representation + let size = tag_layout.size(self); + let tag_val = size.truncate(discr_val); + + let tag_dest = self.place_field(dest, tag_field)?; + self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?; + } + abi::Variants::Multiple { + tag_encoding: + TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start }, + tag: tag_layout, + tag_field, + .. + } => { + // No need to validate that the discriminant here because the + // `TyAndLayout::for_variant()` call earlier already checks the variant is valid. + + if variant_index != untagged_variant { + let variants_start = niche_variants.start().as_u32(); + let variant_index_relative = variant_index + .as_u32() + .checked_sub(variants_start) + .expect("overflow computing relative variant idx"); + // We need to use machine arithmetic when taking into account `niche_start`: + // tag_val = variant_index_relative + niche_start_val + let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?; + let niche_start_val = ImmTy::from_uint(niche_start, tag_layout); + let variant_index_relative_val = + ImmTy::from_uint(variant_index_relative, tag_layout); + let tag_val = self.binary_op( + mir::BinOp::Add, + &variant_index_relative_val, + &niche_start_val, + )?; + // Write result. + let niche_dest = self.place_field(dest, tag_field)?; + self.write_immediate(*tag_val, &niche_dest)?; + } + } + } + + Ok(()) + } + + /// Read discriminant, return the runtime value as well as the variant index. + /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)! + #[instrument(skip(self), level = "trace")] + pub fn read_discriminant( + &self, + op: &OpTy<'tcx, M::Provenance>, + ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> { + trace!("read_discriminant_value {:#?}", op.layout); + // Get type and layout of the discriminant. + let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?; + trace!("discriminant type: {:?}", discr_layout.ty); + + // We use "discriminant" to refer to the value associated with a particular enum variant. 
+ // This is not to be confused with its "variant index", which is just determining its position in the + // declared list of variants -- they can differ with explicitly assigned discriminants. + // We use "tag" to refer to how the discriminant is encoded in memory, which can be either + // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`). + let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants { + Variants::Single { index } => { + let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) { + Some(discr) => { + // This type actually has discriminants. + assert_eq!(discr.ty, discr_layout.ty); + Scalar::from_uint(discr.val, discr_layout.size) + } + None => { + // On a type without actual discriminants, variant is 0. + assert_eq!(index.as_u32(), 0); + Scalar::from_uint(index.as_u32(), discr_layout.size) + } + }; + return Ok((discr, index)); + } + Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => { + (tag, tag_encoding, tag_field) + } + }; + + // There are *three* layouts that come into play here: + // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for + // the `Scalar` we return. + // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type, + // and used to interpret the value we read from the tag field. + // For the return value, a cast to `discr_layout` is performed. + // - The field storing the tag has a layout, which is very similar to `tag_layout` but + // may be a pointer. This is `tag_val.layout`; we just use it for sanity checks. + + // Get layout for tag. + let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?; + + // Read tag and sanity-check `tag_layout`. + let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?; + assert_eq!(tag_layout.size, tag_val.layout.size); + assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed()); + trace!("tag value: {}", tag_val); + + // Figure out which discriminant and variant this corresponds to. + Ok(match *tag_encoding { + TagEncoding::Direct => { + let scalar = tag_val.to_scalar(); + // Generate a specific error if `tag_val` is not an integer. + // (`tag_bits` itself is only used for error messages below.) + let tag_bits = scalar + .try_to_int() + .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))? + .assert_bits(tag_layout.size); + // Cast bits from tag layout to discriminant layout. + // After the checks we did above, this cannot fail, as + // discriminants are int-like. + let discr_val = + self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap(); + let discr_bits = discr_val.assert_bits(discr_layout.size); + // Convert discriminant to variant index, and catch invalid discriminants. + let index = match *op.layout.ty.kind() { + ty::Adt(adt, _) => { + adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits) + } + ty::Generator(def_id, substs, _) => { + let substs = substs.as_generator(); + substs + .discriminants(def_id, *self.tcx) + .find(|(_, var)| var.val == discr_bits) + } + _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"), + } + .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?; + // Return the cast value, and the index. + (discr_val, index.0) + } + TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => { + let tag_val = tag_val.to_scalar(); + // Compute the variant this niche value/"tag" corresponds to. 
With niche layout, + // discriminant (encoded in niche/tag) and variant index are the same. + let variants_start = niche_variants.start().as_u32(); + let variants_end = niche_variants.end().as_u32(); + let variant = match tag_val.try_to_int() { + Err(dbg_val) => { + // So this is a pointer then, and casting to an int failed. + // Can only happen during CTFE. + // The niche must be just 0, and the ptr not null, then we know this is + // okay. Everything else, we conservatively reject. + let ptr_valid = niche_start == 0 + && variants_start == variants_end + && !self.scalar_may_be_null(tag_val)?; + if !ptr_valid { + throw_ub!(InvalidTag(dbg_val)) + } + untagged_variant + } + Ok(tag_bits) => { + let tag_bits = tag_bits.assert_bits(tag_layout.size); + // We need to use machine arithmetic to get the relative variant idx: + // variant_index_relative = tag_val - niche_start_val + let tag_val = ImmTy::from_uint(tag_bits, tag_layout); + let niche_start_val = ImmTy::from_uint(niche_start, tag_layout); + let variant_index_relative_val = + self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?; + let variant_index_relative = + variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size); + // Check if this is in the range that indicates an actual discriminant. + if variant_index_relative <= u128::from(variants_end - variants_start) { + let variant_index_relative = u32::try_from(variant_index_relative) + .expect("we checked that this fits into a u32"); + // Then computing the absolute variant idx should not overflow any more. + let variant_index = variants_start + .checked_add(variant_index_relative) + .expect("overflow computing absolute variant idx"); + let variants_len = op + .layout + .ty + .ty_adt_def() + .expect("tagged layout for non adt") + .variants() + .len(); + assert!(usize::try_from(variant_index).unwrap() < variants_len); + VariantIdx::from_u32(variant_index) + } else { + untagged_variant + } + } + }; + // Compute the size of the scalar we need to return. + // No need to cast, because the variant index directly serves as discriminant and is + // encoded in the tag. + (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant) + } + }) + } +} diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs index d13fed7a9..3db102e48 100644 --- a/compiler/rustc_const_eval/src/interpret/eval_context.rs +++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs @@ -489,7 +489,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Call this on things you got out of the MIR (so it is as generic as the current /// stack frame), to bring it into the proper environment for this interpreter. - pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>( + pub(super) fn subst_from_current_frame_and_normalize_erasing_regions< + T: TypeFoldable<TyCtxt<'tcx>>, + >( &self, value: T, ) -> Result<T, InterpError<'tcx>> { @@ -498,7 +500,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Call this on things you got out of the MIR (so it is as generic as the provided /// stack frame), to bring it into the proper environment for this interpreter. 
- pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>( + pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<TyCtxt<'tcx>>>( &self, frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>, value: T, @@ -632,14 +634,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } Ok(Some((size, align))) } - ty::Dynamic(..) => { + ty::Dynamic(_, _, ty::Dyn) => { let vtable = metadata.unwrap_meta().to_pointer(self)?; // Read size and align from vtable (already checks size). Ok(Some(self.get_vtable_size_and_align(vtable)?)) } ty::Slice(_) | ty::Str => { - let len = metadata.unwrap_meta().to_machine_usize(self)?; + let len = metadata.unwrap_meta().to_target_usize(self)?; let elem = layout.field(self, 0); // Make sure the slice is not too big. diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs index 54528b1db..b220d21f6 100644 --- a/compiler/rustc_const_eval/src/interpret/intern.rs +++ b/compiler/rustc_const_eval/src/interpret/intern.rs @@ -30,15 +30,15 @@ use super::{ use crate::const_eval; pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine< - 'mir, - 'tcx, - MemoryKind = T, - Provenance = AllocId, - ExtraFnVal = !, - FrameExtra = (), - AllocExtra = (), - MemoryMap = FxIndexMap<AllocId, (MemoryKind<T>, Allocation)>, ->; + 'mir, + 'tcx, + MemoryKind = T, + Provenance = AllocId, + ExtraFnVal = !, + FrameExtra = (), + AllocExtra = (), + MemoryMap = FxIndexMap<AllocId, (MemoryKind<T>, Allocation)>, + >; struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>> { /// The ectx from which we intern. @@ -135,7 +135,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval: }; // link the alloc id to the actual allocation leftover_allocations.extend(alloc.provenance().ptrs().iter().map(|&(_, alloc_id)| alloc_id)); - let alloc = tcx.intern_const_alloc(alloc); + let alloc = tcx.mk_const_alloc(alloc); tcx.set_alloc_id_memory(alloc_id, alloc); None } @@ -242,7 +242,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory let mplace = self.ecx.ref_to_mplace(&value)?; assert_eq!(mplace.layout.ty, referenced_ty); // Handle trait object vtables. - if let ty::Dynamic(..) 
= + if let ty::Dynamic(_, _, ty::Dyn) = tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind() { let ptr = mplace.meta.unwrap_meta().to_pointer(&tcx)?; @@ -437,7 +437,7 @@ pub fn intern_const_alloc_recursive< alloc.mutability = Mutability::Not; } } - let alloc = tcx.intern_const_alloc(alloc); + let alloc = tcx.mk_const_alloc(alloc); tcx.set_alloc_id_memory(alloc_id, alloc); for &(_, alloc_id) in alloc.inner().provenance().ptrs().iter() { if leftover_allocations.insert(alloc_id) { @@ -479,6 +479,6 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>> f(self, &dest.into())?; let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1; alloc.mutability = Mutability::Not; - Ok(self.tcx.intern_const_alloc(alloc)) + Ok(self.tcx.mk_const_alloc(alloc)) } } diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs index cc7b6c91b..a29cdade0 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs @@ -11,7 +11,7 @@ use rustc_middle::mir::{ BinOp, NonDivergingIntrinsic, }; use rustc_middle::ty; -use rustc_middle::ty::layout::LayoutOf as _; +use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement}; use rustc_middle::ty::subst::SubstsRef; use rustc_middle::ty::{Ty, TyCtxt}; use rustc_span::symbol::{sym, Symbol}; @@ -45,7 +45,7 @@ fn numeric_intrinsic<Prov>(name: Symbol, bits: u128, kind: Primitive) -> Scalar< pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> { let path = crate::util::type_name(tcx, ty); let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes()); - tcx.intern_const_alloc(alloc) + tcx.mk_const_alloc(alloc) } /// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated @@ -71,7 +71,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>( sym::pref_align_of => { // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough. let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?; - ConstValue::from_machine_usize(layout.align.pref.bytes(), &tcx) + ConstValue::from_target_usize(layout.align.pref.bytes(), &tcx) } sym::type_id => { ensure_monomorphic_enough(tcx, tp_ty)?; @@ -79,7 +79,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>( } sym::variant_count => match tp_ty.kind() { // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough. - ty::Adt(adt, _) => ConstValue::from_machine_usize(adt.variants().len() as u64, &tcx), + ty::Adt(adt, _) => ConstValue::from_target_usize(adt.variants().len() as u64, &tcx), ty::Alias(..) 
| ty::Param(_) | ty::Placeholder(_) | ty::Infer(_) => { throw_inval!(TooGeneric) } @@ -101,9 +101,10 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>( | ty::Closure(_, _) | ty::Generator(_, _, _) | ty::GeneratorWitness(_) + | ty::GeneratorWitnessMIR(_, _) | ty::Never | ty::Tuple(_) - | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx), + | ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx), }, other => bug!("`{}` is not a zero arg intrinsic", other), }) @@ -155,7 +156,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { _ => bug!(), }; - self.write_scalar(Scalar::from_machine_usize(result, self), dest)?; + self.write_scalar(Scalar::from_target_usize(result, self), dest)?; } sym::pref_align_of @@ -209,19 +210,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let out_val = numeric_intrinsic(intrinsic_name, bits, kind); self.write_scalar(out_val, dest)?; } - sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => { - let lhs = self.read_immediate(&args[0])?; - let rhs = self.read_immediate(&args[1])?; - let bin_op = match intrinsic_name { - sym::add_with_overflow => BinOp::Add, - sym::sub_with_overflow => BinOp::Sub, - sym::mul_with_overflow => BinOp::Mul, - _ => bug!(), - }; - self.binop_with_overflow( - bin_op, /*force_overflow_checks*/ true, &lhs, &rhs, dest, - )?; - } sym::saturating_add | sym::saturating_sub => { let l = self.read_immediate(&args[0])?; let r = self.read_immediate(&args[1])?; @@ -301,7 +289,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } sym::offset => { let ptr = self.read_pointer(&args[0])?; - let offset_count = self.read_machine_isize(&args[1])?; + let offset_count = self.read_target_isize(&args[1])?; let pointee_ty = substs.type_at(0); let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?; @@ -309,7 +297,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } sym::arith_offset => { let ptr = self.read_pointer(&args[0])?; - let offset_count = self.read_machine_isize(&args[1])?; + let offset_count = self.read_target_isize(&args[1])?; let pointee_ty = substs.type_at(0); let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap(); @@ -375,7 +363,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // The signed form of the intrinsic allows this. If we interpret the // difference as isize, we'll get the proper signed difference. If that // seems *positive*, they were more than isize::MAX apart. - let dist = val.to_machine_isize(self)?; + let dist = val.to_target_isize(self)?; if dist >= 0 { throw_ub_format!( "`{}` called when first pointer is too far before second", @@ -385,7 +373,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { dist } else { // b >= a - let dist = val.to_machine_isize(self)?; + let dist = val.to_target_isize(self)?; // If converting to isize produced a *negative* result, we had an overflow // because they were more than isize::MAX apart. if dist < 0 { @@ -410,10 +398,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Perform division by size to compute return value. 
let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned { - assert!(0 <= dist && dist <= self.machine_isize_max()); + assert!(0 <= dist && dist <= self.target_isize_max()); usize_layout } else { - assert!(self.machine_isize_min() <= dist && dist <= self.machine_isize_max()); + assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max()); isize_layout }; let pointee_layout = self.layout_of(substs.type_at(0))?; @@ -430,48 +418,36 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => { let ty = instance.substs.type_at(0); - let layout = self.layout_of(ty)?; - - // For *all* intrinsics we first check `is_uninhabited` to give a more specific - // error message. - if layout.abi.is_uninhabited() { - // The run-time intrinsic panics just to get a good backtrace; here we abort - // since there is no problem showing a backtrace even for aborts. - M::abort( - self, - format!( + let requirement = ValidityRequirement::from_intrinsic(intrinsic_name).unwrap(); + + let should_panic = !self + .tcx + .check_validity_requirement((requirement, self.param_env.and(ty))) + .map_err(|_| err_inval!(TooGeneric))?; + + if should_panic { + let layout = self.layout_of(ty)?; + + let msg = match requirement { + // For *all* intrinsics we first check `is_uninhabited` to give a more specific + // error message. + _ if layout.abi.is_uninhabited() => format!( "aborted execution: attempted to instantiate uninhabited type `{}`", ty ), - )?; - } - - if intrinsic_name == sym::assert_zero_valid { - let should_panic = !self.tcx.permits_zero_init(layout); - - if should_panic { - M::abort( - self, - format!( - "aborted execution: attempted to zero-initialize type `{}`, which is invalid", - ty - ), - )?; - } - } + ValidityRequirement::Inhabited => bug!("handled earlier"), + ValidityRequirement::Zero => format!( + "aborted execution: attempted to zero-initialize type `{}`, which is invalid", + ty + ), + ValidityRequirement::UninitMitigated0x01Fill => format!( + "aborted execution: attempted to leave type `{}` uninitialized, which is invalid", + ty + ), + ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"), + }; - if intrinsic_name == sym::assert_mem_uninitialized_valid { - let should_panic = !self.tcx.permits_uninit_init(layout); - - if should_panic { - M::abort( - self, - format!( - "aborted execution: attempted to leave type `{}` uninitialized, which is invalid", - ty - ), - )?; - } + M::abort(self, msg)?; } } sym::simd_insert => { @@ -482,7 +458,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { assert_eq!(input_len, dest_len, "Return vector length must match input length"); assert!( index < dest_len, - "Index `{}` must be in bounds of vector with length {}`", + "Index `{}` must be in bounds of vector with length {}", index, dest_len ); @@ -502,7 +478,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let (input, input_len) = self.operand_to_simd(&args[0])?; assert!( index < input_len, - "index `{}` must be in bounds of vector with length `{}`", + "index `{}` must be in bounds of vector with length {}", index, input_len ); @@ -524,12 +500,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { sym::vtable_size => { let ptr = self.read_pointer(&args[0])?; let (size, _align) = self.get_vtable_size_and_align(ptr)?; - self.write_scalar(Scalar::from_machine_usize(size.bytes(), self), dest)?; + 
self.write_scalar(Scalar::from_target_usize(size.bytes(), self), dest)?; } sym::vtable_align => { let ptr = self.read_pointer(&args[0])?; let (_size, align) = self.get_vtable_size_and_align(ptr)?; - self.write_scalar(Scalar::from_machine_usize(align.bytes(), self), dest)?; + self.write_scalar(Scalar::from_target_usize(align.bytes(), self), dest)?; } _ => return Ok(false), @@ -668,10 +644,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, nonoverlapping: bool, ) -> InterpResult<'tcx> { - let count = self.read_machine_usize(&count)?; + let count = self.read_target_usize(&count)?; let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?; let (size, align) = (layout.size, layout.align.abi); - // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max), + // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max), // but no actual allocation can be big enough for the difference to be noticeable. let size = size.checked_mul(count, self).ok_or_else(|| { err_ub_format!( @@ -696,9 +672,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let dst = self.read_pointer(&dst)?; let byte = self.read_scalar(&byte)?.to_u8()?; - let count = self.read_machine_usize(&count)?; + let count = self.read_target_usize(&count)?; - // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max), + // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max), // but no actual allocation can be big enough for the difference to be noticeable. let len = layout .size diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs index 77c7b4bac..cf52299b7 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs @@ -78,13 +78,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { col: u32, ) -> MPlaceTy<'tcx, M::Provenance> { let loc_details = &self.tcx.sess.opts.unstable_opts.location_detail; + // This can fail if rustc runs out of memory right here. Trying to emit an error would be + // pointless, since that would require allocating more memory than these short strings. let file = if loc_details.file { self.allocate_str(filename.as_str(), MemoryKind::CallerLocation, Mutability::Not) + .unwrap() } else { // FIXME: This creates a new allocation each time. It might be preferable to // perform this allocation only once, and re-use the `MPlaceTy`. // See https://github.com/rust-lang/rust/pull/89920#discussion_r730012398 - self.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not) + self.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not).unwrap() }; let line = if loc_details.line { Scalar::from_u32(line) } else { Scalar::from_u32(0) }; let col = if loc_details.column { Scalar::from_u32(col) } else { Scalar::from_u32(0) }; @@ -92,11 +95,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Allocate memory for `CallerLocation` struct. 
let loc_ty = self .tcx - .bound_type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None)) - .subst(*self.tcx, self.tcx.mk_substs([self.tcx.lifetimes.re_erased.into()].iter())); + .type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None)) + .subst(*self.tcx, self.tcx.mk_substs(&[self.tcx.lifetimes.re_erased.into()])); let loc_layout = self.layout_of(loc_ty).unwrap(); - // This can fail if rustc runs out of memory right here. Trying to emit an error would be - // pointless, since that would require allocating more memory than a Location. let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap(); // Initialize fields. diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs index 248953de8..92fa59aec 100644 --- a/compiler/rustc_const_eval/src/interpret/machine.rs +++ b/compiler/rustc_const_eval/src/interpret/machine.rs @@ -16,8 +16,8 @@ use rustc_target::spec::abi::Abi as CallAbi; use crate::const_eval::CheckAlignment; use super::{ - AllocId, AllocRange, Allocation, ConstAllocation, Frame, ImmTy, InterpCx, InterpResult, - MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind, + AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, Frame, ImmTy, InterpCx, + InterpResult, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind, }; /// Data returned by Machine::stack_pop, @@ -105,10 +105,16 @@ pub trait Machine<'mir, 'tcx>: Sized { /// Extra data stored in every allocation. type AllocExtra: Debug + Clone + 'static; + /// Type for the bytes of the allocation. + type Bytes: AllocBytes + 'static; + /// Memory's allocation map type MemoryMap: AllocMap< AllocId, - (MemoryKind<Self::MemoryKind>, Allocation<Self::Provenance, Self::AllocExtra>), + ( + MemoryKind<Self::MemoryKind>, + Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>, + ), > + Default + Clone; @@ -147,8 +153,9 @@ pub trait Machine<'mir, 'tcx>: Sized { true } - /// Whether CheckedBinOp MIR statements should actually check for overflow. - fn checked_binop_checks_overflow(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool; + /// Whether Assert(OverflowNeg) and Assert(Overflow) MIR terminators should actually + /// check for overflow. + fn ignore_checkable_overflow_assertions(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool; /// Entry point for obtaining the MIR of anything that should get evaluated. /// So not just functions and shims, but also const/static initializers, anonymous @@ -244,12 +251,18 @@ pub trait Machine<'mir, 'tcx>: Sized { } /// Called before a basic block terminator is executed. - /// You can use this to detect endlessly running programs. #[inline] fn before_terminator(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> { Ok(()) } + /// Called when the interpreter encounters a `StatementKind::ConstEvalCounter` instruction. + /// You can use this to detect long or endlessly running programs. + #[inline] + fn increment_const_eval_counter(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> { + Ok(()) + } + /// Called before a global allocation is accessed. /// `def_id` is `Some` if this is the "lazy" allocation of a static. 
#[inline] @@ -285,7 +298,7 @@ pub trait Machine<'mir, 'tcx>: Sized { fn adjust_alloc_base_pointer( ecx: &InterpCx<'mir, 'tcx, Self>, ptr: Pointer, - ) -> Pointer<Self::Provenance>; + ) -> InterpResult<'tcx, Pointer<Self::Provenance>>; /// "Int-to-pointer cast" fn ptr_from_addr_cast( @@ -331,7 +344,7 @@ pub trait Machine<'mir, 'tcx>: Sized { id: AllocId, alloc: Cow<'b, Allocation>, kind: Option<MemoryKind<Self::MemoryKind>>, - ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra>>>; + ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>; fn eval_inline_asm( _ecx: &mut InterpCx<'mir, 'tcx, Self>, @@ -452,6 +465,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) { type AllocExtra = (); type FrameExtra = (); + type Bytes = Box<[u8]>; #[inline(always)] fn use_addr_for_alignment_check(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool { @@ -460,8 +474,8 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) { } #[inline(always)] - fn checked_binop_checks_overflow(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool { - true + fn ignore_checkable_overflow_assertions(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool { + false } #[inline(always)] @@ -499,8 +513,8 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) { fn adjust_alloc_base_pointer( _ecx: &InterpCx<$mir, $tcx, Self>, ptr: Pointer<AllocId>, - ) -> Pointer<AllocId> { - ptr + ) -> InterpResult<$tcx, Pointer<AllocId>> { + Ok(ptr) } #[inline(always)] @@ -511,7 +525,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) { // Allow these casts, but make the pointer not dereferenceable. // (I.e., they behave like transmutation.) // This is correct because no pointers can ever be exposed in compile-time evaluation. - Ok(Pointer::from_addr(addr)) + Ok(Pointer::from_addr_invalid(addr)) } #[inline(always)] diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs index 291bfb2b5..a3764a7d1 100644 --- a/compiler/rustc_const_eval/src/interpret/memory.rs +++ b/compiler/rustc_const_eval/src/interpret/memory.rs @@ -21,8 +21,9 @@ use rustc_target::abi::{Align, HasDataLayout, Size}; use crate::const_eval::CheckAlignment; use super::{ - alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc, InterpCx, - InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar, + alloc_range, AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, + GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, + Scalar, }; #[derive(Debug, PartialEq, Copy, Clone)] @@ -114,16 +115,16 @@ pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> { /// A reference to some allocation that was already bounds-checked for the given region /// and had the on-access machine hooks run. #[derive(Copy, Clone)] -pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra> { - alloc: &'a Allocation<Prov, Extra>, +pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> { + alloc: &'a Allocation<Prov, Extra, Bytes>, range: AllocRange, tcx: TyCtxt<'tcx>, alloc_id: AllocId, } /// A reference to some allocation that was already bounds-checked for the given region /// and had the on-access machine hooks run. 
-pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra> { - alloc: &'a mut Allocation<Prov, Extra>, +pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> { + alloc: &'a mut Allocation<Prov, Extra, Bytes>, range: AllocRange, tcx: TyCtxt<'tcx>, alloc_id: AllocId, @@ -171,7 +172,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { _ => {} } // And we need to get the provenance. - Ok(M::adjust_alloc_base_pointer(self, ptr)) + M::adjust_alloc_base_pointer(self, ptr) } pub fn create_fn_alloc_ptr( @@ -200,8 +201,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { kind: MemoryKind<M::MemoryKind>, ) -> InterpResult<'tcx, Pointer<M::Provenance>> { let alloc = Allocation::uninit(size, align, M::PANIC_ON_ALLOC_FAIL)?; - // We can `unwrap` since `alloc` contains no pointers. - Ok(self.allocate_raw_ptr(alloc, kind).unwrap()) + self.allocate_raw_ptr(alloc, kind) } pub fn allocate_bytes_ptr( @@ -210,10 +210,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { align: Align, kind: MemoryKind<M::MemoryKind>, mutability: Mutability, - ) -> Pointer<M::Provenance> { + ) -> InterpResult<'tcx, Pointer<M::Provenance>> { let alloc = Allocation::from_bytes(bytes, align, mutability); - // We can `unwrap` since `alloc` contains no pointers. - self.allocate_raw_ptr(alloc, kind).unwrap() + self.allocate_raw_ptr(alloc, kind) } /// This can fail only of `alloc` contains provenance. @@ -230,7 +229,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ); let alloc = M::adjust_allocation(self, id, Cow::Owned(alloc), Some(kind))?; self.memory.alloc_map.insert(id, (kind, alloc.into_owned())); - Ok(M::adjust_alloc_base_pointer(self, Pointer::from(id))) + M::adjust_alloc_base_pointer(self, Pointer::from(id)) } pub fn reallocate_ptr( @@ -304,7 +303,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { .into()); }; - if alloc.mutability == Mutability::Not { + if alloc.mutability.is_not() { throw_ub_format!("deallocating immutable allocation {alloc_id:?}"); } if alloc_kind != kind { @@ -427,7 +426,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { throw_ub!(PointerOutOfBounds { alloc_id, alloc_size, - ptr_offset: self.machine_usize_to_isize(offset.bytes()), + ptr_offset: self.target_usize_to_isize(offset.bytes()), ptr_size: size, msg, }) @@ -485,7 +484,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { &self, id: AllocId, is_write: bool, - ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra>>> { + ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra, M::Bytes>>> { let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) { Some(GlobalAlloc::Memory(mem)) => { // Memory of a constant or promoted or anonymous memory referenced by a static. @@ -528,6 +527,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ) } + /// Get the base address for the bytes in an `Allocation` specified by the + /// `AllocID` passed in; error if no such allocation exists. + /// + /// It is up to the caller to take sufficient care when using this address: + /// there could be provenance or uninit memory in there, and other memory + /// accesses could invalidate the exposed pointer. 
+ pub fn alloc_base_addr(&self, id: AllocId) -> InterpResult<'tcx, *const u8> { + let alloc = self.get_alloc_raw(id)?; + Ok(alloc.base_addr()) + } + /// Gives raw access to the `Allocation`, without bounds or alignment checks. /// The caller is responsible for calling the access hooks! /// @@ -535,7 +545,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { fn get_alloc_raw( &self, id: AllocId, - ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra>> { + ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra, M::Bytes>> { // The error type of the inner closure here is somewhat funny. We have two // ways of "erroring": An actual error, or because we got a reference from // `get_global_alloc` that we can actually use directly without inserting anything anywhere. @@ -571,7 +581,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ptr: Pointer<Option<M::Provenance>>, size: Size, align: Align, - ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra>>> { + ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>> + { let ptr_and_alloc = self.check_and_deref_ptr( ptr, size, @@ -614,7 +625,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { fn get_alloc_raw_mut( &mut self, id: AllocId, - ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra>, &mut M)> { + ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra, M::Bytes>, &mut M)> { // We have "NLL problem case #3" here, which cannot be worked around without loss of // efficiency even for the common case where the key is in the map. // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions> @@ -631,7 +642,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap(); - if alloc.mutability == Mutability::Not { + if alloc.mutability.is_not() { throw_ub!(WriteToReadOnly(id)) } Ok((alloc, &mut self.machine)) @@ -643,7 +654,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ptr: Pointer<Option<M::Provenance>>, size: Size, align: Align, - ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra>>> { + ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>> + { let parts = self.get_ptr_access(ptr, size, align)?; if let Some((alloc_id, offset, prov)) = parts { let tcx = *self.tcx; @@ -692,7 +704,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { assert!(self.tcx.is_static(def_id)); assert!(!self.tcx.is_thread_local_static(def_id)); // Use size and align of the type. - let ty = self.tcx.type_of(def_id); + let ty = self + .tcx + .type_of(def_id) + .no_bound_vars() + .expect("statics should not have generic parameters"); let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap(); assert!(layout.is_sized()); (layout.size, layout.align.abi, AllocKind::LiveData) @@ -838,11 +854,11 @@ pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> { impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Cannot be a closure because it is generic in `Prov`, `Extra`. 
- fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra>( + fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>( fmt: &mut std::fmt::Formatter<'_>, tcx: TyCtxt<'tcx>, allocs_to_print: &mut VecDeque<AllocId>, - alloc: &Allocation<Prov, Extra>, + alloc: &Allocation<Prov, Extra, Bytes>, ) -> std::fmt::Result { for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()) { @@ -910,7 +926,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, } /// Reading and writing. -impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> { +impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes> + AllocRefMut<'a, 'tcx, Prov, Extra, Bytes> +{ /// `range` is relative to this allocation reference, not the base of the allocation. pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> { let range = self.range.subrange(range); @@ -935,7 +953,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> { } } -impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> { +impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Prov, Extra, Bytes> { /// `range` is relative to this allocation reference, not the base of the allocation. pub fn read_scalar( &self, diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs index 2e356f67b..86de4e4e3 100644 --- a/compiler/rustc_const_eval/src/interpret/mod.rs +++ b/compiler/rustc_const_eval/src/interpret/mod.rs @@ -1,6 +1,7 @@ //! An interpreter for MIR used in CTFE and by miri mod cast; +mod discriminant; mod eval_context; mod intern; mod intrinsics; diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs index befc0928f..5310ef0bb 100644 --- a/compiler/rustc_const_eval/src/interpret/operand.rs +++ b/compiler/rustc_const_eval/src/interpret/operand.rs @@ -4,13 +4,12 @@ use either::{Either, Left, Right}; use rustc_hir::def::Namespace; -use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout}; +use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter}; use rustc_middle::ty::{ConstInt, Ty, ValTree}; use rustc_middle::{mir, ty}; use rustc_span::Span; -use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding}; -use rustc_target::abi::{VariantIdx, Variants}; +use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size}; use super::{ alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId, @@ -53,7 +52,7 @@ impl<Prov: Provenance> Immediate<Prov> { } pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self { - Immediate::ScalarPair(val, Scalar::from_machine_usize(len, cx)) + Immediate::ScalarPair(val, Scalar::from_target_usize(len, cx)) } pub fn new_dyn_trait( @@ -256,7 +255,22 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> { } } - pub fn offset_with_meta( + /// Replace the layout of this operand. There's basically no sanity check that this makes sense, + /// you better know what you are doing! If this is an immediate, applying the wrong layout can + /// not just lead to invalid data, it can actually *shift the data around* since the offsets of + /// a ScalarPair are entirely determined by the layout, not the data. 
+ pub fn transmute(&self, layout: TyAndLayout<'tcx>) -> Self { + assert_eq!( + self.layout.size, layout.size, + "transmuting with a size change, that doesn't seem right" + ); + OpTy { layout, ..*self } + } + + /// Offset the operand in memory (if possible) and change its metadata. + /// + /// This can go wrong very easily if you give the wrong layout for the new place! + pub(super) fn offset_with_meta( &self, offset: Size, meta: MemPlaceMeta<Prov>, @@ -277,6 +291,9 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> { } } + /// Offset the operand in memory (if possible). + /// + /// This can go wrong very easily if you give the wrong layout for the new place! pub fn offset( &self, offset: Size, @@ -319,7 +336,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size"); let scalar = alloc.read_scalar( alloc_range(Size::ZERO, size), - /*read_provenance*/ s.is_ptr(), + /*read_provenance*/ matches!(s, abi::Pointer(_)), )?; Some(ImmTy { imm: scalar.into(), layout: mplace.layout }) } @@ -335,11 +352,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields let a_val = alloc.read_scalar( alloc_range(Size::ZERO, a_size), - /*read_provenance*/ a.is_ptr(), + /*read_provenance*/ matches!(a, abi::Pointer(_)), )?; let b_val = alloc.read_scalar( alloc_range(b_offset, b_size), - /*read_provenance*/ b.is_ptr(), + /*read_provenance*/ matches!(b, abi::Pointer(_)), )?; Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }) } @@ -415,12 +432,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { self.read_scalar(op)?.to_pointer(self) } /// Read a pointer-sized unsigned integer from a place. - pub fn read_machine_usize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u64> { - self.read_scalar(op)?.to_machine_usize(self) + pub fn read_target_usize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u64> { + self.read_scalar(op)?.to_target_usize(self) } /// Read a pointer-sized signed integer from a place. - pub fn read_machine_isize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, i64> { - self.read_scalar(op)?.to_machine_isize(self) + pub fn read_target_isize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, i64> { + self.read_scalar(op)?.to_target_isize(self) } /// Turn the wide MPlace into a string (must already be dereferenced!) @@ -595,14 +612,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { span: Option<Span>, layout: Option<TyAndLayout<'tcx>>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { - // FIXME(const_prop): normalization needed b/c const prop lint in - // `mir_drops_elaborated_and_const_checked`, which happens before - // optimized MIR. Only after optimizing the MIR can we guarantee - // that the `RevealAll` pass has happened and that the body's consts - // are normalized, so any call to resolve before that needs to be - // manually normalized. 
- let val = self.tcx.normalize_erasing_regions(self.param_env, *val); - match val { + match *val { mir::ConstantKind::Ty(ct) => { let ty = ct.ty(); let valtree = self.eval_ty_constant(ct, span)?; @@ -657,154 +667,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { }; Ok(OpTy { op, layout, align: Some(layout.align.abi) }) } - - /// Read discriminant, return the runtime value as well as the variant index. - /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)! - pub fn read_discriminant( - &self, - op: &OpTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> { - trace!("read_discriminant_value {:#?}", op.layout); - // Get type and layout of the discriminant. - let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?; - trace!("discriminant type: {:?}", discr_layout.ty); - - // We use "discriminant" to refer to the value associated with a particular enum variant. - // This is not to be confused with its "variant index", which is just determining its position in the - // declared list of variants -- they can differ with explicitly assigned discriminants. - // We use "tag" to refer to how the discriminant is encoded in memory, which can be either - // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`). - let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants { - Variants::Single { index } => { - let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) { - Some(discr) => { - // This type actually has discriminants. - assert_eq!(discr.ty, discr_layout.ty); - Scalar::from_uint(discr.val, discr_layout.size) - } - None => { - // On a type without actual discriminants, variant is 0. - assert_eq!(index.as_u32(), 0); - Scalar::from_uint(index.as_u32(), discr_layout.size) - } - }; - return Ok((discr, index)); - } - Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => { - (tag, tag_encoding, tag_field) - } - }; - - // There are *three* layouts that come into play here: - // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for - // the `Scalar` we return. - // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type, - // and used to interpret the value we read from the tag field. - // For the return value, a cast to `discr_layout` is performed. - // - The field storing the tag has a layout, which is very similar to `tag_layout` but - // may be a pointer. This is `tag_val.layout`; we just use it for sanity checks. - - // Get layout for tag. - let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?; - - // Read tag and sanity-check `tag_layout`. - let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?; - assert_eq!(tag_layout.size, tag_val.layout.size); - assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed()); - trace!("tag value: {}", tag_val); - - // Figure out which discriminant and variant this corresponds to. - Ok(match *tag_encoding { - TagEncoding::Direct => { - let scalar = tag_val.to_scalar(); - // Generate a specific error if `tag_val` is not an integer. - // (`tag_bits` itself is only used for error messages below.) - let tag_bits = scalar - .try_to_int() - .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))? - .assert_bits(tag_layout.size); - // Cast bits from tag layout to discriminant layout. 
- // After the checks we did above, this cannot fail, as - // discriminants are int-like. - let discr_val = - self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap(); - let discr_bits = discr_val.assert_bits(discr_layout.size); - // Convert discriminant to variant index, and catch invalid discriminants. - let index = match *op.layout.ty.kind() { - ty::Adt(adt, _) => { - adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits) - } - ty::Generator(def_id, substs, _) => { - let substs = substs.as_generator(); - substs - .discriminants(def_id, *self.tcx) - .find(|(_, var)| var.val == discr_bits) - } - _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"), - } - .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?; - // Return the cast value, and the index. - (discr_val, index.0) - } - TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => { - let tag_val = tag_val.to_scalar(); - // Compute the variant this niche value/"tag" corresponds to. With niche layout, - // discriminant (encoded in niche/tag) and variant index are the same. - let variants_start = niche_variants.start().as_u32(); - let variants_end = niche_variants.end().as_u32(); - let variant = match tag_val.try_to_int() { - Err(dbg_val) => { - // So this is a pointer then, and casting to an int failed. - // Can only happen during CTFE. - // The niche must be just 0, and the ptr not null, then we know this is - // okay. Everything else, we conservatively reject. - let ptr_valid = niche_start == 0 - && variants_start == variants_end - && !self.scalar_may_be_null(tag_val)?; - if !ptr_valid { - throw_ub!(InvalidTag(dbg_val)) - } - untagged_variant - } - Ok(tag_bits) => { - let tag_bits = tag_bits.assert_bits(tag_layout.size); - // We need to use machine arithmetic to get the relative variant idx: - // variant_index_relative = tag_val - niche_start_val - let tag_val = ImmTy::from_uint(tag_bits, tag_layout); - let niche_start_val = ImmTy::from_uint(niche_start, tag_layout); - let variant_index_relative_val = - self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?; - let variant_index_relative = - variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size); - // Check if this is in the range that indicates an actual discriminant. - if variant_index_relative <= u128::from(variants_end - variants_start) { - let variant_index_relative = u32::try_from(variant_index_relative) - .expect("we checked that this fits into a u32"); - // Then computing the absolute variant idx should not overflow any more. - let variant_index = variants_start - .checked_add(variant_index_relative) - .expect("overflow computing absolute variant idx"); - let variants_len = op - .layout - .ty - .ty_adt_def() - .expect("tagged layout for non adt") - .variants() - .len(); - assert!(usize::try_from(variant_index).unwrap() < variants_len); - VariantIdx::from_u32(variant_index) - } else { - untagged_variant - } - } - }; - // Compute the size of the scalar we need to return. - // No need to cast, because the variant index directly serves as discriminant and is - // encoded in the tag. - (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant) - } - }) - } } // Some nodes are used a lot. Make sure they don't unintentionally get bigger. 
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs index e8ff70e3a..4decfe863 100644 --- a/compiler/rustc_const_eval/src/interpret/operator.rs +++ b/compiler/rustc_const_eval/src/interpret/operator.rs @@ -10,28 +10,20 @@ use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy}; impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Applies the binary operation `op` to the two operands and writes a tuple of the result /// and a boolean signifying the potential overflow to the destination. - /// - /// `force_overflow_checks` indicates whether overflow checks should be done even when - /// `tcx.sess.overflow_checks()` is `false`. pub fn binop_with_overflow( &mut self, op: mir::BinOp, - force_overflow_checks: bool, left: &ImmTy<'tcx, M::Provenance>, right: &ImmTy<'tcx, M::Provenance>, dest: &PlaceTy<'tcx, M::Provenance>, ) -> InterpResult<'tcx> { let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?; debug_assert_eq!( - self.tcx.intern_tup(&[ty, self.tcx.types.bool]), + self.tcx.mk_tup(&[ty, self.tcx.types.bool]), dest.layout.ty, "type mismatch for result of {:?}", op, ); - // As per https://github.com/rust-lang/rust/pull/98738, we always return `false` in the 2nd - // component when overflow checking is disabled. - let overflowed = - overflowed && (force_overflow_checks || M::checked_binop_checks_overflow(self)); // Write the result to `dest`. if let Abi::ScalarPair(..) = dest.layout.abi { // We can use the optimized path and avoid `place_field` (which might do diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs index 274af61ee..3c463500a 100644 --- a/compiler/rustc_const_eval/src/interpret/place.rs +++ b/compiler/rustc_const_eval/src/interpret/place.rs @@ -7,8 +7,8 @@ use either::{Either, Left, Right}; use rustc_ast::Mutability; use rustc_middle::mir; use rustc_middle::ty; -use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout}; -use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding, VariantIdx}; +use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; +use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, VariantIdx}; use super::{ alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg, @@ -26,6 +26,7 @@ pub enum MemPlaceMeta<Prov: Provenance = AllocId> { } impl<Prov: Provenance> MemPlaceMeta<Prov> { + #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) pub fn unwrap_meta(self) -> Scalar<Prov> { match self { Self::Meta(s) => s, @@ -147,12 +148,16 @@ impl<Prov: Provenance> MemPlace<Prov> { } #[inline] - pub fn offset_with_meta<'tcx>( + pub(super) fn offset_with_meta<'tcx>( self, offset: Size, meta: MemPlaceMeta<Prov>, cx: &impl HasDataLayout, ) -> InterpResult<'tcx, Self> { + debug_assert!( + !meta.has_meta() || self.meta.has_meta(), + "cannot use `offset_with_meta` to add metadata to a place" + ); Ok(MemPlace { ptr: self.ptr.offset(offset, cx)?, meta }) } } @@ -178,12 +183,15 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> { pub fn fake_alloc_zst(layout: TyAndLayout<'tcx>) -> Self { assert!(layout.is_zst()); let align = layout.align.abi; - let ptr = Pointer::from_addr(align.bytes()); // no provenance, absolute address + let ptr = Pointer::from_addr_invalid(align.bytes()); // no provenance, absolute address MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align } } + /// Offset the 
place in memory and change its metadata. + /// + /// This can go wrong very easily if you give the wrong layout for the new place! #[inline] - pub fn offset_with_meta( + pub(crate) fn offset_with_meta( &self, offset: Size, meta: MemPlaceMeta<Prov>, @@ -197,6 +205,9 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> { }) } + /// Offset the place in memory. + /// + /// This can go wrong very easily if you give the wrong layout for the new place! pub fn offset( &self, offset: Size, @@ -229,7 +240,7 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> { if self.layout.is_unsized() { // We need to consult `meta` metadata match self.layout.ty.kind() { - ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_machine_usize(cx), + ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_target_usize(cx), _ => bug!("len not supported on unsized type {:?}", self.layout.ty), } } else { @@ -241,14 +252,6 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> { } } } - - #[inline] - pub(super) fn vtable(&self) -> Scalar<Prov> { - match self.layout.ty.kind() { - ty::Dynamic(..) => self.mplace.meta.unwrap_meta(), - _ => bug!("vtable not supported on type {:?}", self.layout.ty), - } - } } // These are defined here because they produce a place. @@ -266,7 +269,12 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> { #[inline(always)] #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> { - self.as_mplace_or_imm().left().unwrap() + self.as_mplace_or_imm().left().unwrap_or_else(|| { + bug!( + "OpTy of type {} was immediate when it was expected to be an MPlace", + self.layout.ty + ) + }) } } @@ -283,7 +291,12 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> { #[inline(always)] #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> { - self.as_mplace_or_local().left().unwrap() + self.as_mplace_or_local().left().unwrap_or_else(|| { + bug!( + "PlaceTy of type {} was a local when it was expected to be an MPlace", + self.layout.ty + ) + }) } } @@ -340,7 +353,8 @@ where pub(super) fn get_place_alloc( &self, place: &MPlaceTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra>>> { + ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>> + { assert!(place.layout.is_sized()); assert!(!place.meta.has_meta()); let size = place.layout.size; @@ -351,7 +365,8 @@ where pub(super) fn get_place_alloc_mut( &mut self, place: &MPlaceTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra>>> { + ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>> + { assert!(place.layout.is_sized()); assert!(!place.meta.has_meta()); let size = place.layout.size; @@ -754,9 +769,9 @@ where str: &str, kind: MemoryKind<M::MemoryKind>, mutbl: Mutability, - ) -> MPlaceTy<'tcx, M::Provenance> { - let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl); - let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self); + ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> { + let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl)?; + let meta = Scalar::from_target_usize(u64::try_from(str.len()).unwrap(), self); let mplace = MemPlace { ptr: ptr.into(), meta: MemPlaceMeta::Meta(meta) }; let ty = self.tcx.mk_ref( @@ -764,95 
+779,35 @@ where ty::TypeAndMut { ty: self.tcx.types.str_, mutbl }, ); let layout = self.layout_of(ty).unwrap(); - MPlaceTy { mplace, layout, align: layout.align.abi } + Ok(MPlaceTy { mplace, layout, align: layout.align.abi }) } - /// Writes the discriminant of the given variant. - #[instrument(skip(self), level = "debug")] - pub fn write_discriminant( + /// Writes the aggregate to the destination. + #[instrument(skip(self), level = "trace")] + pub fn write_aggregate( &mut self, - variant_index: VariantIdx, + kind: &mir::AggregateKind<'tcx>, + operands: &[mir::Operand<'tcx>], dest: &PlaceTy<'tcx, M::Provenance>, ) -> InterpResult<'tcx> { - // This must be an enum or generator. - match dest.layout.ty.kind() { - ty::Adt(adt, _) => assert!(adt.is_enum()), - ty::Generator(..) => {} - _ => span_bug!( - self.cur_span(), - "write_discriminant called on non-variant-type (neither enum nor generator)" - ), - } - // Layout computation excludes uninhabited variants from consideration - // therefore there's no way to represent those variants in the given layout. - // Essentially, uninhabited variants do not have a tag that corresponds to their - // discriminant, so we cannot do anything here. - // When evaluating we will always error before even getting here, but ConstProp 'executes' - // dead code, so we cannot ICE here. - if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() { - throw_ub!(UninhabitedEnumVariantWritten) - } - - match dest.layout.variants { - abi::Variants::Single { index } => { - assert_eq!(index, variant_index); - } - abi::Variants::Multiple { - tag_encoding: TagEncoding::Direct, - tag: tag_layout, - tag_field, - .. - } => { - // No need to validate that the discriminant here because the - // `TyAndLayout::for_variant()` call earlier already checks the variant is valid. - - let discr_val = - dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val; - - // raw discriminants for enums are isize or bigger during - // their computation, but the in-memory tag is the smallest possible - // representation - let size = tag_layout.size(self); - let tag_val = size.truncate(discr_val); - - let tag_dest = self.place_field(dest, tag_field)?; - self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?; - } - abi::Variants::Multiple { - tag_encoding: - TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start }, - tag: tag_layout, - tag_field, - .. - } => { - // No need to validate that the discriminant here because the - // `TyAndLayout::for_variant()` call earlier already checks the variant is valid. - - if variant_index != untagged_variant { - let variants_start = niche_variants.start().as_u32(); - let variant_index_relative = variant_index - .as_u32() - .checked_sub(variants_start) - .expect("overflow computing relative variant idx"); - // We need to use machine arithmetic when taking into account `niche_start`: - // tag_val = variant_index_relative + niche_start_val - let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?; - let niche_start_val = ImmTy::from_uint(niche_start, tag_layout); - let variant_index_relative_val = - ImmTy::from_uint(variant_index_relative, tag_layout); - let tag_val = self.binary_op( - mir::BinOp::Add, - &variant_index_relative_val, - &niche_start_val, - )?; - // Write result. 
- let niche_dest = self.place_field(dest, tag_field)?; - self.write_immediate(*tag_val, &niche_dest)?; - } + self.write_uninit(&dest)?; + let (variant_index, variant_dest, active_field_index) = match *kind { + mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => { + let variant_dest = self.place_downcast(&dest, variant_index)?; + (variant_index, variant_dest, active_field_index) } + _ => (VariantIdx::from_u32(0), dest.clone(), None), + }; + if active_field_index.is_some() { + assert_eq!(operands.len(), 1); } - - Ok(()) + for (field_index, operand) in operands.iter().enumerate() { + let field_index = active_field_index.unwrap_or(field_index); + let field_dest = self.place_field(&variant_dest, field_index)?; + let op = self.eval_operand(operand, Some(field_dest.layout))?; + self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?; + } + self.write_discriminant(variant_index, &dest) } pub fn raw_const_to_mplace( @@ -867,11 +822,16 @@ where } /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type. + /// Also returns the vtable. pub(super) fn unpack_dyn_trait( &self, mplace: &MPlaceTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> { - let vtable = mplace.vtable().to_pointer(self)?; // also sanity checks the type + ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> { + assert!( + matches!(mplace.layout.ty.kind(), ty::Dynamic(_, _, ty::Dyn)), + "`unpack_dyn_trait` only makes sense on `dyn` types" + ); + let vtable = mplace.meta.unwrap_meta().to_pointer(self)?; let (ty, _) = self.get_ptr_vtable(vtable)?; let layout = self.layout_of(ty)?; @@ -880,7 +840,26 @@ where layout, align: layout.align.abi, }; - Ok(mplace) + Ok((mplace, vtable)) + } + + /// Turn an operand with a `dyn* Trait` type into an operand with the actual dynamic type. + /// Also returns the vtable. + pub(super) fn unpack_dyn_star( + &self, + op: &OpTy<'tcx, M::Provenance>, + ) -> InterpResult<'tcx, (OpTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> { + assert!( + matches!(op.layout.ty.kind(), ty::Dynamic(_, _, ty::DynStar)), + "`unpack_dyn_star` only makes sense on `dyn*` types" + ); + let data = self.operand_field(&op, 0)?; + let vtable = self.operand_field(&op, 1)?; + let vtable = self.read_pointer(&vtable)?; + let (ty, _) = self.get_ptr_vtable(vtable)?; + let layout = self.layout_of(ty)?; + let data = data.transmute(layout); + Ok((data, vtable)) } } diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs index 291464ab5..91da930db 100644 --- a/compiler/rustc_const_eval/src/interpret/projection.rs +++ b/compiler/rustc_const_eval/src/interpret/projection.rs @@ -319,7 +319,7 @@ where // implement this. ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(*inner, inner_len)), ty::Slice(..) => { - let len = Scalar::from_machine_usize(inner_len, self); + let len = Scalar::from_target_usize(inner_len, self); (MemPlaceMeta::Meta(len), base.layout.ty) } _ => { @@ -363,7 +363,7 @@ where Index(local) => { let layout = self.layout_of(self.tcx.types.usize)?; let n = self.local_to_op(self.frame(), local, Some(layout))?; - let n = self.read_machine_usize(&n)?; + let n = self.read_target_usize(&n)?; self.place_index(base, n)?
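The write_discriminant code removed from place.rs above performs the inverse transformation when storing a niche-encoded variant: the tag is variant_index_relative plus niche_start in wrapping machine arithmetic, and the untagged variant gets no tag at all, which is what keeps types such as Option<&T> pointer-sized. The new write_aggregate relies on this by filling the variant's fields first and writing the discriminant last. A small standalone illustration (encode_niche is an invented helper, not interpreter code):

use std::mem::size_of;

// The encoding direction: for a niche variant, the stored tag is
// (variant_index - variants_start) + niche_start in wrapping arithmetic;
// the untagged variant stores no tag at all.
fn encode_niche(variant_index: u32, variants_start: u32, niche_start: u128) -> u128 {
    let relative = u128::from(variant_index - variants_start);
    relative.wrapping_add(niche_start)
}

fn main() {
    // Because the untagged variant needs no tag, these Options add no space:
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    assert_eq!(size_of::<Option<Box<u8>>>(), size_of::<Box<u8>>());
    // bool leaves 254 spare bit patterns, so the niche fits in the same byte.
    assert_eq!(size_of::<Option<bool>>(), 1);

    // Encoding None of Option<&u8> (variant 0, niche_start 0) produces tag 0,
    // i.e. the null-pointer bit pattern.
    assert_eq!(encode_niche(0, 0, 0), 0);
}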
} ConstantIndex { offset, min_length, from_end } => { @@ -392,7 +392,7 @@ where Index(local) => { let layout = self.layout_of(self.tcx.types.usize)?; let n = self.local_to_op(self.frame(), local, Some(layout))?; - let n = self.read_machine_usize(&n)?; + let n = self.read_target_usize(&n)?; self.operand_index(base, n)? } ConstantIndex { offset, min_length, from_end } => { diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs index fad4cb06c..6863435e5 100644 --- a/compiler/rustc_const_eval/src/interpret/step.rs +++ b/compiler/rustc_const_eval/src/interpret/step.rs @@ -129,6 +129,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // FIXME(#73156): Handle source code coverage in const eval Coverage(..) => {} + ConstEvalCounter => { + M::increment_const_eval_counter(self)?; + } + // Defined to do nothing. These are added by optimization passes, to avoid changing the // size of MIR constantly. Nop => {} @@ -181,9 +185,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let left = self.read_immediate(&self.eval_operand(left, None)?)?; let layout = binop_right_homogeneous(bin_op).then_some(left.layout); let right = self.read_immediate(&self.eval_operand(right, layout)?)?; - self.binop_with_overflow( - bin_op, /*force_overflow_checks*/ false, &left, &right, &dest, - )?; + self.binop_with_overflow(bin_op, &left, &right, &dest)?; } UnaryOp(un_op, ref operand) => { @@ -195,13 +197,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } Aggregate(box ref kind, ref operands) => { - assert!(matches!(kind, mir::AggregateKind::Array(..))); - - for (field_index, operand) in operands.iter().enumerate() { - let op = self.eval_operand(operand, None)?; - let field_dest = self.place_field(&dest, field_index)?; - self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?; - } + self.write_aggregate(kind, operands, &dest)?; } Repeat(ref operand, _) => { @@ -244,7 +240,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let src = self.eval_place(place)?; let op = self.place_to_op(&src)?; let len = op.len(self)?; - self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?; + self.write_scalar(Scalar::from_target_usize(len, self), &dest)?; } Ref(_, borrow_kind, place) => { @@ -299,7 +295,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { mir::NullOp::SizeOf => layout.size.bytes(), mir::NullOp::AlignOf => layout.align.abi.bytes(), }; - self.write_scalar(Scalar::from_machine_usize(val, self), &dest)?; + self.write_scalar(Scalar::from_target_usize(val, self), &dest)?; } ShallowInitBox(ref operand, _) => { diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs index da320cd1c..2aea7c79b 100644 --- a/compiler/rustc_const_eval/src/interpret/terminator.rs +++ b/compiler/rustc_const_eval/src/interpret/terminator.rs @@ -73,7 +73,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder); let extra_args = &args[fn_sig.inputs().len()..]; - let extra_args = self.tcx.mk_type_list(extra_args.iter().map(|arg| arg.layout.ty)); + let extra_args = + self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout.ty)); let (fn_val, fn_abi, with_caller_location) = match *func.layout.ty.kind() { ty::FnPtr(_sig) => { @@ -137,8 +138,14 @@ impl<'mir, 'tcx: 'mir, M: 
Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } Assert { ref cond, expected, ref msg, target, cleanup } => { + let ignored = M::ignore_checkable_overflow_assertions(self) + && match msg { + mir::AssertKind::OverflowNeg(..) => true, + mir::AssertKind::Overflow(op, ..) => op.is_checkable(), + _ => false, + }; let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?; - if expected == cond_val { + if ignored || expected == cond_val { self.go_to_block(target); } else { M::assert_panic(self, msg, cleanup)?; @@ -541,7 +548,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let receiver_place = loop { match receiver.layout.ty.kind() { ty::Ref(..) | ty::RawPtr(..) => break self.deref_operand(&receiver)?, - ty::Dynamic(..) => break receiver.assert_mem_place(), // no immediate unsized values + ty::Dynamic(.., ty::Dyn) => break receiver.assert_mem_place(), // no immediate unsized values + ty::Dynamic(.., ty::DynStar) => { + // Not clear how to handle this, so far we assume the receiver is always a pointer. + span_bug!( + self.cur_span(), + "by-value calls on a `dyn*`... are those a thing?" + ); + } _ => { // Not there yet, search for the only non-ZST field. let mut non_zst_field = None; @@ -567,39 +581,59 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } } }; - // Obtain the underlying trait we are working on. - let receiver_tail = self - .tcx - .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env); - let ty::Dynamic(data, ..) = receiver_tail.kind() else { - span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail) - }; - // Get the required information from the vtable. - let vptr = receiver_place.meta.unwrap_meta().to_pointer(self)?; - let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?; - if dyn_trait != data.principal() { - throw_ub_format!( - "`dyn` call on a pointer whose vtable does not match its type" - ); - } + // Obtain the underlying trait we are working on, and the adjusted receiver argument. + let (vptr, dyn_ty, adjusted_receiver) = if let ty::Dynamic(data, _, ty::DynStar) = + receiver_place.layout.ty.kind() + { + let (recv, vptr) = self.unpack_dyn_star(&receiver_place.into())?; + let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?; + if dyn_trait != data.principal() { + throw_ub_format!( + "`dyn*` call on a pointer whose vtable does not match its type" + ); + } + let recv = recv.assert_mem_place(); // we passed an MPlaceTy to `unpack_dyn_star` so we definitely still have one + + (vptr, dyn_ty, recv.ptr) + } else { + // Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`. + // (For that reason we also cannot use `unpack_dyn_trait`.) + let receiver_tail = self + .tcx + .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env); + let ty::Dynamic(data, _, ty::Dyn) = receiver_tail.kind() else { + span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail) + }; + assert!(receiver_place.layout.is_unsized()); + + // Get the required information from the vtable. + let vptr = receiver_place.meta.unwrap_meta().to_pointer(self)?; + let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?; + if dyn_trait != data.principal() { + throw_ub_format!( + "`dyn` call on a pointer whose vtable does not match its type" + ); + } + + // It might be surprising that we use a pointer as the receiver even if this + // is a by-val case; this works because by-val passing of an unsized `dyn + // Trait` to a function is actually desugared to a pointer. 
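The virtual-call path above only ever needs two ingredients from the receiver: a thin data pointer and the vtable pointer, taken either from the place metadata (dyn) or from the second half of the pair (dyn*); the method is then fetched from the vtable and the data pointer is passed as args[0]. The following is a hand-rolled model of that dispatch in plain Rust; the Speak/Dog names and the single-entry vtable are invented for illustration, and real rustc vtables also carry size, alignment and drop entries.

// A stand-in for a vtable: one function pointer that takes an erased data pointer.
struct SpeakVtable {
    speak: fn(*const ()) -> String,
}

// The shape of a `&dyn Speak`-like value: a thin data pointer plus a vtable pointer.
struct DynSpeak {
    data: *const (),
    vtable: &'static SpeakVtable,
}

struct Dog;

fn dog_speak(data: *const ()) -> String {
    // Cast the erased pointer back to the concrete receiver type.
    let _dog: &Dog = unsafe { &*(data as *const Dog) };
    "woof".to_string()
}

static DOG_VTABLE: SpeakVtable = SpeakVtable { speak: dog_speak };

fn main() {
    let dog = Dog;
    // "Unsizing": pair the data pointer with the vtable of the concrete type.
    let obj = DynSpeak { data: &dog as *const Dog as *const (), vtable: &DOG_VTABLE };
    // A virtual call: look the method up in the vtable and pass the data
    // pointer as a thin receiver, the same shape as the adjusted args[0] above.
    assert_eq!((obj.vtable.speak)(obj.data), "woof");
}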
+ (vptr, dyn_ty, receiver_place.ptr) + }; // Now determine the actual method to call. We can do that in two different ways and // compare them to ensure everything fits. let Some(ty::VtblEntry::Method(fn_inst)) = self.get_vtable_entries(vptr)?.get(idx).copied() else { throw_ub_format!("`dyn` call trying to call something that is not a method") }; + trace!("Virtual call dispatches to {fn_inst:#?}"); if cfg!(debug_assertions) { let tcx = *self.tcx; let trait_def_id = tcx.trait_of_item(def_id).unwrap(); let virtual_trait_ref = ty::TraitRef::from_method(tcx, trait_def_id, instance.substs); - assert_eq!( - receiver_tail, - virtual_trait_ref.self_ty(), - "mismatch in underlying dyn trait computation within Miri and MIR building", - ); let existential_trait_ref = ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref); let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty); @@ -614,17 +648,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { assert_eq!(fn_inst, concrete_method); } - // `*mut receiver_place.layout.ty` is almost the layout that we - // want for args[0]: We have to project to field 0 because we want - // a thin pointer. - assert!(receiver_place.layout.is_unsized()); - let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty); - let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0); - // Adjust receiver argument. - args[0] = OpTy::from(ImmTy::from_immediate( - Scalar::from_maybe_pointer(receiver_place.ptr, self).into(), - this_receiver_ptr, - )); + // Adjust receiver argument. Layout can be any (thin) ptr. + args[0] = ImmTy::from_immediate( + Scalar::from_maybe_pointer(adjusted_receiver, self).into(), + self.layout_of(self.tcx.mk_mut_ptr(dyn_ty))?, + ) + .into(); trace!("Patched receiver operand to {:#?}", args[0]); // recurse with concrete function self.eval_fn_call( @@ -653,15 +682,24 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // implementation fail -- a problem shared by rustc. let place = self.force_allocation(place)?; - let (instance, place) = match place.layout.ty.kind() { - ty::Dynamic(..) => { + let place = match place.layout.ty.kind() { + ty::Dynamic(_, _, ty::Dyn) => { // Dropping a trait object. Need to find actual drop fn. - let place = self.unpack_dyn_trait(&place)?; - let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty); - (instance, place) + self.unpack_dyn_trait(&place)?.0 + } + ty::Dynamic(_, _, ty::DynStar) => { + // Dropping a `dyn*`. Need to find actual drop fn. 
+ self.unpack_dyn_star(&place.into())?.0.assert_mem_place() + } + _ => { + debug_assert_eq!( + instance, + ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty) + ); + place } - _ => (instance, place), }; + let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty); let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?; let arg = ImmTy::from_immediate( diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs index cabc65e2c..bf2b4ee69 100644 --- a/compiler/rustc_const_eval/src/interpret/util.rs +++ b/compiler/rustc_const_eval/src/interpret/util.rs @@ -1,5 +1,7 @@ use rustc_middle::mir::interpret::InterpResult; -use rustc_middle::ty::{self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor}; +use rustc_middle::ty::{ + self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor, +}; use std::ops::ControlFlow; /// Checks whether a type contains generic parameters which require substitution. @@ -9,7 +11,7 @@ use std::ops::ControlFlow; /// case these parameters are unused. pub(crate) fn ensure_monomorphic_enough<'tcx, T>(tcx: TyCtxt<'tcx>, ty: T) -> InterpResult<'tcx> where - T: TypeVisitable<'tcx>, + T: TypeVisitable<TyCtxt<'tcx>>, { debug!("ensure_monomorphic_enough: ty={:?}", ty); if !ty.needs_subst() { @@ -21,7 +23,7 @@ where tcx: TyCtxt<'tcx>, } - impl<'tcx> TypeVisitor<'tcx> for UsedParamsNeedSubstVisitor<'tcx> { + impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for UsedParamsNeedSubstVisitor<'tcx> { type BreakTy = FoundParam; fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> { diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index 19e359986..f7881c509 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -23,18 +23,18 @@ use std::hash::Hash; // for the validation errors use super::UndefinedBehaviorInfo::*; use super::{ - CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, - MemPlaceMeta, OpTy, Scalar, ValueVisitor, + AllocId, CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, + Machine, MemPlaceMeta, OpTy, Pointer, Scalar, ValueVisitor, }; macro_rules! throw_validation_failure { - ($where:expr, { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )?) => {{ + ($where:expr, { $( $what_fmt:tt )* } $( expected { $( $expected_fmt:tt )* } )?) => {{ let mut msg = String::new(); msg.push_str("encountered "); - write!(&mut msg, $($what_fmt),+).unwrap(); + write!(&mut msg, $($what_fmt)*).unwrap(); $( msg.push_str(", but expected "); - write!(&mut msg, $($expected_fmt),+).unwrap(); + write!(&mut msg, $($expected_fmt)*).unwrap(); )? let path = rustc_middle::ty::print::with_no_trimmed_paths!({ let where_ = &$where; @@ -82,7 +82,7 @@ macro_rules! throw_validation_failure { /// macro_rules! try_validation { ($e:expr, $where:expr, - $( $( $p:pat_param )|+ => { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )? ),+ $(,)? + $( $( $p:pat_param )|+ => { $( $what_fmt:tt )* } $( expected { $( $expected_fmt:tt )* } )? ),+ $(,)? ) => {{ match $e { Ok(x) => x, @@ -93,7 +93,7 @@ macro_rules! try_validation { InterpError::UndefinedBehavior($($p)|+) => throw_validation_failure!( $where, - { $( $what_fmt ),+ } $( expected { $( $expected_fmt ),+ } )? + { $( $what_fmt )* } $( expected { $( $expected_fmt )* } )? 
) ),+, #[allow(unreachable_patterns)] @@ -240,10 +240,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' // FIXME this should be more descriptive i.e. CapturePlace instead of CapturedVar // https://github.com/rust-lang/project-rfc-2229/issues/46 if let Some(local_def_id) = def_id.as_local() { - let tables = self.ecx.tcx.typeck(local_def_id); - if let Some(captured_place) = - tables.closure_min_captures_flattened(local_def_id).nth(field) - { + let captures = self.ecx.tcx.closure_captures(local_def_id); + if let Some(captured_place) = captures.get(field) { // Sometimes the index is beyond the number of upvars (seen // for a generator). let var_hir_id = captured_place.get_root_variable(); @@ -335,7 +333,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' ) -> InterpResult<'tcx> { let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env); match tail.kind() { - ty::Dynamic(..) => { + ty::Dynamic(_, _, ty::Dyn) => { let vtable = meta.unwrap_meta().to_pointer(self.ecx)?; // Make sure it is a genuine vtable pointer. let (_ty, _trait) = try_validation!( @@ -348,7 +346,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' // FIXME: check if the type/trait match what ty::Dynamic says? } ty::Slice(..) | ty::Str => { - let _len = meta.unwrap_meta().to_machine_usize(self.ecx)?; + let _len = meta.unwrap_meta().to_target_usize(self.ecx)?; // We do not check that `len * elem_size <= isize::MAX`: // that is only required for references, and there it falls out of the // "dereferenceable" check performed by Stacked Borrows. @@ -399,12 +397,15 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' { "an unaligned {kind} (required {} byte alignment but found {})", required.bytes(), - has.bytes() + has.bytes(), }, DanglingIntPointer(0, _) => { "a null {kind}" }, DanglingIntPointer(i, _) => - { "a dangling {kind} (address {i:#x} is unallocated)" }, + { + "a dangling {kind} ({pointer} has no provenance)", + pointer = Pointer::<Option<AllocId>>::from_addr_invalid(*i), + }, PointerOutOfBounds { .. } => { "a dangling {kind} (going beyond the bounds of its allocation)" }, // This cannot happen during const-eval (because interning already detects @@ -602,6 +603,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' | ty::Bound(..) | ty::Param(..) | ty::Alias(..) + | ty::GeneratorWitnessMIR(..) | ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty), } } diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs index f9efc2418..7a1445939 100644 --- a/compiler/rustc_const_eval/src/interpret/visitor.rs +++ b/compiler/rustc_const_eval/src/interpret/visitor.rs @@ -284,7 +284,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M> &self, ecx: &InterpCx<'mir, 'tcx, M>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { - // We `force_allocation` here so that `from_op` below can work. + // No need for `force_allocation` since we are just going to read from this. ecx.place_to_op(self) } @@ -421,15 +421,25 @@ macro_rules! make_value_visitor { // Special treatment for special types, where the (static) layout is not sufficient. match *ty.kind() { // If it is a trait object, switch to the real type that was used to create it. - ty::Dynamic(..) => { + ty::Dynamic(_, _, ty::Dyn) => { + // Dyn types. 
This is unsized, and the actual dynamic type of the data is given by the + // vtable stored in the place metadata. // unsized values are never immediate, so we can assert_mem_place let op = v.to_op_for_read(self.ecx())?; let dest = op.assert_mem_place(); - let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?; + let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0; trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout); // recurse with the inner type return self.visit_field(&v, 0, &$value_trait::from_op(&inner_mplace.into())); }, + ty::Dynamic(_, _, ty::DynStar) => { + // DynStar types. Very different from a dyn type (but strangely part of the + // same variant in `TyKind`): These are pairs where the 2nd component is the + // vtable, and the first component is the data (which must be ptr-sized). + let op = v.to_op_for_proj(self.ecx())?; + let data = self.ecx().unpack_dyn_star(&op)?.0; + return self.visit_field(&v, 0, &$value_trait::from_op(&data)); + } // Slices do not need special handling here: they have `Array` field // placement with length 0, so we enter the `Array` case below which // indirectly uses the metadata to determine the actual length.
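The visitor cases above lean on two layout facts: a dyn Trait place is unsized, with its vtable carried as out-of-band metadata (hence the two-word fat reference), while dyn* stores a pointer-sized data word and the vtable as an ordinary pair. A short runnable check of the fat-pointer half, using only standard Rust:

use std::fmt::Debug;
use std::mem::size_of;

fn main() {
    // A reference to a sized type is a single ("thin") pointer.
    assert_eq!(size_of::<&u8>(), size_of::<usize>());
    // A &dyn Trait reference is a fat pointer: data pointer plus vtable pointer,
    // the same metadata the visitor extracts from the place above.
    assert_eq!(size_of::<&dyn Debug>(), 2 * size_of::<usize>());
    // Slice references are also fat, but their metadata is a length, not a vtable.
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
}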