Diffstat (limited to 'compiler/rustc_const_eval/src/interpret/operand.rs')
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operand.rs | 198
1 file changed, 30 insertions(+), 168 deletions(-)
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index befc0928f..5310ef0bb 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -4,13 +4,12 @@
use either::{Either, Left, Right};
use rustc_hir::def::Namespace;
-use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
use rustc_middle::ty::{ConstInt, Ty, ValTree};
use rustc_middle::{mir, ty};
use rustc_span::Span;
-use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding};
-use rustc_target::abi::{VariantIdx, Variants};
+use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
use super::{
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
@@ -53,7 +52,7 @@ impl<Prov: Provenance> Immediate<Prov> {
}
pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
- Immediate::ScalarPair(val, Scalar::from_machine_usize(len, cx))
+ Immediate::ScalarPair(val, Scalar::from_target_usize(len, cx))
}
pub fn new_dyn_trait(
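The machine -> target rename here (and throughout this change) emphasizes that the width in question is the *target's* pointer size, not the host's. Below is a minimal sketch of what such a constructor must enforce, with `TargetLayout` as a hypothetical stand-in for rustc's `HasDataLayout`; the real `Scalar::from_target_usize` goes through `from_uint`, which likewise asserts the value fits.

struct TargetLayout { pointer_size_bytes: u64 }

/// Sketch: a target-usize value must fit the target's pointer width
/// (e.g. 4 bytes on a 32-bit target), regardless of the host's 8-byte u64.
fn from_target_usize(val: u64, cx: &TargetLayout) -> u64 {
    let bits = cx.pointer_size_bytes * 8;
    assert!(bits >= 64 || val < (1u64 << bits), "value too large for target usize");
    val
}

fn main() {
    let target32 = TargetLayout { pointer_size_bytes: 4 };
    assert_eq!(from_target_usize(40_000, &target32), 40_000);
}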
@@ -256,7 +255,22 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
}
}
- pub fn offset_with_meta(
+ /// Replace the layout of this operand. There's basically no sanity check that this makes sense;
+ /// you'd better know what you are doing! If this is an immediate, applying the wrong layout can
+ /// not only lead to invalid data, it can actually *shift the data around*, since the offsets of
+ /// a ScalarPair are entirely determined by the layout, not the data.
+ pub fn transmute(&self, layout: TyAndLayout<'tcx>) -> Self {
+ assert_eq!(
+ self.layout.size, layout.size,
+ "transmuting with a size change, that doesn't seem right"
+ );
+ OpTy { layout, ..*self }
+ }
+
+ /// Offset the operand in memory (if possible) and change its metadata.
+ ///
+ /// This can go wrong very easily if you give the wrong layout for the new place!
+ pub(super) fn offset_with_meta(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
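The new doc comment states the whole contract: `transmute` checks only that the total size matches. A toy model (names illustrative, not rustc's) of why that is dangerous for a `ScalarPair`, whose second component's offset comes from the layout rather than from the stored data:

#[derive(Clone, Copy)]
struct Layout { size: u64, b_offset: u64 }

#[derive(Clone, Copy)]
struct OpTy { a: u64, b: u64, layout: Layout }

impl OpTy {
    /// Mirrors the contract in the diff: only the total size is checked.
    fn transmute(&self, layout: Layout) -> OpTy {
        assert_eq!(self.layout.size, layout.size,
            "transmuting with a size change, that doesn't seem right");
        OpTy { layout, ..*self }
    }
}

fn main() {
    let pair = OpTy { a: 1, b: 2, layout: Layout { size: 16, b_offset: 8 } };
    // Same total size, different second-field offset: the stored scalars are
    // unchanged, but anything that later writes this pair back to memory will
    // put `b` at offset 4 instead of 8 -- the data effectively moves.
    let reinterpreted = pair.transmute(Layout { size: 16, b_offset: 4 });
    assert_eq!(reinterpreted.layout.b_offset, 4);
}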
@@ -277,6 +291,9 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
}
}
+ /// Offset the operand in memory (if possible).
+ ///
+ /// This can go wrong very easily if you give the wrong layout for the new place!
pub fn offset(
&self,
offset: Size,
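For context, in the full source `offset` simply forwards to `offset_with_meta` with `MemPlaceMeta::None` after asserting the layout is sized, which is why both carry the same warning. A sketch of that delegation over simplified stand-in types (rustc's real types also carry provenance and alignment):

enum Meta { None }

struct Place { addr: u64 }

fn offset_with_meta(p: &Place, offset: u64, _meta: Meta) -> Place {
    Place { addr: p.addr + offset }
}

/// Sized data only: there is no metadata to carry (the real method also
/// asserts `layout.is_sized()` before delegating).
fn offset(p: &Place, off: u64) -> Place {
    offset_with_meta(p, off, Meta::None)
}

fn main() {
    let p = Place { addr: 0x1000 };
    assert_eq!(offset(&p, 8).addr, 0x1008);
}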
@@ -319,7 +336,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
let scalar = alloc.read_scalar(
alloc_range(Size::ZERO, size),
- /*read_provenance*/ s.is_ptr(),
+ /*read_provenance*/ matches!(s, abi::Pointer(_)),
)?;
Some(ImmTy { imm: scalar.into(), layout: mplace.layout })
}
@@ -335,11 +352,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
let a_val = alloc.read_scalar(
alloc_range(Size::ZERO, a_size),
- /*read_provenance*/ a.is_ptr(),
+ /*read_provenance*/ matches!(a, abi::Pointer(_)),
)?;
let b_val = alloc.read_scalar(
alloc_range(b_offset, b_size),
- /*read_provenance*/ b.is_ptr(),
+ /*read_provenance*/ matches!(b, abi::Pointer(_)),
)?;
Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout })
}
@@ -415,12 +432,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.read_scalar(op)?.to_pointer(self)
}
/// Read a pointer-sized unsigned integer from a place.
- pub fn read_machine_usize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u64> {
- self.read_scalar(op)?.to_machine_usize(self)
+ pub fn read_target_usize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u64> {
+ self.read_scalar(op)?.to_target_usize(self)
}
/// Read a pointer-sized signed integer from a place.
- pub fn read_machine_isize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, i64> {
- self.read_scalar(op)?.to_machine_isize(self)
+ pub fn read_target_isize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, i64> {
+ self.read_scalar(op)?.to_target_isize(self)
}
/// Turn the wide MPlace into a string (must already be dereferenced!)
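The two renamed readers differ only in signedness: the signed one must sign-extend from the target's pointer width into the host's `i64`. A self-contained sketch of that extension (widths illustrative):

/// Interpret the low `pointer_bits` bits of `raw` as a signed target-isize,
/// sign-extending into the host's i64.
fn to_target_isize(raw: u64, pointer_bits: u32) -> i64 {
    let shift = 64 - pointer_bits;
    ((raw << shift) as i64) >> shift // arithmetic shift sign-extends
}

fn main() {
    // On a 32-bit target, bit pattern 0xFFFF_FFFF is usize 4294967295
    // but isize -1; only the signed reader performs the extension.
    assert_eq!(to_target_isize(0xFFFF_FFFF, 32), -1);
    assert_eq!(to_target_isize(0x7FFF_FFFF, 32), i32::MAX as i64);
}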
@@ -595,14 +612,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
span: Option<Span>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- // FIXME(const_prop): normalization needed b/c const prop lint in
- // `mir_drops_elaborated_and_const_checked`, which happens before
- // optimized MIR. Only after optimizing the MIR can we guarantee
- // that the `RevealAll` pass has happened and that the body's consts
- // are normalized, so any call to resolve before that needs to be
- // manually normalized.
- let val = self.tcx.normalize_erasing_regions(self.param_env, *val);
- match val {
+ match *val {
mir::ConstantKind::Ty(ct) => {
let ty = ct.ty();
let valtree = self.eval_ty_constant(ct, span)?;
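The deleted FIXME was compensating for constants whose types could still contain unnormalized associated-type projections; once callers guarantee normalized input, `eval_mir_constant` can match on `*val` directly. A toy picture of what normalization resolves, in ordinary Rust (the trait and names are purely illustrative):

trait Widen { type Out; }
impl Widen for u8 { type Out = u16; }

// Evaluating a constant of type `<u8 as Widen>::Out` requires resolving the
// projection to the concrete `u16` first; that eager resolution is what the
// deleted `normalize_erasing_regions` call performed for every constant.
const WIDENED: <u8 as Widen>::Out = 300;

fn main() {
    assert_eq!(WIDENED, 300u16);
}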
@@ -657,154 +667,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
};
Ok(OpTy { op, layout, align: Some(layout.align.abi) })
}
-
- /// Read discriminant, return the runtime value as well as the variant index.
- /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
- pub fn read_discriminant(
- &self,
- op: &OpTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
- trace!("read_discriminant_value {:#?}", op.layout);
- // Get type and layout of the discriminant.
- let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
- trace!("discriminant type: {:?}", discr_layout.ty);
-
- // We use "discriminant" to refer to the value associated with a particular enum variant.
- // This is not to be confused with its "variant index", which is just determining its position in the
- // declared list of variants -- they can differ with explicitly assigned discriminants.
- // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
- // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
- let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
- Variants::Single { index } => {
- let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
- Some(discr) => {
- // This type actually has discriminants.
- assert_eq!(discr.ty, discr_layout.ty);
- Scalar::from_uint(discr.val, discr_layout.size)
- }
- None => {
- // On a type without actual discriminants, variant is 0.
- assert_eq!(index.as_u32(), 0);
- Scalar::from_uint(index.as_u32(), discr_layout.size)
- }
- };
- return Ok((discr, index));
- }
- Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
- (tag, tag_encoding, tag_field)
- }
- };
-
- // There are *three* layouts that come into play here:
- // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
- // the `Scalar` we return.
- // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
- // and used to interpret the value we read from the tag field.
- // For the return value, a cast to `discr_layout` is performed.
- // - The field storing the tag has a layout, which is very similar to `tag_layout` but
- // may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
-
- // Get layout for tag.
- let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
-
- // Read tag and sanity-check `tag_layout`.
- let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
- assert_eq!(tag_layout.size, tag_val.layout.size);
- assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
- trace!("tag value: {}", tag_val);
-
- // Figure out which discriminant and variant this corresponds to.
- Ok(match *tag_encoding {
- TagEncoding::Direct => {
- let scalar = tag_val.to_scalar();
- // Generate a specific error if `tag_val` is not an integer.
- // (`tag_bits` itself is only used for error messages below.)
- let tag_bits = scalar
- .try_to_int()
- .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
- .assert_bits(tag_layout.size);
- // Cast bits from tag layout to discriminant layout.
- // After the checks we did above, this cannot fail, as
- // discriminants are int-like.
- let discr_val =
- self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
- let discr_bits = discr_val.assert_bits(discr_layout.size);
- // Convert discriminant to variant index, and catch invalid discriminants.
- let index = match *op.layout.ty.kind() {
- ty::Adt(adt, _) => {
- adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
- }
- ty::Generator(def_id, substs, _) => {
- let substs = substs.as_generator();
- substs
- .discriminants(def_id, *self.tcx)
- .find(|(_, var)| var.val == discr_bits)
- }
- _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
- }
- .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
- // Return the cast value, and the index.
- (discr_val, index.0)
- }
- TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
- let tag_val = tag_val.to_scalar();
- // Compute the variant this niche value/"tag" corresponds to. With niche layout,
- // discriminant (encoded in niche/tag) and variant index are the same.
- let variants_start = niche_variants.start().as_u32();
- let variants_end = niche_variants.end().as_u32();
- let variant = match tag_val.try_to_int() {
- Err(dbg_val) => {
- // So this is a pointer then, and casting to an int failed.
- // Can only happen during CTFE.
- // The niche must be just 0, and the ptr not null, then we know this is
- // okay. Everything else, we conservatively reject.
- let ptr_valid = niche_start == 0
- && variants_start == variants_end
- && !self.scalar_may_be_null(tag_val)?;
- if !ptr_valid {
- throw_ub!(InvalidTag(dbg_val))
- }
- untagged_variant
- }
- Ok(tag_bits) => {
- let tag_bits = tag_bits.assert_bits(tag_layout.size);
- // We need to use machine arithmetic to get the relative variant idx:
- // variant_index_relative = tag_val - niche_start_val
- let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
- let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
- let variant_index_relative_val =
- self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
- let variant_index_relative =
- variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
- // Check if this is in the range that indicates an actual discriminant.
- if variant_index_relative <= u128::from(variants_end - variants_start) {
- let variant_index_relative = u32::try_from(variant_index_relative)
- .expect("we checked that this fits into a u32");
- // Then computing the absolute variant idx should not overflow any more.
- let variant_index = variants_start
- .checked_add(variant_index_relative)
- .expect("overflow computing absolute variant idx");
- let variants_len = op
- .layout
- .ty
- .ty_adt_def()
- .expect("tagged layout for non adt")
- .variants()
- .len();
- assert!(usize::try_from(variant_index).unwrap() < variants_len);
- VariantIdx::from_u32(variant_index)
- } else {
- untagged_variant
- }
- }
- };
- // Compute the size of the scalar we need to return.
- // No need to cast, because the variant index directly serves as discriminant and is
- // encoded in the tag.
- (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
- }
- })
- }
}
// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
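This change removes `read_discriminant` from operand.rs wholesale (the diffstat covers only this file, so its destination isn't visible here). The deleted comments distinguish the two tag encodings: with `TagEncoding::Direct` the tag value is the discriminant itself, while `TagEncoding::Niche` maps a sub-range of tag values to the niched variants and every other bit pattern to the untagged variant. The deleted niche arithmetic, reconstructed as a standalone sketch minus the pointer/provenance handling of the real code:

/// Decode a niche-encoded tag to a variant index. With niche layout the
/// decoded value is the variant index directly; no discriminant cast needed.
fn variant_from_niche_tag(
    tag: u128,             // raw bits read from the tag field
    niche_start: u128,     // tag value encoding the first niched variant
    variants_start: u32,   // niche_variants.start()
    variants_end: u32,     // niche_variants.end()
    untagged_variant: u32,
    tag_bits: u32,         // bit width of the tag field
) -> u32 {
    // variant_index_relative = tag - niche_start, wrapping at tag width
    // (mirrors the `mir::BinOp::Sub` machine arithmetic in the deleted code).
    let mask = if tag_bits >= 128 { u128::MAX } else { (1u128 << tag_bits) - 1 };
    let relative = tag.wrapping_sub(niche_start) & mask;
    if relative <= u128::from(variants_end - variants_start) {
        let relative = u32::try_from(relative).expect("we checked that this fits into a u32");
        variants_start.checked_add(relative).expect("overflow computing absolute variant idx")
    } else {
        untagged_variant
    }
}

fn main() {
    // An Option<&T>-shaped layout: tag 0 (null) encodes None (variant 0),
    // and any non-null pointer bit pattern is the untagged Some (variant 1).
    assert_eq!(variant_from_niche_tag(0, 0, 0, 0, 1, 64), 0);
    assert_eq!(variant_from_niche_tag(0x1000, 0, 0, 0, 1, 64), 1);
}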