author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:50 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:50 +0000
commit     2e00214b3efbdfeefaa0fe9e8b8fd519de7adc35 (patch)
tree       d325add32978dbdc1db975a438b3a77d571b1ab8 /compiler/rustc_abi
parent     Releasing progress-linux version 1.68.2+dfsg1-1~progress7.99u1. (diff)
download   rustc-2e00214b3efbdfeefaa0fe9e8b8fd519de7adc35.tar.xz
           rustc-2e00214b3efbdfeefaa0fe9e8b8fd519de7adc35.zip
Merging upstream version 1.69.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_abi')
-rw-r--r--  compiler/rustc_abi/src/layout.rs  146
-rw-r--r--  compiler/rustc_abi/src/lib.rs     138
2 files changed, 153 insertions, 131 deletions
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index 9c2cf58ef..54858b520 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -1,11 +1,5 @@
use super::*;
-use std::{
- borrow::Borrow,
- cmp,
- fmt::Debug,
- iter,
- ops::{Bound, Deref},
-};
+use std::{borrow::Borrow, cmp, iter, ops::Bound};
#[cfg(feature = "randomize")]
use rand::{seq::SliceRandom, SeedableRng};
@@ -33,7 +27,7 @@ pub trait LayoutCalculator {
fn delay_bug(&self, txt: &str);
fn current_data_layout(&self) -> Self::TargetDataLayoutRef;
- fn scalar_pair<V: Idx>(&self, a: Scalar, b: Scalar) -> LayoutS<V> {
+ fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS {
let dl = self.current_data_layout();
let dl = dl.borrow();
let b_align = b.align(dl);
@@ -49,7 +43,7 @@ pub trait LayoutCalculator {
.max_by_key(|niche| niche.available(dl));
LayoutS {
- variants: Variants::Single { index: V::new(0) },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary {
offsets: vec![Size::ZERO, b_offset],
memory_index: vec![0, 1],
@@ -61,13 +55,13 @@ pub trait LayoutCalculator {
}
}
- fn univariant<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
+ fn univariant(
&self,
dl: &TargetDataLayout,
- fields: &[F],
+ fields: &[Layout<'_>],
repr: &ReprOptions,
kind: StructKind,
- ) -> Option<LayoutS<V>> {
+ ) -> Option<LayoutS> {
let pack = repr.pack;
let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
@@ -76,17 +70,17 @@ pub trait LayoutCalculator {
let end =
if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
let optimizing = &mut inverse_memory_index[..end];
- let effective_field_align = |f: &F| {
+ let effective_field_align = |layout: Layout<'_>| {
if let Some(pack) = pack {
// return the packed alignment in bytes
- f.align.abi.min(pack).bytes()
+ layout.align().abi.min(pack).bytes()
} else {
// returns log2(effective-align).
// This is ok since `pack` applies to all fields equally.
// The calculation assumes that size is an integer multiple of align, except for ZSTs.
//
// group [u8; 4] with align-4 or [u8; 6] with align-2 fields
- f.align.abi.bytes().max(f.size.bytes()).trailing_zeros() as u64
+ layout.align().abi.bytes().max(layout.size().bytes()).trailing_zeros() as u64
}
};
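
Editor's note: the log2-of-effective-alignment key described in the comment above can be checked in isolation. The sketch below is std-only; the function name and bare byte-count parameters are illustrative stand-ins for rustc's Align/Size accessors, not its API.

```rust
// Sketch of the unpacked branch of `effective_field_align`: the sort key is
// log2(max(align, size)), so arrays with small alignment but a larger size get
// grouped with fields of comparable footprint.
fn effective_align_log2(align_bytes: u64, size_bytes: u64) -> u32 {
    // trailing_zeros of the footprint == log2(effective align);
    // a ZST (size 0) falls back to log2 of its declared alignment.
    align_bytes.max(size_bytes).trailing_zeros()
}

fn main() {
    assert_eq!(effective_align_log2(1, 4), 2); // [u8; 4] groups with align-4 fields
    assert_eq!(effective_align_log2(1, 6), 1); // [u8; 6] groups with align-2 fields
    assert_eq!(effective_align_log2(4, 4), 2); // u32
    assert_eq!(effective_align_log2(1, 0), 0); // a ZST such as ()
}
```
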
@@ -111,9 +105,9 @@ pub trait LayoutCalculator {
// Place ZSTs first to avoid "interesting offsets",
// especially with only one or two non-ZST fields.
// Then place largest alignments first, largest niches within an alignment group last
- let f = &fields[x as usize];
- let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
- (!f.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
+ let f = fields[x as usize];
+ let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
+ (!f.0.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
});
}
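
Editor's note: the sort key tuple in this hunk (ZSTs first, larger alignment groups next, largest niche last within a group) can be simulated with plain structs; `FieldInfo` and its fields are made-up stand-ins for the `Layout` accessors.

```rust
use std::cmp::Reverse;

// Stand-in for the per-field data the real sort reads through `Layout`.
struct FieldInfo {
    is_zst: bool,
    align_log2: u32,
    niche_size: u128,
}

// Mirrors the key tuple above: ZSTs sort first (`!is_zst` is false for them),
// then larger alignments first, then smaller niches before larger ones.
fn optimize_order(fields: &[FieldInfo]) -> Vec<usize> {
    let mut order: Vec<usize> = (0..fields.len()).collect();
    order.sort_by_key(|&i| {
        let f = &fields[i];
        (!f.is_zst, Reverse(f.align_log2), f.niche_size)
    });
    order
}

fn main() {
    let fields = vec![
        FieldInfo { is_zst: false, align_log2: 0, niche_size: 0 }, // u8
        FieldInfo { is_zst: true,  align_log2: 0, niche_size: 0 }, // ()
        FieldInfo { is_zst: false, align_log2: 3, niche_size: 1 }, // &T on a 64-bit target
    ];
    // The ZST leads, then the reference (align 8, has a niche), then the u8.
    assert_eq!(optimize_order(&fields), vec![1, 2, 0]);
}
```
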
@@ -123,8 +117,8 @@ pub trait LayoutCalculator {
// And put the largest niche in an alignment group at the end
// so it can be used as discriminant in jagged enums
optimizing.sort_by_key(|&x| {
- let f = &fields[x as usize];
- let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
+ let f = fields[x as usize];
+ let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
(effective_field_align(f), niche_size)
});
}
@@ -160,15 +154,15 @@ pub trait LayoutCalculator {
));
}
- if field.is_unsized() {
+ if field.0.is_unsized() {
sized = false;
}
// Invariant: offset < dl.obj_size_bound() <= 1<<61
let field_align = if let Some(pack) = pack {
- field.align.min(AbiAndPrefAlign::new(pack))
+ field.align().min(AbiAndPrefAlign::new(pack))
} else {
- field.align
+ field.align()
};
offset = offset.align_to(field_align.abi);
align = align.max(field_align);
@@ -176,7 +170,7 @@ pub trait LayoutCalculator {
debug!("univariant offset: {:?} field: {:#?}", offset, field);
offsets[i as usize] = offset;
- if let Some(mut niche) = field.largest_niche {
+ if let Some(mut niche) = field.largest_niche() {
let available = niche.available(dl);
if available > largest_niche_available {
largest_niche_available = available;
@@ -185,7 +179,7 @@ pub trait LayoutCalculator {
}
}
- offset = offset.checked_add(field.size, dl)?;
+ offset = offset.checked_add(field.size(), dl)?;
}
if let Some(repr_align) = repr.align {
align = align.max(AbiAndPrefAlign::new(repr_align));
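
Editor's note: the effect of this field-ordering optimization is observable with std alone; on current compilers the default repr reorders the fields below, while #[repr(C)] keeps declaration order.

```rust
use std::mem::size_of;

#[allow(dead_code)]
struct Reordered {
    a: u8,
    b: u32,
    c: u8,
}

#[allow(dead_code)]
#[repr(C)]
struct DeclOrder {
    a: u8,
    b: u32,
    c: u8,
}

fn main() {
    // Typically 8: the u32 is placed first, the two u8s follow.
    println!("repr(Rust): {} bytes", size_of::<Reordered>());
    // 12 on targets where u32 is 4-byte aligned (padding after `a` and `c`).
    println!("repr(C):    {} bytes", size_of::<DeclOrder>());
    assert!(size_of::<Reordered>() <= size_of::<DeclOrder>());
}
```
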
@@ -205,24 +199,26 @@ pub trait LayoutCalculator {
// Unpack newtype ABIs and find scalar pairs.
if sized && size.bytes() > 0 {
// All other fields must be ZSTs.
- let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+ let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.0.is_zst());
match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
// We have exactly one non-ZST field.
(Some((i, field)), None, None) => {
// Field fills the struct and it has a scalar or scalar pair ABI.
- if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
+ if offsets[i].bytes() == 0
+ && align.abi == field.align().abi
+ && size == field.size()
{
- match field.abi {
+ match field.abi() {
// For plain scalars, or vectors of them, we can't unpack
// newtypes for `#[repr(C)]`, as that affects C ABIs.
Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
- abi = field.abi;
+ abi = field.abi();
}
// But scalar pairs are Rust-specific and get
// treated as aggregates by C ABIs anyway.
Abi::ScalarPair(..) => {
- abi = field.abi;
+ abi = field.abi();
}
_ => {}
}
@@ -231,7 +227,7 @@ pub trait LayoutCalculator {
// Two non-ZST fields, and they're both scalars.
(Some((i, a)), Some((j, b)), None) => {
- match (a.abi, b.abi) {
+ match (a.abi(), b.abi()) {
(Abi::Scalar(a), Abi::Scalar(b)) => {
// Order by the memory placement, not source order.
let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
@@ -239,7 +235,7 @@ pub trait LayoutCalculator {
} else {
((j, b), (i, a))
};
- let pair = self.scalar_pair::<V>(a, b);
+ let pair = self.scalar_pair(a, b);
let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index, &[0, 1]);
@@ -264,11 +260,11 @@ pub trait LayoutCalculator {
_ => {}
}
}
- if fields.iter().any(|f| f.abi.is_uninhabited()) {
+ if fields.iter().any(|f| f.abi().is_uninhabited()) {
abi = Abi::Uninhabited;
}
Some(LayoutS {
- variants: Variants::Single { index: V::new(0) },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary { offsets, memory_index },
abi,
largest_niche,
@@ -277,11 +273,11 @@ pub trait LayoutCalculator {
})
}
- fn layout_of_never_type<V: Idx>(&self) -> LayoutS<V> {
+ fn layout_of_never_type(&self) -> LayoutS {
let dl = self.current_data_layout();
let dl = dl.borrow();
LayoutS {
- variants: Variants::Single { index: V::new(0) },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
abi: Abi::Uninhabited,
largest_niche: None,
@@ -290,18 +286,18 @@ pub trait LayoutCalculator {
}
}
- fn layout_of_struct_or_enum<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
+ fn layout_of_struct_or_enum(
&self,
repr: &ReprOptions,
- variants: &IndexVec<V, Vec<F>>,
+ variants: &IndexVec<VariantIdx, Vec<Layout<'_>>>,
is_enum: bool,
is_unsafe_cell: bool,
scalar_valid_range: (Bound<u128>, Bound<u128>),
discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
- discriminants: impl Iterator<Item = (V, i128)>,
+ discriminants: impl Iterator<Item = (VariantIdx, i128)>,
niche_optimize_enum: bool,
always_sized: bool,
- ) -> Option<LayoutS<V>> {
+ ) -> Option<LayoutS> {
let dl = self.current_data_layout();
let dl = dl.borrow();
@@ -316,9 +312,9 @@ pub trait LayoutCalculator {
// but *not* an encoding of the discriminant (e.g., a tag value).
// See issue #49298 for more details on the need to leave space
// for non-ZST uninhabited data (mostly partial initialization).
- let absent = |fields: &[F]| {
- let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
- let is_zst = fields.iter().all(|f| f.is_zst());
+ let absent = |fields: &[Layout<'_>]| {
+ let uninhabited = fields.iter().any(|f| f.abi().is_uninhabited());
+ let is_zst = fields.iter().all(|f| f.0.is_zst());
uninhabited && is_zst
};
let (present_first, present_second) = {
@@ -335,7 +331,7 @@ pub trait LayoutCalculator {
}
// If it's a struct, still compute a layout so that we can still compute the
// field offsets.
- None => V::new(0),
+ None => VariantIdx::new(0),
};
let is_struct = !is_enum ||
@@ -439,12 +435,12 @@ pub trait LayoutCalculator {
// variant layouts, so we can't store them in the
// overall LayoutS. Store the overall LayoutS
// and the variant LayoutSs here until then.
- struct TmpLayout<V: Idx> {
- layout: LayoutS<V>,
- variants: IndexVec<V, LayoutS<V>>,
+ struct TmpLayout {
+ layout: LayoutS,
+ variants: IndexVec<VariantIdx, LayoutS>,
}
- let calculate_niche_filling_layout = || -> Option<TmpLayout<V>> {
+ let calculate_niche_filling_layout = || -> Option<TmpLayout> {
if niche_optimize_enum {
return None;
}
@@ -464,15 +460,16 @@ pub trait LayoutCalculator {
Some(st)
})
- .collect::<Option<IndexVec<V, _>>>()?;
+ .collect::<Option<IndexVec<VariantIdx, _>>>()?;
let largest_variant_index = variant_layouts
.iter_enumerated()
.max_by_key(|(_i, layout)| layout.size.bytes())
.map(|(i, _layout)| i)?;
- let all_indices = (0..=variants.len() - 1).map(V::new);
- let needs_disc = |index: V| index != largest_variant_index && !absent(&variants[index]);
+ let all_indices = (0..=variants.len() - 1).map(VariantIdx::new);
+ let needs_disc =
+ |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap().index()
..=all_indices.rev().find(|v| needs_disc(*v)).unwrap().index();
@@ -482,7 +479,7 @@ pub trait LayoutCalculator {
let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
.iter()
.enumerate()
- .filter_map(|(j, field)| Some((j, field.largest_niche?)))
+ .filter_map(|(j, field)| Some((j, field.largest_niche()?)))
.max_by_key(|(_, niche)| niche.available(dl))
.and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
let niche_offset =
@@ -514,7 +511,7 @@ pub trait LayoutCalculator {
match layout.fields {
FieldsShape::Arbitrary { ref mut offsets, .. } => {
for (j, offset) in offsets.iter_mut().enumerate() {
- if !variants[i][j].is_zst() {
+ if !variants[i][j].0.is_zst() {
*offset += this_offset;
}
}
@@ -572,8 +569,8 @@ pub trait LayoutCalculator {
tag: niche_scalar,
tag_encoding: TagEncoding::Niche {
untagged_variant: largest_variant_index,
- niche_variants: (V::new(*niche_variants.start())
- ..=V::new(*niche_variants.end())),
+ niche_variants: (VariantIdx::new(*niche_variants.start())
+ ..=VariantIdx::new(*niche_variants.end())),
niche_start,
},
tag_field: 0,
@@ -598,7 +595,7 @@ pub trait LayoutCalculator {
let discr_type = repr.discr_type();
let bits = Integer::from_attr(dl, discr_type).size().bits();
for (i, mut val) in discriminants {
- if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
+ if variants[i].iter().any(|f| f.abi().is_uninhabited()) {
continue;
}
if discr_type.is_signed() {
@@ -636,7 +633,7 @@ pub trait LayoutCalculator {
if repr.c() {
for fields in variants {
for field in fields {
- prefix_align = prefix_align.max(field.align.abi);
+ prefix_align = prefix_align.max(field.align().abi);
}
}
}
@@ -655,8 +652,8 @@ pub trait LayoutCalculator {
// Find the first field we can't move later
// to make room for a larger discriminant.
for field in st.fields.index_by_increasing_offset().map(|j| &field_layouts[j]) {
- if !field.is_zst() || field.align.abi.bytes() != 1 {
- start_align = start_align.min(field.align.abi);
+ if !field.0.is_zst() || field.align().abi.bytes() != 1 {
+ start_align = start_align.min(field.align().abi);
break;
}
}
@@ -664,7 +661,7 @@ pub trait LayoutCalculator {
align = align.max(st.align);
Some(st)
})
- .collect::<Option<IndexVec<V, _>>>()?;
+ .collect::<Option<IndexVec<VariantIdx, _>>>()?;
// Align the maximum variant size to the largest alignment.
size = size.align_to(align.abi);
@@ -759,7 +756,7 @@ pub trait LayoutCalculator {
let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
panic!();
};
- let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
+ let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.0.is_zst());
let (field, offset) = match (fields.next(), fields.next()) {
(None, None) => {
common_prim_initialized_in_all_variants = false;
@@ -771,7 +768,7 @@ pub trait LayoutCalculator {
break;
}
};
- let prim = match field.abi {
+ let prim = match field.abi() {
Abi::Scalar(scalar) => {
common_prim_initialized_in_all_variants &=
matches!(scalar, Scalar::Initialized { .. });
@@ -802,7 +799,7 @@ pub trait LayoutCalculator {
// Common prim might be uninit.
Scalar::Union { value: prim }
};
- let pair = self.scalar_pair::<V>(tag, prim_scalar);
+ let pair = self.scalar_pair(tag, prim_scalar);
let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index, &[0, 1]);
@@ -862,9 +859,8 @@ pub trait LayoutCalculator {
// pick the layout with the larger niche; otherwise,
// pick tagged as it has simpler codegen.
use cmp::Ordering::*;
- let niche_size = |tmp_l: &TmpLayout<V>| {
- tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
- };
+ let niche_size =
+ |tmp_l: &TmpLayout| tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl));
match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
(Greater, _) => nl,
(Equal, Less) => nl,
@@ -884,11 +880,11 @@ pub trait LayoutCalculator {
Some(best_layout.layout)
}
- fn layout_of_union<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
+ fn layout_of_union(
&self,
repr: &ReprOptions,
- variants: &IndexVec<V, Vec<F>>,
- ) -> Option<LayoutS<V>> {
+ variants: &IndexVec<VariantIdx, Vec<Layout<'_>>>,
+ ) -> Option<LayoutS> {
let dl = self.current_data_layout();
let dl = dl.borrow();
let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
@@ -900,15 +896,15 @@ pub trait LayoutCalculator {
let optimize = !repr.inhibit_union_abi_opt();
let mut size = Size::ZERO;
let mut abi = Abi::Aggregate { sized: true };
- let index = V::new(0);
+ let index = VariantIdx::new(0);
for field in &variants[index] {
- assert!(field.is_sized());
- align = align.max(field.align);
+ assert!(field.0.is_sized());
+ align = align.max(field.align());
// If all non-ZST fields have the same ABI, forward this ABI
- if optimize && !field.is_zst() {
+ if optimize && !field.0.is_zst() {
// Discard valid range information and allow undef
- let field_abi = match field.abi {
+ let field_abi = match field.abi() {
Abi::Scalar(x) => Abi::Scalar(x.to_union()),
Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
Abi::Vector { element: x, count } => {
@@ -926,7 +922,7 @@ pub trait LayoutCalculator {
}
}
- size = cmp::max(size, field.size);
+ size = cmp::max(size, field.size());
}
if let Some(pack) = repr.pack {
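
Editor's note: before moving on to lib.rs, the union rule computed in `layout_of_union` above (maximum field alignment, maximum field size, size then padded up to that alignment) is easy to see from std. This is an illustration of the result, not the code path itself.

```rust
use std::mem::{align_of, size_of};

// Union layout: max alignment, max field size, padded up to the alignment.
#[repr(C)]
union Example {
    a: u8,
    b: u32,
    c: [u8; 7],
}

fn main() {
    // On typical targets u32 has 4-byte alignment, giving align 4 and size 8
    // (the largest field is 7 bytes, padded up to the alignment).
    println!("align = {}, size = {}", align_of::<Example>(), size_of::<Example>());
    assert!(size_of::<Example>() >= 7);
    assert_eq!(size_of::<Example>() % align_of::<Example>(), 0);
}
```
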
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index f4cb459f3..39574ca55 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -8,6 +8,7 @@ use std::ops::{Add, AddAssign, Mul, RangeInclusive, Sub};
use std::str::FromStr;
use bitflags::bitflags;
+use rustc_data_structures::intern::Interned;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
use rustc_index::vec::{Idx, IndexVec};
@@ -170,7 +171,9 @@ pub struct TargetDataLayout {
pub instruction_address_space: AddressSpace,
- /// Minimum size of #[repr(C)] enums (default I32 bits)
+ /// Minimum size of #[repr(C)] enums (default c_int::BITS, usually 32)
+ /// Note: This isn't in LLVM's data layout string, it is `short_enum`
+ /// so the only valid spec for LLVM is c_int::BITS or 8
pub c_enum_min_size: Integer,
}
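
Editor's note: the reworded doc comment is about an observable difference. A small illustration follows, hedged for mainstream targets where `c_int` is 32 bits; some embedded ABIs use a smaller minimum for repr(C) enums.

```rust
use std::ffi::c_int;
use std::mem::size_of;

// `c_enum_min_size` is why a fieldless #[repr(C)] enum is usually c_int-sized,
// while the default repr can shrink the discriminant to a single byte.
#[allow(dead_code)]
#[repr(C)]
enum CFlag {
    Off,
    On,
}

#[allow(dead_code)]
enum RustFlag {
    Off,
    On,
}

fn main() {
    // Typically 4 vs 1 on mainstream targets.
    println!("repr(C): {} bytes (c_int is {} bytes)", size_of::<CFlag>(), size_of::<c_int>());
    println!("default repr: {} byte(s)", size_of::<RustFlag>());
}
```
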
@@ -267,6 +270,9 @@ impl TargetDataLayout {
["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
+ // FIXME(erikdesjardins): we should be parsing nonzero address spaces
+ // this will require replacing TargetDataLayout::{pointer_size,pointer_align}
+ // with e.g. `fn pointer_size_in(AddressSpace)`
[p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
dl.pointer_size = size(s, p)?;
dl.pointer_align = align(a, p)?;
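
Editor's note: the specs this FIXME refers to look like `p:64:64` or, with an explicit address space, `p270:32:32` (one of x86_64's mixed-pointer address spaces). The parser below is a toy stand-in to show the shape of the string, not rustc's parsing code.

```rust
// Toy parser for "p[<addrspace>]:<size-bits>:<abi-align-bits>" entries; real
// data-layout strings may carry further alignment fields, which this ignores.
fn parse_pointer_spec(spec: &str) -> Option<(u32, u64, u64)> {
    let rest = spec.strip_prefix('p')?;
    let mut parts = rest.split(':');
    let address_space: u32 = match parts.next()? {
        "" => 0, // default address space
        n => n.parse().ok()?,
    };
    let size_bits: u64 = parts.next()?.parse().ok()?;
    let align_bits: u64 = parts.next()?.parse().ok()?;
    Some((address_space, size_bits, align_bits))
}

fn main() {
    assert_eq!(parse_pointer_spec("p:64:64"), Some((0, 64, 64)));
    assert_eq!(parse_pointer_spec("p270:32:32"), Some((270, 32, 32)));
}
```
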
@@ -861,7 +867,7 @@ pub enum Primitive {
Int(Integer, bool),
F32,
F64,
- Pointer,
+ Pointer(AddressSpace),
}
impl Primitive {
@@ -872,7 +878,10 @@ impl Primitive {
Int(i, _) => i.size(),
F32 => Size::from_bits(32),
F64 => Size::from_bits(64),
- Pointer => dl.pointer_size,
+ // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
+ // different address spaces can have different sizes
+ // (but TargetDataLayout doesn't currently parse that part of the DL string)
+ Pointer(_) => dl.pointer_size,
}
}
@@ -883,26 +892,12 @@ impl Primitive {
Int(i, _) => i.align(dl),
F32 => dl.f32_align,
F64 => dl.f64_align,
- Pointer => dl.pointer_align,
+ // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
+ // different address spaces can have different alignments
+ // (but TargetDataLayout doesn't currently parse that part of the DL string)
+ Pointer(_) => dl.pointer_align,
}
}
-
- // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
- #[inline]
- pub fn is_float(self) -> bool {
- matches!(self, F32 | F64)
- }
-
- // FIXME(eddyb) remove, it's completely unused.
- #[inline]
- pub fn is_int(self) -> bool {
- matches!(self, Int(..))
- }
-
- #[inline]
- pub fn is_ptr(self) -> bool {
- matches!(self, Pointer)
- }
}
/// Inclusive wrap-around range of valid values, that is, if
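
Editor's note: the removed `is_float`/`is_int`/`is_ptr` helpers really are one-liners at the call site now. The enum below is a stand-in (the real `Primitive` carries `Integer` and `AddressSpace`) just to show the `matches!` idiom.

```rust
// Stand-in enum; not rustc's actual `Primitive`.
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum Primitive {
    Int(u8, bool),
    F32,
    F64,
    Pointer(u32),
}

fn main() {
    let p = Primitive::Pointer(0);
    // What used to be `p.is_ptr()` / `p.is_float()` is just an inline pattern now.
    assert!(matches!(p, Primitive::Pointer(_)));
    assert!(!matches!(p, Primitive::F32 | Primitive::F64));
    assert!(matches!(Primitive::Int(32, true), Primitive::Int(..)));
}
```
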
@@ -1188,7 +1183,8 @@ impl FieldsShape {
/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AddressSpace(pub u32);
impl AddressSpace {
@@ -1257,9 +1253,9 @@ impl Abi {
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum Variants<V: Idx> {
+pub enum Variants {
/// Single enum variants, structs/tuples, unions, and all non-ADTs.
- Single { index: V },
+ Single { index: VariantIdx },
/// Enum-likes with more than one inhabited variant: each variant comes with
/// a *discriminant* (usually the same as the variant index but the user can
@@ -1269,15 +1265,15 @@ pub enum Variants<V: Idx> {
/// For enums, the tag is the sole field of the layout.
Multiple {
tag: Scalar,
- tag_encoding: TagEncoding<V>,
+ tag_encoding: TagEncoding,
tag_field: usize,
- variants: IndexVec<V, LayoutS<V>>,
+ variants: IndexVec<VariantIdx, LayoutS>,
},
}
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum TagEncoding<V: Idx> {
+pub enum TagEncoding {
/// The tag directly stores the discriminant, but possibly with a smaller layout
/// (so converting the tag to the discriminant can require sign extension).
Direct,
@@ -1292,7 +1288,11 @@ pub enum TagEncoding<V: Idx> {
/// For example, `Option<(usize, &T)>` is represented such that
/// `None` has a null pointer for the second tuple field, and
/// `Some` is the identity function (with a non-null reference).
- Niche { untagged_variant: V, niche_variants: RangeInclusive<V>, niche_start: u128 },
+ Niche {
+ untagged_variant: VariantIdx,
+ niche_variants: RangeInclusive<VariantIdx>,
+ niche_start: u128,
+ },
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
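
Editor's note: niche encoding as described in this comment block is visible from std. The first two equalities are documented guarantees; the last line merely checks that a separate tag is needed when no niche exists.

```rust
use std::mem::size_of;
use std::num::NonZeroU32;

fn main() {
    // The null pointer is the niche, so `None` needs no separate tag.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    // Zero is the niche reserved by NonZeroU32.
    assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<u32>());
    // No niche available: a discriminant (plus padding) has to be stored.
    assert!(size_of::<Option<u32>>() > size_of::<u32>());
}
```
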
@@ -1379,9 +1379,14 @@ impl Niche {
}
}
+rustc_index::newtype_index! {
+ #[derive(HashStable_Generic)]
+ pub struct VariantIdx {}
+}
+
#[derive(PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub struct LayoutS<V: Idx> {
+pub struct LayoutS {
/// Says where the fields are located within the layout.
pub fields: FieldsShape,
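
Editor's note: for readers unfamiliar with `newtype_index!`, a rough model of what it generates for `VariantIdx` is sketched below; the real macro also emits encoding and hashing impls and integrates with `IndexVec`.

```rust
// Rough model of `VariantIdx`: a dense, u32-backed index type.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
struct VariantIdx(u32);

impl VariantIdx {
    fn new(idx: usize) -> Self {
        VariantIdx(u32::try_from(idx).expect("variant index overflowed u32"))
    }

    fn index(self) -> usize {
        self.0 as usize
    }
}

fn main() {
    // Structs and univariant layouts use variant 0 throughout the code above.
    let first = VariantIdx::new(0);
    assert_eq!(first.index(), 0);
}
```
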
@@ -1392,7 +1397,7 @@ pub struct LayoutS<V: Idx> {
///
/// To access all fields of this layout, both `fields` and the fields of the active variant
/// must be taken into account.
- pub variants: Variants<V>,
+ pub variants: Variants,
/// The `abi` defines how this data is passed between functions, and it defines
/// value restrictions via `valid_range`.
@@ -1411,13 +1416,13 @@ pub struct LayoutS<V: Idx> {
pub size: Size,
}
-impl<V: Idx> LayoutS<V> {
+impl LayoutS {
pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
let size = scalar.size(cx);
let align = scalar.align(cx);
LayoutS {
- variants: Variants::Single { index: V::new(0) },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
abi: Abi::Scalar(scalar),
largest_niche,
@@ -1427,7 +1432,7 @@ impl<V: Idx> LayoutS<V> {
}
}
-impl<V: Idx> fmt::Debug for LayoutS<V> {
+impl fmt::Debug for LayoutS {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// This is how `Layout` used to print before it become
// `Interned<LayoutS>`. We print it like this to avoid having to update
@@ -1444,42 +1449,63 @@ impl<V: Idx> fmt::Debug for LayoutS<V> {
}
}
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum PointerKind {
- /// Most general case, we know no restrictions to tell LLVM.
- SharedMutable,
+#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
+#[rustc_pass_by_value]
+pub struct Layout<'a>(pub Interned<'a, LayoutS>);
+
+impl<'a> fmt::Debug for Layout<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // See comment on `<LayoutS as Debug>::fmt` above.
+ self.0.0.fmt(f)
+ }
+}
- /// `&T` where `T` contains no `UnsafeCell`, is `dereferenceable`, `noalias` and `readonly`.
- Frozen,
+impl<'a> Layout<'a> {
+ pub fn fields(self) -> &'a FieldsShape {
+ &self.0.0.fields
+ }
- /// `&mut T` which is `dereferenceable` and `noalias` but not `readonly`.
- UniqueBorrowed,
+ pub fn variants(self) -> &'a Variants {
+ &self.0.0.variants
+ }
- /// `&mut !Unpin`, which is `dereferenceable` but neither `noalias` nor `readonly`.
- UniqueBorrowedPinned,
+ pub fn abi(self) -> Abi {
+ self.0.0.abi
+ }
+
+ pub fn largest_niche(self) -> Option<Niche> {
+ self.0.0.largest_niche
+ }
+
+ pub fn align(self) -> AbiAndPrefAlign {
+ self.0.0.align
+ }
- /// `Box<T>`, which is `noalias` (even on return types, unlike the above) but neither `readonly`
- /// nor `dereferenceable`.
- UniqueOwned,
+ pub fn size(self) -> Size {
+ self.0.0.size
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum PointerKind {
+ /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
+ SharedRef { frozen: bool },
+ /// Mutable reference. `unpin` indicates the absence of any pinned data.
+ MutableRef { unpin: bool },
+ /// Box. `unpin` indicates the absence of any pinned data.
+ Box { unpin: bool },
}
+/// Note that this information is advisory only, and backends are free to ignore it.
+/// It can only be used to encode potential optimizations, but no critical information.
#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
pub size: Size,
pub align: Align,
pub safe: Option<PointerKind>,
- pub address_space: AddressSpace,
-}
-
-/// Used in `might_permit_raw_init` to indicate the kind of initialisation
-/// that is checked to be valid
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum InitKind {
- Zero,
- UninitMitigated0x01Fill,
}
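
Editor's note: the `Layout<'a>(Interned<'a, LayoutS>)` handle added above is a cheap-to-copy wrapper whose accessors reach the data through `self.0.0`. The self-contained model below mimics that shape; its `Interned` is a stand-in for `rustc_data_structures::intern::Interned`, not the real type.

```rust
// Minimal model of the interned-layout handle.
struct LayoutS {
    size: u64,  // bytes, standing in for abi::Size
    align: u64, // bytes, standing in for AbiAndPrefAlign
}

struct Interned<'a, T>(pub &'a T);

// `Interned` is Copy regardless of `T`, since it only holds a reference.
impl<'a, T> Clone for Interned<'a, T> {
    fn clone(&self) -> Self {
        *self
    }
}
impl<'a, T> Copy for Interned<'a, T> {}

#[derive(Copy, Clone)]
struct Layout<'a>(pub Interned<'a, LayoutS>);

impl<'a> Layout<'a> {
    fn size(self) -> u64 {
        self.0.0.size
    }

    fn align(self) -> u64 {
        self.0.0.align
    }
}

fn main() {
    let interned = LayoutS { size: 8, align: 8 };
    let layout = Layout(Interned(&interned));
    // Passed by value everywhere, like the `#[rustc_pass_by_value]` wrapper.
    assert_eq!((layout.size(), layout.align()), (8, 8));
}
```
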
-impl<V: Idx> LayoutS<V> {
+impl LayoutS {
/// Returns `true` if the layout corresponds to an unsized type.
pub fn is_unsized(&self) -> bool {
self.abi.is_unsized()