Diffstat (limited to 'compiler/rustc_abi')
-rw-r--r--  compiler/rustc_abi/Cargo.toml    |  10
-rw-r--r--  compiler/rustc_abi/src/layout.rs | 312
-rw-r--r--  compiler/rustc_abi/src/lib.rs    | 246
3 files changed, 268 insertions, 300 deletions
diff --git a/compiler/rustc_abi/Cargo.toml b/compiler/rustc_abi/Cargo.toml
index 48b199cb8..e549724b1 100644
--- a/compiler/rustc_abi/Cargo.toml
+++ b/compiler/rustc_abi/Cargo.toml
@@ -4,21 +4,27 @@ version = "0.0.0"
edition = "2021"
[dependencies]
+# tidy-alphabetical-start
bitflags = "1.2.1"
-tracing = "0.1"
rand = { version = "0.8.4", default-features = false, optional = true }
rand_xoshiro = { version = "0.6.0", optional = true }
rustc_data_structures = { path = "../rustc_data_structures", optional = true }
rustc_index = { path = "../rustc_index", default-features = false }
rustc_macros = { path = "../rustc_macros", optional = true }
rustc_serialize = { path = "../rustc_serialize", optional = true }
+tracing = "0.1"
+# tidy-alphabetical-end
[features]
+# tidy-alphabetical-start
default = ["nightly", "randomize"]
-randomize = ["rand", "rand_xoshiro"]
+# rust-analyzer depends on this crate and we therefore require it to build on a stable toolchain
+# without depending on rustc_data_structures, rustc_macros and rustc_serialize
nightly = [
"rustc_data_structures",
"rustc_index/nightly",
"rustc_macros",
"rustc_serialize",
]
+randomize = ["rand", "rand_xoshiro", "nightly"]
+# tidy-alphabetical-end
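
The feature split above exists so that rust-analyzer can build this crate on a stable toolchain: `nightly` gates the rustc-internal dependencies and derives, and `randomize` now pulls in `nightly`. A minimal sketch of the gating pattern this enables (the type name below is hypothetical, not from the crate):

#[cfg(feature = "nightly")]
use rustc_macros::HashStable_Generic;

// On stable (no "nightly" feature) the derive is simply not emitted,
// so the code still compiles without rustc_macros.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct ExampleAbiData {
    pub size_in_bytes: u64,
}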
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index 0706dc18f..996fd5bbe 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -1,21 +1,27 @@
-use super::*;
-use std::fmt::Write;
+use std::fmt::{self, Write};
+use std::ops::Deref;
use std::{borrow::Borrow, cmp, iter, ops::Bound};
-#[cfg(feature = "randomize")]
-use rand::{seq::SliceRandom, SeedableRng};
-#[cfg(feature = "randomize")]
-use rand_xoshiro::Xoshiro128StarStar;
-
+use rustc_index::Idx;
use tracing::debug;
+use crate::{
+ Abi, AbiAndPrefAlign, Align, FieldsShape, IndexSlice, IndexVec, Integer, LayoutS, Niche,
+ NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding, TargetDataLayout,
+ Variants, WrappingRange,
+};
+
pub trait LayoutCalculator {
type TargetDataLayoutRef: Borrow<TargetDataLayout>;
fn delay_bug(&self, txt: String);
fn current_data_layout(&self) -> Self::TargetDataLayoutRef;
- fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS {
+ fn scalar_pair<FieldIdx: Idx, VariantIdx: Idx>(
+ &self,
+ a: Scalar,
+ b: Scalar,
+ ) -> LayoutS<FieldIdx, VariantIdx> {
let dl = self.current_data_layout();
let dl = dl.borrow();
let b_align = b.align(dl);
@@ -31,7 +37,7 @@ pub trait LayoutCalculator {
.max_by_key(|niche| niche.available(dl));
LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary {
offsets: [Size::ZERO, b_offset].into(),
memory_index: [0, 1].into(),
@@ -45,40 +51,45 @@ pub trait LayoutCalculator {
}
}
- fn univariant(
+ fn univariant<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+ >(
&self,
dl: &TargetDataLayout,
- fields: &IndexSlice<FieldIdx, Layout<'_>>,
+ fields: &IndexSlice<FieldIdx, F>,
repr: &ReprOptions,
kind: StructKind,
- ) -> Option<LayoutS> {
+ ) -> Option<LayoutS<FieldIdx, VariantIdx>> {
let layout = univariant(self, dl, fields, repr, kind, NicheBias::Start);
- // Enums prefer niches close to the beginning or the end of the variants so that other (smaller)
- // data-carrying variants can be packed into the space after/before the niche.
+ // Enums prefer niches close to the beginning or the end of the variants so that other
+ // (smaller) data-carrying variants can be packed into the space after/before the niche.
// If the default field ordering does not give us a niche at the front then we do a second
- // run and bias niches to the right and then check which one is closer to one of the struct's
- // edges.
+ // run and bias niches to the right and then check which one is closer to one of the
+ // struct's edges.
if let Some(layout) = &layout {
// Don't try to calculate an end-biased layout for unsizable structs,
// otherwise we could end up with different layouts for
- // Foo<Type> and Foo<dyn Trait> which would break unsizing
+ // Foo<Type> and Foo<dyn Trait> which would break unsizing.
if !matches!(kind, StructKind::MaybeUnsized) {
if let Some(niche) = layout.largest_niche {
let head_space = niche.offset.bytes();
- let niche_length = niche.value.size(dl).bytes();
- let tail_space = layout.size.bytes() - head_space - niche_length;
+ let niche_len = niche.value.size(dl).bytes();
+ let tail_space = layout.size.bytes() - head_space - niche_len;
- // This may end up doing redundant work if the niche is already in the last field
- // (e.g. a trailing bool) and there is tail padding. But it's non-trivial to get
- // the unpadded size so we try anyway.
+ // This may end up doing redundant work if the niche is already in the last
+ // field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
+ // to get the unpadded size so we try anyway.
if fields.len() > 1 && head_space != 0 && tail_space > 0 {
let alt_layout = univariant(self, dl, fields, repr, kind, NicheBias::End)
.expect("alt layout should always work");
- let niche = alt_layout
+ let alt_niche = alt_layout
.largest_niche
.expect("alt layout should have a niche like the regular one");
- let alt_head_space = niche.offset.bytes();
- let alt_niche_len = niche.value.size(dl).bytes();
+ let alt_head_space = alt_niche.offset.bytes();
+ let alt_niche_len = alt_niche.value.size(dl).bytes();
let alt_tail_space =
alt_layout.size.bytes() - alt_head_space - alt_niche_len;
@@ -93,7 +104,7 @@ pub trait LayoutCalculator {
alt_layout: {}\n",
layout.size.bytes(),
head_space,
- niche_length,
+ niche_len,
tail_space,
alt_head_space,
alt_niche_len,
@@ -114,11 +125,13 @@ pub trait LayoutCalculator {
layout
}
- fn layout_of_never_type(&self) -> LayoutS {
+ fn layout_of_never_type<FieldIdx: Idx, VariantIdx: Idx>(
+ &self,
+ ) -> LayoutS<FieldIdx, VariantIdx> {
let dl = self.current_data_layout();
let dl = dl.borrow();
LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
abi: Abi::Uninhabited,
largest_niche: None,
@@ -129,10 +142,15 @@ pub trait LayoutCalculator {
}
}
- fn layout_of_struct_or_enum(
+ fn layout_of_struct_or_enum<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+ >(
&self,
repr: &ReprOptions,
- variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
+ variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
is_enum: bool,
is_unsafe_cell: bool,
scalar_valid_range: (Bound<u128>, Bound<u128>),
@@ -140,7 +158,7 @@ pub trait LayoutCalculator {
discriminants: impl Iterator<Item = (VariantIdx, i128)>,
dont_niche_optimize_enum: bool,
always_sized: bool,
- ) -> Option<LayoutS> {
+ ) -> Option<LayoutS<FieldIdx, VariantIdx>> {
let dl = self.current_data_layout();
let dl = dl.borrow();
@@ -155,11 +173,11 @@ pub trait LayoutCalculator {
// but *not* an encoding of the discriminant (e.g., a tag value).
// See issue #49298 for more details on the need to leave space
// for non-ZST uninhabited data (mostly partial initialization).
- let absent = |fields: &IndexSlice<FieldIdx, Layout<'_>>| {
- let uninhabited = fields.iter().any(|f| f.abi().is_uninhabited());
+ let absent = |fields: &IndexSlice<FieldIdx, F>| {
+ let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
// We cannot ignore alignment; that might lead us to entirely discard a variant and
// produce an enum that is less aligned than it should be!
- let is_1zst = fields.iter().all(|f| f.0.is_1zst());
+ let is_1zst = fields.iter().all(|f| f.is_1zst());
uninhabited && is_1zst
};
let (present_first, present_second) = {
@@ -176,7 +194,7 @@ pub trait LayoutCalculator {
}
// If it's a struct, still compute a layout so that we can still compute the
// field offsets.
- None => FIRST_VARIANT,
+ None => VariantIdx::new(0),
};
let is_struct = !is_enum ||
@@ -279,12 +297,12 @@ pub trait LayoutCalculator {
// variant layouts, so we can't store them in the
// overall LayoutS. Store the overall LayoutS
// and the variant LayoutSs here until then.
- struct TmpLayout {
- layout: LayoutS,
- variants: IndexVec<VariantIdx, LayoutS>,
+ struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
+ layout: LayoutS<FieldIdx, VariantIdx>,
+ variants: IndexVec<VariantIdx, LayoutS<FieldIdx, VariantIdx>>,
}
- let calculate_niche_filling_layout = || -> Option<TmpLayout> {
+ let calculate_niche_filling_layout = || -> Option<TmpLayout<FieldIdx, VariantIdx>> {
if dont_niche_optimize_enum {
return None;
}
@@ -322,13 +340,14 @@ pub trait LayoutCalculator {
let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
- let count = niche_variants.size_hint().1.unwrap() as u128;
+ let count =
+ (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;
// Find the field with the largest niche
let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
.iter()
.enumerate()
- .filter_map(|(j, field)| Some((j, field.largest_niche()?)))
+ .filter_map(|(j, field)| Some((j, field.largest_niche?)))
.max_by_key(|(_, niche)| niche.available(dl))
.and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
let niche_offset =
@@ -443,7 +462,7 @@ pub trait LayoutCalculator {
let discr_type = repr.discr_type();
let bits = Integer::from_attr(dl, discr_type).size().bits();
for (i, mut val) in discriminants {
- if variants[i].iter().any(|f| f.abi().is_uninhabited()) {
+ if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
continue;
}
if discr_type.is_signed() {
@@ -484,7 +503,7 @@ pub trait LayoutCalculator {
if repr.c() {
for fields in variants {
for field in fields {
- prefix_align = prefix_align.max(field.align().abi);
+ prefix_align = prefix_align.max(field.align.abi);
}
}
}
@@ -503,9 +522,9 @@ pub trait LayoutCalculator {
// Find the first field we can't move later
// to make room for a larger discriminant.
for field_idx in st.fields.index_by_increasing_offset() {
- let field = &field_layouts[FieldIdx::from_usize(field_idx)];
- if !field.0.is_1zst() {
- start_align = start_align.min(field.align().abi);
+ let field = &field_layouts[FieldIdx::new(field_idx)];
+ if !field.is_1zst() {
+ start_align = start_align.min(field.align.abi);
break;
}
}
@@ -520,6 +539,7 @@ pub trait LayoutCalculator {
// Align the maximum variant size to the largest alignment.
size = size.align_to(align.abi);
+ // FIXME(oli-obk): deduplicate and harden these checks
if size.bytes() >= dl.obj_size_bound() {
return None;
}
@@ -587,7 +607,7 @@ pub trait LayoutCalculator {
let tag_mask = ity.size().unsigned_int_max();
let tag = Scalar::Initialized {
- value: Int(ity, signed),
+ value: Primitive::Int(ity, signed),
valid_range: WrappingRange {
start: (min as u128 & tag_mask),
end: (max as u128 & tag_mask),
@@ -612,7 +632,7 @@ pub trait LayoutCalculator {
};
// We skip *all* ZST here and later check if we are good in terms of alignment.
// This lets us handle some cases involving aligned ZST.
- let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.0.is_zst());
+ let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
let (field, offset) = match (fields.next(), fields.next()) {
(None, None) => {
common_prim_initialized_in_all_variants = false;
@@ -624,7 +644,7 @@ pub trait LayoutCalculator {
break;
}
};
- let prim = match field.abi() {
+ let prim = match field.abi {
Abi::Scalar(scalar) => {
common_prim_initialized_in_all_variants &=
matches!(scalar, Scalar::Initialized { .. });
@@ -655,7 +675,7 @@ pub trait LayoutCalculator {
// Common prim might be uninit.
Scalar::Union { value: prim }
};
- let pair = self.scalar_pair(tag, prim_scalar);
+ let pair = self.scalar_pair::<FieldIdx, VariantIdx>(tag, prim_scalar);
let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index.raw, [0, 1]);
@@ -663,8 +683,8 @@ pub trait LayoutCalculator {
}
_ => panic!(),
};
- if pair_offsets[FieldIdx::from_u32(0)] == Size::ZERO
- && pair_offsets[FieldIdx::from_u32(1)] == *offset
+ if pair_offsets[FieldIdx::new(0)] == Size::ZERO
+ && pair_offsets[FieldIdx::new(1)] == *offset
&& align == pair.align
&& size == pair.size
{
@@ -684,7 +704,8 @@ pub trait LayoutCalculator {
// Also do not overwrite any already existing "clever" ABIs.
if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
variant.abi = abi;
- // Also need to bump up the size and alignment, so that the entire value fits in here.
+ // Also need to bump up the size and alignment, so that the entire value fits
+ // in here.
variant.size = cmp::max(variant.size, size);
variant.align.abi = cmp::max(variant.align.abi, align.abi);
}
@@ -720,8 +741,9 @@ pub trait LayoutCalculator {
// pick the layout with the larger niche; otherwise,
// pick tagged as it has simpler codegen.
use cmp::Ordering::*;
- let niche_size =
- |tmp_l: &TmpLayout| tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl));
+ let niche_size = |tmp_l: &TmpLayout<FieldIdx, VariantIdx>| {
+ tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
+ };
match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
(Greater, _) => nl,
(Equal, Less) => nl,
@@ -741,11 +763,16 @@ pub trait LayoutCalculator {
Some(best_layout.layout)
}
- fn layout_of_union(
+ fn layout_of_union<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+ >(
&self,
repr: &ReprOptions,
- variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
- ) -> Option<LayoutS> {
+ variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
+ ) -> Option<LayoutS<FieldIdx, VariantIdx>> {
let dl = self.current_data_layout();
let dl = dl.borrow();
let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
@@ -762,24 +789,24 @@ pub trait LayoutCalculator {
};
let mut size = Size::ZERO;
- let only_variant = &variants[FIRST_VARIANT];
+ let only_variant = &variants[VariantIdx::new(0)];
for field in only_variant {
- if field.0.is_unsized() {
+ if field.is_unsized() {
self.delay_bug("unsized field in union".to_string());
}
- align = align.max(field.align());
- max_repr_align = max_repr_align.max(field.max_repr_align());
- size = cmp::max(size, field.size());
+ align = align.max(field.align);
+ max_repr_align = max_repr_align.max(field.max_repr_align);
+ size = cmp::max(size, field.size);
- if field.0.is_zst() {
+ if field.is_zst() {
// Nothing more to do for ZST fields
continue;
}
if let Ok(common) = common_non_zst_abi_and_align {
// Discard valid range information and allow undef
- let field_abi = field.abi().to_union();
+ let field_abi = field.abi.to_union();
if let Some((common_abi, common_align)) = common {
if common_abi != field_abi {
@@ -790,15 +817,14 @@ pub trait LayoutCalculator {
// have the same alignment
if !matches!(common_abi, Abi::Aggregate { .. }) {
assert_eq!(
- common_align,
- field.align().abi,
+ common_align, field.align.abi,
"non-Aggregate field with matching ABI but differing alignment"
);
}
}
} else {
// First non-ZST field: record its ABI and alignment
- common_non_zst_abi_and_align = Ok(Some((field_abi, field.align().abi)));
+ common_non_zst_abi_and_align = Ok(Some((field_abi, field.align.abi)));
}
}
}
@@ -830,7 +856,7 @@ pub trait LayoutCalculator {
};
Some(LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Union(NonZeroUsize::new(only_variant.len())?),
abi,
largest_niche: None,
@@ -848,14 +874,19 @@ enum NicheBias {
End,
}
-fn univariant(
+fn univariant<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+>(
this: &(impl LayoutCalculator + ?Sized),
dl: &TargetDataLayout,
- fields: &IndexSlice<FieldIdx, Layout<'_>>,
+ fields: &IndexSlice<FieldIdx, F>,
repr: &ReprOptions,
kind: StructKind,
niche_bias: NicheBias,
-) -> Option<LayoutS> {
+) -> Option<LayoutS<FieldIdx, VariantIdx>> {
let pack = repr.pack;
let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
let mut max_repr_align = repr.align;
@@ -868,15 +899,17 @@ fn univariant(
// If `-Z randomize-layout` was enabled for the type definition we can shuffle
// the field ordering to try and catch some code making assumptions about layouts
- // we don't guarantee
+ // we don't guarantee.
if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
#[cfg(feature = "randomize")]
{
- // `ReprOptions.layout_seed` is a deterministic seed that we can use to
- // randomize field ordering with
- let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed.as_u64());
+ use rand::{seq::SliceRandom, SeedableRng};
+ // `ReprOptions.layout_seed` is a deterministic seed we can use to randomize field
+ // ordering.
+ let mut rng =
+ rand_xoshiro::Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
- // Shuffle the ordering of the fields
+ // Shuffle the ordering of the fields.
optimizing.shuffle(&mut rng);
}
// Otherwise we just leave things alone and actually optimize the type's fields
@@ -884,35 +917,34 @@ fn univariant(
// To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
// not depend on the layout of the tail.
let max_field_align =
- fields_excluding_tail.iter().map(|f| f.align().abi.bytes()).max().unwrap_or(1);
+ fields_excluding_tail.iter().map(|f| f.align.abi.bytes()).max().unwrap_or(1);
let largest_niche_size = fields_excluding_tail
.iter()
- .filter_map(|f| f.largest_niche())
+ .filter_map(|f| f.largest_niche)
.map(|n| n.available(dl))
.max()
.unwrap_or(0);
- // Calculates a sort key to group fields by their alignment or possibly some size-derived
- // pseudo-alignment.
- let alignment_group_key = |layout: Layout<'_>| {
+ // Calculates a sort key to group fields by their alignment or possibly some
+ // size-derived pseudo-alignment.
+ let alignment_group_key = |layout: &F| {
if let Some(pack) = pack {
- // return the packed alignment in bytes
- layout.align().abi.min(pack).bytes()
+ // Return the packed alignment in bytes.
+ layout.align.abi.min(pack).bytes()
} else {
- // returns log2(effective-align).
- // This is ok since `pack` applies to all fields equally.
- // The calculation assumes that size is an integer multiple of align, except for ZSTs.
- //
- let align = layout.align().abi.bytes();
- let size = layout.size().bytes();
- let niche_size = layout.largest_niche().map(|n| n.available(dl)).unwrap_or(0);
- // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
+ // Returns `log2(effective-align)`. This is ok since `pack` applies to all
+ // fields equally. The calculation assumes that size is an integer multiple of
+ // align, except for ZSTs.
+ let align = layout.align.abi.bytes();
+ let size = layout.size.bytes();
+ let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
+ // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
let size_as_align = align.max(size).trailing_zeros();
let size_as_align = if largest_niche_size > 0 {
match niche_bias {
- // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the array
- // to the front in the first case (for aligned loads) but keep the bool in front
- // in the second case for its niches.
+ // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
+ // array to the front in the first case (for aligned loads) but keep
+ // the bool in front in the second case for its niches.
NicheBias::Start => max_field_align.trailing_zeros().min(size_as_align),
// When moving niches towards the end of the struct then for
// A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
@@ -931,18 +963,18 @@ fn univariant(
match kind {
StructKind::AlwaysSized | StructKind::MaybeUnsized => {
- // Currently `LayoutS` only exposes a single niche so sorting is usually sufficient
- // to get one niche into the preferred position. If it ever supported multiple niches
- // then a more advanced pick-and-pack approach could provide better results.
- // But even for the single-niche cache it's not optimal. E.g. for
- // A(u32, (bool, u8), u16) it would be possible to move the bool to the front
- // but it would require packing the tuple together with the u16 to build a 4-byte
- // group so that the u32 can be placed after it without padding. This kind
- // of packing can't be achieved by sorting.
+ // Currently `LayoutS` only exposes a single niche so sorting is usually
+ // sufficient to get one niche into the preferred position. If it ever
+ // supported multiple niches then a more advanced pick-and-pack approach could
+ // provide better results. But even for the single-niche cache it's not
+ // optimal. E.g. for A(u32, (bool, u8), u16) it would be possible to move the
+ // bool to the front but it would require packing the tuple together with the
+ // u16 to build a 4-byte group so that the u32 can be placed after it without
+ // padding. This kind of packing can't be achieved by sorting.
optimizing.sort_by_key(|&x| {
- let f = fields[x];
- let field_size = f.size().bytes();
- let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
+ let f = &fields[x];
+ let field_size = f.size.bytes();
+ let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
let niche_size_key = match niche_bias {
// large niche first
NicheBias::Start => !niche_size,
@@ -950,8 +982,8 @@ fn univariant(
NicheBias::End => niche_size,
};
let inner_niche_offset_key = match niche_bias {
- NicheBias::Start => f.largest_niche().map_or(0, |n| n.offset.bytes()),
- NicheBias::End => f.largest_niche().map_or(0, |n| {
+ NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
+ NicheBias::End => f.largest_niche.map_or(0, |n| {
!(field_size - n.value.size(dl).bytes() - n.offset.bytes())
}),
};
@@ -975,8 +1007,8 @@ fn univariant(
// And put the largest niche in an alignment group at the end
// so it can be used as discriminant in jagged enums
optimizing.sort_by_key(|&x| {
- let f = fields[x];
- let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
+ let f = &fields[x];
+ let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
(alignment_group_key(f), niche_size)
});
}
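
The `alignment_group_key` used above groups fields by `log2` of an effective alignment derived from `max(align, size)`. A standalone illustration of that grouping rule, with made-up field shapes (not compiler code):

// Group a field by log2(max(align, size)): e.g. [u8; 4] (align 1, size 4)
// sorts with align-4 fields, and [u8; 6] with align-2 fields.
fn alignment_group_key(align_bytes: u64, size_bytes: u64) -> u32 {
    align_bytes.max(size_bytes).trailing_zeros()
}

fn main() {
    assert_eq!(alignment_group_key(1, 4), 2); // [u8; 4] -> align-4 group
    assert_eq!(alignment_group_key(1, 6), 1); // [u8; 6] -> align-2 group
    assert_eq!(alignment_group_key(4, 4), 2); // u32
}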
@@ -1012,24 +1044,24 @@ fn univariant(
));
}
- if field.0.is_unsized() {
+ if field.is_unsized() {
sized = false;
}
// Invariant: offset < dl.obj_size_bound() <= 1<<61
let field_align = if let Some(pack) = pack {
- field.align().min(AbiAndPrefAlign::new(pack))
+ field.align.min(AbiAndPrefAlign::new(pack))
} else {
- field.align()
+ field.align
};
offset = offset.align_to(field_align.abi);
align = align.max(field_align);
- max_repr_align = max_repr_align.max(field.max_repr_align());
+ max_repr_align = max_repr_align.max(field.max_repr_align);
debug!("univariant offset: {:?} field: {:#?}", offset, field);
offsets[i] = offset;
- if let Some(mut niche) = field.largest_niche() {
+ if let Some(mut niche) = field.largest_niche {
let available = niche.available(dl);
// Pick up larger niches.
let prefer_new_niche = match niche_bias {
@@ -1044,7 +1076,7 @@ fn univariant(
}
}
- offset = offset.checked_add(field.size(), dl)?;
+ offset = offset.checked_add(field.size, dl)?;
}
// The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
@@ -1068,16 +1100,20 @@ fn univariant(
inverse_memory_index.invert_bijective_mapping()
} else {
debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
- inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
+ inverse_memory_index.into_iter().map(|it| it.index() as u32).collect()
};
let size = min_size.align_to(align.abi);
+ // FIXME(oli-obk): deduplicate and harden these checks
+ if size.bytes() >= dl.obj_size_bound() {
+ return None;
+ }
let mut layout_of_single_non_zst_field = None;
let mut abi = Abi::Aggregate { sized };
// Try to make this a Scalar/ScalarPair.
if sized && size.bytes() > 0 {
// We skip *all* ZST here and later check if we are good in terms of alignment.
// This lets us handle some cases involving aligned ZST.
- let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
+ let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst());
match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
// We have exactly one non-ZST field.
@@ -1085,18 +1121,17 @@ fn univariant(
layout_of_single_non_zst_field = Some(field);
// Field fills the struct and it has a scalar or scalar pair ABI.
- if offsets[i].bytes() == 0 && align.abi == field.align().abi && size == field.size()
- {
- match field.abi() {
+ if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
+ match field.abi {
// For plain scalars, or vectors of them, we can't unpack
// newtypes for `#[repr(C)]`, as that affects C ABIs.
Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
- abi = field.abi();
+ abi = field.abi;
}
// But scalar pairs are Rust-specific and get
// treated as aggregates by C ABIs anyway.
Abi::ScalarPair(..) => {
- abi = field.abi();
+ abi = field.abi;
}
_ => {}
}
@@ -1105,7 +1140,7 @@ fn univariant(
// Two non-ZST fields, and they're both scalars.
(Some((i, a)), Some((j, b)), None) => {
- match (a.abi(), b.abi()) {
+ match (a.abi, b.abi) {
(Abi::Scalar(a), Abi::Scalar(b)) => {
// Order by the memory placement, not source order.
let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
@@ -1113,7 +1148,7 @@ fn univariant(
} else {
((j, b), (i, a))
};
- let pair = this.scalar_pair(a, b);
+ let pair = this.scalar_pair::<FieldIdx, VariantIdx>(a, b);
let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index.raw, [0, 1]);
@@ -1121,8 +1156,8 @@ fn univariant(
}
_ => panic!(),
};
- if offsets[i] == pair_offsets[FieldIdx::from_usize(0)]
- && offsets[j] == pair_offsets[FieldIdx::from_usize(1)]
+ if offsets[i] == pair_offsets[FieldIdx::new(0)]
+ && offsets[j] == pair_offsets[FieldIdx::new(1)]
&& align == pair.align
&& size == pair.size
{
@@ -1138,13 +1173,13 @@ fn univariant(
_ => {}
}
}
- if fields.iter().any(|f| f.abi().is_uninhabited()) {
+ if fields.iter().any(|f| f.abi.is_uninhabited()) {
abi = Abi::Uninhabited;
}
let unadjusted_abi_align = if repr.transparent() {
match layout_of_single_non_zst_field {
- Some(l) => l.unadjusted_abi_align(),
+ Some(l) => l.unadjusted_abi_align,
None => {
// `repr(transparent)` with all ZST fields.
align.abi
@@ -1155,7 +1190,7 @@ fn univariant(
};
Some(LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary { offsets, memory_index },
abi,
largest_niche,
@@ -1166,17 +1201,22 @@ fn univariant(
})
}
-fn format_field_niches(
- layout: &LayoutS,
- fields: &IndexSlice<FieldIdx, Layout<'_>>,
+fn format_field_niches<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+>(
+ layout: &LayoutS<FieldIdx, VariantIdx>,
+ fields: &IndexSlice<FieldIdx, F>,
dl: &TargetDataLayout,
) -> String {
let mut s = String::new();
for i in layout.fields.index_by_increasing_offset() {
let offset = layout.fields.offset(i);
- let f = fields[i.into()];
- write!(s, "[o{}a{}s{}", offset.bytes(), f.align().abi.bytes(), f.size().bytes()).unwrap();
- if let Some(n) = f.largest_niche() {
+ let f = &fields[FieldIdx::new(i)];
+ write!(s, "[o{}a{}s{}", offset.bytes(), f.align.abi.bytes(), f.size.bytes()).unwrap();
+ if let Some(n) = f.largest_niche {
write!(
s,
" n{}b{}s{}",
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index b30ff058a..09a87cf8e 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -1,23 +1,24 @@
-#![cfg_attr(feature = "nightly", feature(step_trait, rustc_attrs, min_specialization))]
+#![cfg_attr(feature = "nightly", feature(step_trait))]
#![cfg_attr(feature = "nightly", allow(internal_features))]
+#![cfg_attr(all(not(bootstrap), feature = "nightly"), doc(rust_logo))]
+#![cfg_attr(all(not(bootstrap), feature = "nightly"), feature(rustdoc_internals))]
use std::fmt;
-#[cfg(feature = "nightly")]
-use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Mul, RangeInclusive, Sub};
use std::str::FromStr;
use bitflags::bitflags;
-use rustc_data_structures::intern::Interned;
-use rustc_data_structures::stable_hasher::Hash64;
+use rustc_index::{Idx, IndexSlice, IndexVec};
+
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
-use rustc_index::{IndexSlice, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::HashStable_Generic;
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable, Encodable};
+#[cfg(feature = "nightly")]
+use std::iter::Step;
mod layout;
@@ -28,9 +29,6 @@ pub use layout::LayoutCalculator;
/// instead of implementing everything in `rustc_middle`.
pub trait HashStableContext {}
-use Integer::*;
-use Primitive::*;
-
bitflags! {
#[derive(Default)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
@@ -53,10 +51,11 @@ bitflags! {
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub enum IntegerType {
- /// Pointer sized integer type, i.e. isize and usize. The field shows signedness, that
- /// is, `Pointer(true)` is isize.
+ /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
+ /// `Pointer(true)` means `isize`.
Pointer(bool),
- /// Fix sized integer type, e.g. i8, u32, i128 The bool field shows signedness, `Fixed(I8, false)` means `u8`
+ /// Fixed-sized integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
+ /// `Fixed(I8, false)` means `u8`.
Fixed(Integer, bool),
}
@@ -69,7 +68,7 @@ impl IntegerType {
}
}
-/// Represents the repr options provided by the user,
+/// Represents the repr options provided by the user.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct ReprOptions {
@@ -79,12 +78,12 @@ pub struct ReprOptions {
pub flags: ReprFlags,
/// The seed to be used for randomizing a type's layout
///
- /// Note: This could technically be a `Hash128` which would
+ /// Note: This could technically be a `u128` which would
/// be the "most accurate" hash as it'd encompass the item and crate
/// hash without loss, but it does pay the price of being larger.
/// Everything's a tradeoff, a 64-bit seed should be sufficient for our
/// purposes (primarily `-Z randomize-layout`)
- pub field_shuffle_seed: Hash64,
+ pub field_shuffle_seed: u64,
}
impl ReprOptions {
@@ -139,7 +138,7 @@ impl ReprOptions {
}
/// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
- /// was enabled for its declaration crate
+ /// was enabled for its declaration crate.
pub fn can_randomize_type_layout(&self) -> bool {
!self.inhibit_struct_field_reordering_opt()
&& self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
@@ -217,7 +216,8 @@ pub enum TargetDataLayoutErrors<'a> {
}
impl TargetDataLayout {
- /// Parse data layout from an [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
+ /// Parse data layout from an
+ /// [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
///
/// This function doesn't fill `c_enum_min_size` and it will always be `I32` since it can not be
/// determined from llvm string.
@@ -242,10 +242,11 @@ impl TargetDataLayout {
};
// Parse a size string.
- let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
+ let parse_size =
+ |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
// Parse an alignment string.
- let align = |s: &[&'a str], cause: &'a str| {
+ let parse_align = |s: &[&'a str], cause: &'a str| {
if s.is_empty() {
return Err(TargetDataLayoutErrors::MissingAlignment { cause });
}
@@ -269,22 +270,22 @@ impl TargetDataLayout {
[p] if p.starts_with('P') => {
dl.instruction_address_space = parse_address_space(&p[1..], "P")?
}
- ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
- ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
- ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
+ ["a", ref a @ ..] => dl.aggregate_align = parse_align(a, "a")?,
+ ["f32", ref a @ ..] => dl.f32_align = parse_align(a, "f32")?,
+ ["f64", ref a @ ..] => dl.f64_align = parse_align(a, "f64")?,
// FIXME(erikdesjardins): we should be parsing nonzero address spaces
// this will require replacing TargetDataLayout::{pointer_size,pointer_align}
// with e.g. `fn pointer_size_in(AddressSpace)`
[p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
- dl.pointer_size = size(s, p)?;
- dl.pointer_align = align(a, p)?;
+ dl.pointer_size = parse_size(s, p)?;
+ dl.pointer_align = parse_align(a, p)?;
}
[s, ref a @ ..] if s.starts_with('i') => {
let Ok(bits) = s[1..].parse::<u64>() else {
- size(&s[1..], "i")?; // For the user error.
+ parse_size(&s[1..], "i")?; // For the user error.
continue;
};
- let a = align(a, s)?;
+ let a = parse_align(a, s)?;
match bits {
1 => dl.i1_align = a,
8 => dl.i8_align = a,
@@ -301,8 +302,8 @@ impl TargetDataLayout {
}
}
[s, ref a @ ..] if s.starts_with('v') => {
- let v_size = size(&s[1..], "v")?;
- let a = align(a, s)?;
+ let v_size = parse_size(&s[1..], "v")?;
+ let a = parse_align(a, s)?;
if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
v.1 = a;
continue;
@@ -339,6 +340,7 @@ impl TargetDataLayout {
#[inline]
pub fn ptr_sized_integer(&self) -> Integer {
+ use Integer::*;
match self.pointer_size.bits() {
16 => I16,
32 => I32,
@@ -680,6 +682,7 @@ impl fmt::Display for AlignFromBytesError {
impl Align {
pub const ONE: Align = Align { pow2: 0 };
+ // LLVM has a maximal supported alignment of 2^29, we inherit that.
pub const MAX: Align = Align { pow2: 29 };
#[inline]
@@ -747,7 +750,6 @@ impl Align {
/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-
pub struct AbiAndPrefAlign {
pub abi: Align,
pub pref: Align,
@@ -773,7 +775,6 @@ impl AbiAndPrefAlign {
/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
-
pub enum Integer {
I8,
I16,
@@ -785,6 +786,7 @@ pub enum Integer {
impl Integer {
#[inline]
pub fn size(self) -> Size {
+ use Integer::*;
match self {
I8 => Size::from_bytes(1),
I16 => Size::from_bytes(2),
@@ -805,6 +807,7 @@ impl Integer {
}
pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
+ use Integer::*;
let dl = cx.data_layout();
match self {
@@ -819,6 +822,7 @@ impl Integer {
/// Returns the largest signed value that can be represented by this Integer.
#[inline]
pub fn signed_max(self) -> i128 {
+ use Integer::*;
match self {
I8 => i8::MAX as i128,
I16 => i16::MAX as i128,
@@ -831,6 +835,7 @@ impl Integer {
/// Finds the smallest Integer type which can represent the signed value.
#[inline]
pub fn fit_signed(x: i128) -> Integer {
+ use Integer::*;
match x {
-0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
-0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
@@ -843,6 +848,7 @@ impl Integer {
/// Finds the smallest Integer type which can represent the unsigned value.
#[inline]
pub fn fit_unsigned(x: u128) -> Integer {
+ use Integer::*;
match x {
0..=0x0000_0000_0000_00ff => I8,
0..=0x0000_0000_0000_ffff => I16,
@@ -854,6 +860,7 @@ impl Integer {
/// Finds the smallest integer with the given alignment.
pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
+ use Integer::*;
let dl = cx.data_layout();
[I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
@@ -863,6 +870,7 @@ impl Integer {
/// Find the largest integer with the given alignment or less.
pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
+ use Integer::*;
let dl = cx.data_layout();
// FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
@@ -908,6 +916,7 @@ pub enum Primitive {
impl Primitive {
pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
+ use Primitive::*;
let dl = cx.data_layout();
match self {
@@ -922,6 +931,7 @@ impl Primitive {
}
pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
+ use Primitive::*;
let dl = cx.data_layout();
match self {
@@ -937,8 +947,7 @@ impl Primitive {
}
/// Inclusive wrap-around range of valid values, that is, if
-/// start > end, it represents `start..=MAX`,
-/// followed by `0..=end`.
+/// start > end, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means following
/// sequence:
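
The reworded comment describes a wrap-around range: if `start > end`, the valid values are `start..=MAX` followed by `0..=end`. A small sketch of that containment rule (illustrative, not the actual `WrappingRange` API):

fn wrapping_contains(start: u128, end: u128, v: u128) -> bool {
    if start <= end {
        start <= v && v <= end // ordinary range
    } else {
        v >= start || v <= end // wraps around: start..=MAX, then 0..=end
    }
}

fn main() {
    // For a one-byte value, `254..=2` accepts 254, 255, 0, 1 and 2.
    assert!(wrapping_contains(254, 2, 255));
    assert!(wrapping_contains(254, 2, 1));
    assert!(!wrapping_contains(254, 2, 3));
}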
@@ -970,21 +979,21 @@ impl WrappingRange {
/// Returns `self` with replaced `start`
#[inline(always)]
- pub fn with_start(mut self, start: u128) -> Self {
+ fn with_start(mut self, start: u128) -> Self {
self.start = start;
self
}
/// Returns `self` with replaced `end`
#[inline(always)]
- pub fn with_end(mut self, end: u128) -> Self {
+ fn with_end(mut self, end: u128) -> Self {
self.end = end;
self
}
/// Returns `true` if `size` completely fills the range.
#[inline]
- pub fn is_full_for(&self, size: Size) -> bool {
+ fn is_full_for(&self, size: Size) -> bool {
let max_value = size.unsigned_int_max();
debug_assert!(self.start <= max_value && self.end <= max_value);
self.start == (self.end.wrapping_add(1) & max_value)
@@ -1027,10 +1036,11 @@ pub enum Scalar {
impl Scalar {
#[inline]
pub fn is_bool(&self) -> bool {
+ use Integer::*;
matches!(
self,
Scalar::Initialized {
- value: Int(I8, false),
+ value: Primitive::Int(I8, false),
valid_range: WrappingRange { start: 0, end: 1 }
}
)
@@ -1066,7 +1076,8 @@ impl Scalar {
}
#[inline]
- /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
+ /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
+ /// union.
pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
match self {
Scalar::Initialized { valid_range, .. } => valid_range,
@@ -1074,7 +1085,8 @@ impl Scalar {
}
}
- /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole layout
+ /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole
+ /// layout.
#[inline]
pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
match *self {
@@ -1093,36 +1105,11 @@ impl Scalar {
}
}
-rustc_index::newtype_index! {
- /// The *source-order* index of a field in a variant.
- ///
- /// This is how most code after type checking refers to fields, rather than
- /// using names (as names have hygiene complications and more complex lookup).
- ///
- /// Particularly for `repr(Rust)` types, this may not be the same as *layout* order.
- /// (It is for `repr(C)` `struct`s, however.)
- ///
- /// For example, in the following types,
- /// ```rust
- /// # enum Never {}
- /// # #[repr(u16)]
- /// enum Demo1 {
- /// Variant0 { a: Never, b: i32 } = 100,
- /// Variant1 { c: u8, d: u64 } = 10,
- /// }
- /// struct Demo2 { e: u8, f: u16, g: u8 }
- /// ```
- /// `b` is `FieldIdx(1)` in `VariantIdx(0)`,
- /// `d` is `FieldIdx(1)` in `VariantIdx(1)`, and
- /// `f` is `FieldIdx(1)` in `VariantIdx(0)`.
- #[derive(HashStable_Generic)]
- pub struct FieldIdx {}
-}
-
+// NOTE: This struct is generic over the FieldIdx for rust-analyzer usage.
/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum FieldsShape {
+pub enum FieldsShape<FieldIdx: Idx> {
/// Scalar primitives and `!`, which never have fields.
Primitive,
@@ -1162,7 +1149,7 @@ pub enum FieldsShape {
},
}
-impl FieldsShape {
+impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
#[inline]
pub fn count(&self) -> usize {
match *self {
@@ -1188,7 +1175,7 @@ impl FieldsShape {
assert!(i < count, "tried to access field {i} of array with {count} fields");
stride * i
}
- FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::from_usize(i)],
+ FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
}
}
@@ -1200,7 +1187,7 @@ impl FieldsShape {
}
FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
FieldsShape::Arbitrary { ref memory_index, .. } => {
- memory_index[FieldIdx::from_usize(i)].try_into().unwrap()
+ memory_index[FieldIdx::new(i)].try_into().unwrap()
}
}
}
@@ -1216,7 +1203,7 @@ impl FieldsShape {
if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
if use_small {
for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
- inverse_small[mem_idx as usize] = field_idx.as_u32() as u8;
+ inverse_small[mem_idx as usize] = field_idx.index() as u8;
}
} else {
inverse_big = memory_index.invert_bijective_mapping();
@@ -1229,7 +1216,7 @@ impl FieldsShape {
if use_small {
inverse_small[i] as usize
} else {
- inverse_big[i as u32].as_usize()
+ inverse_big[i as u32].index()
}
}
})
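
Both `invert_bijective_mapping` and the small-array path above compute the inverse of the `memory_index` permutation (source field index -> memory order becomes memory order -> source field index). A standalone sketch of that inversion, outside the `IndexVec` machinery:

// memory_index[field] = position of `field` in memory order;
// the inverse maps a memory position back to the source field index.
fn invert_mapping(memory_index: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0u32; memory_index.len()];
    for (field_idx, &mem_idx) in memory_index.iter().enumerate() {
        inverse[mem_idx as usize] = field_idx as u32;
    }
    inverse
}

fn main() {
    // Source fields 0, 1, 2 laid out in memory as 2, 0, 1.
    assert_eq!(invert_mapping(&[1, 2, 0]), vec![2, 0, 1]);
}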
@@ -1252,7 +1239,6 @@ impl AddressSpace {
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-
pub enum Abi {
Uninhabited,
Scalar(Scalar),
@@ -1373,9 +1359,10 @@ impl Abi {
}
}
+// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum Variants {
+pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
/// Single enum variants, structs/tuples, unions, and all non-ADTs.
Single { index: VariantIdx },
@@ -1387,15 +1374,16 @@ pub enum Variants {
/// For enums, the tag is the sole field of the layout.
Multiple {
tag: Scalar,
- tag_encoding: TagEncoding,
+ tag_encoding: TagEncoding<VariantIdx>,
tag_field: usize,
- variants: IndexVec<VariantIdx, LayoutS>,
+ variants: IndexVec<VariantIdx, LayoutS<FieldIdx, VariantIdx>>,
},
}
+// NOTE: This struct is generic over the VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum TagEncoding {
+pub enum TagEncoding<VariantIdx: Idx> {
/// The tag directly stores the discriminant, but possibly with a smaller layout
/// (so converting the tag to the discriminant can require sign extension).
Direct,
@@ -1457,17 +1445,19 @@ impl Niche {
return None;
}
- // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
- // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
- // This is accomplished by preferring enums with 2 variants(`count==1`) and always taking the shortest path to niche zero.
- // Having `None` in niche zero can enable some special optimizations.
+ // Extend the range of valid values being reserved by moving either `v.start` or `v.end`
+ // bound. Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy
+    // the niche of zero. This is accomplished by preferring enums with 2 variants (`count==1`)
+ // and always taking the shortest path to niche zero. Having `None` in niche zero can
+ // enable some special optimizations.
//
// Bound selection criteria:
// 1. Select closest to zero given wrapping semantics.
// 2. Avoid moving past zero if possible.
//
- // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
- // If niche zero is already reserved, the selection of bounds are of little interest.
+ // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
+ // since they have to fit perfectly. If niche zero is already reserved, the selection of
+    // bounds is of little interest.
let move_start = |v: WrappingRange| {
let start = v.start.wrapping_sub(count) & max_value;
Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
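
The `move_start` closure above (and its `move_end` counterpart in the surrounding code) compute the two candidate ways to extend the valid range when reserving `count` niche values. A worked example of the wrapping arithmetic for a one-byte niche with valid range `0..=1` and `count == 1` (illustrative only, not `Niche::reserve` itself):

fn main() {
    let max_value: u128 = u8::MAX as u128; // one-byte niche
    let (start, end, count): (u128, u128, u128) = (0, 1, 1); // bool-like range

    let moved_start = start.wrapping_sub(count) & max_value; // claims value 255
    let moved_end = end.wrapping_add(count) & max_value; // claims value 2

    assert_eq!(moved_start, 255);
    assert_eq!(moved_end, 2);
}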
@@ -1501,38 +1491,21 @@ impl Niche {
}
}
-rustc_index::newtype_index! {
- /// The *source-order* index of a variant in a type.
- ///
- /// For enums, these are always `0..variant_count`, regardless of any
- /// custom discriminants that may have been defined, and including any
- /// variants that may end up uninhabited due to field types. (Some of the
- /// variants may not be present in a monomorphized ABI [`Variants`], but
- /// those skipped variants are always counted when determining the *index*.)
- ///
- /// `struct`s, `tuples`, and `unions`s are considered to have a single variant
- /// with variant index zero, aka [`FIRST_VARIANT`].
- #[derive(HashStable_Generic)]
- pub struct VariantIdx {
- /// Equivalent to `VariantIdx(0)`.
- const FIRST_VARIANT = 0;
- }
-}
-
+// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub struct LayoutS {
+pub struct LayoutS<FieldIdx: Idx, VariantIdx: Idx> {
/// Says where the fields are located within the layout.
- pub fields: FieldsShape,
+ pub fields: FieldsShape<FieldIdx>,
/// Encodes information about multi-variant layouts.
/// Even with `Multiple` variants, a layout still has its own fields! Those are then
/// shared between all variants. One of them will be the discriminant,
- /// but e.g. generators can have more.
+ /// but e.g. coroutines can have more.
///
/// To access all fields of this layout, both `fields` and the fields of the active variant
/// must be taken into account.
- pub variants: Variants,
+ pub variants: Variants<FieldIdx, VariantIdx>,
/// The `abi` defines how this data is passed between functions, and it defines
/// value restrictions via `valid_range`.
@@ -1561,13 +1534,13 @@ pub struct LayoutS {
pub unadjusted_abi_align: Align,
}
-impl LayoutS {
+impl<FieldIdx: Idx, VariantIdx: Idx> LayoutS<FieldIdx, VariantIdx> {
pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
let size = scalar.size(cx);
let align = scalar.align(cx);
LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
abi: Abi::Scalar(scalar),
largest_niche,
@@ -1579,7 +1552,11 @@ impl LayoutS {
}
}
-impl fmt::Debug for LayoutS {
+impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutS<FieldIdx, VariantIdx>
+where
+ FieldsShape<FieldIdx>: fmt::Debug,
+ Variants<FieldIdx, VariantIdx>: fmt::Debug,
+{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This is how `Layout` used to print before it became
// `Interned<LayoutS>`. We print it like this to avoid having to update
@@ -1607,61 +1584,6 @@ impl fmt::Debug for LayoutS {
}
}
-#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
-#[rustc_pass_by_value]
-pub struct Layout<'a>(pub Interned<'a, LayoutS>);
-
-impl<'a> fmt::Debug for Layout<'a> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- // See comment on `<LayoutS as Debug>::fmt` above.
- self.0.0.fmt(f)
- }
-}
-
-impl<'a> Layout<'a> {
- pub fn fields(self) -> &'a FieldsShape {
- &self.0.0.fields
- }
-
- pub fn variants(self) -> &'a Variants {
- &self.0.0.variants
- }
-
- pub fn abi(self) -> Abi {
- self.0.0.abi
- }
-
- pub fn largest_niche(self) -> Option<Niche> {
- self.0.0.largest_niche
- }
-
- pub fn align(self) -> AbiAndPrefAlign {
- self.0.0.align
- }
-
- pub fn size(self) -> Size {
- self.0.0.size
- }
-
- pub fn max_repr_align(self) -> Option<Align> {
- self.0.0.max_repr_align
- }
-
- pub fn unadjusted_abi_align(self) -> Align {
- self.0.0.unadjusted_abi_align
- }
-
- /// Whether the layout is from a type that implements [`std::marker::PointerLike`].
- ///
- /// Currently, that means that the type is pointer-sized, pointer-aligned,
- /// and has a scalar ABI.
- pub fn is_pointer_like(self, data_layout: &TargetDataLayout) -> bool {
- self.size() == data_layout.pointer_size
- && self.align().abi == data_layout.pointer_align.abi
- && matches!(self.abi(), Abi::Scalar(..))
- }
-}
-
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
/// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
@@ -1681,7 +1603,7 @@ pub struct PointeeInfo {
pub safe: Option<PointerKind>,
}
-impl LayoutS {
+impl<FieldIdx: Idx, VariantIdx: Idx> LayoutS<FieldIdx, VariantIdx> {
/// Returns `true` if the layout corresponds to an unsized type.
#[inline]
pub fn is_unsized(&self) -> bool {