author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-07 05:48:42 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-07 05:48:42 +0000
commit    cec1877e180393eba0f6ddb0cf97bf3a791631c7 (patch)
tree      47b4dac2a9dd9a40c30c251b4d4a72d7ccf77e9f /compiler/rustc_abi/src/layout.rs
parent    Adding debian version 1.74.1+dfsg1-1. (diff)
Merging upstream version 1.75.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_abi/src/layout.rs')
-rw-r--r--  compiler/rustc_abi/src/layout.rs | 312
1 file changed, 176 insertions(+), 136 deletions(-)
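
Most of the churn below comes from one refactor: `LayoutS` and the
`LayoutCalculator` methods become generic over their index types through
`rustc_index::Idx`, so the hard-coded `FIRST_VARIANT` constant is replaced by
`VariantIdx::new(0)` and field layouts are reached through a `Deref` bound
instead of the concrete `Layout<'_>` wrapper. A minimal self-contained sketch
of the `Idx` pattern (the trait here is a stand-in for `rustc_index::Idx`,
not rustc's actual definition):

    // Stand-in for rustc_index::Idx: a copyable type convertible to/from usize.
    trait Idx: Copy {
        fn new(idx: usize) -> Self;
        fn index(self) -> usize;
    }

    // A newtype index, like the ones rustc generates with `newtype_index!`.
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct VariantIdx(u32);

    impl Idx for VariantIdx {
        fn new(idx: usize) -> Self {
            VariantIdx(u32::try_from(idx).unwrap())
        }
        fn index(self) -> usize {
            self.0 as usize
        }
    }

    // Generic code can no longer name a concrete FIRST_VARIANT constant,
    // so it asks the index type for its zeroth value instead.
    fn first_variant<V: Idx>() -> V {
        V::new(0)
    }

    fn main() {
        let v: VariantIdx = first_variant();
        assert_eq!(v.index(), 0);
    }

Presumably this lets other consumers of rustc_abi supply their own index
newtypes rather than the compiler's concrete ones.
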
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index 0706dc18f..996fd5bbe 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -1,21 +1,27 @@
-use super::*;
-use std::fmt::Write;
+use std::fmt::{self, Write};
+use std::ops::Deref;
use std::{borrow::Borrow, cmp, iter, ops::Bound};
-#[cfg(feature = "randomize")]
-use rand::{seq::SliceRandom, SeedableRng};
-#[cfg(feature = "randomize")]
-use rand_xoshiro::Xoshiro128StarStar;
-
+use rustc_index::Idx;
use tracing::debug;
+use crate::{
+ Abi, AbiAndPrefAlign, Align, FieldsShape, IndexSlice, IndexVec, Integer, LayoutS, Niche,
+ NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding, TargetDataLayout,
+ Variants, WrappingRange,
+};
+
pub trait LayoutCalculator {
type TargetDataLayoutRef: Borrow<TargetDataLayout>;
fn delay_bug(&self, txt: String);
fn current_data_layout(&self) -> Self::TargetDataLayoutRef;
- fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS {
+ fn scalar_pair<FieldIdx: Idx, VariantIdx: Idx>(
+ &self,
+ a: Scalar,
+ b: Scalar,
+ ) -> LayoutS<FieldIdx, VariantIdx> {
let dl = self.current_data_layout();
let dl = dl.borrow();
let b_align = b.align(dl);
@@ -31,7 +37,7 @@ pub trait LayoutCalculator {
.max_by_key(|niche| niche.available(dl));
LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary {
offsets: [Size::ZERO, b_offset].into(),
memory_index: [0, 1].into(),
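
For intuition on the `[Size::ZERO, b_offset]` offsets built above: in
`scalar_pair` the second field starts at the first field's size rounded up to
the second field's alignment. A plain-integer sketch (u64 math standing in
for `Size`/`Align`; sizes and alignments assume a typical 64-bit target):

    // Round `offset` up to the next multiple of `align` (a power of two).
    fn align_to(offset: u64, align: u64) -> u64 {
        (offset + align - 1) & !(align - 1)
    }

    // Offsets of the two scalars in a pair: `a` at 0, `b` after a's bytes,
    // aligned for `b`.
    fn scalar_pair_offsets(a_size: u64, b_align: u64) -> (u64, u64) {
        (0, align_to(a_size, b_align))
    }

    fn main() {
        assert_eq!(scalar_pair_offsets(4, 8), (0, 8)); // (i32, i64): 4 bytes of padding
        assert_eq!(scalar_pair_offsets(8, 4), (0, 8)); // (i64, i32): no padding
    }
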
@@ -45,40 +51,45 @@ pub trait LayoutCalculator {
}
}
- fn univariant(
+ fn univariant<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+ >(
&self,
dl: &TargetDataLayout,
- fields: &IndexSlice<FieldIdx, Layout<'_>>,
+ fields: &IndexSlice<FieldIdx, F>,
repr: &ReprOptions,
kind: StructKind,
- ) -> Option<LayoutS> {
+ ) -> Option<LayoutS<FieldIdx, VariantIdx>> {
let layout = univariant(self, dl, fields, repr, kind, NicheBias::Start);
- // Enums prefer niches close to the beginning or the end of the variants so that other (smaller)
- // data-carrying variants can be packed into the space after/before the niche.
+ // Enums prefer niches close to the beginning or the end of the variants so that other
+ // (smaller) data-carrying variants can be packed into the space after/before the niche.
// If the default field ordering does not give us a niche at the front then we do a second
- // run and bias niches to the right and then check which one is closer to one of the struct's
- // edges.
+ // run and bias niches to the right and then check which one is closer to one of the
+ // struct's edges.
if let Some(layout) = &layout {
// Don't try to calculate an end-biased layout for unsizable structs,
// otherwise we could end up with different layouts for
- // Foo<Type> and Foo<dyn Trait> which would break unsizing
+ // Foo<Type> and Foo<dyn Trait> which would break unsizing.
if !matches!(kind, StructKind::MaybeUnsized) {
if let Some(niche) = layout.largest_niche {
let head_space = niche.offset.bytes();
- let niche_length = niche.value.size(dl).bytes();
- let tail_space = layout.size.bytes() - head_space - niche_length;
+ let niche_len = niche.value.size(dl).bytes();
+ let tail_space = layout.size.bytes() - head_space - niche_len;
- // This may end up doing redundant work if the niche is already in the last field
- // (e.g. a trailing bool) and there is tail padding. But it's non-trivial to get
- // the unpadded size so we try anyway.
+ // This may end up doing redundant work if the niche is already in the last
+ // field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
+ // to get the unpadded size so we try anyway.
if fields.len() > 1 && head_space != 0 && tail_space > 0 {
let alt_layout = univariant(self, dl, fields, repr, kind, NicheBias::End)
.expect("alt layout should always work");
- let niche = alt_layout
+ let alt_niche = alt_layout
.largest_niche
.expect("alt layout should have a niche like the regular one");
- let alt_head_space = niche.offset.bytes();
- let alt_niche_len = niche.value.size(dl).bytes();
+ let alt_head_space = alt_niche.offset.bytes();
+ let alt_niche_len = alt_niche.value.size(dl).bytes();
let alt_tail_space =
alt_layout.size.bytes() - alt_head_space - alt_niche_len;
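
The head/tail bookkeeping above decides whether a second, end-biased layout
run is worth trying: only when the niche is strictly interior. A small worked
sketch with illustrative numbers (not taken from a real layout):

    // head_space: bytes before the niche; tail_space: bytes after it.
    fn spaces(size: u64, niche_offset: u64, niche_len: u64) -> (u64, u64) {
        let head_space = niche_offset;
        let tail_space = size - head_space - niche_len;
        (head_space, tail_space)
    }

    fn main() {
        // An 8-byte layout with a 1-byte niche at offset 4 is interior
        // (head != 0 and tail > 0), so the end-biased run is attempted.
        assert_eq!(spaces(8, 4, 1), (4, 3));
        // A niche already at the start: head == 0, no second run needed.
        assert_eq!(spaces(8, 0, 1), (0, 7));
    }
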
@@ -93,7 +104,7 @@ pub trait LayoutCalculator {
alt_layout: {}\n",
layout.size.bytes(),
head_space,
- niche_length,
+ niche_len,
tail_space,
alt_head_space,
alt_niche_len,
@@ -114,11 +125,13 @@ pub trait LayoutCalculator {
layout
}
- fn layout_of_never_type(&self) -> LayoutS {
+ fn layout_of_never_type<FieldIdx: Idx, VariantIdx: Idx>(
+ &self,
+ ) -> LayoutS<FieldIdx, VariantIdx> {
let dl = self.current_data_layout();
let dl = dl.borrow();
LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
abi: Abi::Uninhabited,
largest_niche: None,
@@ -129,10 +142,15 @@ pub trait LayoutCalculator {
}
}
- fn layout_of_struct_or_enum(
+ fn layout_of_struct_or_enum<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+ >(
&self,
repr: &ReprOptions,
- variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
+ variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
is_enum: bool,
is_unsafe_cell: bool,
scalar_valid_range: (Bound<u128>, Bound<u128>),
@@ -140,7 +158,7 @@ pub trait LayoutCalculator {
discriminants: impl Iterator<Item = (VariantIdx, i128)>,
dont_niche_optimize_enum: bool,
always_sized: bool,
- ) -> Option<LayoutS> {
+ ) -> Option<LayoutS<FieldIdx, VariantIdx>> {
let dl = self.current_data_layout();
let dl = dl.borrow();
@@ -155,11 +173,11 @@ pub trait LayoutCalculator {
// but *not* an encoding of the discriminant (e.g., a tag value).
// See issue #49298 for more details on the need to leave space
// for non-ZST uninhabited data (mostly partial initialization).
- let absent = |fields: &IndexSlice<FieldIdx, Layout<'_>>| {
- let uninhabited = fields.iter().any(|f| f.abi().is_uninhabited());
+ let absent = |fields: &IndexSlice<FieldIdx, F>| {
+ let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
// We cannot ignore alignment; that might lead us to entirely discard a variant and
// produce an enum that is less aligned than it should be!
- let is_1zst = fields.iter().all(|f| f.0.is_1zst());
+ let is_1zst = fields.iter().all(|f| f.is_1zst());
uninhabited && is_1zst
};
let (present_first, present_second) = {
@@ -176,7 +194,7 @@ pub trait LayoutCalculator {
}
// If it's a struct, still compute a layout so that we can still compute the
// field offsets.
- None => FIRST_VARIANT,
+ None => VariantIdx::new(0),
};
let is_struct = !is_enum ||
@@ -279,12 +297,12 @@ pub trait LayoutCalculator {
// variant layouts, so we can't store them in the
// overall LayoutS. Store the overall LayoutS
// and the variant LayoutSs here until then.
- struct TmpLayout {
- layout: LayoutS,
- variants: IndexVec<VariantIdx, LayoutS>,
+ struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
+ layout: LayoutS<FieldIdx, VariantIdx>,
+ variants: IndexVec<VariantIdx, LayoutS<FieldIdx, VariantIdx>>,
}
- let calculate_niche_filling_layout = || -> Option<TmpLayout> {
+ let calculate_niche_filling_layout = || -> Option<TmpLayout<FieldIdx, VariantIdx>> {
if dont_niche_optimize_enum {
return None;
}
@@ -322,13 +340,14 @@ pub trait LayoutCalculator {
let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
- let count = niche_variants.size_hint().1.unwrap() as u128;
+ let count =
+ (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;
// Find the field with the largest niche
let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
.iter()
.enumerate()
- .filter_map(|(j, field)| Some((j, field.largest_niche()?)))
+ .filter_map(|(j, field)| Some((j, field.largest_niche?)))
.max_by_key(|(_, niche)| niche.available(dl))
.and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
let niche_offset =
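
Since `VariantIdx` is now an arbitrary `Idx` type, `RangeInclusive<VariantIdx>`
no longer implements `Iterator`, so the variant count is computed from the
endpoint indices instead of `size_hint`. The same arithmetic as a sketch:

    // Inclusive count of variants in start..=end: (end - start) + 1.
    fn variant_count(start: usize, end: usize) -> u128 {
        (end as u128 - start as u128) + 1
    }

    fn main() {
        assert_eq!(variant_count(2, 5), 4); // variants 2..=5 carry the niche
        assert_eq!(variant_count(3, 3), 1); // a single niched variant
    }
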
@@ -443,7 +462,7 @@ pub trait LayoutCalculator {
let discr_type = repr.discr_type();
let bits = Integer::from_attr(dl, discr_type).size().bits();
for (i, mut val) in discriminants {
- if variants[i].iter().any(|f| f.abi().is_uninhabited()) {
+ if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
continue;
}
if discr_type.is_signed() {
@@ -484,7 +503,7 @@ pub trait LayoutCalculator {
if repr.c() {
for fields in variants {
for field in fields {
- prefix_align = prefix_align.max(field.align().abi);
+ prefix_align = prefix_align.max(field.align.abi);
}
}
}
@@ -503,9 +522,9 @@ pub trait LayoutCalculator {
// Find the first field we can't move later
// to make room for a larger discriminant.
for field_idx in st.fields.index_by_increasing_offset() {
- let field = &field_layouts[FieldIdx::from_usize(field_idx)];
- if !field.0.is_1zst() {
- start_align = start_align.min(field.align().abi);
+ let field = &field_layouts[FieldIdx::new(field_idx)];
+ if !field.is_1zst() {
+ start_align = start_align.min(field.align.abi);
break;
}
}
@@ -520,6 +539,7 @@ pub trait LayoutCalculator {
// Align the maximum variant size to the largest alignment.
size = size.align_to(align.abi);
+ // FIXME(oli-obk): deduplicate and harden these checks
if size.bytes() >= dl.obj_size_bound() {
return None;
}
@@ -587,7 +607,7 @@ pub trait LayoutCalculator {
let tag_mask = ity.size().unsigned_int_max();
let tag = Scalar::Initialized {
- value: Int(ity, signed),
+ value: Primitive::Int(ity, signed),
valid_range: WrappingRange {
start: (min as u128 & tag_mask),
end: (max as u128 & tag_mask),
@@ -612,7 +632,7 @@ pub trait LayoutCalculator {
};
// We skip *all* ZST here and later check if we are good in terms of alignment.
// This lets us handle some cases involving aligned ZST.
- let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.0.is_zst());
+ let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
let (field, offset) = match (fields.next(), fields.next()) {
(None, None) => {
common_prim_initialized_in_all_variants = false;
@@ -624,7 +644,7 @@ pub trait LayoutCalculator {
break;
}
};
- let prim = match field.abi() {
+ let prim = match field.abi {
Abi::Scalar(scalar) => {
common_prim_initialized_in_all_variants &=
matches!(scalar, Scalar::Initialized { .. });
@@ -655,7 +675,7 @@ pub trait LayoutCalculator {
// Common prim might be uninit.
Scalar::Union { value: prim }
};
- let pair = self.scalar_pair(tag, prim_scalar);
+ let pair = self.scalar_pair::<FieldIdx, VariantIdx>(tag, prim_scalar);
let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index.raw, [0, 1]);
@@ -663,8 +683,8 @@ pub trait LayoutCalculator {
}
_ => panic!(),
};
- if pair_offsets[FieldIdx::from_u32(0)] == Size::ZERO
- && pair_offsets[FieldIdx::from_u32(1)] == *offset
+ if pair_offsets[FieldIdx::new(0)] == Size::ZERO
+ && pair_offsets[FieldIdx::new(1)] == *offset
&& align == pair.align
&& size == pair.size
{
@@ -684,7 +704,8 @@ pub trait LayoutCalculator {
// Also do not overwrite any already existing "clever" ABIs.
if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
variant.abi = abi;
- // Also need to bump up the size and alignment, so that the entire value fits in here.
+ // Also need to bump up the size and alignment, so that the entire value fits
+ // in here.
variant.size = cmp::max(variant.size, size);
variant.align.abi = cmp::max(variant.align.abi, align.abi);
}
@@ -720,8 +741,9 @@ pub trait LayoutCalculator {
// pick the layout with the larger niche; otherwise,
// pick tagged as it has simpler codegen.
use cmp::Ordering::*;
- let niche_size =
- |tmp_l: &TmpLayout| tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl));
+ let niche_size = |tmp_l: &TmpLayout<FieldIdx, VariantIdx>| {
+ tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
+ };
match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
(Greater, _) => nl,
(Equal, Less) => nl,
@@ -741,11 +763,16 @@ pub trait LayoutCalculator {
Some(best_layout.layout)
}
- fn layout_of_union(
+ fn layout_of_union<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+ >(
&self,
repr: &ReprOptions,
- variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
- ) -> Option<LayoutS> {
+ variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
+ ) -> Option<LayoutS<FieldIdx, VariantIdx>> {
let dl = self.current_data_layout();
let dl = dl.borrow();
let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
@@ -762,24 +789,24 @@ pub trait LayoutCalculator {
};
let mut size = Size::ZERO;
- let only_variant = &variants[FIRST_VARIANT];
+ let only_variant = &variants[VariantIdx::new(0)];
for field in only_variant {
- if field.0.is_unsized() {
+ if field.is_unsized() {
self.delay_bug("unsized field in union".to_string());
}
- align = align.max(field.align());
- max_repr_align = max_repr_align.max(field.max_repr_align());
- size = cmp::max(size, field.size());
+ align = align.max(field.align);
+ max_repr_align = max_repr_align.max(field.max_repr_align);
+ size = cmp::max(size, field.size);
- if field.0.is_zst() {
+ if field.is_zst() {
// Nothing more to do for ZST fields
continue;
}
if let Ok(common) = common_non_zst_abi_and_align {
// Discard valid range information and allow undef
- let field_abi = field.abi().to_union();
+ let field_abi = field.abi.to_union();
if let Some((common_abi, common_align)) = common {
if common_abi != field_abi {
@@ -790,15 +817,14 @@ pub trait LayoutCalculator {
// have the same alignment
if !matches!(common_abi, Abi::Aggregate { .. }) {
assert_eq!(
- common_align,
- field.align().abi,
+ common_align, field.align.abi,
"non-Aggregate field with matching ABI but differing alignment"
);
}
}
} else {
// First non-ZST field: record its ABI and alignment
- common_non_zst_abi_and_align = Ok(Some((field_abi, field.align().abi)));
+ common_non_zst_abi_and_align = Ok(Some((field_abi, field.align.abi)));
}
}
}
@@ -830,7 +856,7 @@ pub trait LayoutCalculator {
};
Some(LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Union(NonZeroUsize::new(only_variant.len())?),
abi,
largest_niche: None,
@@ -848,14 +874,19 @@ enum NicheBias {
End,
}
-fn univariant(
+fn univariant<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+>(
this: &(impl LayoutCalculator + ?Sized),
dl: &TargetDataLayout,
- fields: &IndexSlice<FieldIdx, Layout<'_>>,
+ fields: &IndexSlice<FieldIdx, F>,
repr: &ReprOptions,
kind: StructKind,
niche_bias: NicheBias,
-) -> Option<LayoutS> {
+) -> Option<LayoutS<FieldIdx, VariantIdx>> {
let pack = repr.pack;
let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
let mut max_repr_align = repr.align;
@@ -868,15 +899,17 @@ fn univariant(
// If `-Z randomize-layout` was enabled for the type definition we can shuffle
// the field ordering to try and catch some code making assumptions about layouts
- // we don't guarantee
+ // we don't guarantee.
if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
#[cfg(feature = "randomize")]
{
- // `ReprOptions.layout_seed` is a deterministic seed that we can use to
- // randomize field ordering with
- let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed.as_u64());
+ use rand::{seq::SliceRandom, SeedableRng};
+ // `ReprOptions.layout_seed` is a deterministic seed we can use to randomize field
+ // ordering.
+ let mut rng =
+ rand_xoshiro::Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
- // Shuffle the ordering of the fields
+ // Shuffle the ordering of the fields.
optimizing.shuffle(&mut rng);
}
// Otherwise we just leave things alone and actually optimize the type's fields
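
The randomize branch shuffles with a per-type deterministic seed, keeping
`-Z randomize-layout` reproducible across runs. A standalone sketch of the
same idea (assumes `rand` 0.8 and `rand_xoshiro` as dependencies; the seed
constant stands in for `repr.field_shuffle_seed`):

    use rand::seq::SliceRandom;
    use rand::SeedableRng;
    use rand_xoshiro::Xoshiro128StarStar;

    fn main() {
        let seed = 0xdead_beef_u64; // hypothetical per-type seed
        let mut a = vec![0usize, 1, 2, 3];
        let mut b = a.clone();
        // Same seed, same permutation, every run.
        a.shuffle(&mut Xoshiro128StarStar::seed_from_u64(seed));
        b.shuffle(&mut Xoshiro128StarStar::seed_from_u64(seed));
        assert_eq!(a, b);
    }
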
@@ -884,35 +917,34 @@ fn univariant(
// To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
// not depend on the layout of the tail.
let max_field_align =
- fields_excluding_tail.iter().map(|f| f.align().abi.bytes()).max().unwrap_or(1);
+ fields_excluding_tail.iter().map(|f| f.align.abi.bytes()).max().unwrap_or(1);
let largest_niche_size = fields_excluding_tail
.iter()
- .filter_map(|f| f.largest_niche())
+ .filter_map(|f| f.largest_niche)
.map(|n| n.available(dl))
.max()
.unwrap_or(0);
- // Calculates a sort key to group fields by their alignment or possibly some size-derived
- // pseudo-alignment.
- let alignment_group_key = |layout: Layout<'_>| {
+ // Calculates a sort key to group fields by their alignment or possibly some
+ // size-derived pseudo-alignment.
+ let alignment_group_key = |layout: &F| {
if let Some(pack) = pack {
- // return the packed alignment in bytes
- layout.align().abi.min(pack).bytes()
+ // Return the packed alignment in bytes.
+ layout.align.abi.min(pack).bytes()
} else {
- // returns log2(effective-align).
- // This is ok since `pack` applies to all fields equally.
- // The calculation assumes that size is an integer multiple of align, except for ZSTs.
- //
- let align = layout.align().abi.bytes();
- let size = layout.size().bytes();
- let niche_size = layout.largest_niche().map(|n| n.available(dl)).unwrap_or(0);
- // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
+ // Returns `log2(effective-align)`. This is ok since `pack` applies to all
+ // fields equally. The calculation assumes that size is an integer multiple of
+ // align, except for ZSTs.
+ let align = layout.align.abi.bytes();
+ let size = layout.size.bytes();
+ let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
+ // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
let size_as_align = align.max(size).trailing_zeros();
let size_as_align = if largest_niche_size > 0 {
match niche_bias {
- // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the array
- // to the front in the first case (for aligned loads) but keep the bool in front
- // in the second case for its niches.
+ // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
+ // array to the front in the first case (for aligned loads) but keep
+ // the bool in front in the second case for its niches.
NicheBias::Start => max_field_align.trailing_zeros().min(size_as_align),
// When moving niches towards the end of the struct then for
// A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
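
The grouping key above is log2 of an "effective" alignment:
`align.max(size).trailing_zeros()` lets a low-aligned but larger field sort
with genuinely higher-aligned ones, exactly as the `[u8; 4]` / `[u8; 6]`
comment says. A sketch of the non-packed branch, with the niche handling
omitted:

    // log2 of the effective alignment used to group fields.
    // Assumes size is an integer multiple of align (true for non-ZST Rust types).
    fn alignment_group_key(align: u64, size: u64) -> u32 {
        align.max(size).trailing_zeros()
    }

    fn main() {
        assert_eq!(alignment_group_key(1, 4), 2); // [u8; 4] groups with align-4 fields
        assert_eq!(alignment_group_key(1, 6), 1); // [u8; 6] groups with align-2 fields
        assert_eq!(alignment_group_key(4, 4), 2); // u32 itself
    }
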
@@ -931,18 +963,18 @@ fn univariant(
match kind {
StructKind::AlwaysSized | StructKind::MaybeUnsized => {
- // Currently `LayoutS` only exposes a single niche so sorting is usually sufficient
- // to get one niche into the preferred position. If it ever supported multiple niches
- // then a more advanced pick-and-pack approach could provide better results.
- // But even for the single-niche cache it's not optimal. E.g. for
- // A(u32, (bool, u8), u16) it would be possible to move the bool to the front
- // but it would require packing the tuple together with the u16 to build a 4-byte
- // group so that the u32 can be placed after it without padding. This kind
- // of packing can't be achieved by sorting.
+ // Currently `LayoutS` only exposes a single niche so sorting is usually
+ // sufficient to get one niche into the preferred position. If it ever
+ // supported multiple niches then a more advanced pick-and-pack approach could
+ // provide better results. But even for the single-niche cache it's not
+ // optimal. E.g. for A(u32, (bool, u8), u16) it would be possible to move the
+ // bool to the front but it would require packing the tuple together with the
+ // u16 to build a 4-byte group so that the u32 can be placed after it without
+ // padding. This kind of packing can't be achieved by sorting.
optimizing.sort_by_key(|&x| {
- let f = fields[x];
- let field_size = f.size().bytes();
- let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
+ let f = &fields[x];
+ let field_size = f.size.bytes();
+ let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
let niche_size_key = match niche_bias {
// large niche first
NicheBias::Start => !niche_size,
@@ -950,8 +982,8 @@ fn univariant(
NicheBias::End => niche_size,
};
let inner_niche_offset_key = match niche_bias {
- NicheBias::Start => f.largest_niche().map_or(0, |n| n.offset.bytes()),
- NicheBias::End => f.largest_niche().map_or(0, |n| {
+ NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
+ NicheBias::End => f.largest_niche.map_or(0, |n| {
!(field_size - n.value.size(dl).bytes() - n.offset.bytes())
}),
};
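
The `!niche_size` in the `NicheBias::Start` arm is the usual trick for getting
a descending order out of an ascending sort: bitwise NOT reverses the ordering
of unsigned keys. A tiny demonstration:

    fn main() {
        // (field name, niche size in values) -- illustrative numbers only.
        let mut fields: Vec<(&str, u128)> = vec![("a", 1), ("b", 255), ("c", 0)];
        // Sorting by !niche_size puts the largest niche first.
        fields.sort_by_key(|&(_, niche_size)| !niche_size);
        let order: Vec<&str> = fields.iter().map(|&(name, _)| name).collect();
        assert_eq!(order, ["b", "a", "c"]);
    }
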
@@ -975,8 +1007,8 @@ fn univariant(
// And put the largest niche in an alignment group at the end
// so it can be used as discriminant in jagged enums
optimizing.sort_by_key(|&x| {
- let f = fields[x];
- let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
+ let f = &fields[x];
+ let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
(alignment_group_key(f), niche_size)
});
}
@@ -1012,24 +1044,24 @@ fn univariant(
));
}
- if field.0.is_unsized() {
+ if field.is_unsized() {
sized = false;
}
// Invariant: offset < dl.obj_size_bound() <= 1<<61
let field_align = if let Some(pack) = pack {
- field.align().min(AbiAndPrefAlign::new(pack))
+ field.align.min(AbiAndPrefAlign::new(pack))
} else {
- field.align()
+ field.align
};
offset = offset.align_to(field_align.abi);
align = align.max(field_align);
- max_repr_align = max_repr_align.max(field.max_repr_align());
+ max_repr_align = max_repr_align.max(field.max_repr_align);
debug!("univariant offset: {:?} field: {:#?}", offset, field);
offsets[i] = offset;
- if let Some(mut niche) = field.largest_niche() {
+ if let Some(mut niche) = field.largest_niche {
let available = niche.available(dl);
// Pick up larger niches.
let prefer_new_niche = match niche_bias {
@@ -1044,7 +1076,7 @@ fn univariant(
}
}
- offset = offset.checked_add(field.size(), dl)?;
+ offset = offset.checked_add(field.size, dl)?;
}
// The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
@@ -1068,16 +1100,20 @@ fn univariant(
inverse_memory_index.invert_bijective_mapping()
} else {
debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
- inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
+ inverse_memory_index.into_iter().map(|it| it.index() as u32).collect()
};
let size = min_size.align_to(align.abi);
+ // FIXME(oli-obk): deduplicate and harden these checks
+ if size.bytes() >= dl.obj_size_bound() {
+ return None;
+ }
let mut layout_of_single_non_zst_field = None;
let mut abi = Abi::Aggregate { sized };
// Try to make this a Scalar/ScalarPair.
if sized && size.bytes() > 0 {
// We skip *all* ZST here and later check if we are good in terms of alignment.
// This lets us handle some cases involving aligned ZST.
- let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
+ let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst());
match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
// We have exactly one non-ZST field.
@@ -1085,18 +1121,17 @@ fn univariant(
layout_of_single_non_zst_field = Some(field);
// Field fills the struct and it has a scalar or scalar pair ABI.
- if offsets[i].bytes() == 0 && align.abi == field.align().abi && size == field.size()
- {
- match field.abi() {
+ if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
+ match field.abi {
// For plain scalars, or vectors of them, we can't unpack
// newtypes for `#[repr(C)]`, as that affects C ABIs.
Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
- abi = field.abi();
+ abi = field.abi;
}
// But scalar pairs are Rust-specific and get
// treated as aggregates by C ABIs anyway.
Abi::ScalarPair(..) => {
- abi = field.abi();
+ abi = field.abi;
}
_ => {}
}
@@ -1105,7 +1140,7 @@ fn univariant(
// Two non-ZST fields, and they're both scalars.
(Some((i, a)), Some((j, b)), None) => {
- match (a.abi(), b.abi()) {
+ match (a.abi, b.abi) {
(Abi::Scalar(a), Abi::Scalar(b)) => {
// Order by the memory placement, not source order.
let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
@@ -1113,7 +1148,7 @@ fn univariant(
} else {
((j, b), (i, a))
};
- let pair = this.scalar_pair(a, b);
+ let pair = this.scalar_pair::<FieldIdx, VariantIdx>(a, b);
let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index.raw, [0, 1]);
@@ -1121,8 +1156,8 @@ fn univariant(
}
_ => panic!(),
};
- if offsets[i] == pair_offsets[FieldIdx::from_usize(0)]
- && offsets[j] == pair_offsets[FieldIdx::from_usize(1)]
+ if offsets[i] == pair_offsets[FieldIdx::new(0)]
+ && offsets[j] == pair_offsets[FieldIdx::new(1)]
&& align == pair.align
&& size == pair.size
{
@@ -1138,13 +1173,13 @@ fn univariant(
_ => {}
}
}
- if fields.iter().any(|f| f.abi().is_uninhabited()) {
+ if fields.iter().any(|f| f.abi.is_uninhabited()) {
abi = Abi::Uninhabited;
}
let unadjusted_abi_align = if repr.transparent() {
match layout_of_single_non_zst_field {
- Some(l) => l.unadjusted_abi_align(),
+ Some(l) => l.unadjusted_abi_align,
None => {
// `repr(transparent)` with all ZST fields.
align.abi
@@ -1155,7 +1190,7 @@ fn univariant(
};
Some(LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary { offsets, memory_index },
abi,
largest_niche,
@@ -1166,17 +1201,22 @@ fn univariant(
})
}
-fn format_field_niches(
- layout: &LayoutS,
- fields: &IndexSlice<FieldIdx, Layout<'_>>,
+fn format_field_niches<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+>(
+ layout: &LayoutS<FieldIdx, VariantIdx>,
+ fields: &IndexSlice<FieldIdx, F>,
dl: &TargetDataLayout,
) -> String {
let mut s = String::new();
for i in layout.fields.index_by_increasing_offset() {
let offset = layout.fields.offset(i);
- let f = fields[i.into()];
- write!(s, "[o{}a{}s{}", offset.bytes(), f.align().abi.bytes(), f.size().bytes()).unwrap();
- if let Some(n) = f.largest_niche() {
+ let f = &fields[FieldIdx::new(i)];
+ write!(s, "[o{}a{}s{}", offset.bytes(), f.align.abi.bytes(), f.size.bytes()).unwrap();
+ if let Some(n) = f.largest_niche {
write!(
s,
" n{}b{}s{}",