Diffstat (limited to 'compiler/rustc_target/src/abi')
 compiler/rustc_target/src/abi/call/loongarch.rs (new) |  342 +
 compiler/rustc_target/src/abi/call/mod.rs             |   32 +-
 compiler/rustc_target/src/abi/call/sparc64.rs         |   18 +-
 compiler/rustc_target/src/abi/mod.rs                  | 1330 +-
 4 files changed, 393 insertions(+), 1329 deletions(-)
diff --git a/compiler/rustc_target/src/abi/call/loongarch.rs b/compiler/rustc_target/src/abi/call/loongarch.rs
new file mode 100644
index 000000000..d29b479de
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/loongarch.rs
@@ -0,0 +1,342 @@
+use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
+use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use crate::spec::HasTargetSpec;
+
+#[derive(Copy, Clone)]
+enum RegPassKind {
+ Float(Reg),
+ Integer(Reg),
+ Unknown,
+}
+
+#[derive(Copy, Clone)]
+enum FloatConv {
+ FloatPair(Reg, Reg),
+ Float(Reg),
+ MixedPair(Reg, Reg),
+}
+
+#[derive(Copy, Clone)]
+struct CannotUseFpConv;
+
+fn is_loongarch_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
+ match arg.layout.abi {
+ Abi::Vector { .. } => true,
+ _ => arg.layout.is_aggregate(),
+ }
+}
+
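+/// Recursively flattens `arg_layout` into at most two scalar leaves,
+/// recorded in `field1_kind` and `field2_kind`. Returns `CannotUseFpConv`
+/// when the type cannot use the FP calling convention: too many leaves,
+/// an integer wider than XLEN, or a float wider than FLEN.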
+fn should_use_fp_conv_helper<'a, Ty, C>(
+ cx: &C,
+ arg_layout: &TyAndLayout<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+ field1_kind: &mut RegPassKind,
+ field2_kind: &mut RegPassKind,
+) -> Result<(), CannotUseFpConv>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ match arg_layout.abi {
+ Abi::Scalar(scalar) => match scalar.primitive() {
+ abi::Int(..) | abi::Pointer => {
+ if arg_layout.size.bits() > xlen {
+ return Err(CannotUseFpConv);
+ }
+ match (*field1_kind, *field2_kind) {
+ (RegPassKind::Unknown, _) => {
+ *field1_kind = RegPassKind::Integer(Reg {
+ kind: RegKind::Integer,
+ size: arg_layout.size,
+ });
+ }
+ (RegPassKind::Float(_), RegPassKind::Unknown) => {
+ *field2_kind = RegPassKind::Integer(Reg {
+ kind: RegKind::Integer,
+ size: arg_layout.size,
+ });
+ }
+ _ => return Err(CannotUseFpConv),
+ }
+ }
+ abi::F32 | abi::F64 => {
+ if arg_layout.size.bits() > flen {
+ return Err(CannotUseFpConv);
+ }
+ match (*field1_kind, *field2_kind) {
+ (RegPassKind::Unknown, _) => {
+ *field1_kind =
+ RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
+ }
+ (_, RegPassKind::Unknown) => {
+ *field2_kind =
+ RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
+ }
+ _ => return Err(CannotUseFpConv),
+ }
+ }
+ },
+ Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+ FieldsShape::Primitive => {
+ unreachable!("aggregates can't have `FieldsShape::Primitive`")
+ }
+ FieldsShape::Union(_) => {
+ if !arg_layout.is_zst() {
+ return Err(CannotUseFpConv);
+ }
+ }
+ FieldsShape::Array { count, .. } => {
+ for _ in 0..count {
+ let elem_layout = arg_layout.field(cx, 0);
+ should_use_fp_conv_helper(
+ cx,
+ &elem_layout,
+ xlen,
+ flen,
+ field1_kind,
+ field2_kind,
+ )?;
+ }
+ }
+ FieldsShape::Arbitrary { .. } => {
+ match arg_layout.variants {
+ abi::Variants::Multiple { .. } => return Err(CannotUseFpConv),
+ abi::Variants::Single { .. } => (),
+ }
+ for i in arg_layout.fields.index_by_increasing_offset() {
+ let field = arg_layout.field(cx, i);
+ should_use_fp_conv_helper(cx, &field, xlen, flen, field1_kind, field2_kind)?;
+ }
+ }
+ },
+ }
+ Ok(())
+}
+
+fn should_use_fp_conv<'a, Ty, C>(
+ cx: &C,
+ arg: &TyAndLayout<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+) -> Option<FloatConv>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ let mut field1_kind = RegPassKind::Unknown;
+ let mut field2_kind = RegPassKind::Unknown;
+ if should_use_fp_conv_helper(cx, arg, xlen, flen, &mut field1_kind, &mut field2_kind).is_err() {
+ return None;
+ }
+ match (field1_kind, field2_kind) {
+ (RegPassKind::Integer(l), RegPassKind::Float(r)) => Some(FloatConv::MixedPair(l, r)),
+ (RegPassKind::Float(l), RegPassKind::Integer(r)) => Some(FloatConv::MixedPair(l, r)),
+ (RegPassKind::Float(l), RegPassKind::Float(r)) => Some(FloatConv::FloatPair(l, r)),
+ (RegPassKind::Float(f), RegPassKind::Unknown) => Some(FloatConv::Float(f)),
+ _ => None,
+ }
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
+ match conv {
+ FloatConv::Float(f) => {
+ arg.cast_to(f);
+ }
+ FloatConv::FloatPair(l, r) => {
+ arg.cast_to(CastTarget::pair(l, r));
+ }
+ FloatConv::MixedPair(l, r) => {
+ arg.cast_to(CastTarget::pair(l, r));
+ }
+ }
+ return false;
+ }
+
+ let total = arg.layout.size;
+
+ // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
+ // the argument list with the address."
+ // "Aggregates larger than 2✕XLEN bits are passed by reference and are
+ // replaced in the argument list with the address, as are C++ aggregates
+ // with nontrivial copy constructors, destructors, or vtables."
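+ // e.g. on loongarch64 (XLEN = 64) a 192-bit `[u64; 3]` is returned
+ // indirectly, while a 128-bit aggregate still comes back in two GPRs below.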
+ if total.bits() > 2 * xlen {
+ // We rely on the LLVM backend to lower the passing of scalars larger than 2*XLEN.
+ if is_loongarch_aggregate(arg) {
+ arg.make_indirect();
+ }
+ return true;
+ }
+
+ let xlen_reg = match xlen {
+ 32 => Reg::i32(),
+ 64 => Reg::i64(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+ if is_loongarch_aggregate(arg) {
+ if total.bits() <= xlen {
+ arg.cast_to(xlen_reg);
+ } else {
+ arg.cast_to(Uniform { unit: xlen_reg, total: Size::from_bits(xlen * 2) });
+ }
+ return false;
+ }
+
+ // "When passed in registers, scalars narrower than XLEN bits are widened
+ // according to the sign of their type up to 32 bits, then sign-extended to
+ // XLEN bits."
+ extend_integer_width(arg, xlen);
+ false
+}
+
+fn classify_arg<'a, Ty, C>(
+ cx: &C,
+ arg: &mut ArgAbi<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+ is_vararg: bool,
+ avail_gprs: &mut u64,
+ avail_fprs: &mut u64,
+) where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ if !is_vararg {
+ match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
+ Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
+ *avail_fprs -= 1;
+ arg.cast_to(f);
+ return;
+ }
+ Some(FloatConv::FloatPair(l, r)) if *avail_fprs >= 2 => {
+ *avail_fprs -= 2;
+ arg.cast_to(CastTarget::pair(l, r));
+ return;
+ }
+ Some(FloatConv::MixedPair(l, r)) if *avail_fprs >= 1 && *avail_gprs >= 1 => {
+ *avail_gprs -= 1;
+ *avail_fprs -= 1;
+ arg.cast_to(CastTarget::pair(l, r));
+ return;
+ }
+ _ => (),
+ }
+ }
+
+ let total = arg.layout.size;
+ let align = arg.layout.align.abi.bits();
+
+ // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
+ // the argument list with the address."
+ // "Aggregates larger than 2✕XLEN bits are passed by reference and are
+ // replaced in the argument list with the address, as are C++ aggregates
+ // with nontrivial copy constructors, destructors, or vtables."
+ if total.bits() > 2 * xlen {
+ // We rely on the LLVM backend to lower the passing of scalars larger than 2*XLEN.
+ if is_loongarch_aggregate(arg) {
+ arg.make_indirect();
+ }
+ if *avail_gprs >= 1 {
+ *avail_gprs -= 1;
+ }
+ return;
+ }
+
+ let double_xlen_reg = match xlen {
+ 32 => Reg::i64(),
+ 64 => Reg::i128(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+
+ let xlen_reg = match xlen {
+ 32 => Reg::i32(),
+ 64 => Reg::i64(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+
+ if total.bits() > xlen {
+ let align_regs = align > xlen;
+ if is_loongarch_aggregate(arg) {
+ arg.cast_to(Uniform {
+ unit: if align_regs { double_xlen_reg } else { xlen_reg },
+ total: Size::from_bits(xlen * 2),
+ });
+ }
+ if align_regs && is_vararg {
+ *avail_gprs -= *avail_gprs % 2;
+ }
+ if *avail_gprs >= 2 {
+ *avail_gprs -= 2;
+ } else {
+ *avail_gprs = 0;
+ }
+ return;
+ } else if is_loongarch_aggregate(arg) {
+ arg.cast_to(xlen_reg);
+ if *avail_gprs >= 1 {
+ *avail_gprs -= 1;
+ }
+ return;
+ }
+
+ // "When passed in registers, scalars narrower than XLEN bits are widened
+ // according to the sign of their type up to 32 bits, then sign-extended to
+ // XLEN bits."
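+ // e.g. on LP64 an `i8` is sign-extended and a `u8` zero-extended to 64
+ // bits, while both `i32` and `u32` are sign-extended (see
+ // `extend_integer_width` below).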
+ if *avail_gprs >= 1 {
+ extend_integer_width(arg, xlen);
+ *avail_gprs -= 1;
+ }
+}
+
+fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) {
+ if let Abi::Scalar(scalar) = arg.layout.abi {
+ if let abi::Int(i, _) = scalar.primitive() {
+ // 32-bit integers are always sign-extended
+ if i.size().bits() == 32 && xlen > 32 {
+ if let PassMode::Direct(ref mut attrs) = arg.mode {
+ attrs.ext(ArgExtension::Sext);
+ return;
+ }
+ }
+ }
+ }
+
+ arg.extend_integer_width_to(xlen);
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ let xlen = cx.data_layout().pointer_size.bits();
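+ // FLEN (FP register width) follows the ABI name: 32 for the single-float
+ // ABIs, 64 for the double-float ABIs, and 0 when hard-float is unavailable.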
+ let flen = match &cx.target_spec().llvm_abiname[..] {
+ "ilp32f" | "lp64f" => 32,
+ "ilp32d" | "lp64d" => 64,
+ _ => 0,
+ };
+
+ let mut avail_gprs = 8;
+ let mut avail_fprs = 8;
+
+ if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
+ avail_gprs -= 1;
+ }
+
+ for (i, arg) in fn_abi.args.iter_mut().enumerate() {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(
+ cx,
+ arg,
+ xlen,
+ flen,
+ i >= fn_abi.fixed_count as usize,
+ &mut avail_gprs,
+ &mut avail_fprs,
+ );
+ }
+}
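A sketch of how the new classification plays out on loongarch64 with the
lp64d ABI (illustrative only, not part of the commit; assumes the default
eight argument GPRs and FPRs are still available):

    #[repr(C)]
    struct Mixed { a: f32, b: i32 } // FloatConv::MixedPair: one FPR + one GPR
    #[repr(C)]
    struct Pair { a: f64, b: f64 }  // FloatConv::FloatPair: two FPRs

    extern "C" {
        // `x` takes a single FPR (FloatConv::Float); an aggregate larger
        // than 2×XLEN = 128 bits would instead be passed indirectly.
        fn callee(x: f64, y: Mixed) -> Pair;
    }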
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 9e5f0e4d1..a5ffaebea 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -3,6 +3,7 @@ use crate::abi::{HasDataLayout, TyAbiInterface, TyAndLayout};
use crate::spec::{self, HasTargetSpec};
use rustc_span::Symbol;
use std::fmt;
+use std::str::FromStr;
mod aarch64;
mod amdgpu;
@@ -10,6 +11,7 @@ mod arm;
mod avr;
mod bpf;
mod hexagon;
+mod loongarch;
mod m68k;
mod mips;
mod mips64;
@@ -260,7 +262,7 @@ impl CastTarget {
let mut size = self.rest.total;
for i in 0..self.prefix.iter().count() {
match self.prefix[i] {
- Some(v) => size += Size { raw: v.size.bytes() },
+ Some(v) => size += v.size,
None => {}
}
}
@@ -696,6 +698,7 @@ impl<'a, Ty> FnAbi<'a, Ty> {
"amdgpu" => amdgpu::compute_abi_info(cx, self),
"arm" => arm::compute_abi_info(cx, self),
"avr" => avr::compute_abi_info(self),
+ "loongarch64" => loongarch::compute_abi_info(cx, self),
"m68k" => m68k::compute_abi_info(self),
"mips" => mips::compute_abi_info(cx, self),
"mips64" => mips64::compute_abi_info(cx, self),
@@ -735,6 +738,33 @@ impl<'a, Ty> FnAbi<'a, Ty> {
}
}
+impl FromStr for Conv {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "C" => Ok(Conv::C),
+ "Rust" => Ok(Conv::Rust),
+ "RustCold" => Ok(Conv::Rust),
+ "ArmAapcs" => Ok(Conv::ArmAapcs),
+ "CCmseNonSecureCall" => Ok(Conv::CCmseNonSecureCall),
+ "Msp430Intr" => Ok(Conv::Msp430Intr),
+ "PtxKernel" => Ok(Conv::PtxKernel),
+ "X86Fastcall" => Ok(Conv::X86Fastcall),
+ "X86Intr" => Ok(Conv::X86Intr),
+ "X86Stdcall" => Ok(Conv::X86Stdcall),
+ "X86ThisCall" => Ok(Conv::X86ThisCall),
+ "X86VectorCall" => Ok(Conv::X86VectorCall),
+ "X86_64SysV" => Ok(Conv::X86_64SysV),
+ "X86_64Win64" => Ok(Conv::X86_64Win64),
+ "AmdGpuKernel" => Ok(Conv::AmdGpuKernel),
+ "AvrInterrupt" => Ok(Conv::AvrInterrupt),
+ "AvrNonBlockingInterrupt" => Ok(Conv::AvrNonBlockingInterrupt),
+ _ => Err(format!("'{}' is not a valid value for entry function call convention.", s)),
+ }
+ }
+}
+
// Some types are used a lot. Make sure they don't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
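The new `FromStr` impl lets a calling-convention name be parsed from a
string, e.g. when reading it out of a target spec (a minimal sketch,
assuming the internal `rustc_target` crate is available):

    use rustc_target::abi::call::Conv;

    fn entry_conv(name: &str) -> Conv {
        // Unknown names surface the error string built in `from_str`.
        name.parse::<Conv>().unwrap_or_else(|e| panic!("{e}"))
    }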
diff --git a/compiler/rustc_target/src/abi/call/sparc64.rs b/compiler/rustc_target/src/abi/call/sparc64.rs
index 1b74959ad..c8b6ac5ae 100644
--- a/compiler/rustc_target/src/abi/call/sparc64.rs
+++ b/compiler/rustc_target/src/abi/call/sparc64.rs
@@ -78,7 +78,7 @@ fn arg_scalar_pair<C>(
where
C: HasDataLayout,
{
- data = arg_scalar(cx, &scalar1, offset, data);
+ data = arg_scalar(cx, scalar1, offset, data);
match (scalar1.primitive(), scalar2.primitive()) {
(abi::F32, _) => offset += Reg::f32().size,
(_, abi::F64) => offset += Reg::f64().size,
@@ -87,10 +87,10 @@ where
_ => {}
}
- if (offset.raw % 4) != 0 && scalar2.primitive().is_float() {
- offset.raw += 4 - (offset.raw % 4);
+ if (offset.bytes() % 4) != 0 && scalar2.primitive().is_float() {
+ offset += Size::from_bytes(4 - (offset.bytes() % 4));
}
- data = arg_scalar(cx, &scalar2, offset, data);
+ data = arg_scalar(cx, scalar2, offset, data);
return data;
}
@@ -169,14 +169,14 @@ where
has_float: false,
arg_attribute: ArgAttribute::default(),
},
- Size { raw: 0 },
+ Size::ZERO,
);
if data.has_float {
// Structure { float, int, int } doesn't like to be handled like
// { float, long int }. Other way around it doesn't mind.
if data.last_offset < arg.layout.size
- && (data.last_offset.raw % 8) != 0
+ && (data.last_offset.bytes() % 8) != 0
&& data.prefix_index < data.prefix.len()
{
data.prefix[data.prefix_index] = Some(Reg::i32());
@@ -185,7 +185,7 @@ where
}
let mut rest_size = arg.layout.size - data.last_offset;
- if (rest_size.raw % 8) != 0 && data.prefix_index < data.prefix.len() {
+ if (rest_size.bytes() % 8) != 0 && data.prefix_index < data.prefix.len() {
data.prefix[data.prefix_index] = Some(Reg::i32());
rest_size = rest_size - Reg::i32().size;
}
@@ -214,13 +214,13 @@ where
C: HasDataLayout,
{
if !fn_abi.ret.is_ignore() {
- classify_arg(cx, &mut fn_abi.ret, Size { raw: 32 });
+ classify_arg(cx, &mut fn_abi.ret, Size::from_bytes(32));
}
for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
- classify_arg(cx, arg, Size { raw: 16 });
+ classify_arg(cx, arg, Size::from_bytes(16));
}
}
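The sparc64 changes are a mechanical cleanup: `Size` values are now built
with the public constructors (`Size::from_bytes`, `Size::ZERO`) and read
through `bytes()` instead of struct literals and direct `raw` field access,
presumably so the field need not stay public once `Size` moves to the new
`rustc_abi` crate. A minimal sketch of the equivalence:

    // Before: Size { raw: 32 }   (reaches into the representation)
    // After:
    let s = Size::from_bytes(32);
    assert_eq!(s.bytes(), 32);
    assert_eq!(s.bits(), 256); // bits() is bytes() * 8, checked for overflow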
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index 7171ca7bf..53c9878ab 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -2,284 +2,16 @@ pub use Integer::*;
pub use Primitive::*;
use crate::json::{Json, ToJson};
-use crate::spec::Target;
-use std::convert::{TryFrom, TryInto};
use std::fmt;
-use std::iter::Step;
-use std::num::{NonZeroUsize, ParseIntError};
-use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
-use std::str::FromStr;
+use std::ops::Deref;
use rustc_data_structures::intern::Interned;
-use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable_Generic;
pub mod call;
-/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
-/// for a target, which contains everything needed to compute layouts.
-pub struct TargetDataLayout {
- pub endian: Endian,
- pub i1_align: AbiAndPrefAlign,
- pub i8_align: AbiAndPrefAlign,
- pub i16_align: AbiAndPrefAlign,
- pub i32_align: AbiAndPrefAlign,
- pub i64_align: AbiAndPrefAlign,
- pub i128_align: AbiAndPrefAlign,
- pub f32_align: AbiAndPrefAlign,
- pub f64_align: AbiAndPrefAlign,
- pub pointer_size: Size,
- pub pointer_align: AbiAndPrefAlign,
- pub aggregate_align: AbiAndPrefAlign,
-
- /// Alignments for vector types.
- pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
-
- pub instruction_address_space: AddressSpace,
-
- /// Minimum size of #[repr(C)] enums (default I32 bits)
- pub c_enum_min_size: Integer,
-}
-
-impl Default for TargetDataLayout {
- /// Creates an instance of `TargetDataLayout`.
- fn default() -> TargetDataLayout {
- let align = |bits| Align::from_bits(bits).unwrap();
- TargetDataLayout {
- endian: Endian::Big,
- i1_align: AbiAndPrefAlign::new(align(8)),
- i8_align: AbiAndPrefAlign::new(align(8)),
- i16_align: AbiAndPrefAlign::new(align(16)),
- i32_align: AbiAndPrefAlign::new(align(32)),
- i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
- i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
- f32_align: AbiAndPrefAlign::new(align(32)),
- f64_align: AbiAndPrefAlign::new(align(64)),
- pointer_size: Size::from_bits(64),
- pointer_align: AbiAndPrefAlign::new(align(64)),
- aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
- vector_align: vec![
- (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
- (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
- ],
- instruction_address_space: AddressSpace::DATA,
- c_enum_min_size: Integer::I32,
- }
- }
-}
-
-pub enum TargetDataLayoutErrors<'a> {
- InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
- InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
- MissingAlignment { cause: &'a str },
- InvalidAlignment { cause: &'a str, err: String },
- InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
- InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
- InvalidBitsSize { err: String },
-}
-
-impl TargetDataLayout {
- pub fn parse<'a>(target: &'a Target) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
- // Parse an address space index from a string.
- let parse_address_space = |s: &'a str, cause: &'a str| {
- s.parse::<u32>().map(AddressSpace).map_err(|err| {
- TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
- })
- };
-
- // Parse a bit count from a string.
- let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
- s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
- kind,
- bit: s,
- cause,
- err,
- })
- };
-
- // Parse a size string.
- let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
-
- // Parse an alignment string.
- let align = |s: &[&'a str], cause: &'a str| {
- if s.is_empty() {
- return Err(TargetDataLayoutErrors::MissingAlignment { cause });
- }
- let align_from_bits = |bits| {
- Align::from_bits(bits)
- .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
- };
- let abi = parse_bits(s[0], "alignment", cause)?;
- let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
- Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
- };
-
- let mut dl = TargetDataLayout::default();
- let mut i128_align_src = 64;
- for spec in target.data_layout.split('-') {
- let spec_parts = spec.split(':').collect::<Vec<_>>();
-
- match &*spec_parts {
- ["e"] => dl.endian = Endian::Little,
- ["E"] => dl.endian = Endian::Big,
- [p] if p.starts_with('P') => {
- dl.instruction_address_space = parse_address_space(&p[1..], "P")?
- }
- ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
- ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
- ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
- [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
- dl.pointer_size = size(s, p)?;
- dl.pointer_align = align(a, p)?;
- }
- [s, ref a @ ..] if s.starts_with('i') => {
- let Ok(bits) = s[1..].parse::<u64>() else {
- size(&s[1..], "i")?; // For the user error.
- continue;
- };
- let a = align(a, s)?;
- match bits {
- 1 => dl.i1_align = a,
- 8 => dl.i8_align = a,
- 16 => dl.i16_align = a,
- 32 => dl.i32_align = a,
- 64 => dl.i64_align = a,
- _ => {}
- }
- if bits >= i128_align_src && bits <= 128 {
- // Default alignment for i128 is decided by taking the alignment of
- // largest-sized i{64..=128}.
- i128_align_src = bits;
- dl.i128_align = a;
- }
- }
- [s, ref a @ ..] if s.starts_with('v') => {
- let v_size = size(&s[1..], "v")?;
- let a = align(a, s)?;
- if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
- v.1 = a;
- continue;
- }
- // No existing entry, add a new one.
- dl.vector_align.push((v_size, a));
- }
- _ => {} // Ignore everything else.
- }
- }
-
- // Perform consistency checks against the Target information.
- if dl.endian != target.endian {
- return Err(TargetDataLayoutErrors::InconsistentTargetArchitecture {
- dl: dl.endian.as_str(),
- target: target.endian.as_str(),
- });
- }
-
- let target_pointer_width: u64 = target.pointer_width.into();
- if dl.pointer_size.bits() != target_pointer_width {
- return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth {
- pointer_size: dl.pointer_size.bits(),
- target: target.pointer_width,
- });
- }
-
- dl.c_enum_min_size = match Integer::from_size(Size::from_bits(target.c_enum_min_bits)) {
- Ok(bits) => bits,
- Err(err) => return Err(TargetDataLayoutErrors::InvalidBitsSize { err }),
- };
-
- Ok(dl)
- }
-
- /// Returns exclusive upper bound on object size.
- ///
- /// The theoretical maximum object size is defined as the maximum positive `isize` value.
- /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
- /// index every address within an object along with one byte past the end, along with allowing
- /// `isize` to store the difference between any two pointers into an object.
- ///
- /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
- /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
- /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
- /// address space on 64-bit ARMv8 and x86_64.
- #[inline]
- pub fn obj_size_bound(&self) -> u64 {
- match self.pointer_size.bits() {
- 16 => 1 << 15,
- 32 => 1 << 31,
- 64 => 1 << 47,
- bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
- }
- }
-
- #[inline]
- pub fn ptr_sized_integer(&self) -> Integer {
- match self.pointer_size.bits() {
- 16 => I16,
- 32 => I32,
- 64 => I64,
- bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
- }
- }
-
- #[inline]
- pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
- for &(size, align) in &self.vector_align {
- if size == vec_size {
- return align;
- }
- }
- // Default to natural alignment, which is what LLVM does.
- // That is, use the size, rounded up to a power of 2.
- AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
- }
-}
-
-pub trait HasDataLayout {
- fn data_layout(&self) -> &TargetDataLayout;
-}
-
-impl HasDataLayout for TargetDataLayout {
- #[inline]
- fn data_layout(&self) -> &TargetDataLayout {
- self
- }
-}
-
-/// Endianness of the target, which must match cfg(target-endian).
-#[derive(Copy, Clone, PartialEq)]
-pub enum Endian {
- Little,
- Big,
-}
-
-impl Endian {
- pub fn as_str(&self) -> &'static str {
- match self {
- Self::Little => "little",
- Self::Big => "big",
- }
- }
-}
-
-impl fmt::Debug for Endian {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str(self.as_str())
- }
-}
-
-impl FromStr for Endian {
- type Err = String;
-
- fn from_str(s: &str) -> Result<Self, Self::Err> {
- match s {
- "little" => Ok(Self::Little),
- "big" => Ok(Self::Big),
- _ => Err(format!(r#"unknown endian: "{}""#, s)),
- }
- }
-}
+pub use rustc_abi::*;
impl ToJson for Endian {
fn to_json(&self) -> Json {
@@ -287,1025 +19,15 @@ impl ToJson for Endian {
}
}
-/// Size of a type in bytes.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
-#[derive(HashStable_Generic)]
-pub struct Size {
- raw: u64,
-}
-
-// This is debug-printed a lot in larger structs, don't waste too much space there
-impl fmt::Debug for Size {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "Size({} bytes)", self.bytes())
- }
-}
-
-impl Size {
- pub const ZERO: Size = Size { raw: 0 };
-
- /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
- /// not a multiple of 8.
- pub fn from_bits(bits: impl TryInto<u64>) -> Size {
- let bits = bits.try_into().ok().unwrap();
- // Avoid potential overflow from `bits + 7`.
- Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
- }
-
- #[inline]
- pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
- let bytes: u64 = bytes.try_into().ok().unwrap();
- Size { raw: bytes }
- }
-
- #[inline]
- pub fn bytes(self) -> u64 {
- self.raw
- }
-
- #[inline]
- pub fn bytes_usize(self) -> usize {
- self.bytes().try_into().unwrap()
- }
-
- #[inline]
- pub fn bits(self) -> u64 {
- #[cold]
- fn overflow(bytes: u64) -> ! {
- panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
- }
-
- self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
- }
-
- #[inline]
- pub fn bits_usize(self) -> usize {
- self.bits().try_into().unwrap()
- }
-
- #[inline]
- pub fn align_to(self, align: Align) -> Size {
- let mask = align.bytes() - 1;
- Size::from_bytes((self.bytes() + mask) & !mask)
- }
-
- #[inline]
- pub fn is_aligned(self, align: Align) -> bool {
- let mask = align.bytes() - 1;
- self.bytes() & mask == 0
- }
-
- #[inline]
- pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
- let dl = cx.data_layout();
-
- let bytes = self.bytes().checked_add(offset.bytes())?;
-
- if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
- }
-
- #[inline]
- pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
- let dl = cx.data_layout();
-
- let bytes = self.bytes().checked_mul(count)?;
- if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
- }
-
- /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
- /// (i.e., if it is negative, fill with 1's on the left).
- #[inline]
- pub fn sign_extend(self, value: u128) -> u128 {
- let size = self.bits();
- if size == 0 {
- // Truncated until nothing is left.
- return 0;
- }
- // Sign-extend it.
- let shift = 128 - size;
- // Shift the unsigned value to the left, then shift back to the right as signed
- // (essentially fills with sign bit on the left).
- (((value << shift) as i128) >> shift) as u128
- }
-
- /// Truncates `value` to `self` bits.
- #[inline]
- pub fn truncate(self, value: u128) -> u128 {
- let size = self.bits();
- if size == 0 {
- // Truncated until nothing is left.
- return 0;
- }
- let shift = 128 - size;
- // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
- (value << shift) >> shift
- }
-
- #[inline]
- pub fn signed_int_min(&self) -> i128 {
- self.sign_extend(1_u128 << (self.bits() - 1)) as i128
- }
-
- #[inline]
- pub fn signed_int_max(&self) -> i128 {
- i128::MAX >> (128 - self.bits())
- }
-
- #[inline]
- pub fn unsigned_int_max(&self) -> u128 {
- u128::MAX >> (128 - self.bits())
- }
-}
-
-// Panicking addition, subtraction and multiplication for convenience.
-// Avoid during layout computation, return `LayoutError` instead.
-
-impl Add for Size {
- type Output = Size;
- #[inline]
- fn add(self, other: Size) -> Size {
- Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
- panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
- }))
- }
-}
-
-impl Sub for Size {
- type Output = Size;
- #[inline]
- fn sub(self, other: Size) -> Size {
- Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
- panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
- }))
- }
-}
-
-impl Mul<Size> for u64 {
- type Output = Size;
- #[inline]
- fn mul(self, size: Size) -> Size {
- size * self
- }
-}
-
-impl Mul<u64> for Size {
- type Output = Size;
- #[inline]
- fn mul(self, count: u64) -> Size {
- match self.bytes().checked_mul(count) {
- Some(bytes) => Size::from_bytes(bytes),
- None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
- }
- }
-}
-
-impl AddAssign for Size {
- #[inline]
- fn add_assign(&mut self, other: Size) {
- *self = *self + other;
- }
-}
-
-impl Step for Size {
- #[inline]
- fn steps_between(start: &Self, end: &Self) -> Option<usize> {
- u64::steps_between(&start.bytes(), &end.bytes())
- }
-
- #[inline]
- fn forward_checked(start: Self, count: usize) -> Option<Self> {
- u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
- }
-
- #[inline]
- fn forward(start: Self, count: usize) -> Self {
- Self::from_bytes(u64::forward(start.bytes(), count))
- }
-
- #[inline]
- unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
- Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
- }
-
- #[inline]
- fn backward_checked(start: Self, count: usize) -> Option<Self> {
- u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
- }
-
- #[inline]
- fn backward(start: Self, count: usize) -> Self {
- Self::from_bytes(u64::backward(start.bytes(), count))
- }
-
- #[inline]
- unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
- Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
- }
-}
-
-/// Alignment of a type in bytes (always a power of two).
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
-#[derive(HashStable_Generic)]
-pub struct Align {
- pow2: u8,
-}
-
-// This is debug-printed a lot in larger structs, don't waste too much space there
-impl fmt::Debug for Align {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "Align({} bytes)", self.bytes())
- }
-}
-
-impl Align {
- pub const ONE: Align = Align { pow2: 0 };
- pub const MAX: Align = Align { pow2: 29 };
-
- #[inline]
- pub fn from_bits(bits: u64) -> Result<Align, String> {
- Align::from_bytes(Size::from_bits(bits).bytes())
- }
-
- #[inline]
- pub fn from_bytes(align: u64) -> Result<Align, String> {
- // Treat an alignment of 0 bytes like 1-byte alignment.
- if align == 0 {
- return Ok(Align::ONE);
- }
-
- #[cold]
- fn not_power_of_2(align: u64) -> String {
- format!("`{}` is not a power of 2", align)
- }
-
- #[cold]
- fn too_large(align: u64) -> String {
- format!("`{}` is too large", align)
- }
-
- let mut bytes = align;
- let mut pow2: u8 = 0;
- while (bytes & 1) == 0 {
- pow2 += 1;
- bytes >>= 1;
- }
- if bytes != 1 {
- return Err(not_power_of_2(align));
- }
- if pow2 > Self::MAX.pow2 {
- return Err(too_large(align));
- }
-
- Ok(Align { pow2 })
- }
-
- #[inline]
- pub fn bytes(self) -> u64 {
- 1 << self.pow2
- }
-
- #[inline]
- pub fn bits(self) -> u64 {
- self.bytes() * 8
- }
-
- /// Computes the best alignment possible for the given offset
- /// (the largest power of two that the offset is a multiple of).
- ///
- /// N.B., for an offset of `0`, this happens to return `2^64`.
- #[inline]
- pub fn max_for_offset(offset: Size) -> Align {
- Align { pow2: offset.bytes().trailing_zeros() as u8 }
- }
-
- /// Lower the alignment, if necessary, such that the given offset
- /// is aligned to it (the offset is a multiple of the alignment).
- #[inline]
- pub fn restrict_for_offset(self, offset: Size) -> Align {
- self.min(Align::max_for_offset(offset))
- }
-}
-
-/// A pair of alignments, ABI-mandated and preferred.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-#[derive(HashStable_Generic)]
-pub struct AbiAndPrefAlign {
- pub abi: Align,
- pub pref: Align,
-}
-
-impl AbiAndPrefAlign {
- #[inline]
- pub fn new(align: Align) -> AbiAndPrefAlign {
- AbiAndPrefAlign { abi: align, pref: align }
- }
-
- #[inline]
- pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
- AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
- }
-
- #[inline]
- pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
- AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
- }
-}
-
-/// Integers, also used for enum discriminants.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
-pub enum Integer {
- I8,
- I16,
- I32,
- I64,
- I128,
-}
-
-impl Integer {
- #[inline]
- pub fn size(self) -> Size {
- match self {
- I8 => Size::from_bytes(1),
- I16 => Size::from_bytes(2),
- I32 => Size::from_bytes(4),
- I64 => Size::from_bytes(8),
- I128 => Size::from_bytes(16),
- }
- }
-
- pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
- let dl = cx.data_layout();
-
- match self {
- I8 => dl.i8_align,
- I16 => dl.i16_align,
- I32 => dl.i32_align,
- I64 => dl.i64_align,
- I128 => dl.i128_align,
- }
- }
-
- /// Finds the smallest Integer type which can represent the signed value.
- #[inline]
- pub fn fit_signed(x: i128) -> Integer {
- match x {
- -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
- -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
- -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
- -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
- _ => I128,
- }
- }
-
- /// Finds the smallest Integer type which can represent the unsigned value.
- #[inline]
- pub fn fit_unsigned(x: u128) -> Integer {
- match x {
- 0..=0x0000_0000_0000_00ff => I8,
- 0..=0x0000_0000_0000_ffff => I16,
- 0..=0x0000_0000_ffff_ffff => I32,
- 0..=0xffff_ffff_ffff_ffff => I64,
- _ => I128,
- }
- }
-
- /// Finds the smallest integer with the given alignment.
- pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
- let dl = cx.data_layout();
-
- for candidate in [I8, I16, I32, I64, I128] {
- if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
- return Some(candidate);
- }
- }
- None
- }
-
- /// Find the largest integer with the given alignment or less.
- pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
- let dl = cx.data_layout();
-
- // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
- for candidate in [I64, I32, I16] {
- if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
- return candidate;
- }
- }
- I8
- }
-
- // FIXME(eddyb) consolidate this and other methods that find the appropriate
- // `Integer` given some requirements.
- #[inline]
- fn from_size(size: Size) -> Result<Self, String> {
- match size.bits() {
- 8 => Ok(Integer::I8),
- 16 => Ok(Integer::I16),
- 32 => Ok(Integer::I32),
- 64 => Ok(Integer::I64),
- 128 => Ok(Integer::I128),
- _ => Err(format!("rust does not support integers with {} bits", size.bits())),
- }
- }
-}
-
-/// Fundamental unit of memory access and layout.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum Primitive {
- /// The `bool` is the signedness of the `Integer` type.
- ///
- /// One would think we would not care about such details this low down,
- /// but some ABIs are described in terms of C types and ISAs where the
- /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
- /// a negative integer passed by zero-extension will appear positive in
- /// the callee, and most operations on it will produce the wrong values.
- Int(Integer, bool),
- F32,
- F64,
- Pointer,
-}
-
-impl Primitive {
- pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
- let dl = cx.data_layout();
-
- match self {
- Int(i, _) => i.size(),
- F32 => Size::from_bits(32),
- F64 => Size::from_bits(64),
- Pointer => dl.pointer_size,
- }
- }
-
- pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
- let dl = cx.data_layout();
-
- match self {
- Int(i, _) => i.align(dl),
- F32 => dl.f32_align,
- F64 => dl.f64_align,
- Pointer => dl.pointer_align,
- }
- }
-
- // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
- #[inline]
- pub fn is_float(self) -> bool {
- matches!(self, F32 | F64)
- }
-
- // FIXME(eddyb) remove, it's completely unused.
- #[inline]
- pub fn is_int(self) -> bool {
- matches!(self, Int(..))
- }
-
- #[inline]
- pub fn is_ptr(self) -> bool {
- matches!(self, Pointer)
- }
-}
-
-/// Inclusive wrap-around range of valid values, that is, if
-/// start > end, it represents `start..=MAX`,
-/// followed by `0..=end`.
-///
-/// That is, for an i8 primitive, a range of `254..=2` means following
-/// sequence:
-///
-/// 254 (-2), 255 (-1), 0, 1, 2
-///
-/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-#[derive(HashStable_Generic)]
-pub struct WrappingRange {
- pub start: u128,
- pub end: u128,
-}
-
-impl WrappingRange {
- pub fn full(size: Size) -> Self {
- Self { start: 0, end: size.unsigned_int_max() }
- }
-
- /// Returns `true` if `v` is contained in the range.
- #[inline(always)]
- pub fn contains(&self, v: u128) -> bool {
- if self.start <= self.end {
- self.start <= v && v <= self.end
- } else {
- self.start <= v || v <= self.end
- }
- }
-
- /// Returns `self` with replaced `start`
- #[inline(always)]
- pub fn with_start(mut self, start: u128) -> Self {
- self.start = start;
- self
- }
-
- /// Returns `self` with replaced `end`
- #[inline(always)]
- pub fn with_end(mut self, end: u128) -> Self {
- self.end = end;
- self
- }
-
- /// Returns `true` if `size` completely fills the range.
- #[inline]
- pub fn is_full_for(&self, size: Size) -> bool {
- let max_value = size.unsigned_int_max();
- debug_assert!(self.start <= max_value && self.end <= max_value);
- self.start == (self.end.wrapping_add(1) & max_value)
- }
-}
-
-impl fmt::Debug for WrappingRange {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- if self.start > self.end {
- write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
- } else {
- write!(fmt, "{}..={}", self.start, self.end)?;
- }
- Ok(())
- }
-}
-
-/// Information about one scalar component of a Rust type.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-#[derive(HashStable_Generic)]
-pub enum Scalar {
- Initialized {
- value: Primitive,
-
- // FIXME(eddyb) always use the shortest range, e.g., by finding
- // the largest space between two consecutive valid values and
- // taking everything else as the (shortest) valid range.
- valid_range: WrappingRange,
- },
- Union {
- /// Even for unions, we need to use the correct registers for the kind of
- /// values inside the union, so we keep the `Primitive` type around. We
- /// also use it to compute the size of the scalar.
- /// However, unions never have niches and even allow undef,
- /// so there is no `valid_range`.
- value: Primitive,
- },
-}
-
-impl Scalar {
- #[inline]
- pub fn is_bool(&self) -> bool {
- matches!(
- self,
- Scalar::Initialized {
- value: Int(I8, false),
- valid_range: WrappingRange { start: 0, end: 1 }
- }
- )
- }
-
- /// Get the primitive representation of this type, ignoring the valid range and whether the
- /// value is allowed to be undefined (due to being a union).
- pub fn primitive(&self) -> Primitive {
- match *self {
- Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
- }
- }
-
- pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
- self.primitive().align(cx)
- }
-
- pub fn size(self, cx: &impl HasDataLayout) -> Size {
- self.primitive().size(cx)
- }
-
- #[inline]
- pub fn to_union(&self) -> Self {
- Self::Union { value: self.primitive() }
- }
-
- #[inline]
- pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
- match *self {
- Scalar::Initialized { valid_range, .. } => valid_range,
- Scalar::Union { value } => WrappingRange::full(value.size(cx)),
- }
- }
-
- #[inline]
- /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
- pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
- match self {
- Scalar::Initialized { valid_range, .. } => valid_range,
- Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
- }
- }
-
- /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole layout
- #[inline]
- pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
- match *self {
- Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
- Scalar::Union { .. } => true,
- }
- }
-
- /// Returns `true` if this type can be left uninit.
- #[inline]
- pub fn is_uninit_valid(&self) -> bool {
- match *self {
- Scalar::Initialized { .. } => false,
- Scalar::Union { .. } => true,
- }
- }
-}
-
-/// Describes how the fields of a type are located in memory.
-#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum FieldsShape {
- /// Scalar primitives and `!`, which never have fields.
- Primitive,
-
- /// All fields start at no offset. The `usize` is the field count.
- Union(NonZeroUsize),
-
- /// Array/vector-like placement, with all fields of identical types.
- Array { stride: Size, count: u64 },
-
- /// Struct-like placement, with precomputed offsets.
- ///
- /// Fields are guaranteed to not overlap, but note that gaps
- /// before, between and after all the fields are NOT always
- /// padding, and as such their contents may not be discarded.
- /// For example, enum variants leave a gap at the start,
- /// where the discriminant field in the enum layout goes.
- Arbitrary {
- /// Offsets for the first byte of each field,
- /// ordered to match the source definition order.
- /// This vector does not go in increasing order.
- // FIXME(eddyb) use small vector optimization for the common case.
- offsets: Vec<Size>,
-
- /// Maps source order field indices to memory order indices,
- /// depending on how the fields were reordered (if at all).
- /// This is a permutation, with both the source order and the
- /// memory order using the same (0..n) index ranges.
- ///
- /// Note that during computation of `memory_index`, sometimes
- /// it is easier to operate on the inverse mapping (that is,
- /// from memory order to source order), and that is usually
- /// named `inverse_memory_index`.
- ///
- // FIXME(eddyb) build a better abstraction for permutations, if possible.
- // FIXME(camlorn) also consider small vector optimization here.
- memory_index: Vec<u32>,
- },
-}
-
-impl FieldsShape {
- #[inline]
- pub fn count(&self) -> usize {
- match *self {
- FieldsShape::Primitive => 0,
- FieldsShape::Union(count) => count.get(),
- FieldsShape::Array { count, .. } => count.try_into().unwrap(),
- FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
- }
- }
-
- #[inline]
- pub fn offset(&self, i: usize) -> Size {
- match *self {
- FieldsShape::Primitive => {
- unreachable!("FieldsShape::offset: `Primitive`s have no fields")
- }
- FieldsShape::Union(count) => {
- assert!(
- i < count.get(),
- "tried to access field {} of union with {} fields",
- i,
- count
- );
- Size::ZERO
- }
- FieldsShape::Array { stride, count } => {
- let i = u64::try_from(i).unwrap();
- assert!(i < count);
- stride * i
- }
- FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
- }
- }
-
- #[inline]
- pub fn memory_index(&self, i: usize) -> usize {
- match *self {
- FieldsShape::Primitive => {
- unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
- }
- FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
- FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
- }
- }
-
- /// Gets source indices of the fields by increasing offsets.
- #[inline]
- pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
- let mut inverse_small = [0u8; 64];
- let mut inverse_big = vec![];
- let use_small = self.count() <= inverse_small.len();
-
- // We have to write this logic twice in order to keep the array small.
- if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
- if use_small {
- for i in 0..self.count() {
- inverse_small[memory_index[i] as usize] = i as u8;
- }
- } else {
- inverse_big = vec![0; self.count()];
- for i in 0..self.count() {
- inverse_big[memory_index[i] as usize] = i as u32;
- }
- }
- }
-
- (0..self.count()).map(move |i| match *self {
- FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
- FieldsShape::Arbitrary { .. } => {
- if use_small {
- inverse_small[i] as usize
- } else {
- inverse_big[i] as usize
- }
- }
- })
- }
-}
-
-/// An identifier that specifies the address space that some operation
-/// should operate on. Special address spaces have an effect on code generation,
-/// depending on the target and the address spaces it implements.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub struct AddressSpace(pub u32);
-
-impl AddressSpace {
- /// The default address space, corresponding to data space.
- pub const DATA: Self = AddressSpace(0);
-}
-
-/// Describes how values of the type are passed by target ABIs,
-/// in terms of categories of C types there are ABI rules for.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum Abi {
- Uninhabited,
- Scalar(Scalar),
- ScalarPair(Scalar, Scalar),
- Vector {
- element: Scalar,
- count: u64,
- },
- Aggregate {
- /// If true, the size is exact, otherwise it's only a lower bound.
- sized: bool,
- },
-}
-
-impl Abi {
- /// Returns `true` if the layout corresponds to an unsized type.
- #[inline]
- pub fn is_unsized(&self) -> bool {
- match *self {
- Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
- Abi::Aggregate { sized } => !sized,
- }
- }
-
- /// Returns `true` if this is a single signed integer scalar
- #[inline]
- pub fn is_signed(&self) -> bool {
- match self {
- Abi::Scalar(scal) => match scal.primitive() {
- Primitive::Int(_, signed) => signed,
- _ => false,
- },
- _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
- }
- }
-
- /// Returns `true` if this is an uninhabited type
- #[inline]
- pub fn is_uninhabited(&self) -> bool {
- matches!(*self, Abi::Uninhabited)
- }
-
- /// Returns `true` is this is a scalar type
- #[inline]
- pub fn is_scalar(&self) -> bool {
- matches!(*self, Abi::Scalar(_))
- }
-}
-
rustc_index::newtype_index! {
pub struct VariantIdx {
derive [HashStable_Generic]
}
}
-#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum Variants<'a> {
- /// Single enum variants, structs/tuples, unions, and all non-ADTs.
- Single { index: VariantIdx },
-
- /// Enum-likes with more than one inhabited variant: each variant comes with
- /// a *discriminant* (usually the same as the variant index but the user can
- /// assign explicit discriminant values). That discriminant is encoded
- /// as a *tag* on the machine. The layout of each variant is
- /// a struct, and they all have space reserved for the tag.
- /// For enums, the tag is the sole field of the layout.
- Multiple {
- tag: Scalar,
- tag_encoding: TagEncoding,
- tag_field: usize,
- variants: IndexVec<VariantIdx, Layout<'a>>,
- },
-}
-
-#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum TagEncoding {
- /// The tag directly stores the discriminant, but possibly with a smaller layout
- /// (so converting the tag to the discriminant can require sign extension).
- Direct,
-
- /// Niche (values invalid for a type) encoding the discriminant:
- /// Discriminant and variant index coincide.
- /// The variant `untagged_variant` contains a niche at an arbitrary
- /// offset (field `tag_field` of the enum), which for a variant with
- /// discriminant `d` is set to
- /// `(d - niche_variants.start).wrapping_add(niche_start)`.
- ///
- /// For example, `Option<(usize, &T)>` is represented such that
- /// `None` has a null pointer for the second tuple field, and
- /// `Some` is the identity function (with a non-null reference).
- Niche {
- untagged_variant: VariantIdx,
- niche_variants: RangeInclusive<VariantIdx>,
- niche_start: u128,
- },
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub struct Niche {
- pub offset: Size,
- pub value: Primitive,
- pub valid_range: WrappingRange,
-}
-
-impl Niche {
- pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
- let Scalar::Initialized { value, valid_range } = scalar else { return None };
- let niche = Niche { offset, value, valid_range };
- if niche.available(cx) > 0 { Some(niche) } else { None }
- }
-
- pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
- let Self { value, valid_range: v, .. } = *self;
- let size = value.size(cx);
- assert!(size.bits() <= 128);
- let max_value = size.unsigned_int_max();
-
- // Find out how many values are outside the valid range.
- let niche = v.end.wrapping_add(1)..v.start;
- niche.end.wrapping_sub(niche.start) & max_value
- }
-
- pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
- assert!(count > 0);
-
- let Self { value, valid_range: v, .. } = *self;
- let size = value.size(cx);
- assert!(size.bits() <= 128);
- let max_value = size.unsigned_int_max();
-
- let niche = v.end.wrapping_add(1)..v.start;
- let available = niche.end.wrapping_sub(niche.start) & max_value;
- if count > available {
- return None;
- }
-
- // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
- // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
- // This is accomplished by preferring enums with 2 variants(`count==1`) and always taking the shortest path to niche zero.
- // Having `None` in niche zero can enable some special optimizations.
- //
- // Bound selection criteria:
- // 1. Select closest to zero given wrapping semantics.
- // 2. Avoid moving past zero if possible.
- //
- // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
- // If niche zero is already reserved, the selection of bounds are of little interest.
- let move_start = |v: WrappingRange| {
- let start = v.start.wrapping_sub(count) & max_value;
- Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
- };
- let move_end = |v: WrappingRange| {
- let start = v.end.wrapping_add(1) & max_value;
- let end = v.end.wrapping_add(count) & max_value;
- Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
- };
- let distance_end_zero = max_value - v.end;
- if v.start > v.end {
- // zero is unavailable because wrapping occurs
- move_end(v)
- } else if v.start <= distance_end_zero {
- if count <= v.start {
- move_start(v)
- } else {
- // moved past zero, use other bound
- move_end(v)
- }
- } else {
- let end = v.end.wrapping_add(count) & max_value;
- let overshot_zero = (1..=v.end).contains(&end);
- if overshot_zero {
- // moved past zero, use other bound
- move_start(v)
- } else {
- move_end(v)
- }
- }
- }
-}
-
-#[derive(PartialEq, Eq, Hash, HashStable_Generic)]
-pub struct LayoutS<'a> {
- /// Says where the fields are located within the layout.
- pub fields: FieldsShape,
-
- /// Encodes information about multi-variant layouts.
- /// Even with `Multiple` variants, a layout still has its own fields! Those are then
- /// shared between all variants. One of them will be the discriminant,
- /// but e.g. generators can have more.
- ///
- /// To access all fields of this layout, both `fields` and the fields of the active variant
- /// must be taken into account.
- pub variants: Variants<'a>,
-
- /// The `abi` defines how this data is passed between functions, and it defines
- /// value restrictions via `valid_range`.
- ///
- /// Note that this is entirely orthogonal to the recursive structure defined by
- /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
- /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
- /// have to be taken into account to find all fields of this layout.
- pub abi: Abi,
-
- /// The leaf scalar with the largest number of invalid values
- /// (i.e. outside of its `valid_range`), if it exists.
- pub largest_niche: Option<Niche>,
-
- pub align: AbiAndPrefAlign,
- pub size: Size,
-}
-
-impl<'a> LayoutS<'a> {
- pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
- let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
- let size = scalar.size(cx);
- let align = scalar.align(cx);
- LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Primitive,
- abi: Abi::Scalar(scalar),
- largest_niche,
- size,
- align,
- }
- }
-}
-
-impl<'a> fmt::Debug for LayoutS<'a> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- // This is how `Layout` used to print before it become
- // `Interned<LayoutS>`. We print it like this to avoid having to update
- // expected output in a lot of tests.
- let LayoutS { size, align, abi, fields, largest_niche, variants } = self;
- f.debug_struct("Layout")
- .field("size", size)
- .field("align", align)
- .field("abi", abi)
- .field("fields", fields)
- .field("largest_niche", largest_niche)
- .field("variants", variants)
- .finish()
- }
-}
-
#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
#[rustc_pass_by_value]
-pub struct Layout<'a>(pub Interned<'a, LayoutS<'a>>);
+pub struct Layout<'a>(pub Interned<'a, LayoutS<VariantIdx>>);
impl<'a> fmt::Debug for Layout<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -1319,7 +41,7 @@ impl<'a> Layout<'a> {
&self.0.0.fields
}
- pub fn variants(self) -> &'a Variants<'a> {
+ pub fn variants(self) -> &'a Variants<VariantIdx> {
&self.0.0.variants
}
@@ -1354,47 +76,12 @@ pub struct TyAndLayout<'a, Ty> {
}
impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
- type Target = &'a LayoutS<'a>;
- fn deref(&self) -> &&'a LayoutS<'a> {
+ type Target = &'a LayoutS<VariantIdx>;
+ fn deref(&self) -> &&'a LayoutS<VariantIdx> {
&self.layout.0.0
}
}
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum PointerKind {
- /// Most general case, we know no restrictions to tell LLVM.
- SharedMutable,
-
- /// `&T` where `T` contains no `UnsafeCell`, is `dereferenceable`, `noalias` and `readonly`.
- Frozen,
-
- /// `&mut T` which is `dereferenceable` and `noalias` but not `readonly`.
- UniqueBorrowed,
-
- /// `&mut !Unpin`, which is `dereferenceable` but neither `noalias` nor `readonly`.
- UniqueBorrowedPinned,
-
- /// `Box<T>`, which is `noalias` (even on return types, unlike the above) but neither `readonly`
- /// nor `dereferenceable`.
- UniqueOwned,
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct PointeeInfo {
- pub size: Size,
- pub align: Align,
- pub safe: Option<PointerKind>,
- pub address_space: AddressSpace,
-}
-
-/// Used in `might_permit_raw_init` to indicate the kind of initialisation
-/// that is checked to be valid
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum InitKind {
- Zero,
- UninitMitigated0x01Fill,
-}
-
/// Trait that needs to be implemented by the higher-level type representation
/// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
pub trait TyAbiInterface<'a, C>: Sized {
@@ -1490,6 +177,11 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
self.abi.is_unsized()
}
+ #[inline]
+ pub fn is_sized(&self) -> bool {
+ self.abi.is_sized()
+ }
+
/// Returns `true` if the type is a ZST and not unsized.
pub fn is_zst(&self) -> bool {
match self.abi {
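With the bulk of this file deleted in favor of `pub use rustc_abi::*;`,
the layout primitives now live in the new `rustc_abi` crate, but existing
import paths keep working through the re-export (a sketch; the exact set
of re-exported items is whatever `rustc_abi` defines):

    // Still resolves after the move, via the re-export in rustc_target::abi.
    use rustc_target::abi::{Align, Endian, Size, TargetDataLayout};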