author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit     698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree       173a775858bd501c378080a10dca74132f05bc50 /compiler/rustc_target/src/abi
parent     Initial commit. (diff)
Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_target/src/abi')
-rw-r--r--  compiler/rustc_target/src/abi/call/aarch64.rs      86
-rw-r--r--  compiler/rustc_target/src/abi/call/amdgpu.rs       35
-rw-r--r--  compiler/rustc_target/src/abi/call/arm.rs          97
-rw-r--r--  compiler/rustc_target/src/abi/call/avr.rs          59
-rw-r--r--  compiler/rustc_target/src/abi/call/bpf.rs          31
-rw-r--r--  compiler/rustc_target/src/abi/call/hexagon.rs      30
-rw-r--r--  compiler/rustc_target/src/abi/call/m68k.rs         30
-rw-r--r--  compiler/rustc_target/src/abi/call/mips.rs         51
-rw-r--r--  compiler/rustc_target/src/abi/call/mips64.rs      167
-rw-r--r--  compiler/rustc_target/src/abi/call/mod.rs         734
-rw-r--r--  compiler/rustc_target/src/abi/call/msp430.rs       39
-rw-r--r--  compiler/rustc_target/src/abi/call/nvptx.rs        33
-rw-r--r--  compiler/rustc_target/src/abi/call/nvptx64.rs      64
-rw-r--r--  compiler/rustc_target/src/abi/call/powerpc.rs      30
-rw-r--r--  compiler/rustc_target/src/abi/call/powerpc64.rs   141
-rw-r--r--  compiler/rustc_target/src/abi/call/riscv.rs       348
-rw-r--r--  compiler/rustc_target/src/abi/call/s390x.rs        57
-rw-r--r--  compiler/rustc_target/src/abi/call/sparc.rs        51
-rw-r--r--  compiler/rustc_target/src/abi/call/sparc64.rs     226
-rw-r--r--  compiler/rustc_target/src/abi/call/wasm.rs         83
-rw-r--r--  compiler/rustc_target/src/abi/call/x86.rs         117
-rw-r--r--  compiler/rustc_target/src/abi/call/x86_64.rs      248
-rw-r--r--  compiler/rustc_target/src/abi/call/x86_win64.rs    40
-rw-r--r--  compiler/rustc_target/src/abi/mod.rs             1558
24 files changed, 4355 insertions, 0 deletions
diff --git a/compiler/rustc_target/src/abi/call/aarch64.rs b/compiler/rustc_target/src/abi/call/aarch64.rs
new file mode 100644
index 000000000..4613a459c
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/aarch64.rs
@@ -0,0 +1,86 @@
+use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
+ let size = arg.layout.size;
+
+ // Ensure we have at most four uniquely addressable members.
+ if size > unit.size.checked_mul(4, cx).unwrap() {
+ return None;
+ }
+
+ let valid_unit = match unit.kind {
+ RegKind::Integer => false,
+ RegKind::Float => true,
+ RegKind::Vector => size.bits() == 64 || size.bits() == 128,
+ };
+
+ valid_unit.then_some(Uniform { unit, total: size })
+ })
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(32);
+ return;
+ }
+ if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
+ ret.cast_to(uniform);
+ return;
+ }
+ let size = ret.layout.size;
+ let bits = size.bits();
+ if bits <= 128 {
+ ret.cast_to(Uniform { unit: Reg::i64(), total: size });
+ return;
+ }
+ ret.make_indirect();
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ arg.extend_integer_width_to(32);
+ return;
+ }
+ if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
+ arg.cast_to(uniform);
+ return;
+ }
+ let size = arg.layout.size;
+ let bits = size.bits();
+ if bits <= 128 {
+ arg.cast_to(Uniform { unit: Reg::i64(), total: size });
+ return;
+ }
+ arg.make_indirect();
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
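
Editor's note: an illustrative sketch, not part of the commit, of how the AArch64 rules above classify a few `#[repr(C)]` types. The struct names are invented for the example.

#[repr(C)]
struct Hfa {
    a: f64,
    b: f64,
    c: f64,
    d: f64,
}
// Four float members of one kind (32 bytes): is_homogeneous_aggregate
// returns Uniform { unit: f64, total: 32 bytes }, so the value travels in
// four FP registers.

#[repr(C)]
struct Mixed {
    a: f64,
    b: u64,
}
// Float and integer leaves are not homogeneous; at 16 bytes (<= 128 bits)
// the value is cast to Uniform { unit: i64, total: 16 bytes }, i.e. two GPRs.

#[repr(C)]
struct Big {
    words: [u64; 3],
}
// 24 bytes (> 128 bits) and not an HFA: classify_arg/classify_ret make it
// indirect (passed behind a hidden pointer).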
diff --git a/compiler/rustc_target/src/abi/call/amdgpu.rs b/compiler/rustc_target/src/abi/call/amdgpu.rs
new file mode 100644
index 000000000..9be97476c
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/amdgpu.rs
@@ -0,0 +1,35 @@
+use crate::abi::call::{ArgAbi, FnAbi};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
+fn classify_ret<'a, Ty, C>(_cx: &C, ret: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ ret.extend_integer_width_to(32);
+}
+
+fn classify_arg<'a, Ty, C>(_cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.extend_integer_width_to(32);
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/arm.rs b/compiler/rustc_target/src/abi/call/arm.rs
new file mode 100644
index 000000000..e66c2132b
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/arm.rs
@@ -0,0 +1,97 @@
+use crate::abi::call::{ArgAbi, Conv, FnAbi, Reg, RegKind, Uniform};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+use crate::spec::HasTargetSpec;
+
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
+ let size = arg.layout.size;
+
+ // Ensure we have at most four uniquely addressable members.
+ if size > unit.size.checked_mul(4, cx).unwrap() {
+ return None;
+ }
+
+ let valid_unit = match unit.kind {
+ RegKind::Integer => false,
+ RegKind::Float => true,
+ RegKind::Vector => size.bits() == 64 || size.bits() == 128,
+ };
+
+ valid_unit.then_some(Uniform { unit, total: size })
+ })
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, vfp: bool)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(32);
+ return;
+ }
+
+ if vfp {
+ if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
+ ret.cast_to(uniform);
+ return;
+ }
+ }
+
+ let size = ret.layout.size;
+ let bits = size.bits();
+ if bits <= 32 {
+ ret.cast_to(Uniform { unit: Reg::i32(), total: size });
+ return;
+ }
+ ret.make_indirect();
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, vfp: bool)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ arg.extend_integer_width_to(32);
+ return;
+ }
+
+ if vfp {
+ if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
+ arg.cast_to(uniform);
+ return;
+ }
+ }
+
+ let align = arg.layout.align.abi.bytes();
+ let total = arg.layout.size;
+ arg.cast_to(Uniform { unit: if align <= 4 { Reg::i32() } else { Reg::i64() }, total });
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ // If this is a target with a hard-float ABI, and the function is not explicitly
+ // `extern "aapcs"`, then we must use the VFP registers for homogeneous aggregates.
+ let vfp = cx.target_spec().llvm_target.ends_with("hf")
+ && fn_abi.conv != Conv::ArmAapcs
+ && !fn_abi.c_variadic;
+
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret, vfp);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, vfp);
+ }
+}
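
Editor's note: a sketch, not part of the commit, of how the `vfp` flag above changes classification. `armv7-unknown-linux-gnueabihf` is assumed here only as an example of a triple ending in "hf".

#[repr(C)]
struct Vec2 {
    x: f32,
    y: f32,
}
// Hard-float target (triple ends in "hf"), non-"aapcs", non-variadic call:
// vfp is true, the HFA check succeeds, and Vec2 is cast to
// Uniform { unit: f32, total: 8 bytes }, staying in VFP registers.
//
// With `extern "aapcs"`, a variadic call, or a soft-float target, the HFA
// path is skipped: as an argument Vec2 (align 4) becomes
// Uniform { unit: i32, total: 8 bytes }, and as a return value it is made
// indirect because it is larger than 32 bits.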
diff --git a/compiler/rustc_target/src/abi/call/avr.rs b/compiler/rustc_target/src/abi/call/avr.rs
new file mode 100644
index 000000000..c1f7a1e3a
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/avr.rs
@@ -0,0 +1,59 @@
+//! LLVM-frontend specific AVR calling convention implementation.
+//!
+//! # Current calling convention ABI
+//!
+//! Inherited from Clang's `clang::DefaultABIInfo` implementation - self-described
+//! as
+//!
+//! > the default implementation for ABI specific details. This implementation
+//! > provides information which results in
+//! > self-consistent and sensible LLVM IR generation, but does not
+//! > conform to any particular ABI.
+//! >
+//! > - Doxygen documentation of `clang::DefaultABIInfo`
+//!
+//! This calling convention may not match AVR-GCC in all cases.
+//!
+//! In the future, an AVR-GCC compatible argument classification ABI should be
+//! adopted in both Rust and Clang.
+//!
+//! *NOTE*: Currently, this module implements the same calling convention
+//! that clang with AVR currently does - the default, simple, unspecialized
+//! ABI implementation available to all targets. This ABI is not
+//! binary-compatible with AVR-GCC. Once LLVM [PR46140](https://bugs.llvm.org/show_bug.cgi?id=46140)
+//! is completed, this module should be updated to match so that both Clang
+//! and Rust emit code to the same AVR-GCC compatible ABI.
+//!
+//! In particular, both Clang and Rust may not have the same semantics
+//! when promoting arguments to indirect references as AVR-GCC. It is important
+//! to note that the core AVR ABI implementation within LLVM itself is ABI
+//! compatible with AVR-GCC - Rust and AVR-GCC only differ in the small amount
+//! of compiler frontend specific calling convention logic implemented here.
+
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret_ty<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg_ty<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() {
+ arg.make_indirect();
+ }
+}
+
+pub fn compute_abi_info<Ty>(fty: &mut FnAbi<'_, Ty>) {
+ if !fty.ret.is_ignore() {
+ classify_ret_ty(&mut fty.ret);
+ }
+
+ for arg in &mut fty.args {
+ if arg.is_ignore() {
+ continue;
+ }
+
+ classify_arg_ty(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/bpf.rs b/compiler/rustc_target/src/abi/call/bpf.rs
new file mode 100644
index 000000000..466c52553
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/bpf.rs
@@ -0,0 +1,31 @@
+// see https://github.com/llvm/llvm-project/blob/main/llvm/lib/Target/BPF/BPFCallingConv.td
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() || ret.layout.size.bits() > 64 {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() || arg.layout.size.bits() > 64 {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/hexagon.rs b/compiler/rustc_target/src/abi/call/hexagon.rs
new file mode 100644
index 000000000..8028443b8
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/hexagon.rs
@@ -0,0 +1,30 @@
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/m68k.rs b/compiler/rustc_target/src/abi/call/m68k.rs
new file mode 100644
index 000000000..58fdc00b6
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/m68k.rs
@@ -0,0 +1,30 @@
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() {
+ arg.make_indirect_byval();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/mips.rs b/compiler/rustc_target/src/abi/call/mips.rs
new file mode 100644
index 000000000..cc4431976
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/mips.rs
@@ -0,0 +1,51 @@
+use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
+use crate::abi::{HasDataLayout, Size};
+
+fn classify_ret<Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
+where
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(32);
+ } else {
+ ret.make_indirect();
+ *offset += cx.data_layout().pointer_size;
+ }
+}
+
+fn classify_arg<Ty, C>(cx: &C, arg: &mut ArgAbi<'_, Ty>, offset: &mut Size)
+where
+ C: HasDataLayout,
+{
+ let dl = cx.data_layout();
+ let size = arg.layout.size;
+ let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
+
+ if arg.layout.is_aggregate() {
+ arg.cast_to(Uniform { unit: Reg::i32(), total: size });
+ if !offset.is_aligned(align) {
+ arg.pad_with(Reg::i32());
+ }
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+
+ *offset = offset.align_to(align) + size.align_to(align);
+}
+
+pub fn compute_abi_info<Ty, C>(cx: &C, fn_abi: &mut FnAbi<'_, Ty>)
+where
+ C: HasDataLayout,
+{
+ let mut offset = Size::ZERO;
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret, &mut offset);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, &mut offset);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/mips64.rs b/compiler/rustc_target/src/abi/call/mips64.rs
new file mode 100644
index 000000000..cd54167aa
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/mips64.rs
@@ -0,0 +1,167 @@
+use crate::abi::call::{
+ ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, PassMode, Reg, Uniform,
+};
+use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};
+
+fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
+ // Always sign extend u32 values on 64-bit mips
+ if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+ if let abi::Int(i, signed) = scalar.primitive() {
+ if !signed && i.size().bits() == 32 {
+ if let PassMode::Direct(ref mut attrs) = arg.mode {
+ attrs.ext(ArgExtension::Sext);
+ return;
+ }
+ }
+ }
+ }
+
+ arg.extend_integer_width_to(bits);
+}
+
+fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgAbi<'a, Ty>, i: usize) -> Option<Reg>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ match ret.layout.field(cx, i).abi {
+ abi::Abi::Scalar(scalar) => match scalar.primitive() {
+ abi::F32 => Some(Reg::f32()),
+ abi::F64 => Some(Reg::f64()),
+ _ => None,
+ },
+ _ => None,
+ }
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ extend_integer_width_mips(ret, 64);
+ return;
+ }
+
+ let size = ret.layout.size;
+ let bits = size.bits();
+ if bits <= 128 {
+ // Unlike other architectures which return aggregates in registers, MIPS n64 limits the
+ // use of float registers to structures (not unions) containing exactly one or two
+ // float fields.
+
+ if let abi::FieldsShape::Arbitrary { .. } = ret.layout.fields {
+ if ret.layout.fields.count() == 1 {
+ if let Some(reg) = float_reg(cx, ret, 0) {
+ ret.cast_to(reg);
+ return;
+ }
+ } else if ret.layout.fields.count() == 2 {
+ if let Some(reg0) = float_reg(cx, ret, 0) {
+ if let Some(reg1) = float_reg(cx, ret, 1) {
+ ret.cast_to(CastTarget::pair(reg0, reg1));
+ return;
+ }
+ }
+ }
+ }
+
+ // Cast to a uniform int structure
+ ret.cast_to(Uniform { unit: Reg::i64(), total: size });
+ } else {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ extend_integer_width_mips(arg, 64);
+ return;
+ }
+
+ let dl = cx.data_layout();
+ let size = arg.layout.size;
+ let mut prefix = [None; 8];
+ let mut prefix_index = 0;
+
+ match arg.layout.fields {
+ abi::FieldsShape::Primitive => unreachable!(),
+ abi::FieldsShape::Array { .. } => {
+ // Arrays are passed indirectly
+ arg.make_indirect();
+ return;
+ }
+ abi::FieldsShape::Union(_) => {
+ // Unions are always treated as a series of 64-bit integer chunks
+ }
+ abi::FieldsShape::Arbitrary { .. } => {
+ // Structures are split up into a series of 64-bit integer chunks, but any aligned
+ // doubles not part of another aggregate are passed as floats.
+ let mut last_offset = Size::ZERO;
+
+ for i in 0..arg.layout.fields.count() {
+ let field = arg.layout.field(cx, i);
+ let offset = arg.layout.fields.offset(i);
+
+ // We only care about aligned doubles
+ if let abi::Abi::Scalar(scalar) = field.abi {
+ if let abi::F64 = scalar.primitive() {
+ if offset.is_aligned(dl.f64_align.abi) {
+ // Insert enough integers to cover [last_offset, offset)
+ assert!(last_offset.is_aligned(dl.f64_align.abi));
+ for _ in 0..((offset - last_offset).bits() / 64)
+ .min((prefix.len() - prefix_index) as u64)
+ {
+ prefix[prefix_index] = Some(Reg::i64());
+ prefix_index += 1;
+ }
+
+ if prefix_index == prefix.len() {
+ break;
+ }
+
+ prefix[prefix_index] = Some(Reg::f64());
+ prefix_index += 1;
+ last_offset = offset + Reg::f64().size;
+ }
+ }
+ }
+ }
+ }
+ };
+
+ // Extract first 8 chunks as the prefix
+ let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
+ arg.cast_to(CastTarget {
+ prefix,
+ rest: Uniform { unit: Reg::i64(), total: rest_size },
+ attrs: ArgAttributes {
+ regular: ArgAttribute::default(),
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ },
+ });
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
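
Editor's note: a sketch, not part of the commit, of the n64 float-register special cases implemented above; the names are invented.

#[repr(C)]
struct Complex64 {
    re: f64,
    im: f64,
}
// A struct with exactly two float fields: classify_ret uses
// CastTarget::pair(f64, f64), so the value is returned in two FP registers.

#[repr(C)]
union Either {
    a: f64,
    b: u64,
}
// Unions never take the float path; an 8-byte union is returned as
// Uniform { unit: i64, total: 8 bytes }.

#[repr(C)]
struct Interleaved {
    a: f64,
    b: u64,
    c: f64,
}
// As an argument, the aligned doubles at offsets 0 and 16 become f64 chunks
// and the u64 between them becomes an i64 chunk, giving a cast prefix of
// roughly [f64, i64, f64].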
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
new file mode 100644
index 000000000..577126a95
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -0,0 +1,734 @@
+use crate::abi::{self, Abi, Align, FieldsShape, Size};
+use crate::abi::{HasDataLayout, TyAbiInterface, TyAndLayout};
+use crate::spec::{self, HasTargetSpec};
+use rustc_span::Symbol;
+use std::fmt;
+
+mod aarch64;
+mod amdgpu;
+mod arm;
+mod avr;
+mod bpf;
+mod hexagon;
+mod m68k;
+mod mips;
+mod mips64;
+mod msp430;
+mod nvptx;
+mod nvptx64;
+mod powerpc;
+mod powerpc64;
+mod riscv;
+mod s390x;
+mod sparc;
+mod sparc64;
+mod wasm;
+mod x86;
+mod x86_64;
+mod x86_win64;
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum PassMode {
+ /// Ignore the argument.
+ ///
+ /// The argument is either uninhabited or a ZST.
+ Ignore,
+ /// Pass the argument directly.
+ ///
+ /// The argument has a layout abi of `Scalar`, `Vector` or in rare cases `Aggregate`.
+ Direct(ArgAttributes),
+ /// Pass a pair's elements directly in two arguments.
+ ///
+ /// The argument has a layout abi of `ScalarPair`.
+ Pair(ArgAttributes, ArgAttributes),
+ /// Pass the argument after casting it, to either
+ /// a single uniform or a pair of registers.
+ Cast(CastTarget),
+ /// Pass the argument indirectly via a hidden pointer.
+ /// The `extra_attrs` value, if any, is for the extra data (vtable or length)
+ /// which indicates that it refers to an unsized rvalue.
+ /// `on_stack` defines that the the value should be passed at a fixed
+ /// stack offset in accordance to the ABI rather than passed using a
+ /// pointer. This corresponds to the `byval` LLVM argument attribute.
+ Indirect { attrs: ArgAttributes, extra_attrs: Option<ArgAttributes>, on_stack: bool },
+}
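
// Editorial sketch (not part of this commit): a rough mapping from Rust types
// to the PassMode variants above, on a typical 64-bit target and before any
// target-specific adjustment. The names below are invented for illustration.

struct Zst;                        // zero-sized / uninhabited -> PassMode::Ignore
type Scalar = u64;                 // Abi::Scalar              -> PassMode::Direct(..)
type PtrAndLen<'a> = &'a [u8];     // Abi::ScalarPair          -> PassMode::Pair(.., ..)

#[repr(C)]
struct Small {                     // small aggregate -> often PassMode::Cast(..),
    a: f64,                        // see the per-target modules below
    b: u64,
}

#[repr(C)]
struct Large {                     // large aggregate -> PassMode::Indirect { .. }
    bytes: [u8; 256],
}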
+
+// Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
+// of this module
+pub use attr_impl::ArgAttribute;
+
+#[allow(non_upper_case_globals)]
+#[allow(unused)]
+mod attr_impl {
+ // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
+ bitflags::bitflags! {
+ #[derive(Default, HashStable_Generic)]
+ pub struct ArgAttribute: u16 {
+ const NoAlias = 1 << 1;
+ const NoCapture = 1 << 2;
+ const NonNull = 1 << 3;
+ const ReadOnly = 1 << 4;
+ const InReg = 1 << 5;
+ // Due to past miscompiles in LLVM, we use a separate attribute for
+ // &mut arguments, so that the codegen backend can decide whether
+ // or not to actually emit the attribute. It can also be controlled
+ // with the `-Zmutable-noalias` debugging option.
+ const NoAliasMutRef = 1 << 6;
+ const NoUndef = 1 << 7;
+ }
+ }
+}
+
+/// Sometimes an ABI requires small integers to be extended to a full or partial register. This enum
+/// defines if this extension should be zero-extension or sign-extension when necessary. When it is
+/// not necessary to extend the argument, this enum is ignored.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum ArgExtension {
+ None,
+ Zext,
+ Sext,
+}
+
+/// A compact representation of LLVM attributes (at least those relevant for this module)
+/// that can be manipulated without interacting with LLVM's Attribute machinery.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct ArgAttributes {
+ pub regular: ArgAttribute,
+ pub arg_ext: ArgExtension,
+ /// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call
+ /// (corresponding to LLVM's dereferenceable and dereferenceable_or_null attributes).
+ pub pointee_size: Size,
+ pub pointee_align: Option<Align>,
+}
+
+impl ArgAttributes {
+ pub fn new() -> Self {
+ ArgAttributes {
+ regular: ArgAttribute::default(),
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ }
+ }
+
+ pub fn ext(&mut self, ext: ArgExtension) -> &mut Self {
+ assert!(
+ self.arg_ext == ArgExtension::None || self.arg_ext == ext,
+ "cannot set {:?} when {:?} is already set",
+ ext,
+ self.arg_ext
+ );
+ self.arg_ext = ext;
+ self
+ }
+
+ pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
+ self.regular |= attr;
+ self
+ }
+
+ pub fn contains(&self, attr: ArgAttribute) -> bool {
+ self.regular.contains(attr)
+ }
+}
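
// Editorial sketch (not part of this commit): typical use of the builder
// methods above. A by-reference argument records pointer guarantees; a small
// unsigned scalar records a zero-extension.
fn example_attrs() -> (ArgAttributes, ArgAttributes) {
    let mut by_ref = ArgAttributes::new();
    // The pointee is unaliased, never null and fully initialized.
    by_ref.set(ArgAttribute::NoAlias).set(ArgAttribute::NonNull).set(ArgAttribute::NoUndef);
    debug_assert!(by_ref.contains(ArgAttribute::NoAlias));

    let mut small_unsigned = ArgAttributes::new();
    // `ext` may only be set once; a conflicting second request trips its assert.
    small_unsigned.ext(ArgExtension::Zext);

    (by_ref, small_unsigned)
}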
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum RegKind {
+ Integer,
+ Float,
+ Vector,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct Reg {
+ pub kind: RegKind,
+ pub size: Size,
+}
+
+macro_rules! reg_ctor {
+ ($name:ident, $kind:ident, $bits:expr) => {
+ pub fn $name() -> Reg {
+ Reg { kind: RegKind::$kind, size: Size::from_bits($bits) }
+ }
+ };
+}
+
+impl Reg {
+ reg_ctor!(i8, Integer, 8);
+ reg_ctor!(i16, Integer, 16);
+ reg_ctor!(i32, Integer, 32);
+ reg_ctor!(i64, Integer, 64);
+ reg_ctor!(i128, Integer, 128);
+
+ reg_ctor!(f32, Float, 32);
+ reg_ctor!(f64, Float, 64);
+}
+
+impl Reg {
+ pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
+ let dl = cx.data_layout();
+ match self.kind {
+ RegKind::Integer => match self.size.bits() {
+ 1 => dl.i1_align.abi,
+ 2..=8 => dl.i8_align.abi,
+ 9..=16 => dl.i16_align.abi,
+ 17..=32 => dl.i32_align.abi,
+ 33..=64 => dl.i64_align.abi,
+ 65..=128 => dl.i128_align.abi,
+ _ => panic!("unsupported integer: {:?}", self),
+ },
+ RegKind::Float => match self.size.bits() {
+ 32 => dl.f32_align.abi,
+ 64 => dl.f64_align.abi,
+ _ => panic!("unsupported float: {:?}", self),
+ },
+ RegKind::Vector => dl.vector_align(self.size).abi,
+ }
+ }
+}
+
+/// An argument passed entirely in registers of the
+/// same kind (e.g., HFA / HVA on PPC64 and AArch64).
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct Uniform {
+ pub unit: Reg,
+
+ /// The total size of the argument, which can be:
+ /// * equal to `unit.size` (one scalar/vector),
+ /// * a multiple of `unit.size` (an array of scalar/vectors),
+ /// * if `unit.kind` is `Integer`, the last element
+ /// can be shorter, i.e., `{ i64, i64, i32 }` for
+ /// 64-bit integers with a total size of 20 bytes.
+ pub total: Size,
+}
+
+impl From<Reg> for Uniform {
+ fn from(unit: Reg) -> Uniform {
+ Uniform { unit, total: unit.size }
+ }
+}
+
+impl Uniform {
+ pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
+ self.unit.align(cx)
+ }
+}
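
// Editorial sketch (not part of this commit): two ways of building a Uniform
// with the items defined above.
fn example_uniform() {
    // A 20-byte aggregate described as 64-bit integer chunks; per the doc
    // comment on `total`, the last unit may be shorter, so this corresponds
    // to an LLVM type like `{ i64, i64, i32 }`.
    let _chunks = Uniform { unit: Reg::i64(), total: Size::from_bytes(20) };

    // A single register converts into a Uniform of exactly its own size.
    let single: Uniform = Reg::f64().into();
    assert_eq!(single.total, Reg::f64().size);
}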
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct CastTarget {
+ pub prefix: [Option<Reg>; 8],
+ pub rest: Uniform,
+ pub attrs: ArgAttributes,
+}
+
+impl From<Reg> for CastTarget {
+ fn from(unit: Reg) -> CastTarget {
+ CastTarget::from(Uniform::from(unit))
+ }
+}
+
+impl From<Uniform> for CastTarget {
+ fn from(uniform: Uniform) -> CastTarget {
+ CastTarget {
+ prefix: [None; 8],
+ rest: uniform,
+ attrs: ArgAttributes {
+ regular: ArgAttribute::default(),
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ },
+ }
+ }
+}
+
+impl CastTarget {
+ pub fn pair(a: Reg, b: Reg) -> CastTarget {
+ CastTarget {
+ prefix: [Some(a), None, None, None, None, None, None, None],
+ rest: Uniform::from(b),
+ attrs: ArgAttributes {
+ regular: ArgAttribute::default(),
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ },
+ }
+ }
+
+ pub fn size<C: HasDataLayout>(&self, _cx: &C) -> Size {
+ let mut size = self.rest.total;
+ for i in 0..self.prefix.iter().count() {
+ match self.prefix[i] {
+ Some(v) => size += Size { raw: v.size.bytes() },
+ None => {}
+ }
+ }
+ return size;
+ }
+
+ pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
+ self.prefix
+ .iter()
+ .filter_map(|x| x.map(|reg| reg.align(cx)))
+ .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| {
+ acc.max(align)
+ })
+ }
+}
+
+/// Return value from the `homogeneous_aggregate` test function.
+#[derive(Copy, Clone, Debug)]
+pub enum HomogeneousAggregate {
+ /// Yes, all the "leaf fields" of this struct are passed in the
+ /// same way (specified in the `Reg` value).
+ Homogeneous(Reg),
+
+ /// There are no leaf fields at all.
+ NoData,
+}
+
+/// Error from the `homogeneous_aggregate` test function, indicating
+/// there are distinct leaf fields passed in different ways,
+/// or this is uninhabited.
+#[derive(Copy, Clone, Debug)]
+pub struct Heterogeneous;
+
+impl HomogeneousAggregate {
+ /// If this is a homogeneous aggregate, returns the homogeneous
+ /// unit, else `None`.
+ pub fn unit(self) -> Option<Reg> {
+ match self {
+ HomogeneousAggregate::Homogeneous(reg) => Some(reg),
+ HomogeneousAggregate::NoData => None,
+ }
+ }
+
+ /// Try to combine two `HomogeneousAggregate`s, e.g. from two fields in
+ /// the same `struct`. Only succeeds if only one of them has any data,
+ /// or both units are identical.
+ fn merge(self, other: HomogeneousAggregate) -> Result<HomogeneousAggregate, Heterogeneous> {
+ match (self, other) {
+ (x, HomogeneousAggregate::NoData) | (HomogeneousAggregate::NoData, x) => Ok(x),
+
+ (HomogeneousAggregate::Homogeneous(a), HomogeneousAggregate::Homogeneous(b)) => {
+ if a != b {
+ return Err(Heterogeneous);
+ }
+ Ok(self)
+ }
+ }
+ }
+}
+
+impl<'a, Ty> TyAndLayout<'a, Ty> {
+ fn is_aggregate(&self) -> bool {
+ match self.abi {
+ Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } => false,
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => true,
+ }
+ }
+
+ /// Returns `Homogeneous` if this layout is an aggregate containing fields of
+ /// only a single type (e.g., `(u32, u32)`). Such aggregates are often
+ /// special-cased in ABIs.
+ ///
+ /// Note: We generally ignore fields of zero-sized type when computing
+ /// this value (see #56877).
+ ///
+ /// This is public so that it can be used in unit tests, but
+ /// should generally only be relevant to the ABI details of
+ /// specific targets.
+ pub fn homogeneous_aggregate<C>(&self, cx: &C) -> Result<HomogeneousAggregate, Heterogeneous>
+ where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ {
+ match self.abi {
+ Abi::Uninhabited => Err(Heterogeneous),
+
+ // The primitive for this algorithm.
+ Abi::Scalar(scalar) => {
+ let kind = match scalar.primitive() {
+ abi::Int(..) | abi::Pointer => RegKind::Integer,
+ abi::F32 | abi::F64 => RegKind::Float,
+ };
+ Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))
+ }
+
+ Abi::Vector { .. } => {
+ assert!(!self.is_zst());
+ Ok(HomogeneousAggregate::Homogeneous(Reg {
+ kind: RegKind::Vector,
+ size: self.size,
+ }))
+ }
+
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
+ // Helper for computing `homogeneous_aggregate`, allowing a custom
+ // starting offset (used below for handling variants).
+ let from_fields_at =
+ |layout: Self,
+ start: Size|
+ -> Result<(HomogeneousAggregate, Size), Heterogeneous> {
+ let is_union = match layout.fields {
+ FieldsShape::Primitive => {
+ unreachable!("aggregates can't have `FieldsShape::Primitive`")
+ }
+ FieldsShape::Array { count, .. } => {
+ assert_eq!(start, Size::ZERO);
+
+ let result = if count > 0 {
+ layout.field(cx, 0).homogeneous_aggregate(cx)?
+ } else {
+ HomogeneousAggregate::NoData
+ };
+ return Ok((result, layout.size));
+ }
+ FieldsShape::Union(_) => true,
+ FieldsShape::Arbitrary { .. } => false,
+ };
+
+ let mut result = HomogeneousAggregate::NoData;
+ let mut total = start;
+
+ for i in 0..layout.fields.count() {
+ if !is_union && total != layout.fields.offset(i) {
+ return Err(Heterogeneous);
+ }
+
+ let field = layout.field(cx, i);
+
+ result = result.merge(field.homogeneous_aggregate(cx)?)?;
+
+ // Keep track of the offset (without padding).
+ let size = field.size;
+ if is_union {
+ total = total.max(size);
+ } else {
+ total += size;
+ }
+ }
+
+ Ok((result, total))
+ };
+
+ let (mut result, mut total) = from_fields_at(*self, Size::ZERO)?;
+
+ match &self.variants {
+ abi::Variants::Single { .. } => {}
+ abi::Variants::Multiple { variants, .. } => {
+ // Treat enum variants like union members.
+ // HACK(eddyb) pretend the `enum` field (discriminant)
+ // is at the start of every variant (otherwise the gap
+ // at the start of all variants would disqualify them).
+ //
+ // NB: for all tagged `enum`s (which include all non-C-like
+ // `enum`s with defined FFI representation), this will
+ // match the homogeneous computation on the equivalent
+ // `struct { tag; union { variant1; ... } }` and/or
+ // `union { struct { tag; variant1; } ... }`
+ // (the offsets of variant fields should be identical
+ // between the two for either to be a homogeneous aggregate).
+ let variant_start = total;
+ for variant_idx in variants.indices() {
+ let (variant_result, variant_total) =
+ from_fields_at(self.for_variant(cx, variant_idx), variant_start)?;
+
+ result = result.merge(variant_result)?;
+ total = total.max(variant_total);
+ }
+ }
+ }
+
+ // There needs to be no padding.
+ if total != self.size {
+ Err(Heterogeneous)
+ } else {
+ match result {
+ HomogeneousAggregate::Homogeneous(_) => {
+ assert_ne!(total, Size::ZERO);
+ }
+ HomogeneousAggregate::NoData => {
+ assert_eq!(total, Size::ZERO);
+ }
+ }
+ Ok(result)
+ }
+ }
+ }
+ }
+}
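
// Editorial sketch (not part of this commit): the kinds of types the check
// above accepts or rejects. The structs are invented for illustration.

#[repr(C)]
struct AllF32 {
    a: f32,
    b: f32,
    c: f32,
}
// Every leaf field is an f32 and there is no padding:
// Ok(Homogeneous(Reg::f32())).

#[repr(C)]
struct FloatAndInt {
    a: f32,
    b: u32,
}
// Float and integer leaves cannot merge: Err(Heterogeneous).

#[repr(C, align(16))]
struct Padded {
    a: f64,
}
// The raised alignment leaves trailing padding (8 data bytes in a 16-byte
// layout), so the final "no padding" check rejects it: Err(Heterogeneous).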
+
+/// Information about how to pass an argument to,
+/// or return a value from, a function, under some ABI.
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct ArgAbi<'a, Ty> {
+ pub layout: TyAndLayout<'a, Ty>,
+
+ /// Dummy argument, which is emitted before the real argument.
+ pub pad: Option<Reg>,
+
+ pub mode: PassMode,
+}
+
+impl<'a, Ty> ArgAbi<'a, Ty> {
+ pub fn new(
+ cx: &impl HasDataLayout,
+ layout: TyAndLayout<'a, Ty>,
+ scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
+ ) -> Self {
+ let mode = match layout.abi {
+ Abi::Uninhabited => PassMode::Ignore,
+ Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
+ Abi::ScalarPair(a, b) => PassMode::Pair(
+ scalar_attrs(&layout, a, Size::ZERO),
+ scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
+ ),
+ Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
+ Abi::Aggregate { .. } => PassMode::Direct(ArgAttributes::new()),
+ };
+ ArgAbi { layout, pad: None, mode }
+ }
+
+ fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
+ let mut attrs = ArgAttributes::new();
+
+ // For non-immediate arguments the callee gets its own copy of
+ // the value on the stack, so there are no aliases. It's also
+ // program-invisible, so nothing can capture the pointer.
+ attrs
+ .set(ArgAttribute::NoAlias)
+ .set(ArgAttribute::NoCapture)
+ .set(ArgAttribute::NonNull)
+ .set(ArgAttribute::NoUndef);
+ attrs.pointee_size = layout.size;
+ // FIXME(eddyb) We should be doing this, but at least on
+ // i686-pc-windows-msvc, it results in wrong stack offsets.
+ // attrs.pointee_align = Some(layout.align.abi);
+
+ let extra_attrs = layout.is_unsized().then_some(ArgAttributes::new());
+
+ PassMode::Indirect { attrs, extra_attrs, on_stack: false }
+ }
+
+ pub fn make_indirect(&mut self) {
+ match self.mode {
+ PassMode::Direct(_) | PassMode::Pair(_, _) => {}
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: false } => return,
+ _ => panic!("Tried to make {:?} indirect", self.mode),
+ }
+
+ self.mode = Self::indirect_pass_mode(&self.layout);
+ }
+
+ pub fn make_indirect_byval(&mut self) {
+ self.make_indirect();
+ match self.mode {
+ PassMode::Indirect { attrs: _, extra_attrs: _, ref mut on_stack } => {
+ *on_stack = true;
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ pub fn extend_integer_width_to(&mut self, bits: u64) {
+ // Only integers have signedness
+ if let Abi::Scalar(scalar) = self.layout.abi {
+ if let abi::Int(i, signed) = scalar.primitive() {
+ if i.size().bits() < bits {
+ if let PassMode::Direct(ref mut attrs) = self.mode {
+ if signed {
+ attrs.ext(ArgExtension::Sext)
+ } else {
+ attrs.ext(ArgExtension::Zext)
+ };
+ }
+ }
+ }
+ }
+ }
+
+ pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
+ self.mode = PassMode::Cast(target.into());
+ }
+
+ pub fn pad_with(&mut self, reg: Reg) {
+ self.pad = Some(reg);
+ }
+
+ pub fn is_indirect(&self) -> bool {
+ matches!(self.mode, PassMode::Indirect { .. })
+ }
+
+ pub fn is_sized_indirect(&self) -> bool {
+ matches!(self.mode, PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ })
+ }
+
+ pub fn is_unsized_indirect(&self) -> bool {
+ matches!(self.mode, PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ })
+ }
+
+ pub fn is_ignore(&self) -> bool {
+ matches!(self.mode, PassMode::Ignore)
+ }
+}
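
// Editorial sketch (not part of this commit): what extend_integer_width_to(32)
// records for small integer arguments that are in PassMode::Direct.
extern "C" {
    fn takes_u8(x: u8);   // unsigned, 8 bits  -> attrs.ext(ArgExtension::Zext)
    fn takes_i16(x: i16); // signed, 16 bits   -> attrs.ext(ArgExtension::Sext)
    fn takes_u32(x: u32); // already 32 bits   -> left unchanged
}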
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum Conv {
+ // General language calling conventions, for which every target
+ // should have its own backend (e.g. LLVM) support.
+ C,
+ Rust,
+
+ /// For things unlikely to be called, where smaller caller codegen is
+ /// preferred over raw speed.
+ /// Stronger than just `#[cold]` because `fn` pointers might be incompatible.
+ RustCold,
+
+ // Target-specific calling conventions.
+ ArmAapcs,
+ CCmseNonSecureCall,
+
+ Msp430Intr,
+
+ PtxKernel,
+
+ X86Fastcall,
+ X86Intr,
+ X86Stdcall,
+ X86ThisCall,
+ X86VectorCall,
+
+ X86_64SysV,
+ X86_64Win64,
+
+ AmdGpuKernel,
+ AvrInterrupt,
+ AvrNonBlockingInterrupt,
+}
+
+/// Metadata describing how the arguments to a native function
+/// should be passed in order to respect the native ABI.
+///
+/// I will do my best to describe this structure, but these
+/// comments are reverse-engineered and may be inaccurate. -NDM
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct FnAbi<'a, Ty> {
+ /// The LLVM types of each argument.
+ pub args: Vec<ArgAbi<'a, Ty>>,
+
+ /// LLVM return type.
+ pub ret: ArgAbi<'a, Ty>,
+
+ pub c_variadic: bool,
+
+ /// The count of non-variadic arguments.
+ ///
+ /// Should only be different from args.len() when c_variadic is true.
+ /// This can be used to know whether an argument is variadic or not.
+ pub fixed_count: usize,
+
+ pub conv: Conv,
+
+ pub can_unwind: bool,
+}
+
+/// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
+#[derive(Copy, Clone, Debug, HashStable_Generic)]
+pub enum AdjustForForeignAbiError {
+ /// Target architecture doesn't support "foreign" (i.e. non-Rust) ABIs.
+ Unsupported { arch: Symbol, abi: spec::abi::Abi },
+}
+
+impl fmt::Display for AdjustForForeignAbiError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Unsupported { arch, abi } => {
+ write!(f, "target architecture {:?} does not support `extern {}` ABI", arch, abi)
+ }
+ }
+ }
+}
+
+impl<'a, Ty> FnAbi<'a, Ty> {
+ pub fn adjust_for_foreign_abi<C>(
+ &mut self,
+ cx: &C,
+ abi: spec::abi::Abi,
+ ) -> Result<(), AdjustForForeignAbiError>
+ where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+ {
+ if abi == spec::abi::Abi::X86Interrupt {
+ if let Some(arg) = self.args.first_mut() {
+ arg.make_indirect_byval();
+ }
+ return Ok(());
+ }
+
+ match &cx.target_spec().arch[..] {
+ "x86" => {
+ let flavor = if let spec::abi::Abi::Fastcall { .. }
+ | spec::abi::Abi::Vectorcall { .. } = abi
+ {
+ x86::Flavor::FastcallOrVectorcall
+ } else {
+ x86::Flavor::General
+ };
+ x86::compute_abi_info(cx, self, flavor);
+ }
+ "x86_64" => match abi {
+ spec::abi::Abi::SysV64 { .. } => x86_64::compute_abi_info(cx, self),
+ spec::abi::Abi::Win64 { .. } => x86_win64::compute_abi_info(self),
+ _ => {
+ if cx.target_spec().is_like_windows {
+ x86_win64::compute_abi_info(self)
+ } else {
+ x86_64::compute_abi_info(cx, self)
+ }
+ }
+ },
+ "aarch64" => aarch64::compute_abi_info(cx, self),
+ "amdgpu" => amdgpu::compute_abi_info(cx, self),
+ "arm" => arm::compute_abi_info(cx, self),
+ "avr" => avr::compute_abi_info(self),
+ "m68k" => m68k::compute_abi_info(self),
+ "mips" => mips::compute_abi_info(cx, self),
+ "mips64" => mips64::compute_abi_info(cx, self),
+ "powerpc" => powerpc::compute_abi_info(self),
+ "powerpc64" => powerpc64::compute_abi_info(cx, self),
+ "s390x" => s390x::compute_abi_info(cx, self),
+ "msp430" => msp430::compute_abi_info(self),
+ "sparc" => sparc::compute_abi_info(cx, self),
+ "sparc64" => sparc64::compute_abi_info(cx, self),
+ "nvptx" => nvptx::compute_abi_info(self),
+ "nvptx64" => {
+ if cx.target_spec().adjust_abi(abi) == spec::abi::Abi::PtxKernel {
+ nvptx64::compute_ptx_kernel_abi_info(cx, self)
+ } else {
+ nvptx64::compute_abi_info(self)
+ }
+ }
+ "hexagon" => hexagon::compute_abi_info(self),
+ "riscv32" | "riscv64" => riscv::compute_abi_info(cx, self),
+ "wasm32" | "wasm64" => {
+ if cx.target_spec().adjust_abi(abi) == spec::abi::Abi::Wasm {
+ wasm::compute_wasm_abi_info(self)
+ } else {
+ wasm::compute_c_abi_info(cx, self)
+ }
+ }
+ "asmjs" => wasm::compute_c_abi_info(cx, self),
+ "bpf" => bpf::compute_abi_info(self),
+ arch => {
+ return Err(AdjustForForeignAbiError::Unsupported {
+ arch: Symbol::intern(arch),
+ abi,
+ });
+ }
+ }
+
+ Ok(())
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/msp430.rs b/compiler/rustc_target/src/abi/call/msp430.rs
new file mode 100644
index 000000000..0ba73657b
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/msp430.rs
@@ -0,0 +1,39 @@
+// Reference: MSP430 Embedded Application Binary Interface
+// https://www.ti.com/lit/an/slaa534a/slaa534a.pdf
+
+use crate::abi::call::{ArgAbi, FnAbi};
+
+// 3.5 Structures or Unions Passed and Returned by Reference
+//
+// "Structures (including classes) and unions larger than 32 bits are passed and
+// returned by reference. To pass a structure or union by reference, the caller
+// places its address in the appropriate location: either in a register or on
+// the stack, according to its position in the argument list. (..)"
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(16);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(16);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
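
Editor's note: a sketch, not part of the commit, of the 32-bit threshold quoted from the TI ABI above; the struct names are invented.

#[repr(C)]
struct FourBytes {
    a: u16,
    b: u16,
}
// Exactly 32 bits: not "larger than 32 bits", so it keeps its direct pass mode.

#[repr(C)]
struct SixBytes {
    a: u16,
    b: u16,
    c: u16,
}
// 48 bits: passed and returned by reference (make_indirect), matching the
// quoted rule. Scalars are simply widened to the 16-bit register width,
// e.g. a `u8` argument gets a zero-extension to 16 bits.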
diff --git a/compiler/rustc_target/src/abi/call/nvptx.rs b/compiler/rustc_target/src/abi/call/nvptx.rs
new file mode 100644
index 000000000..428dd95bb
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/nvptx.rs
@@ -0,0 +1,33 @@
+// Reference: PTX Writer's Guide to Interoperability
+// https://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability
+
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/nvptx64.rs b/compiler/rustc_target/src/abi/call/nvptx64.rs
new file mode 100644
index 000000000..fc16f1c97
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/nvptx64.rs
@@ -0,0 +1,64 @@
+use crate::abi::call::{ArgAbi, FnAbi, PassMode, Reg, Size, Uniform};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
+ arg.make_indirect();
+ }
+}
+
+fn classify_arg_kernel<'a, Ty, C>(_cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if matches!(arg.mode, PassMode::Pair(..)) && (arg.layout.is_adt() || arg.layout.is_tuple()) {
+ let align_bytes = arg.layout.align.abi.bytes();
+
+ let unit = match align_bytes {
+ 1 => Reg::i8(),
+ 2 => Reg::i16(),
+ 4 => Reg::i32(),
+ 8 => Reg::i64(),
+ 16 => Reg::i128(),
+ _ => unreachable!("Align is given as power of 2 no larger than 16 bytes"),
+ };
+ arg.cast_to(Uniform { unit, total: Size::from_bytes(2 * align_bytes) });
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
+
+pub fn compute_ptx_kernel_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.layout.is_unit() && !fn_abi.ret.layout.is_never() {
+ panic!("Kernels should not return anything other than () or !");
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg_kernel(cx, arg);
+ }
+}
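
Editor's note: a sketch, not part of the commit, of the kernel-argument handling above. The struct is invented and is assumed to be laid out as a ScalarPair.

#[repr(C)]
struct PtrLen {
    ptr: *const u8,
    len: usize,
}
// A two-scalar struct like this is typically given PassMode::Pair; for a PTX
// kernel, classify_arg_kernel re-casts it (align 8) to
// Uniform { unit: i64, total: 16 bytes } so it arrives as one uniform chunk.
// Kernels themselves must return `()` or `!`; anything else panics in
// compute_ptx_kernel_abi_info.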
diff --git a/compiler/rustc_target/src/abi/call/powerpc.rs b/compiler/rustc_target/src/abi/call/powerpc.rs
new file mode 100644
index 000000000..27a5c6d2f
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/powerpc.rs
@@ -0,0 +1,30 @@
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/powerpc64.rs b/compiler/rustc_target/src/abi/call/powerpc64.rs
new file mode 100644
index 000000000..c22ef9c8f
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/powerpc64.rs
@@ -0,0 +1,141 @@
+// FIXME:
+// Alignment of 128 bit types is not currently handled; this will
+// need to be fixed when PowerPC vector support is added.
+
+use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
+use crate::abi::{Endian, HasDataLayout, TyAbiInterface};
+use crate::spec::HasTargetSpec;
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+enum ABI {
+ ELFv1, // original ABI used for powerpc64 (big-endian)
+ ELFv2, // newer ABI used for powerpc64le and musl (both endians)
+}
+use ABI::*;
+
+fn is_homogeneous_aggregate<'a, Ty, C>(
+ cx: &C,
+ arg: &mut ArgAbi<'a, Ty>,
+ abi: ABI,
+) -> Option<Uniform>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
+ // ELFv1 only passes one-member aggregates transparently.
+ // ELFv2 passes up to eight uniquely addressable members.
+ if (abi == ELFv1 && arg.layout.size > unit.size)
+ || arg.layout.size > unit.size.checked_mul(8, cx).unwrap()
+ {
+ return None;
+ }
+
+ let valid_unit = match unit.kind {
+ RegKind::Integer => false,
+ RegKind::Float => true,
+ RegKind::Vector => arg.layout.size.bits() == 128,
+ };
+
+ valid_unit.then_some(Uniform { unit, total: arg.layout.size })
+ })
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, abi: ABI)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(64);
+ return;
+ }
+
+ // The ELFv1 ABI doesn't return aggregates in registers
+ if abi == ELFv1 {
+ ret.make_indirect();
+ return;
+ }
+
+ if let Some(uniform) = is_homogeneous_aggregate(cx, ret, abi) {
+ ret.cast_to(uniform);
+ return;
+ }
+
+ let size = ret.layout.size;
+ let bits = size.bits();
+ if bits <= 128 {
+ let unit = if cx.data_layout().endian == Endian::Big {
+ Reg { kind: RegKind::Integer, size }
+ } else if bits <= 8 {
+ Reg::i8()
+ } else if bits <= 16 {
+ Reg::i16()
+ } else if bits <= 32 {
+ Reg::i32()
+ } else {
+ Reg::i64()
+ };
+
+ ret.cast_to(Uniform { unit, total: size });
+ return;
+ }
+
+ ret.make_indirect();
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, abi: ABI)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ arg.extend_integer_width_to(64);
+ return;
+ }
+
+ if let Some(uniform) = is_homogeneous_aggregate(cx, arg, abi) {
+ arg.cast_to(uniform);
+ return;
+ }
+
+ let size = arg.layout.size;
+ let (unit, total) = if size.bits() <= 64 {
+ // Aggregates smaller than a doubleword should appear in
+ // the least-significant bits of the parameter doubleword.
+ (Reg { kind: RegKind::Integer, size }, size)
+ } else {
+ // Aggregates larger than a doubleword should be padded
+ // at the tail to fill out a whole number of doublewords.
+ let reg_i64 = Reg::i64();
+ (reg_i64, size.align_to(reg_i64.align(cx)))
+ };
+
+ arg.cast_to(Uniform { unit, total });
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ let abi = if cx.target_spec().env == "musl" {
+ ELFv2
+ } else {
+ match cx.data_layout().endian {
+ Endian::Big => ELFv1,
+ Endian::Little => ELFv2,
+ }
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret, abi);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, abi);
+ }
+}
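
Editor's note: a sketch, not part of the commit, contrasting the ELFv1 and ELFv2 handling selected above.

#[repr(C)]
struct TwoDoubles {
    a: f64,
    b: f64,
}
// ELFv2 (powerpc64le, or musl on either endian): a two-member homogeneous
// float aggregate, returned as Uniform { unit: f64, total: 16 bytes } in FP
// registers.
//
// ELFv1 (big-endian, non-musl): aggregates are never returned in registers,
// so the same struct is returned indirectly; as an argument it also fails
// the one-member ELFv1 limit and is passed as integer doublewords
// (Uniform { unit: i64, total: 16 bytes }).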
diff --git a/compiler/rustc_target/src/abi/call/riscv.rs b/compiler/rustc_target/src/abi/call/riscv.rs
new file mode 100644
index 000000000..752b44f64
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/riscv.rs
@@ -0,0 +1,348 @@
+// Reference: RISC-V ELF psABI specification
+// https://github.com/riscv/riscv-elf-psabi-doc
+//
+// Reference: Clang RISC-V ELF psABI lowering code
+// https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773
+
+use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
+use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use crate::spec::HasTargetSpec;
+
+#[derive(Copy, Clone)]
+enum RegPassKind {
+ Float(Reg),
+ Integer(Reg),
+ Unknown,
+}
+
+#[derive(Copy, Clone)]
+enum FloatConv {
+ FloatPair(Reg, Reg),
+ Float(Reg),
+ MixedPair(Reg, Reg),
+}
+
+#[derive(Copy, Clone)]
+struct CannotUseFpConv;
+
+fn is_riscv_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
+ match arg.layout.abi {
+ Abi::Vector { .. } => true,
+ _ => arg.layout.is_aggregate(),
+ }
+}
+
+fn should_use_fp_conv_helper<'a, Ty, C>(
+ cx: &C,
+ arg_layout: &TyAndLayout<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+ field1_kind: &mut RegPassKind,
+ field2_kind: &mut RegPassKind,
+) -> Result<(), CannotUseFpConv>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ match arg_layout.abi {
+ Abi::Scalar(scalar) => match scalar.primitive() {
+ abi::Int(..) | abi::Pointer => {
+ if arg_layout.size.bits() > xlen {
+ return Err(CannotUseFpConv);
+ }
+ match (*field1_kind, *field2_kind) {
+ (RegPassKind::Unknown, _) => {
+ *field1_kind = RegPassKind::Integer(Reg {
+ kind: RegKind::Integer,
+ size: arg_layout.size,
+ });
+ }
+ (RegPassKind::Float(_), RegPassKind::Unknown) => {
+ *field2_kind = RegPassKind::Integer(Reg {
+ kind: RegKind::Integer,
+ size: arg_layout.size,
+ });
+ }
+ _ => return Err(CannotUseFpConv),
+ }
+ }
+ abi::F32 | abi::F64 => {
+ if arg_layout.size.bits() > flen {
+ return Err(CannotUseFpConv);
+ }
+ match (*field1_kind, *field2_kind) {
+ (RegPassKind::Unknown, _) => {
+ *field1_kind =
+ RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
+ }
+ (_, RegPassKind::Unknown) => {
+ *field2_kind =
+ RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
+ }
+ _ => return Err(CannotUseFpConv),
+ }
+ }
+ },
+ Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+ FieldsShape::Primitive => {
+ unreachable!("aggregates can't have `FieldsShape::Primitive`")
+ }
+ FieldsShape::Union(_) => {
+ if !arg_layout.is_zst() {
+ return Err(CannotUseFpConv);
+ }
+ }
+ FieldsShape::Array { count, .. } => {
+ for _ in 0..count {
+ let elem_layout = arg_layout.field(cx, 0);
+ should_use_fp_conv_helper(
+ cx,
+ &elem_layout,
+ xlen,
+ flen,
+ field1_kind,
+ field2_kind,
+ )?;
+ }
+ }
+ FieldsShape::Arbitrary { .. } => {
+ match arg_layout.variants {
+ abi::Variants::Multiple { .. } => return Err(CannotUseFpConv),
+ abi::Variants::Single { .. } => (),
+ }
+ for i in arg_layout.fields.index_by_increasing_offset() {
+ let field = arg_layout.field(cx, i);
+ should_use_fp_conv_helper(cx, &field, xlen, flen, field1_kind, field2_kind)?;
+ }
+ }
+ },
+ }
+ Ok(())
+}
+
+fn should_use_fp_conv<'a, Ty, C>(
+ cx: &C,
+ arg: &TyAndLayout<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+) -> Option<FloatConv>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ let mut field1_kind = RegPassKind::Unknown;
+ let mut field2_kind = RegPassKind::Unknown;
+ if should_use_fp_conv_helper(cx, arg, xlen, flen, &mut field1_kind, &mut field2_kind).is_err() {
+ return None;
+ }
+ match (field1_kind, field2_kind) {
+ (RegPassKind::Integer(l), RegPassKind::Float(r)) => Some(FloatConv::MixedPair(l, r)),
+ (RegPassKind::Float(l), RegPassKind::Integer(r)) => Some(FloatConv::MixedPair(l, r)),
+ (RegPassKind::Float(l), RegPassKind::Float(r)) => Some(FloatConv::FloatPair(l, r)),
+ (RegPassKind::Float(f), RegPassKind::Unknown) => Some(FloatConv::Float(f)),
+ _ => None,
+ }
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
+ match conv {
+ FloatConv::Float(f) => {
+ arg.cast_to(f);
+ }
+ FloatConv::FloatPair(l, r) => {
+ arg.cast_to(CastTarget::pair(l, r));
+ }
+ FloatConv::MixedPair(l, r) => {
+ arg.cast_to(CastTarget::pair(l, r));
+ }
+ }
+ return false;
+ }
+
+ let total = arg.layout.size;
+
+ // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
+ // the argument list with the address."
+ // "Aggregates larger than 2✕XLEN bits are passed by reference and are
+ // replaced in the argument list with the address, as are C++ aggregates
+ // with nontrivial copy constructors, destructors, or vtables."
+ if total.bits() > 2 * xlen {
+ // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
+ if is_riscv_aggregate(arg) {
+ arg.make_indirect();
+ }
+ return true;
+ }
+
+ let xlen_reg = match xlen {
+ 32 => Reg::i32(),
+ 64 => Reg::i64(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+ if is_riscv_aggregate(arg) {
+ if total.bits() <= xlen {
+ arg.cast_to(xlen_reg);
+ } else {
+ arg.cast_to(Uniform { unit: xlen_reg, total: Size::from_bits(xlen * 2) });
+ }
+ return false;
+ }
+
+ // "When passed in registers, scalars narrower than XLEN bits are widened
+ // according to the sign of their type up to 32 bits, then sign-extended to
+ // XLEN bits."
+ extend_integer_width(arg, xlen);
+ false
+}
+
+fn classify_arg<'a, Ty, C>(
+ cx: &C,
+ arg: &mut ArgAbi<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+ is_vararg: bool,
+ avail_gprs: &mut u64,
+ avail_fprs: &mut u64,
+) where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ if !is_vararg {
+ match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
+ Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
+ *avail_fprs -= 1;
+ arg.cast_to(f);
+ return;
+ }
+ Some(FloatConv::FloatPair(l, r)) if *avail_fprs >= 2 => {
+ *avail_fprs -= 2;
+ arg.cast_to(CastTarget::pair(l, r));
+ return;
+ }
+ Some(FloatConv::MixedPair(l, r)) if *avail_fprs >= 1 && *avail_gprs >= 1 => {
+ *avail_gprs -= 1;
+ *avail_fprs -= 1;
+ arg.cast_to(CastTarget::pair(l, r));
+ return;
+ }
+ _ => (),
+ }
+ }
+
+ let total = arg.layout.size;
+ let align = arg.layout.align.abi.bits();
+
+ // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
+ // the argument list with the address."
+ // "Aggregates larger than 2✕XLEN bits are passed by reference and are
+ // replaced in the argument list with the address, as are C++ aggregates
+ // with nontrivial copy constructors, destructors, or vtables."
+ if total.bits() > 2 * xlen {
+ // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
+ if is_riscv_aggregate(arg) {
+ arg.make_indirect();
+ }
+ if *avail_gprs >= 1 {
+ *avail_gprs -= 1;
+ }
+ return;
+ }
+
+ let double_xlen_reg = match xlen {
+ 32 => Reg::i64(),
+ 64 => Reg::i128(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+
+ let xlen_reg = match xlen {
+ 32 => Reg::i32(),
+ 64 => Reg::i64(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+
+ if total.bits() > xlen {
+ let align_regs = align > xlen;
+ if is_riscv_aggregate(arg) {
+ arg.cast_to(Uniform {
+ unit: if align_regs { double_xlen_reg } else { xlen_reg },
+ total: Size::from_bits(xlen * 2),
+ });
+ }
+ if align_regs && is_vararg {
+ *avail_gprs -= *avail_gprs % 2;
+ }
+ if *avail_gprs >= 2 {
+ *avail_gprs -= 2;
+ } else {
+ *avail_gprs = 0;
+ }
+ return;
+ } else if is_riscv_aggregate(arg) {
+ arg.cast_to(xlen_reg);
+ if *avail_gprs >= 1 {
+ *avail_gprs -= 1;
+ }
+ return;
+ }
+
+ // "When passed in registers, scalars narrower than XLEN bits are widened
+ // according to the sign of their type up to 32 bits, then sign-extended to
+ // XLEN bits."
+ if *avail_gprs >= 1 {
+ extend_integer_width(arg, xlen);
+ *avail_gprs -= 1;
+ }
+}
+
+fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) {
+ if let Abi::Scalar(scalar) = arg.layout.abi {
+ if let abi::Int(i, _) = scalar.primitive() {
+ // 32-bit integers are always sign-extended
+ if i.size().bits() == 32 && xlen > 32 {
+ if let PassMode::Direct(ref mut attrs) = arg.mode {
+ attrs.ext(ArgExtension::Sext);
+ return;
+ }
+ }
+ }
+ }
+
+ arg.extend_integer_width_to(xlen);
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ let flen = match &cx.target_spec().llvm_abiname[..] {
+ "ilp32f" | "lp64f" => 32,
+ "ilp32d" | "lp64d" => 64,
+ _ => 0,
+ };
+ let xlen = cx.data_layout().pointer_size.bits();
+
+ let mut avail_gprs = 8;
+ let mut avail_fprs = 8;
+
+ if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
+ avail_gprs -= 1;
+ }
+
+ for (i, arg) in fn_abi.args.iter_mut().enumerate() {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(
+ cx,
+ arg,
+ xlen,
+ flen,
+ i >= fn_abi.fixed_count,
+ &mut avail_gprs,
+ &mut avail_fprs,
+ );
+ }
+}
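
As an editorial aside (not part of the patch): the size-based branching in `classify_arg` above reduces to three outcomes for integer-class values. A minimal standalone sketch of that rule, with plain integers instead of rustc's layout types and hypothetical sizes:

    /// How an integer-class argument travels, mirroring the branches above.
    #[derive(Debug, PartialEq)]
    enum IntPass {
        OneReg,   // fits in a single XLEN-wide register
        RegPair,  // fits in two XLEN-wide registers
        Indirect, // larger than 2*XLEN: replaced by a pointer in the arg list
    }

    fn classify_int_arg(size_bits: u64, xlen: u64) -> IntPass {
        if size_bits <= xlen {
            IntPass::OneReg
        } else if size_bits <= 2 * xlen {
            IntPass::RegPair
        } else {
            IntPass::Indirect
        }
    }

    fn main() {
        // On RV64 (XLEN = 64): a 16-byte aggregate still fits in a register
        // pair, while a 24-byte one is passed by reference.
        assert_eq!(classify_int_arg(128, 64), IntPass::RegPair);
        assert_eq!(classify_int_arg(192, 64), IntPass::Indirect);
    }
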
diff --git a/compiler/rustc_target/src/abi/call/s390x.rs b/compiler/rustc_target/src/abi/call/s390x.rs
new file mode 100644
index 000000000..13706e8c2
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/s390x.rs
@@ -0,0 +1,57 @@
+// FIXME: This assumes we're using the non-vector ABI, i.e., compiling
+// for a pre-z13 machine or using -mno-vx.
+
+use crate::abi::call::{ArgAbi, FnAbi, Reg};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if !ret.layout.is_aggregate() && ret.layout.size.bits() <= 64 {
+ ret.extend_integer_width_to(64);
+ } else {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() && arg.layout.size.bits() <= 64 {
+ arg.extend_integer_width_to(64);
+ return;
+ }
+
+ if arg.layout.is_single_fp_element(cx) {
+ match arg.layout.size.bytes() {
+ 4 => arg.cast_to(Reg::f32()),
+ 8 => arg.cast_to(Reg::f64()),
+ _ => arg.make_indirect(),
+ }
+ } else {
+ match arg.layout.size.bytes() {
+ 1 => arg.cast_to(Reg::i8()),
+ 2 => arg.cast_to(Reg::i16()),
+ 4 => arg.cast_to(Reg::i32()),
+ 8 => arg.cast_to(Reg::i64()),
+ _ => arg.make_indirect(),
+ }
+ }
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/sparc.rs b/compiler/rustc_target/src/abi/call/sparc.rs
new file mode 100644
index 000000000..cc4431976
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/sparc.rs
@@ -0,0 +1,51 @@
+use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
+use crate::abi::{HasDataLayout, Size};
+
+fn classify_ret<Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
+where
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(32);
+ } else {
+ ret.make_indirect();
+ *offset += cx.data_layout().pointer_size;
+ }
+}
+
+fn classify_arg<Ty, C>(cx: &C, arg: &mut ArgAbi<'_, Ty>, offset: &mut Size)
+where
+ C: HasDataLayout,
+{
+ let dl = cx.data_layout();
+ let size = arg.layout.size;
+ let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
+
+ if arg.layout.is_aggregate() {
+ arg.cast_to(Uniform { unit: Reg::i32(), total: size });
+ if !offset.is_aligned(align) {
+ arg.pad_with(Reg::i32());
+ }
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+
+ *offset = offset.align_to(align) + size.align_to(align);
+}
+
+pub fn compute_abi_info<Ty, C>(cx: &C, fn_abi: &mut FnAbi<'_, Ty>)
+where
+ C: HasDataLayout,
+{
+ let mut offset = Size::ZERO;
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret, &mut offset);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, &mut offset);
+ }
+}
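
A standalone sketch (not rustc code) of the running-`offset` arithmetic that drives `pad_with` above, with plain integers standing in for `Size`/`Align` and hypothetical argument sizes:

    fn align_to(offset: u64, align: u64) -> u64 {
        (offset + align - 1) & !(align - 1)
    }

    fn main() {
        // First argument: a 4-byte int leaves the running offset at 4.
        let mut offset = align_to(0, 4) + 4;

        // Next argument: an 8-byte-aligned aggregate. The offset is misaligned,
        // which is exactly when the code above inserts an i32 pad register.
        let aggregate_align = 8;
        assert!(offset % aggregate_align != 0);

        // The offset then advances past the (aligned) aggregate, as in
        // `*offset = offset.align_to(align) + size.align_to(align)`.
        offset = align_to(offset, aggregate_align) + 16;
        assert_eq!(offset, 24);
    }
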
diff --git a/compiler/rustc_target/src/abi/call/sparc64.rs b/compiler/rustc_target/src/abi/call/sparc64.rs
new file mode 100644
index 000000000..cc3a0a699
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/sparc64.rs
@@ -0,0 +1,226 @@
+// FIXME: This needs an audit for correctness and completeness.
+
+use crate::abi::call::{
+ ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, Reg, Uniform,
+};
+use crate::abi::{self, HasDataLayout, Scalar, Size, TyAbiInterface, TyAndLayout};
+
+#[derive(Clone, Debug)]
+pub struct Sdata {
+ pub prefix: [Option<Reg>; 8],
+ pub prefix_index: usize,
+ pub last_offset: Size,
+ pub has_float: bool,
+ pub arg_attribute: ArgAttribute,
+}
+
+fn arg_scalar<C>(cx: &C, scalar: &Scalar, offset: Size, mut data: Sdata) -> Sdata
+where
+ C: HasDataLayout,
+{
+ let dl = cx.data_layout();
+
+ if !scalar.primitive().is_float() {
+ return data;
+ }
+
+ data.has_float = true;
+
+ if !data.last_offset.is_aligned(dl.f64_align.abi) && data.last_offset < offset {
+ if data.prefix_index == data.prefix.len() {
+ return data;
+ }
+ data.prefix[data.prefix_index] = Some(Reg::i32());
+ data.prefix_index += 1;
+ data.last_offset = data.last_offset + Reg::i32().size;
+ }
+
+ for _ in 0..((offset - data.last_offset).bits() / 64)
+ .min((data.prefix.len() - data.prefix_index) as u64)
+ {
+ data.prefix[data.prefix_index] = Some(Reg::i64());
+ data.prefix_index += 1;
+ data.last_offset = data.last_offset + Reg::i64().size;
+ }
+
+ if data.last_offset < offset {
+ if data.prefix_index == data.prefix.len() {
+ return data;
+ }
+ data.prefix[data.prefix_index] = Some(Reg::i32());
+ data.prefix_index += 1;
+ data.last_offset = data.last_offset + Reg::i32().size;
+ }
+
+ if data.prefix_index == data.prefix.len() {
+ return data;
+ }
+
+ if scalar.primitive() == abi::F32 {
+ data.arg_attribute = ArgAttribute::InReg;
+ data.prefix[data.prefix_index] = Some(Reg::f32());
+ data.last_offset = offset + Reg::f32().size;
+ } else {
+ data.prefix[data.prefix_index] = Some(Reg::f64());
+ data.last_offset = offset + Reg::f64().size;
+ }
+ data.prefix_index += 1;
+ return data;
+}
+
+fn arg_scalar_pair<C>(
+ cx: &C,
+ scalar1: &Scalar,
+ scalar2: &Scalar,
+ mut offset: Size,
+ mut data: Sdata,
+) -> Sdata
+where
+ C: HasDataLayout,
+{
+ data = arg_scalar(cx, &scalar1, offset, data);
+ match (scalar1.primitive(), scalar2.primitive()) {
+ (abi::F32, _) => offset += Reg::f32().size,
+ (_, abi::F64) => offset += Reg::f64().size,
+ (abi::Int(i, _signed), _) => offset += i.size(),
+ (abi::Pointer, _) => offset += Reg::i64().size,
+ _ => {}
+ }
+
+ if (offset.raw % 4) != 0 && scalar2.primitive().is_float() {
+ offset.raw += 4 - (offset.raw % 4);
+ }
+ data = arg_scalar(cx, &scalar2, offset, data);
+ return data;
+}
+
+fn parse_structure<'a, Ty, C>(
+ cx: &C,
+ layout: TyAndLayout<'a, Ty>,
+ mut data: Sdata,
+ mut offset: Size,
+) -> Sdata
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if let abi::FieldsShape::Union(_) = layout.fields {
+ return data;
+ }
+
+ match layout.abi {
+ abi::Abi::Scalar(scalar) => {
+ data = arg_scalar(cx, &scalar, offset, data);
+ }
+ abi::Abi::Aggregate { .. } => {
+ for i in 0..layout.fields.count() {
+ if offset < layout.fields.offset(i) {
+ offset = layout.fields.offset(i);
+ }
+ data = parse_structure(cx, layout.field(cx, i), data.clone(), offset);
+ }
+ }
+ _ => {
+ if let abi::Abi::ScalarPair(scalar1, scalar2) = &layout.abi {
+ data = arg_scalar_pair(cx, scalar1, scalar2, offset, data);
+ }
+ }
+ }
+
+ return data;
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, in_registers_max: Size)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ arg.extend_integer_width_to(64);
+ return;
+ }
+
+ let total = arg.layout.size;
+ if total > in_registers_max {
+ arg.make_indirect();
+ return;
+ }
+
+ match arg.layout.fields {
+ abi::FieldsShape::Primitive => unreachable!(),
+ abi::FieldsShape::Array { .. } => {
+ // Arrays are passed indirectly
+ arg.make_indirect();
+ return;
+ }
+ abi::FieldsShape::Union(_) => {
+ // Unions are always treated as a series of 64-bit integer chunks.
+ }
+ abi::FieldsShape::Arbitrary { .. } => {
+ // Structures with floating point numbers need special care.
+
+ let mut data = parse_structure(
+ cx,
+ arg.layout,
+ Sdata {
+ prefix: [None; 8],
+ prefix_index: 0,
+ last_offset: Size::ZERO,
+ has_float: false,
+ arg_attribute: ArgAttribute::default(),
+ },
+ Size { raw: 0 },
+ );
+
+ if data.has_float {
+ // A structure like { float, int, int } must not be handled as if it
+ // were { float, long int }; the reverse substitution is harmless.
+ if data.last_offset < arg.layout.size
+ && (data.last_offset.raw % 8) != 0
+ && data.prefix_index < data.prefix.len()
+ {
+ data.prefix[data.prefix_index] = Some(Reg::i32());
+ data.prefix_index += 1;
+ data.last_offset += Reg::i32().size;
+ }
+
+ let mut rest_size = arg.layout.size - data.last_offset;
+ if (rest_size.raw % 8) != 0 && data.prefix_index < data.prefix.len() {
+ data.prefix[data.prefix_index] = Some(Reg::i32());
+ rest_size = rest_size - Reg::i32().size;
+ }
+
+ arg.cast_to(CastTarget {
+ prefix: data.prefix,
+ rest: Uniform { unit: Reg::i64(), total: rest_size },
+ attrs: ArgAttributes {
+ regular: data.arg_attribute,
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ },
+ });
+ return;
+ }
+ }
+ }
+
+ arg.cast_to(Uniform { unit: Reg::i64(), total });
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_arg(cx, &mut fn_abi.ret, Size { raw: 32 });
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, Size { raw: 16 });
+ }
+}
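
The `% 8 != 0` fix-ups in `classify_arg` above are easiest to follow on a concrete case. Below is a standalone walk-through (not rustc code) for a hypothetical `{ f32, i32, i32 }` whose float was placed in the first prefix slot by `parse_structure`:

    fn main() {
        // Hypothetical { f32, i32, i32 }: 12 bytes, the f32 covers bytes 0..4.
        let total_bytes = 12u64;
        let mut last_offset = 4u64; // after placing the f32 in a float register
        let mut prefix: Vec<&str> = vec!["f32"];

        // First fix-up: realign to a 64-bit boundary with an i32 slot.
        if last_offset < total_bytes && last_offset % 8 != 0 {
            prefix.push("i32");
            last_offset += 4;
        }

        // Second fix-up: a trailing sub-8-byte remainder also becomes an i32
        // slot, leaving only whole 8-byte chunks for the i64 `rest`.
        let mut rest = total_bytes - last_offset;
        if rest % 8 != 0 {
            prefix.push("i32");
            rest -= 4;
        }

        assert_eq!(prefix, ["f32", "i32", "i32"]);
        assert_eq!(rest, 0); // nothing left over for i64 chunks
    }
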
diff --git a/compiler/rustc_target/src/abi/call/wasm.rs b/compiler/rustc_target/src/abi/call/wasm.rs
new file mode 100644
index 000000000..3237cde10
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/wasm.rs
@@ -0,0 +1,83 @@
+use crate::abi::call::{ArgAbi, FnAbi, Uniform};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
+fn unwrap_trivial_aggregate<'a, Ty, C>(cx: &C, val: &mut ArgAbi<'a, Ty>) -> bool
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if val.layout.is_aggregate() {
+ if let Some(unit) = val.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()) {
+ let size = val.layout.size;
+ if unit.size == size {
+ val.cast_to(Uniform { unit, total: size });
+ return true;
+ }
+ }
+ }
+ false
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ ret.extend_integer_width_to(32);
+ if ret.layout.is_aggregate() && !unwrap_trivial_aggregate(cx, ret) {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.extend_integer_width_to(32);
+ if arg.layout.is_aggregate() && !unwrap_trivial_aggregate(cx, arg) {
+ arg.make_indirect_byval();
+ }
+}
+
+/// The purpose of this ABI is to match the C ABI (aka clang) exactly.
+pub fn compute_c_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
+
+/// The purpose of this ABI is to match the WebAssembly standard. This
+/// intentionally diverges from the C ABI and is specifically crafted to take
+/// advantage of LLVM's support of multiple returns in WebAssembly.
+pub fn compute_wasm_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+
+ fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ ret.extend_integer_width_to(32);
+ }
+
+ fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ arg.extend_integer_width_to(32);
+ }
+}
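
As a small aside: `unwrap_trivial_aggregate` above only fires for newtype-style wrappers, i.e. when the aggregate's single homogeneous unit is as large as the aggregate itself. A trivial standalone illustration of that condition (hypothetical sizes, not rustc's layout types):

    /// A trivially unwrappable aggregate has one homogeneous unit whose size
    /// equals the aggregate's own size, e.g. `struct Wrapper(f64)`.
    fn is_trivially_unwrappable(unit_size: u64, aggregate_size: u64) -> bool {
        unit_size == aggregate_size
    }

    fn main() {
        assert!(is_trivially_unwrappable(8, 8));   // struct Wrapper(f64)
        assert!(!is_trivially_unwrappable(8, 16)); // struct Pair(f64, f64): stays an aggregate
    }
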
diff --git a/compiler/rustc_target/src/abi/call/x86.rs b/compiler/rustc_target/src/abi/call/x86.rs
new file mode 100644
index 000000000..c7d59baf9
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/x86.rs
@@ -0,0 +1,117 @@
+use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+use crate::spec::HasTargetSpec;
+
+#[derive(PartialEq)]
+pub enum Flavor {
+ General,
+ FastcallOrVectorcall,
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, flavor: Flavor)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ if !fn_abi.ret.is_ignore() {
+ if fn_abi.ret.layout.is_aggregate() {
+ // Returning a structure. Most often, this will use
+ // a hidden first argument. On some platforms, though,
+ // small structs are returned as integers.
+ //
+ // Some links:
+ // https://www.angelcode.com/dev/callconv/callconv.html
+ // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
+ let t = cx.target_spec();
+ if t.abi_return_struct_as_int {
+ // According to Clang, everyone but MSVC returns single-element
+ // float aggregates directly in a floating-point register.
+ if !t.is_like_msvc && fn_abi.ret.layout.is_single_fp_element(cx) {
+ match fn_abi.ret.layout.size.bytes() {
+ 4 => fn_abi.ret.cast_to(Reg::f32()),
+ 8 => fn_abi.ret.cast_to(Reg::f64()),
+ _ => fn_abi.ret.make_indirect(),
+ }
+ } else {
+ match fn_abi.ret.layout.size.bytes() {
+ 1 => fn_abi.ret.cast_to(Reg::i8()),
+ 2 => fn_abi.ret.cast_to(Reg::i16()),
+ 4 => fn_abi.ret.cast_to(Reg::i32()),
+ 8 => fn_abi.ret.cast_to(Reg::i64()),
+ _ => fn_abi.ret.make_indirect(),
+ }
+ }
+ } else {
+ fn_abi.ret.make_indirect();
+ }
+ } else {
+ fn_abi.ret.extend_integer_width_to(32);
+ }
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ if arg.layout.is_aggregate() {
+ arg.make_indirect_byval();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+ }
+
+ if flavor == Flavor::FastcallOrVectorcall {
+ // Mark arguments as InReg like clang does it,
+ // so our fastcall/vectorcall is compatible with C/C++ fastcall/vectorcall.
+
+ // Clang reference: lib/CodeGen/TargetInfo.cpp
+ // See X86_32ABIInfo::shouldPrimitiveUseInReg(), X86_32ABIInfo::updateFreeRegs()
+
+ // IsSoftFloatABI is only set to true on ARM platforms, which can't be
+ // x86, so that part of clang's logic is not replicated here.
+
+ let mut free_regs = 2;
+
+ for arg in &mut fn_abi.args {
+ let attrs = match arg.mode {
+ PassMode::Ignore
+ | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ continue;
+ }
+ PassMode::Direct(ref mut attrs) => attrs,
+ PassMode::Pair(..)
+ | PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ }
+ | PassMode::Cast(_) => {
+ unreachable!("x86 shouldn't be passing arguments by {:?}", arg.mode)
+ }
+ };
+
+ // At this point we know this must be a primitive of sorts.
+ let unit = arg.layout.homogeneous_aggregate(cx).unwrap().unit().unwrap();
+ assert_eq!(unit.size, arg.layout.size);
+ if unit.kind == RegKind::Float {
+ continue;
+ }
+
+ let size_in_regs = (arg.layout.size.bits() + 31) / 32;
+
+ if size_in_regs == 0 {
+ continue;
+ }
+
+ if size_in_regs > free_regs {
+ break;
+ }
+
+ free_regs -= size_in_regs;
+
+ if arg.layout.size.bits() <= 32 && unit.kind == RegKind::Integer {
+ attrs.set(ArgAttribute::InReg);
+ }
+
+ if free_regs == 0 {
+ break;
+ }
+ }
+ }
+}
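
A standalone sketch (not rustc code) of just the fastcall/vectorcall register accounting above: two 32-bit slots are handed out in declaration order, and scanning stops once they run out. The float-skipping part is omitted and the argument sizes are hypothetical:

    fn main() {
        let mut free_regs: u64 = 2;
        let args_bits = [32u64, 8, 64, 32]; // e.g. i32, u8, u64, i32
        let mut in_reg = Vec::new();

        for (idx, &bits) in args_bits.iter().enumerate() {
            // One register per started 32-bit chunk, as in the loop above.
            let size_in_regs = (bits + 31) / 32;
            if size_in_regs == 0 {
                continue;
            }
            if size_in_regs > free_regs {
                break;
            }
            free_regs -= size_in_regs;
            if bits <= 32 {
                in_reg.push(idx); // would get the InReg attribute
            }
            if free_regs == 0 {
                break;
            }
        }

        // The i32 and the u8 use up both register slots; the scan stops there.
        assert_eq!(in_reg, vec![0, 1]);
    }
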
diff --git a/compiler/rustc_target/src/abi/call/x86_64.rs b/compiler/rustc_target/src/abi/call/x86_64.rs
new file mode 100644
index 000000000..a52e01a49
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/x86_64.rs
@@ -0,0 +1,248 @@
+// The classification code for the x86_64 ABI is taken from the clay language
+// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
+
+use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind};
+use crate::abi::{self, Abi, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+
+/// Classification of "eightbyte" components.
+// N.B., the order of the variants is from general to specific,
+// such that `unify(a, b)` is the "smaller" of `a` and `b`.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
+enum Class {
+ Int,
+ Sse,
+ SseUp,
+}
+
+#[derive(Clone, Copy, Debug)]
+struct Memory;
+
+// Currently supported vector size (AVX-512).
+const LARGEST_VECTOR_SIZE: usize = 512;
+const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
+
+fn classify_arg<'a, Ty, C>(
+ cx: &C,
+ arg: &ArgAbi<'a, Ty>,
+) -> Result<[Option<Class>; MAX_EIGHTBYTES], Memory>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ fn classify<'a, Ty, C>(
+ cx: &C,
+ layout: TyAndLayout<'a, Ty>,
+ cls: &mut [Option<Class>],
+ off: Size,
+ ) -> Result<(), Memory>
+ where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+ {
+ if !off.is_aligned(layout.align.abi) {
+ if !layout.is_zst() {
+ return Err(Memory);
+ }
+ return Ok(());
+ }
+
+ let mut c = match layout.abi {
+ Abi::Uninhabited => return Ok(()),
+
+ Abi::Scalar(scalar) => match scalar.primitive() {
+ abi::Int(..) | abi::Pointer => Class::Int,
+ abi::F32 | abi::F64 => Class::Sse,
+ },
+
+ Abi::Vector { .. } => Class::Sse,
+
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
+ for i in 0..layout.fields.count() {
+ let field_off = off + layout.fields.offset(i);
+ classify(cx, layout.field(cx, i), cls, field_off)?;
+ }
+
+ match &layout.variants {
+ abi::Variants::Single { .. } => {}
+ abi::Variants::Multiple { variants, .. } => {
+ // Treat enum variants like union members.
+ for variant_idx in variants.indices() {
+ classify(cx, layout.for_variant(cx, variant_idx), cls, off)?;
+ }
+ }
+ }
+
+ return Ok(());
+ }
+ };
+
+ // Fill in `cls` for scalars (Int/Sse) and vectors (Sse).
+ let first = (off.bytes() / 8) as usize;
+ let last = ((off.bytes() + layout.size.bytes() - 1) / 8) as usize;
+ for cls in &mut cls[first..=last] {
+ *cls = Some(cls.map_or(c, |old| old.min(c)));
+
+ // Everything after the first Sse "eightbyte"
+ // component is the upper half of a register.
+ if c == Class::Sse {
+ c = Class::SseUp;
+ }
+ }
+
+ Ok(())
+ }
+
+ let n = ((arg.layout.size.bytes() + 7) / 8) as usize;
+ if n > MAX_EIGHTBYTES {
+ return Err(Memory);
+ }
+
+ let mut cls = [None; MAX_EIGHTBYTES];
+ classify(cx, arg.layout, &mut cls, Size::ZERO)?;
+ if n > 2 {
+ if cls[0] != Some(Class::Sse) {
+ return Err(Memory);
+ }
+ if cls[1..n].iter().any(|&c| c != Some(Class::SseUp)) {
+ return Err(Memory);
+ }
+ } else {
+ let mut i = 0;
+ while i < n {
+ if cls[i] == Some(Class::SseUp) {
+ cls[i] = Some(Class::Sse);
+ } else if cls[i] == Some(Class::Sse) {
+ i += 1;
+ while i != n && cls[i] == Some(Class::SseUp) {
+ i += 1;
+ }
+ } else {
+ i += 1;
+ }
+ }
+ }
+
+ Ok(cls)
+}
+
+fn reg_component(cls: &[Option<Class>], i: &mut usize, size: Size) -> Option<Reg> {
+ if *i >= cls.len() {
+ return None;
+ }
+
+ match cls[*i] {
+ None => None,
+ Some(Class::Int) => {
+ *i += 1;
+ Some(if size.bytes() < 8 { Reg { kind: RegKind::Integer, size } } else { Reg::i64() })
+ }
+ Some(Class::Sse) => {
+ let vec_len =
+ 1 + cls[*i + 1..].iter().take_while(|&&c| c == Some(Class::SseUp)).count();
+ *i += vec_len;
+ Some(if vec_len == 1 {
+ match size.bytes() {
+ 4 => Reg::f32(),
+ _ => Reg::f64(),
+ }
+ } else {
+ Reg { kind: RegKind::Vector, size: Size::from_bytes(8) * (vec_len as u64) }
+ })
+ }
+ Some(c) => unreachable!("reg_component: unhandled class {:?}", c),
+ }
+}
+
+fn cast_target(cls: &[Option<Class>], size: Size) -> CastTarget {
+ let mut i = 0;
+ let lo = reg_component(cls, &mut i, size).unwrap();
+ let offset = Size::from_bytes(8) * (i as u64);
+ let mut target = CastTarget::from(lo);
+ if size > offset {
+ if let Some(hi) = reg_component(cls, &mut i, size - offset) {
+ target = CastTarget::pair(lo, hi);
+ }
+ }
+ assert_eq!(reg_component(cls, &mut i, Size::ZERO), None);
+ target
+}
+
+const MAX_INT_REGS: usize = 6; // RDI, RSI, RDX, RCX, R8, R9
+const MAX_SSE_REGS: usize = 8; // XMM0-7
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ let mut int_regs = MAX_INT_REGS;
+ let mut sse_regs = MAX_SSE_REGS;
+
+ let mut x86_64_arg_or_ret = |arg: &mut ArgAbi<'a, Ty>, is_arg: bool| {
+ let mut cls_or_mem = classify_arg(cx, arg);
+
+ if is_arg {
+ if let Ok(cls) = cls_or_mem {
+ let mut needed_int = 0;
+ let mut needed_sse = 0;
+ for c in cls {
+ match c {
+ Some(Class::Int) => needed_int += 1,
+ Some(Class::Sse) => needed_sse += 1,
+ _ => {}
+ }
+ }
+ match (int_regs.checked_sub(needed_int), sse_regs.checked_sub(needed_sse)) {
+ (Some(left_int), Some(left_sse)) => {
+ int_regs = left_int;
+ sse_regs = left_sse;
+ }
+ _ => {
+ // Not enough registers for this argument, so it will be
+ // passed on the stack, but we only mark aggregates
+ // explicitly as indirect `byval` arguments, as LLVM will
+ // automatically put immediates on the stack itself.
+ if arg.layout.is_aggregate() {
+ cls_or_mem = Err(Memory);
+ }
+ }
+ }
+ }
+ }
+
+ match cls_or_mem {
+ Err(Memory) => {
+ if is_arg {
+ arg.make_indirect_byval();
+ } else {
+ // An `sret` return parameter occupies an integer register, so one less is available.
+ arg.make_indirect();
+ // NOTE(eddyb) return is handled first, so no registers
+ // should've been used yet.
+ assert_eq!(int_regs, MAX_INT_REGS);
+ int_regs -= 1;
+ }
+ }
+ Ok(ref cls) => {
+ // split into sized chunks passed individually
+ if arg.layout.is_aggregate() {
+ let size = arg.layout.size;
+ arg.cast_to(cast_target(cls, size))
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+ }
+ }
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ x86_64_arg_or_ret(&mut fn_abi.ret, false);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ x86_64_arg_or_ret(arg, true);
+ }
+}
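
As a companion to the classifier above, here is a standalone sketch (not rustc code) of the eightbyte merge rule: `Int` is "smaller" than `Sse` in the enum ordering, so mixing an integer into an eightbyte demotes it to a general-purpose register. The struct below is hypothetical:

    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
    enum Class {
        Int,
        Sse,
    }

    // Same rule as `cls.map_or(c, |old| old.min(c))` above.
    fn merge(old: Option<Class>, new: Class) -> Option<Class> {
        Some(old.map_or(new, |old| old.min(new)))
    }

    fn main() {
        // Hypothetical struct { float, int }: both fields land in eightbyte 0.
        let mut eightbyte: Option<Class> = None;
        eightbyte = merge(eightbyte, Class::Sse); // the float at offset 0
        eightbyte = merge(eightbyte, Class::Int); // the int at offset 4
        assert_eq!(eightbyte, Some(Class::Int));  // passed in a GPR, not an XMM
    }
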
diff --git a/compiler/rustc_target/src/abi/call/x86_win64.rs b/compiler/rustc_target/src/abi/call/x86_win64.rs
new file mode 100644
index 000000000..2aad641b1
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/x86_win64.rs
@@ -0,0 +1,40 @@
+use crate::abi::call::{ArgAbi, FnAbi, Reg};
+use crate::abi::Abi;
+
+// Win64 ABI: https://docs.microsoft.com/en-us/cpp/build/parameter-passing
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ let fixup = |a: &mut ArgAbi<'_, Ty>| {
+ match a.layout.abi {
+ Abi::Uninhabited => {}
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => match a.layout.size.bits() {
+ 8 => a.cast_to(Reg::i8()),
+ 16 => a.cast_to(Reg::i16()),
+ 32 => a.cast_to(Reg::i32()),
+ 64 => a.cast_to(Reg::i64()),
+ _ => a.make_indirect(),
+ },
+ Abi::Vector { .. } => {
+ // FIXME(eddyb) there should be a size cap here
+ // (probably what clang calls "illegal vectors").
+ }
+ Abi::Scalar(_) => {
+ if a.layout.size.bytes() > 8 {
+ a.make_indirect();
+ } else {
+ a.extend_integer_width_to(32);
+ }
+ }
+ }
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ fixup(&mut fn_abi.ret);
+ }
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ fixup(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
new file mode 100644
index 000000000..92ce4d91d
--- /dev/null
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -0,0 +1,1558 @@
+pub use Integer::*;
+pub use Primitive::*;
+
+use crate::json::{Json, ToJson};
+use crate::spec::Target;
+
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+use std::iter::Step;
+use std::num::NonZeroUsize;
+use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
+use std::str::FromStr;
+
+use rustc_data_structures::intern::Interned;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_macros::HashStable_Generic;
+
+pub mod call;
+
+/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
+/// for a target, which contains everything needed to compute layouts.
+pub struct TargetDataLayout {
+ pub endian: Endian,
+ pub i1_align: AbiAndPrefAlign,
+ pub i8_align: AbiAndPrefAlign,
+ pub i16_align: AbiAndPrefAlign,
+ pub i32_align: AbiAndPrefAlign,
+ pub i64_align: AbiAndPrefAlign,
+ pub i128_align: AbiAndPrefAlign,
+ pub f32_align: AbiAndPrefAlign,
+ pub f64_align: AbiAndPrefAlign,
+ pub pointer_size: Size,
+ pub pointer_align: AbiAndPrefAlign,
+ pub aggregate_align: AbiAndPrefAlign,
+
+ /// Alignments for vector types.
+ pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
+
+ pub instruction_address_space: AddressSpace,
+
+ /// Minimum size of #[repr(C)] enums (defaults to `I32`, i.e. 32 bits).
+ pub c_enum_min_size: Integer,
+}
+
+impl Default for TargetDataLayout {
+ /// Creates an instance of `TargetDataLayout`.
+ fn default() -> TargetDataLayout {
+ let align = |bits| Align::from_bits(bits).unwrap();
+ TargetDataLayout {
+ endian: Endian::Big,
+ i1_align: AbiAndPrefAlign::new(align(8)),
+ i8_align: AbiAndPrefAlign::new(align(8)),
+ i16_align: AbiAndPrefAlign::new(align(16)),
+ i32_align: AbiAndPrefAlign::new(align(32)),
+ i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
+ i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
+ f32_align: AbiAndPrefAlign::new(align(32)),
+ f64_align: AbiAndPrefAlign::new(align(64)),
+ pointer_size: Size::from_bits(64),
+ pointer_align: AbiAndPrefAlign::new(align(64)),
+ aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
+ vector_align: vec![
+ (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
+ (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
+ ],
+ instruction_address_space: AddressSpace::DATA,
+ c_enum_min_size: Integer::I32,
+ }
+ }
+}
+
+impl TargetDataLayout {
+ pub fn parse(target: &Target) -> Result<TargetDataLayout, String> {
+ // Parse an address space index from a string.
+ let parse_address_space = |s: &str, cause: &str| {
+ s.parse::<u32>().map(AddressSpace).map_err(|err| {
+ format!("invalid address space `{}` for `{}` in \"data-layout\": {}", s, cause, err)
+ })
+ };
+
+ // Parse a bit count from a string.
+ let parse_bits = |s: &str, kind: &str, cause: &str| {
+ s.parse::<u64>().map_err(|err| {
+ format!("invalid {} `{}` for `{}` in \"data-layout\": {}", kind, s, cause, err)
+ })
+ };
+
+ // Parse a size string.
+ let size = |s: &str, cause: &str| parse_bits(s, "size", cause).map(Size::from_bits);
+
+ // Parse an alignment string.
+ let align = |s: &[&str], cause: &str| {
+ if s.is_empty() {
+ return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
+ }
+ let align_from_bits = |bits| {
+ Align::from_bits(bits).map_err(|err| {
+ format!("invalid alignment for `{}` in \"data-layout\": {}", cause, err)
+ })
+ };
+ let abi = parse_bits(s[0], "alignment", cause)?;
+ let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
+ Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
+ };
+
+ let mut dl = TargetDataLayout::default();
+ let mut i128_align_src = 64;
+ for spec in target.data_layout.split('-') {
+ let spec_parts = spec.split(':').collect::<Vec<_>>();
+
+ match &*spec_parts {
+ ["e"] => dl.endian = Endian::Little,
+ ["E"] => dl.endian = Endian::Big,
+ [p] if p.starts_with('P') => {
+ dl.instruction_address_space = parse_address_space(&p[1..], "P")?
+ }
+ ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
+ ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
+ ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
+ [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
+ dl.pointer_size = size(s, p)?;
+ dl.pointer_align = align(a, p)?;
+ }
+ [s, ref a @ ..] if s.starts_with('i') => {
+ let Ok(bits) = s[1..].parse::<u64>() else {
+ size(&s[1..], "i")?; // For the user error.
+ continue;
+ };
+ let a = align(a, s)?;
+ match bits {
+ 1 => dl.i1_align = a,
+ 8 => dl.i8_align = a,
+ 16 => dl.i16_align = a,
+ 32 => dl.i32_align = a,
+ 64 => dl.i64_align = a,
+ _ => {}
+ }
+ if bits >= i128_align_src && bits <= 128 {
+ // Default alignment for i128 is decided by taking the alignment of
+ // largest-sized i{64..=128}.
+ i128_align_src = bits;
+ dl.i128_align = a;
+ }
+ }
+ [s, ref a @ ..] if s.starts_with('v') => {
+ let v_size = size(&s[1..], "v")?;
+ let a = align(a, s)?;
+ if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
+ v.1 = a;
+ continue;
+ }
+ // No existing entry, add a new one.
+ dl.vector_align.push((v_size, a));
+ }
+ _ => {} // Ignore everything else.
+ }
+ }
+
+ // Perform consistency checks against the Target information.
+ if dl.endian != target.endian {
+ return Err(format!(
+ "inconsistent target specification: \"data-layout\" claims \
+ architecture is {}-endian, while \"target-endian\" is `{}`",
+ dl.endian.as_str(),
+ target.endian.as_str(),
+ ));
+ }
+
+ let target_pointer_width: u64 = target.pointer_width.into();
+ if dl.pointer_size.bits() != target_pointer_width {
+ return Err(format!(
+ "inconsistent target specification: \"data-layout\" claims \
+ pointers are {}-bit, while \"target-pointer-width\" is `{}`",
+ dl.pointer_size.bits(),
+ target.pointer_width
+ ));
+ }
+
+ dl.c_enum_min_size = Integer::from_size(Size::from_bits(target.c_enum_min_bits))?;
+
+ Ok(dl)
+ }
+
+ /// Returns exclusive upper bound on object size.
+ ///
+ /// The theoretical maximum object size is defined as the maximum positive `isize` value.
+ /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
+ /// index every address within an object along with one byte past the end, along with allowing
+ /// `isize` to store the difference between any two pointers into an object.
+ ///
+ /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
+ /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
+ /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
+ /// address space on 64-bit ARMv8 and x86_64.
+ #[inline]
+ pub fn obj_size_bound(&self) -> u64 {
+ match self.pointer_size.bits() {
+ 16 => 1 << 15,
+ 32 => 1 << 31,
+ 64 => 1 << 47,
+ bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
+ }
+ }
+
+ #[inline]
+ pub fn ptr_sized_integer(&self) -> Integer {
+ match self.pointer_size.bits() {
+ 16 => I16,
+ 32 => I32,
+ 64 => I64,
+ bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
+ }
+ }
+
+ #[inline]
+ pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
+ for &(size, align) in &self.vector_align {
+ if size == vec_size {
+ return align;
+ }
+ }
+ // Default to natural alignment, which is what LLVM does.
+ // That is, use the size, rounded up to a power of 2.
+ AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
+ }
+}
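
To make the `parse` loop above concrete: it walks dash-separated specs and splits each on `:`. Below is a standalone sketch (not the real parser) over a hypothetical, heavily abbreviated data-layout string, handling only the endianness and `i64`-alignment cases:

    fn main() {
        let data_layout = "e-i64:64-i128:128"; // hypothetical, abbreviated
        let mut endian_little = false;
        let mut i64_abi_align_bits = 32u64; // the default above is abi 32, pref 64

        for spec in data_layout.split('-') {
            let parts: Vec<&str> = spec.split(':').collect();
            match parts.as_slice() {
                ["e"] => endian_little = true,
                [s, a, ..] if s.starts_with('i') => {
                    let bits: u64 = s[1..].parse().unwrap();
                    let align: u64 = a.parse().unwrap();
                    if bits == 64 {
                        i64_abi_align_bits = align;
                    }
                }
                _ => {} // everything else is ignored, as in the real parser
            }
        }

        assert!(endian_little);
        assert_eq!(i64_abi_align_bits, 64);
    }
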
+
+pub trait HasDataLayout {
+ fn data_layout(&self) -> &TargetDataLayout;
+}
+
+impl HasDataLayout for TargetDataLayout {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ self
+ }
+}
+
+/// Endianness of the target, which must match cfg(target-endian).
+#[derive(Copy, Clone, PartialEq)]
+pub enum Endian {
+ Little,
+ Big,
+}
+
+impl Endian {
+ pub fn as_str(&self) -> &'static str {
+ match self {
+ Self::Little => "little",
+ Self::Big => "big",
+ }
+ }
+}
+
+impl fmt::Debug for Endian {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(self.as_str())
+ }
+}
+
+impl FromStr for Endian {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "little" => Ok(Self::Little),
+ "big" => Ok(Self::Big),
+ _ => Err(format!(r#"unknown endian: "{}""#, s)),
+ }
+ }
+}
+
+impl ToJson for Endian {
+ fn to_json(&self) -> Json {
+ self.as_str().to_json()
+ }
+}
+
+/// Size of a type in bytes.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
+#[derive(HashStable_Generic)]
+pub struct Size {
+ raw: u64,
+}
+
+// This is debug-printed a lot in larger structs, don't waste too much space there
+impl fmt::Debug for Size {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Size({} bytes)", self.bytes())
+ }
+}
+
+impl Size {
+ pub const ZERO: Size = Size { raw: 0 };
+
+ /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
+ /// not a multiple of 8.
+ pub fn from_bits(bits: impl TryInto<u64>) -> Size {
+ let bits = bits.try_into().ok().unwrap();
+ // Avoid potential overflow from `bits + 7`.
+ Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
+ }
+
+ #[inline]
+ pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
+ let bytes: u64 = bytes.try_into().ok().unwrap();
+ Size { raw: bytes }
+ }
+
+ #[inline]
+ pub fn bytes(self) -> u64 {
+ self.raw
+ }
+
+ #[inline]
+ pub fn bytes_usize(self) -> usize {
+ self.bytes().try_into().unwrap()
+ }
+
+ #[inline]
+ pub fn bits(self) -> u64 {
+ #[cold]
+ fn overflow(bytes: u64) -> ! {
+ panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
+ }
+
+ self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
+ }
+
+ #[inline]
+ pub fn bits_usize(self) -> usize {
+ self.bits().try_into().unwrap()
+ }
+
+ #[inline]
+ pub fn align_to(self, align: Align) -> Size {
+ let mask = align.bytes() - 1;
+ Size::from_bytes((self.bytes() + mask) & !mask)
+ }
+
+ #[inline]
+ pub fn is_aligned(self, align: Align) -> bool {
+ let mask = align.bytes() - 1;
+ self.bytes() & mask == 0
+ }
+
+ #[inline]
+ pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
+ let dl = cx.data_layout();
+
+ let bytes = self.bytes().checked_add(offset.bytes())?;
+
+ if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
+ }
+
+ #[inline]
+ pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
+ let dl = cx.data_layout();
+
+ let bytes = self.bytes().checked_mul(count)?;
+ if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
+ }
+
+ /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
+ /// (i.e., if it is negative, fill with 1's on the left).
+ #[inline]
+ pub fn sign_extend(self, value: u128) -> u128 {
+ let size = self.bits();
+ if size == 0 {
+ // Truncated until nothing is left.
+ return 0;
+ }
+ // Sign-extend it.
+ let shift = 128 - size;
+ // Shift the unsigned value to the left, then shift back to the right as signed
+ // (essentially fills with sign bit on the left).
+ (((value << shift) as i128) >> shift) as u128
+ }
+
+ /// Truncates `value` to `self` bits.
+ #[inline]
+ pub fn truncate(self, value: u128) -> u128 {
+ let size = self.bits();
+ if size == 0 {
+ // Truncated until nothing is left.
+ return 0;
+ }
+ let shift = 128 - size;
+ // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
+ (value << shift) >> shift
+ }
+
+ #[inline]
+ pub fn signed_int_min(&self) -> i128 {
+ self.sign_extend(1_u128 << (self.bits() - 1)) as i128
+ }
+
+ #[inline]
+ pub fn signed_int_max(&self) -> i128 {
+ i128::MAX >> (128 - self.bits())
+ }
+
+ #[inline]
+ pub fn unsigned_int_max(&self) -> u128 {
+ u128::MAX >> (128 - self.bits())
+ }
+}
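
The shift tricks in `sign_extend` and `truncate` above are easy to sanity-check by hand. The same arithmetic as standalone functions, exercised on a hypothetical 8-bit size:

    fn truncate(bits: u64, value: u128) -> u128 {
        if bits == 0 {
            return 0;
        }
        let shift = 128 - bits;
        (value << shift) >> shift
    }

    fn sign_extend(bits: u64, value: u128) -> u128 {
        if bits == 0 {
            return 0;
        }
        let shift = 128 - bits;
        // Shift left, then shift back as a signed value to copy the sign bit.
        (((value << shift) as i128) >> shift) as u128
    }

    fn main() {
        // For an 8-bit size: 0x1FF truncates to 0xFF, and 0xFF sign-extends to -1.
        assert_eq!(truncate(8, 0x1FF), 0xFF);
        assert_eq!(sign_extend(8, 0xFF) as i128, -1);
    }
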
+
+// Panicking addition, subtraction and multiplication for convenience.
+// Avoid during layout computation, return `LayoutError` instead.
+
+impl Add for Size {
+ type Output = Size;
+ #[inline]
+ fn add(self, other: Size) -> Size {
+ Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
+ panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
+ }))
+ }
+}
+
+impl Sub for Size {
+ type Output = Size;
+ #[inline]
+ fn sub(self, other: Size) -> Size {
+ Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
+ panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
+ }))
+ }
+}
+
+impl Mul<Size> for u64 {
+ type Output = Size;
+ #[inline]
+ fn mul(self, size: Size) -> Size {
+ size * self
+ }
+}
+
+impl Mul<u64> for Size {
+ type Output = Size;
+ #[inline]
+ fn mul(self, count: u64) -> Size {
+ match self.bytes().checked_mul(count) {
+ Some(bytes) => Size::from_bytes(bytes),
+ None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
+ }
+ }
+}
+
+impl AddAssign for Size {
+ #[inline]
+ fn add_assign(&mut self, other: Size) {
+ *self = *self + other;
+ }
+}
+
+impl Step for Size {
+ #[inline]
+ fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+ u64::steps_between(&start.bytes(), &end.bytes())
+ }
+
+ #[inline]
+ fn forward_checked(start: Self, count: usize) -> Option<Self> {
+ u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
+ }
+
+ #[inline]
+ fn forward(start: Self, count: usize) -> Self {
+ Self::from_bytes(u64::forward(start.bytes(), count))
+ }
+
+ #[inline]
+ unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
+ Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
+ }
+
+ #[inline]
+ fn backward_checked(start: Self, count: usize) -> Option<Self> {
+ u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
+ }
+
+ #[inline]
+ fn backward(start: Self, count: usize) -> Self {
+ Self::from_bytes(u64::backward(start.bytes(), count))
+ }
+
+ #[inline]
+ unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
+ Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
+ }
+}
+
+/// Alignment of a type in bytes (always a power of two).
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
+#[derive(HashStable_Generic)]
+pub struct Align {
+ pow2: u8,
+}
+
+// This is debug-printed a lot in larger structs, don't waste too much space there
+impl fmt::Debug for Align {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Align({} bytes)", self.bytes())
+ }
+}
+
+impl Align {
+ pub const ONE: Align = Align { pow2: 0 };
+ pub const MAX: Align = Align { pow2: 29 };
+
+ #[inline]
+ pub fn from_bits(bits: u64) -> Result<Align, String> {
+ Align::from_bytes(Size::from_bits(bits).bytes())
+ }
+
+ #[inline]
+ pub fn from_bytes(align: u64) -> Result<Align, String> {
+ // Treat an alignment of 0 bytes like 1-byte alignment.
+ if align == 0 {
+ return Ok(Align::ONE);
+ }
+
+ #[cold]
+ fn not_power_of_2(align: u64) -> String {
+ format!("`{}` is not a power of 2", align)
+ }
+
+ #[cold]
+ fn too_large(align: u64) -> String {
+ format!("`{}` is too large", align)
+ }
+
+ let mut bytes = align;
+ let mut pow2: u8 = 0;
+ while (bytes & 1) == 0 {
+ pow2 += 1;
+ bytes >>= 1;
+ }
+ if bytes != 1 {
+ return Err(not_power_of_2(align));
+ }
+ if pow2 > Self::MAX.pow2 {
+ return Err(too_large(align));
+ }
+
+ Ok(Align { pow2 })
+ }
+
+ #[inline]
+ pub fn bytes(self) -> u64 {
+ 1 << self.pow2
+ }
+
+ #[inline]
+ pub fn bits(self) -> u64 {
+ self.bytes() * 8
+ }
+
+ /// Computes the best alignment possible for the given offset
+ /// (the largest power of two that the offset is a multiple of).
+ ///
+ /// N.B., for an offset of `0`, this happens to return `2^64`.
+ #[inline]
+ pub fn max_for_offset(offset: Size) -> Align {
+ Align { pow2: offset.bytes().trailing_zeros() as u8 }
+ }
+
+ /// Lower the alignment, if necessary, such that the given offset
+ /// is aligned to it (the offset is a multiple of the alignment).
+ #[inline]
+ pub fn restrict_for_offset(self, offset: Size) -> Align {
+ self.min(Align::max_for_offset(offset))
+ }
+}
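
Two quick standalone checks (plain integers, not the `Align` type) of the alignment helpers above: the power-of-two decomposition behind `from_bytes` and the trailing-zeros rule behind `max_for_offset`:

    fn main() {
        // `from_bytes(16)`: 16 = 1 << 4, so the stored exponent would be 4.
        let align: u64 = 16;
        assert!(align.is_power_of_two()); // otherwise `from_bytes` errors out
        assert_eq!(1u64 << align.trailing_zeros(), 16);

        // `max_for_offset(24)`: 24 = 0b11000 has three trailing zeros,
        // so the best possible alignment for that offset is 8 bytes.
        let offset: u64 = 24;
        assert_eq!(1u64 << offset.trailing_zeros(), 8);
    }
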
+
+/// A pair of alignments, ABI-mandated and preferred.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+#[derive(HashStable_Generic)]
+pub struct AbiAndPrefAlign {
+ pub abi: Align,
+ pub pref: Align,
+}
+
+impl AbiAndPrefAlign {
+ #[inline]
+ pub fn new(align: Align) -> AbiAndPrefAlign {
+ AbiAndPrefAlign { abi: align, pref: align }
+ }
+
+ #[inline]
+ pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
+ AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
+ }
+
+ #[inline]
+ pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
+ AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
+ }
+}
+
+/// Integers, also used for enum discriminants.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
+pub enum Integer {
+ I8,
+ I16,
+ I32,
+ I64,
+ I128,
+}
+
+impl Integer {
+ #[inline]
+ pub fn size(self) -> Size {
+ match self {
+ I8 => Size::from_bytes(1),
+ I16 => Size::from_bytes(2),
+ I32 => Size::from_bytes(4),
+ I64 => Size::from_bytes(8),
+ I128 => Size::from_bytes(16),
+ }
+ }
+
+ pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
+ let dl = cx.data_layout();
+
+ match self {
+ I8 => dl.i8_align,
+ I16 => dl.i16_align,
+ I32 => dl.i32_align,
+ I64 => dl.i64_align,
+ I128 => dl.i128_align,
+ }
+ }
+
+ /// Finds the smallest Integer type which can represent the signed value.
+ #[inline]
+ pub fn fit_signed(x: i128) -> Integer {
+ match x {
+ -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
+ -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
+ -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
+ -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
+ _ => I128,
+ }
+ }
+
+ /// Finds the smallest Integer type which can represent the unsigned value.
+ #[inline]
+ pub fn fit_unsigned(x: u128) -> Integer {
+ match x {
+ 0..=0x0000_0000_0000_00ff => I8,
+ 0..=0x0000_0000_0000_ffff => I16,
+ 0..=0x0000_0000_ffff_ffff => I32,
+ 0..=0xffff_ffff_ffff_ffff => I64,
+ _ => I128,
+ }
+ }
+
+ /// Finds the smallest integer with the given alignment.
+ pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
+ let dl = cx.data_layout();
+
+ for candidate in [I8, I16, I32, I64, I128] {
+ if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
+ return Some(candidate);
+ }
+ }
+ None
+ }
+
+ /// Find the largest integer with the given alignment or less.
+ pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
+ let dl = cx.data_layout();
+
+ // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
+ for candidate in [I64, I32, I16] {
+ if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
+ return candidate;
+ }
+ }
+ I8
+ }
+
+ // FIXME(eddyb) consolidate this and other methods that find the appropriate
+ // `Integer` given some requirements.
+ #[inline]
+ fn from_size(size: Size) -> Result<Self, String> {
+ match size.bits() {
+ 8 => Ok(Integer::I8),
+ 16 => Ok(Integer::I16),
+ 32 => Ok(Integer::I32),
+ 64 => Ok(Integer::I64),
+ 128 => Ok(Integer::I128),
+ _ => Err(format!("rust does not support integers with {} bits", size.bits())),
+ }
+ }
+}
+
+/// Fundamental unit of memory access and layout.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum Primitive {
+ /// The `bool` is the signedness of the `Integer` type.
+ ///
+ /// One would think we would not care about such details this low down,
+ /// but some ABIs are described in terms of C types and ISAs where the
+ /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
+ /// a negative integer passed by zero-extension will appear positive in
+ /// the callee, and most operations on it will produce the wrong values.
+ Int(Integer, bool),
+ F32,
+ F64,
+ Pointer,
+}
+
+impl Primitive {
+ pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
+ let dl = cx.data_layout();
+
+ match self {
+ Int(i, _) => i.size(),
+ F32 => Size::from_bits(32),
+ F64 => Size::from_bits(64),
+ Pointer => dl.pointer_size,
+ }
+ }
+
+ pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
+ let dl = cx.data_layout();
+
+ match self {
+ Int(i, _) => i.align(dl),
+ F32 => dl.f32_align,
+ F64 => dl.f64_align,
+ Pointer => dl.pointer_align,
+ }
+ }
+
+ // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
+ #[inline]
+ pub fn is_float(self) -> bool {
+ matches!(self, F32 | F64)
+ }
+
+ // FIXME(eddyb) remove, it's completely unused.
+ #[inline]
+ pub fn is_int(self) -> bool {
+ matches!(self, Int(..))
+ }
+
+ #[inline]
+ pub fn is_ptr(self) -> bool {
+ matches!(self, Pointer)
+ }
+}
+
+/// Inclusive wrap-around range of valid values, that is, if
+/// start > end, it represents `start..=MAX`,
+/// followed by `0..=end`.
+///
+/// That is, for an i8 primitive, a range of `254..=2` means the following
+/// sequence:
+///
+/// 254 (-2), 255 (-1), 0, 1, 2
+///
+/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+#[derive(HashStable_Generic)]
+pub struct WrappingRange {
+ pub start: u128,
+ pub end: u128,
+}
+
+impl WrappingRange {
+ pub fn full(size: Size) -> Self {
+ Self { start: 0, end: size.unsigned_int_max() }
+ }
+
+ /// Returns `true` if `v` is contained in the range.
+ #[inline(always)]
+ pub fn contains(&self, v: u128) -> bool {
+ if self.start <= self.end {
+ self.start <= v && v <= self.end
+ } else {
+ self.start <= v || v <= self.end
+ }
+ }
+
+ /// Returns `self` with replaced `start`
+ #[inline(always)]
+ pub fn with_start(mut self, start: u128) -> Self {
+ self.start = start;
+ self
+ }
+
+ /// Returns `self` with replaced `end`
+ #[inline(always)]
+ pub fn with_end(mut self, end: u128) -> Self {
+ self.end = end;
+ self
+ }
+
+ /// Returns `true` if `size` completely fills the range.
+ #[inline]
+ pub fn is_full_for(&self, size: Size) -> bool {
+ let max_value = size.unsigned_int_max();
+ debug_assert!(self.start <= max_value && self.end <= max_value);
+ self.start == (self.end.wrapping_add(1) & max_value)
+ }
+}
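
The `254..=2` example from the doc comment above can be checked with the same containment test, written out standalone:

    fn contains(start: u128, end: u128, v: u128) -> bool {
        // Same branch as `WrappingRange::contains`: a wrapped range covers
        // `start..=MAX` followed by `0..=end`.
        if start <= end { start <= v && v <= end } else { start <= v || v <= end }
    }

    fn main() {
        // For the i8-sized range 254..=2: 255, 0 and 2 are in; 5 and 100 are not.
        assert!(contains(254, 2, 255));
        assert!(contains(254, 2, 0));
        assert!(contains(254, 2, 2));
        assert!(!contains(254, 2, 5));
        assert!(!contains(254, 2, 100));
    }
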
+
+impl fmt::Debug for WrappingRange {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.start > self.end {
+ write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
+ } else {
+ write!(fmt, "{}..={}", self.start, self.end)?;
+ }
+ Ok(())
+ }
+}
+
+/// Information about one scalar component of a Rust type.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+#[derive(HashStable_Generic)]
+pub enum Scalar {
+ Initialized {
+ value: Primitive,
+
+ // FIXME(eddyb) always use the shortest range, e.g., by finding
+ // the largest space between two consecutive valid values and
+ // taking everything else as the (shortest) valid range.
+ valid_range: WrappingRange,
+ },
+ Union {
+ /// Even for unions, we need to use the correct registers for the kind of
+ /// values inside the union, so we keep the `Primitive` type around. We
+ /// also use it to compute the size of the scalar.
+ /// However, unions never have niches and even allow undef,
+ /// so there is no `valid_range`.
+ value: Primitive,
+ },
+}
+
+impl Scalar {
+ #[inline]
+ pub fn is_bool(&self) -> bool {
+ matches!(
+ self,
+ Scalar::Initialized {
+ value: Int(I8, false),
+ valid_range: WrappingRange { start: 0, end: 1 }
+ }
+ )
+ }
+
+ /// Get the primitive representation of this type, ignoring the valid range and whether the
+ /// value is allowed to be undefined (due to being a union).
+ pub fn primitive(&self) -> Primitive {
+ match *self {
+ Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
+ }
+ }
+
+ pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
+ self.primitive().align(cx)
+ }
+
+ pub fn size(self, cx: &impl HasDataLayout) -> Size {
+ self.primitive().size(cx)
+ }
+
+ #[inline]
+ pub fn to_union(&self) -> Self {
+ Self::Union { value: self.primitive() }
+ }
+
+ #[inline]
+ pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
+ match *self {
+ Scalar::Initialized { valid_range, .. } => valid_range,
+ Scalar::Union { value } => WrappingRange::full(value.size(cx)),
+ }
+ }
+
+ #[inline]
+ /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
+ pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
+ match self {
+ Scalar::Initialized { valid_range, .. } => valid_range,
+ Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
+ }
+ }
+
+ /// Returns `true` if all possible numbers are valid, i.e., `valid_range` covers the whole layout.
+ #[inline]
+ pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
+ match *self {
+ Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
+ Scalar::Union { .. } => true,
+ }
+ }
+
+ /// Returns `true` if this type can be left uninit.
+ #[inline]
+ pub fn is_uninit_valid(&self) -> bool {
+ match *self {
+ Scalar::Initialized { .. } => false,
+ Scalar::Union { .. } => true,
+ }
+ }
+}
+
+/// Describes how the fields of a type are located in memory.
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum FieldsShape {
+ /// Scalar primitives and `!`, which never have fields.
+ Primitive,
+
+ /// All fields start at no offset. The `usize` is the field count.
+ Union(NonZeroUsize),
+
+ /// Array/vector-like placement, with all fields of identical types.
+ Array { stride: Size, count: u64 },
+
+ /// Struct-like placement, with precomputed offsets.
+ ///
+ /// Fields are guaranteed to not overlap, but note that gaps
+ /// before, between and after all the fields are NOT always
+ /// padding, and as such their contents may not be discarded.
+ /// For example, enum variants leave a gap at the start,
+ /// where the discriminant field in the enum layout goes.
+ Arbitrary {
+ /// Offsets for the first byte of each field,
+ /// ordered to match the source definition order.
+ /// This vector does not go in increasing order.
+ // FIXME(eddyb) use small vector optimization for the common case.
+ offsets: Vec<Size>,
+
+ /// Maps source order field indices to memory order indices,
+ /// depending on how the fields were reordered (if at all).
+ /// This is a permutation, with both the source order and the
+ /// memory order using the same (0..n) index ranges.
+ ///
+ /// Note that during computation of `memory_index`, sometimes
+ /// it is easier to operate on the inverse mapping (that is,
+ /// from memory order to source order), and that is usually
+ /// named `inverse_memory_index`.
+ ///
+ // FIXME(eddyb) build a better abstraction for permutations, if possible.
+ // FIXME(camlorn) also consider small vector optimization here.
+ memory_index: Vec<u32>,
+ },
+}
+
+impl FieldsShape {
+ #[inline]
+ pub fn count(&self) -> usize {
+ match *self {
+ FieldsShape::Primitive => 0,
+ FieldsShape::Union(count) => count.get(),
+ FieldsShape::Array { count, .. } => count.try_into().unwrap(),
+ FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
+ }
+ }
+
+ #[inline]
+ pub fn offset(&self, i: usize) -> Size {
+ match *self {
+ FieldsShape::Primitive => {
+ unreachable!("FieldsShape::offset: `Primitive`s have no fields")
+ }
+ FieldsShape::Union(count) => {
+ assert!(
+ i < count.get(),
+ "tried to access field {} of union with {} fields",
+ i,
+ count
+ );
+ Size::ZERO
+ }
+ FieldsShape::Array { stride, count } => {
+ let i = u64::try_from(i).unwrap();
+ assert!(i < count);
+ stride * i
+ }
+ FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
+ }
+ }
+
+ #[inline]
+ pub fn memory_index(&self, i: usize) -> usize {
+ match *self {
+ FieldsShape::Primitive => {
+ unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
+ }
+ FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
+ FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
+ }
+ }
+
+ /// Gets source indices of the fields by increasing offsets.
+ #[inline]
+ pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
+ let mut inverse_small = [0u8; 64];
+ let mut inverse_big = vec![];
+ let use_small = self.count() <= inverse_small.len();
+
+ // We have to write this logic twice in order to keep the array small.
+ if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
+ if use_small {
+ for i in 0..self.count() {
+ inverse_small[memory_index[i] as usize] = i as u8;
+ }
+ } else {
+ inverse_big = vec![0; self.count()];
+ for i in 0..self.count() {
+ inverse_big[memory_index[i] as usize] = i as u32;
+ }
+ }
+ }
+
+ (0..self.count()).map(move |i| match *self {
+ FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
+ FieldsShape::Arbitrary { .. } => {
+ if use_small {
+ inverse_small[i] as usize
+ } else {
+ inverse_big[i] as usize
+ }
+ }
+ })
+ }
+}
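
A standalone sketch (hypothetical field order, plain vectors) of the permutation inversion performed by `index_by_increasing_offset` above:

    fn main() {
        // Hypothetical three-field struct: `memory_index` maps source order to
        // memory order; field 0 is stored third, field 1 first, field 2 second.
        let memory_index = [2u32, 0, 1];

        // Invert it, as the iterator above does, to walk fields by increasing offset.
        let mut inverse = vec![0usize; memory_index.len()];
        for (source, &mem) in memory_index.iter().enumerate() {
            inverse[mem as usize] = source;
        }

        // Memory order: source field 1 comes first, then field 2, then field 0.
        assert_eq!(inverse, vec![1, 2, 0]);
    }
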
+
+/// An identifier that specifies the address space that some operation
+/// should operate on. Special address spaces have an effect on code generation,
+/// depending on the target and the address spaces it implements.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct AddressSpace(pub u32);
+
+impl AddressSpace {
+ /// The default address space, corresponding to data space.
+ pub const DATA: Self = AddressSpace(0);
+}
+
+/// Describes how values of the type are passed by target ABIs,
+/// in terms of categories of C types there are ABI rules for.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum Abi {
+ Uninhabited,
+ Scalar(Scalar),
+ ScalarPair(Scalar, Scalar),
+ Vector {
+ element: Scalar,
+ count: u64,
+ },
+ Aggregate {
+ /// If true, the size is exact, otherwise it's only a lower bound.
+ sized: bool,
+ },
+}
+
+impl Abi {
+ /// Returns `true` if the layout corresponds to an unsized type.
+ #[inline]
+ pub fn is_unsized(&self) -> bool {
+ match *self {
+ Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
+ Abi::Aggregate { sized } => !sized,
+ }
+ }
+
+ /// Returns `true` if this is a single signed integer scalar
+ #[inline]
+ pub fn is_signed(&self) -> bool {
+ match self {
+ Abi::Scalar(scal) => match scal.primitive() {
+ Primitive::Int(_, signed) => signed,
+ _ => false,
+ },
+ _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
+ }
+ }
+
+ /// Returns `true` if this is an uninhabited type
+ #[inline]
+ pub fn is_uninhabited(&self) -> bool {
+ matches!(*self, Abi::Uninhabited)
+ }
+
+ /// Returns `true` if this is a scalar type
+ #[inline]
+ pub fn is_scalar(&self) -> bool {
+ matches!(*self, Abi::Scalar(_))
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct VariantIdx {
+ derive [HashStable_Generic]
+ }
+}
+
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum Variants<'a> {
+ /// Single enum variants, structs/tuples, unions, and all non-ADTs.
+ Single { index: VariantIdx },
+
+ /// Enum-likes with more than one inhabited variant: each variant comes with
+ /// a *discriminant* (usually the same as the variant index but the user can
+ /// assign explicit discriminant values). That discriminant is encoded
+ /// as a *tag* on the machine. The layout of each variant is
+ /// a struct, and they all have space reserved for the tag.
+ /// For enums, the tag is the sole field of the layout.
+ Multiple {
+ tag: Scalar,
+ tag_encoding: TagEncoding,
+ tag_field: usize,
+ variants: IndexVec<VariantIdx, Layout<'a>>,
+ },
+}
+
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum TagEncoding {
+ /// The tag directly stores the discriminant, but possibly with a smaller layout
+ /// (so converting the tag to the discriminant can require sign extension).
+ Direct,
+
+ /// Niche (values invalid for a type) encoding the discriminant:
+ /// Discriminant and variant index coincide.
+ /// The variant `dataful_variant` contains a niche at an arbitrary
+ /// offset (field `tag_field` of the enum), which for a variant with
+ /// discriminant `d` is set to
+ /// `(d - niche_variants.start).wrapping_add(niche_start)`.
+ ///
+ /// For example, `Option<(usize, &T)>` is represented such that
+ /// `None` has a null pointer for the second tuple field, and
+ /// `Some` is the identity function (with a non-null reference).
+ Niche {
+ dataful_variant: VariantIdx,
+ niche_variants: RangeInclusive<VariantIdx>,
+ niche_start: u128,
+ },
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct Niche {
+ pub offset: Size,
+ pub value: Primitive,
+ pub valid_range: WrappingRange,
+}
+
+impl Niche {
+ pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
+ let Scalar::Initialized { value, valid_range } = scalar else { return None };
+ let niche = Niche { offset, value, valid_range };
+ if niche.available(cx) > 0 { Some(niche) } else { None }
+ }
+
+ pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
+ let Self { value, valid_range: v, .. } = *self;
+ let size = value.size(cx);
+ assert!(size.bits() <= 128);
+ let max_value = size.unsigned_int_max();
+
+ // Find out how many values are outside the valid range.
+ let niche = v.end.wrapping_add(1)..v.start;
+ niche.end.wrapping_sub(niche.start) & max_value
+ }
+
+ pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
+ assert!(count > 0);
+
+ let Self { value, valid_range: v, .. } = *self;
+ let size = value.size(cx);
+ assert!(size.bits() <= 128);
+ let max_value = size.unsigned_int_max();
+
+ let niche = v.end.wrapping_add(1)..v.start;
+ let available = niche.end.wrapping_sub(niche.start) & max_value;
+ if count > available {
+ return None;
+ }
+
+ // Extend the range of valid values being reserved by moving either the `v.start` or the `v.end` bound.
+ // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
+ // This is accomplished by preferring enums with 2 variants (`count == 1`) and always taking the shortest path to niche zero.
+ // Having `None` in niche zero can enable some special optimizations.
+ //
+ // Bound selection criteria:
+ // 1. Select closest to zero given wrapping semantics.
+ // 2. Avoid moving past zero if possible.
+ //
+ // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
+ // If niche zero is already reserved, the selection of bounds is of little interest.
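+ //
+ // Worked example (illustrative): reserving `count == 1` from a `u8` niche
+ // with valid range `0..=1` (e.g. `bool`): `distance_end_zero` is `254`,
+ // `v.start <= distance_end_zero` holds, but `count > v.start`, so we take
+ // `move_end` and hand out niche value `2`, extending the valid range to
+ // `0..=2`.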
+ let move_start = |v: WrappingRange| {
+ let start = v.start.wrapping_sub(count) & max_value;
+ Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
+ };
+ let move_end = |v: WrappingRange| {
+ let start = v.end.wrapping_add(1) & max_value;
+ let end = v.end.wrapping_add(count) & max_value;
+ Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
+ };
+ let distance_end_zero = max_value - v.end;
+ if v.start > v.end {
+ // zero is unavailable because wrapping occurs
+ move_end(v)
+ } else if v.start <= distance_end_zero {
+ if count <= v.start {
+ move_start(v)
+ } else {
+ // moved past zero, use other bound
+ move_end(v)
+ }
+ } else {
+ let end = v.end.wrapping_add(count) & max_value;
+ let overshot_zero = (1..=v.end).contains(&end);
+ if overshot_zero {
+ // moved past zero, use other bound
+ move_start(v)
+ } else {
+ move_end(v)
+ }
+ }
+ }
+}
+
+#[derive(PartialEq, Eq, Hash, HashStable_Generic)]
+pub struct LayoutS<'a> {
+ /// Says where the fields are located within the layout.
+ pub fields: FieldsShape,
+
+ /// Encodes information about multi-variant layouts.
+ /// Even with `Multiple` variants, a layout still has its own fields! Those are then
+ /// shared between all variants. One of them will be the discriminant,
+ /// but e.g. generators can have more.
+ ///
+ /// To access all fields of this layout, both `fields` and the fields of the active variant
+ /// must be taken into account.
+ pub variants: Variants<'a>,
+
+ /// The `abi` defines how this data is passed between functions, and it defines
+ /// value restrictions via `valid_range`.
+ ///
+ /// Note that this is entirely orthogonal to the recursive structure defined by
+ /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
+ /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
+ /// have to be taken into account to find all fields of this layout.
+ pub abi: Abi,
+
+ /// The leaf scalar with the largest number of invalid values
+ /// (i.e. outside of its `valid_range`), if it exists.
+ pub largest_niche: Option<Niche>,
+
+ pub align: AbiAndPrefAlign,
+ pub size: Size,
+}
+
+impl<'a> LayoutS<'a> {
+ pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
+ let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
+ let size = scalar.size(cx);
+ let align = scalar.align(cx);
+ LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Primitive,
+ abi: Abi::Scalar(scalar),
+ largest_niche,
+ size,
+ align,
+ }
+ }
+}
+
+impl<'a> fmt::Debug for LayoutS<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // This is how `Layout` used to print before it became
+ // `Interned<LayoutS>`. We print it like this to avoid having to update
+ // expected output in a lot of tests.
+ let LayoutS { size, align, abi, fields, largest_niche, variants } = self;
+ f.debug_struct("Layout")
+ .field("size", size)
+ .field("align", align)
+ .field("abi", abi)
+ .field("fields", fields)
+ .field("largest_niche", largest_niche)
+ .field("variants", variants)
+ .finish()
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
+#[rustc_pass_by_value]
+pub struct Layout<'a>(pub Interned<'a, LayoutS<'a>>);
+
+impl<'a> fmt::Debug for Layout<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // See comment on `<LayoutS as Debug>::fmt` above.
+ self.0.0.fmt(f)
+ }
+}
+
+impl<'a> Layout<'a> {
+ pub fn fields(self) -> &'a FieldsShape {
+ &self.0.0.fields
+ }
+
+ pub fn variants(self) -> &'a Variants<'a> {
+ &self.0.0.variants
+ }
+
+ pub fn abi(self) -> Abi {
+ self.0.0.abi
+ }
+
+ pub fn largest_niche(self) -> Option<Niche> {
+ self.0.0.largest_niche
+ }
+
+ pub fn align(self) -> AbiAndPrefAlign {
+ self.0.0.align
+ }
+
+ pub fn size(self) -> Size {
+ self.0.0.size
+ }
+}
+
+/// The layout of a type, alongside the type itself.
+/// Provides various type traversal APIs (e.g., recursing into fields).
+///
+/// Note that the layout is NOT guaranteed to always be identical
+/// to that obtained from `layout_of(ty)`, as we need to produce
+/// layouts for which Rust types do not exist, such as enum variants
+/// or synthetic fields of enums (i.e., discriminants) and fat pointers.
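+ ///
+ /// A minimal usage sketch (assuming a context `cx` satisfying the relevant
+ /// `TyAbiInterface`/`HasDataLayout` bounds):
+ ///
+ /// ```ignore (illustrative)
+ /// for i in 0..ty_and_layout.fields.count() {
+ ///     let field = ty_and_layout.field(cx, i);
+ ///     // inspect `field.ty` and `field.layout` here
+ /// }
+ /// ```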
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable_Generic)]
+pub struct TyAndLayout<'a, Ty> {
+ pub ty: Ty,
+ pub layout: Layout<'a>,
+}
+
+impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
+ type Target = &'a LayoutS<'a>;
+ fn deref(&self) -> &&'a LayoutS<'a> {
+ &self.layout.0.0
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum PointerKind {
+ /// Most general case; we know of no restrictions to tell LLVM.
+ SharedMutable,
+
+ /// `&T` where `T` contains no `UnsafeCell`, is `dereferenceable`, `noalias` and `readonly`.
+ Frozen,
+
+ /// `&mut T` which is `dereferenceable` and `noalias` but not `readonly`.
+ UniqueBorrowed,
+
+ /// `&mut !Unpin`, which is `dereferenceable` but neither `noalias` nor `readonly`.
+ UniqueBorrowedPinned,
+
+ /// `Box<T>`, which is `noalias` (even on return types, unlike the above) but neither `readonly`
+ /// nor `dereferenceable`.
+ UniqueOwned,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct PointeeInfo {
+ pub size: Size,
+ pub align: Align,
+ pub safe: Option<PointerKind>,
+ pub address_space: AddressSpace,
+}
+
+ /// Used in `might_permit_raw_init` to indicate the kind of initialization
+ /// that is checked to be valid.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum InitKind {
+ Zero,
+ Uninit,
+}
+
+/// Trait that needs to be implemented by the higher-level type representation
+/// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
+pub trait TyAbiInterface<'a, C>: Sized {
+ fn ty_and_layout_for_variant(
+ this: TyAndLayout<'a, Self>,
+ cx: &C,
+ variant_index: VariantIdx,
+ ) -> TyAndLayout<'a, Self>;
+ fn ty_and_layout_field(this: TyAndLayout<'a, Self>, cx: &C, i: usize) -> TyAndLayout<'a, Self>;
+ fn ty_and_layout_pointee_info_at(
+ this: TyAndLayout<'a, Self>,
+ cx: &C,
+ offset: Size,
+ ) -> Option<PointeeInfo>;
+ fn is_adt(this: TyAndLayout<'a, Self>) -> bool;
+ fn is_never(this: TyAndLayout<'a, Self>) -> bool;
+ fn is_tuple(this: TyAndLayout<'a, Self>) -> bool;
+ fn is_unit(this: TyAndLayout<'a, Self>) -> bool;
+}
+
+impl<'a, Ty> TyAndLayout<'a, Ty> {
+ pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::ty_and_layout_for_variant(self, cx, variant_index)
+ }
+
+ pub fn field<C>(self, cx: &C, i: usize) -> Self
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::ty_and_layout_field(self, cx, i)
+ }
+
+ pub fn pointee_info_at<C>(self, cx: &C, offset: Size) -> Option<PointeeInfo>
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::ty_and_layout_pointee_info_at(self, cx, offset)
+ }
+
+ pub fn is_single_fp_element<C>(self, cx: &C) -> bool
+ where
+ Ty: TyAbiInterface<'a, C>,
+ C: HasDataLayout,
+ {
+ match self.abi {
+ Abi::Scalar(scalar) => scalar.primitive().is_float(),
+ Abi::Aggregate { .. } => {
+ if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
+ self.field(cx, 0).is_single_fp_element(cx)
+ } else {
+ false
+ }
+ }
+ _ => false,
+ }
+ }
+
+ pub fn is_adt<C>(self) -> bool
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::is_adt(self)
+ }
+
+ pub fn is_never<C>(self) -> bool
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::is_never(self)
+ }
+
+ pub fn is_tuple<C>(self) -> bool
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::is_tuple(self)
+ }
+
+ pub fn is_unit<C>(self) -> bool
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::is_unit(self)
+ }
+}
+
+impl<'a, Ty> TyAndLayout<'a, Ty> {
+ /// Returns `true` if the layout corresponds to an unsized type.
+ pub fn is_unsized(&self) -> bool {
+ self.abi.is_unsized()
+ }
+
+ /// Returns `true` if the type is a ZST and not unsized.
+ pub fn is_zst(&self) -> bool {
+ match self.abi {
+ Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
+ Abi::Uninhabited => self.size.bytes() == 0,
+ Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
+ }
+ }
+
+ /// Determines if this type permits "raw" initialization by just transmuting some
+ /// memory into an instance of `T`.
+ ///
+ /// `init_kind` indicates if the memory is zero-initialized or left uninitialized.
+ ///
+ /// This code is intentionally conservative, and will not detect
+ /// * zero init of an enum whose 0 variant does not allow zero initialization
+ /// * creating uninitialized values of types that have a full valid range (ints, floats, raw pointers)
+ /// * any form of invalid value being made inside an array (unless the value is uninhabited)
+ ///
+ /// A strict form of these checks that uses const evaluation exists in
+ /// `rustc_const_eval::might_permit_raw_init`, and a tracking issue for making these checks
+ /// stricter is <https://github.com/rust-lang/rust/issues/66151>.
+ ///
+ /// FIXME: Once all the conservatism is removed from here, and the checks are run by default,
+ /// we can use the const evaluation checks always instead.
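+ ///
+ /// A few illustrative cases (assuming the usual layouts): `u32` permits both
+ /// `InitKind::Zero` and `InitKind::Uninit` (its full range is valid), `bool`
+ /// permits `Zero` but not `Uninit` (only `0` and `1` are valid), and
+ /// `NonZeroU32` permits neither.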
+ pub fn might_permit_raw_init<C>(self, cx: &C, init_kind: InitKind) -> bool
+ where
+ Self: Copy,
+ Ty: TyAbiInterface<'a, C>,
+ C: HasDataLayout,
+ {
+ let scalar_allows_raw_init = move |s: Scalar| -> bool {
+ match init_kind {
+ InitKind::Zero => {
+ // The range must contain 0.
+ s.valid_range(cx).contains(0)
+ }
+ InitKind::Uninit => {
+ // The range must include all values.
+ s.is_always_valid(cx)
+ }
+ }
+ };
+
+ // Check the ABI.
+ let valid = match self.abi {
+ Abi::Uninhabited => false, // definitely UB
+ Abi::Scalar(s) => scalar_allows_raw_init(s),
+ Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
+ Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
+ Abi::Aggregate { .. } => true, // Fields are checked below.
+ };
+ if !valid {
+ // This is definitely not okay.
+ return false;
+ }
+
+ // If we have not found an error yet, we need to recursively descend into fields.
+ match &self.fields {
+ FieldsShape::Primitive | FieldsShape::Union { .. } => {}
+ FieldsShape::Array { .. } => {
+ // FIXME(#66151): For now, we are conservative and do not check arrays by default.
+ }
+ FieldsShape::Arbitrary { offsets, .. } => {
+ for idx in 0..offsets.len() {
+ if !self.field(cx, idx).might_permit_raw_init(cx, init_kind) {
+ // We found a field that is unhappy with this kind of initialization.
+ return false;
+ }
+ }
+ }
+ }
+
+ // FIXME(#66151): For now, we are conservative and do not check `self.variants`.
+ true
+ }
+}