author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit     698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree       173a775858bd501c378080a10dca74132f05bc50 /compiler/rustc_target/src/abi/call
parent     Initial commit. (diff)
Adding upstream version 1.64.0+dfsg1. (upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_target/src/abi/call')
-rw-r--r--  compiler/rustc_target/src/abi/call/aarch64.rs   |  86
-rw-r--r--  compiler/rustc_target/src/abi/call/amdgpu.rs    |  35
-rw-r--r--  compiler/rustc_target/src/abi/call/arm.rs       |  97
-rw-r--r--  compiler/rustc_target/src/abi/call/avr.rs       |  59
-rw-r--r--  compiler/rustc_target/src/abi/call/bpf.rs       |  31
-rw-r--r--  compiler/rustc_target/src/abi/call/hexagon.rs   |  30
-rw-r--r--  compiler/rustc_target/src/abi/call/m68k.rs      |  30
-rw-r--r--  compiler/rustc_target/src/abi/call/mips.rs      |  51
-rw-r--r--  compiler/rustc_target/src/abi/call/mips64.rs    | 167
-rw-r--r--  compiler/rustc_target/src/abi/call/mod.rs       | 734
-rw-r--r--  compiler/rustc_target/src/abi/call/msp430.rs    |  39
-rw-r--r--  compiler/rustc_target/src/abi/call/nvptx.rs     |  33
-rw-r--r--  compiler/rustc_target/src/abi/call/nvptx64.rs   |  64
-rw-r--r--  compiler/rustc_target/src/abi/call/powerpc.rs   |  30
-rw-r--r--  compiler/rustc_target/src/abi/call/powerpc64.rs | 141
-rw-r--r--  compiler/rustc_target/src/abi/call/riscv.rs     | 348
-rw-r--r--  compiler/rustc_target/src/abi/call/s390x.rs     |  57
-rw-r--r--  compiler/rustc_target/src/abi/call/sparc.rs     |  51
-rw-r--r--  compiler/rustc_target/src/abi/call/sparc64.rs   | 226
-rw-r--r--  compiler/rustc_target/src/abi/call/wasm.rs      |  83
-rw-r--r--  compiler/rustc_target/src/abi/call/x86.rs       | 117
-rw-r--r--  compiler/rustc_target/src/abi/call/x86_64.rs    | 248
-rw-r--r--  compiler/rustc_target/src/abi/call/x86_win64.rs |  40
23 files changed, 2797 insertions, 0 deletions
diff --git a/compiler/rustc_target/src/abi/call/aarch64.rs b/compiler/rustc_target/src/abi/call/aarch64.rs
new file mode 100644
index 000000000..4613a459c
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/aarch64.rs
@@ -0,0 +1,86 @@
+use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
+ let size = arg.layout.size;
+
+ // Ensure we have at most four uniquely addressable members.
+ if size > unit.size.checked_mul(4, cx).unwrap() {
+ return None;
+ }
+
+ let valid_unit = match unit.kind {
+ RegKind::Integer => false,
+ RegKind::Float => true,
+ RegKind::Vector => size.bits() == 64 || size.bits() == 128,
+ };
+
+ valid_unit.then_some(Uniform { unit, total: size })
+ })
+}
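+
+// For example (illustrative, assuming the usual AAPCS64 layout rules):
+// - `struct S { a: f64, b: f64, c: f64 }` is a homogeneous float aggregate and yields
+//   `Uniform { unit: f64, total: 24 bytes }`, i.e. it can be passed in three FP registers.
+// - `[f32; 5]` has five uniquely addressable members (20 bytes > 4 * 4 bytes), so this
+//   helper returns `None` and the argument falls through to the generic classification below.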
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(32);
+ return;
+ }
+ if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
+ ret.cast_to(uniform);
+ return;
+ }
+ let size = ret.layout.size;
+ let bits = size.bits();
+ if bits <= 128 {
+ ret.cast_to(Uniform { unit: Reg::i64(), total: size });
+ return;
+ }
+ ret.make_indirect();
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ arg.extend_integer_width_to(32);
+ return;
+ }
+ if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
+ arg.cast_to(uniform);
+ return;
+ }
+ let size = arg.layout.size;
+ let bits = size.bits();
+ if bits <= 128 {
+ arg.cast_to(Uniform { unit: Reg::i64(), total: size });
+ return;
+ }
+ arg.make_indirect();
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/amdgpu.rs b/compiler/rustc_target/src/abi/call/amdgpu.rs
new file mode 100644
index 000000000..9be97476c
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/amdgpu.rs
@@ -0,0 +1,35 @@
+use crate::abi::call::{ArgAbi, FnAbi};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
+fn classify_ret<'a, Ty, C>(_cx: &C, ret: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ ret.extend_integer_width_to(32);
+}
+
+fn classify_arg<'a, Ty, C>(_cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.extend_integer_width_to(32);
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/arm.rs b/compiler/rustc_target/src/abi/call/arm.rs
new file mode 100644
index 000000000..e66c2132b
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/arm.rs
@@ -0,0 +1,97 @@
+use crate::abi::call::{ArgAbi, Conv, FnAbi, Reg, RegKind, Uniform};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+use crate::spec::HasTargetSpec;
+
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
+ let size = arg.layout.size;
+
+ // Ensure we have at most four uniquely addressable members.
+ if size > unit.size.checked_mul(4, cx).unwrap() {
+ return None;
+ }
+
+ let valid_unit = match unit.kind {
+ RegKind::Integer => false,
+ RegKind::Float => true,
+ RegKind::Vector => size.bits() == 64 || size.bits() == 128,
+ };
+
+ valid_unit.then_some(Uniform { unit, total: size })
+ })
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, vfp: bool)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(32);
+ return;
+ }
+
+ if vfp {
+ if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
+ ret.cast_to(uniform);
+ return;
+ }
+ }
+
+ let size = ret.layout.size;
+ let bits = size.bits();
+ if bits <= 32 {
+ ret.cast_to(Uniform { unit: Reg::i32(), total: size });
+ return;
+ }
+ ret.make_indirect();
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, vfp: bool)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ arg.extend_integer_width_to(32);
+ return;
+ }
+
+ if vfp {
+ if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
+ arg.cast_to(uniform);
+ return;
+ }
+ }
+
+ let align = arg.layout.align.abi.bytes();
+ let total = arg.layout.size;
+ arg.cast_to(Uniform { unit: if align <= 4 { Reg::i32() } else { Reg::i64() }, total });
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ // If this is a target with a hard-float ABI, and the function is not explicitly
+ // `extern "aapcs"`, then we must use the VFP registers for homogeneous aggregates.
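+ // For example (illustrative): `armv7-unknown-linux-gnueabihf` ends in "hf", so a
+ // non-variadic `extern "C"` function gets `vfp = true` and its homogeneous float
+ // aggregates are classified for the VFP registers, while the same function declared
+ // `extern "aapcs"`, or a C-variadic one, takes the integer path instead.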
+ let vfp = cx.target_spec().llvm_target.ends_with("hf")
+ && fn_abi.conv != Conv::ArmAapcs
+ && !fn_abi.c_variadic;
+
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret, vfp);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, vfp);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/avr.rs b/compiler/rustc_target/src/abi/call/avr.rs
new file mode 100644
index 000000000..c1f7a1e3a
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/avr.rs
@@ -0,0 +1,59 @@
+//! LLVM-frontend specific AVR calling convention implementation.
+//!
+//! # Current calling convention ABI
+//!
+//! Inherited from Clang's `clang::DefaultABIInfo` implementation - self-described
+//! as
+//!
+//! > the default implementation for ABI specific details. This implementation
+//! > provides information which results in
+//! > self-consistent and sensible LLVM IR generation, but does not
+//! > conform to any particular ABI.
+//! >
+//! > - Doxygen documentation of `clang::DefaultABIInfo`
+//!
+//! This calling convention may not match AVR-GCC in all cases.
+//!
+//! In the future, an AVR-GCC compatible argument classification ABI should be
+//! adopted in both Rust and Clang.
+//!
+//! *NOTE*: Currently, this module implements the same calling convention
+//! that clang with AVR does - the default, simple, unspecialized
+//! ABI implementation available to all targets. This ABI is not
+//! binary-compatible with AVR-GCC. Once LLVM [PR46140](https://bugs.llvm.org/show_bug.cgi?id=46140)
+//! is completed, this module should be updated to match so that both Clang
+//! and Rust emit code to the same AVR-GCC compatible ABI.
+//!
+//! In particular, both Clang and Rust may not have the same semantics
+//! when promoting arguments to indirect references as AVR-GCC. It is important
+//! to note that the core AVR ABI implementation within LLVM itself is ABI
+//! compatible with AVR-GCC - Rust and AVR-GCC only differ in the small amount
+//! of compiler frontend specific calling convention logic implemented here.
+
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret_ty<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg_ty<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() {
+ arg.make_indirect();
+ }
+}
+
+pub fn compute_abi_info<Ty>(fty: &mut FnAbi<'_, Ty>) {
+ if !fty.ret.is_ignore() {
+ classify_ret_ty(&mut fty.ret);
+ }
+
+ for arg in &mut fty.args {
+ if arg.is_ignore() {
+ continue;
+ }
+
+ classify_arg_ty(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/bpf.rs b/compiler/rustc_target/src/abi/call/bpf.rs
new file mode 100644
index 000000000..466c52553
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/bpf.rs
@@ -0,0 +1,31 @@
+// see https://github.com/llvm/llvm-project/blob/main/llvm/lib/Target/BPF/BPFCallingConv.td
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() || ret.layout.size.bits() > 64 {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() || arg.layout.size.bits() > 64 {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/hexagon.rs b/compiler/rustc_target/src/abi/call/hexagon.rs
new file mode 100644
index 000000000..8028443b8
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/hexagon.rs
@@ -0,0 +1,30 @@
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/m68k.rs b/compiler/rustc_target/src/abi/call/m68k.rs
new file mode 100644
index 000000000..58fdc00b6
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/m68k.rs
@@ -0,0 +1,30 @@
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() {
+ arg.make_indirect_byval();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/mips.rs b/compiler/rustc_target/src/abi/call/mips.rs
new file mode 100644
index 000000000..cc4431976
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/mips.rs
@@ -0,0 +1,51 @@
+use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
+use crate::abi::{HasDataLayout, Size};
+
+fn classify_ret<Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
+where
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(32);
+ } else {
+ ret.make_indirect();
+ *offset += cx.data_layout().pointer_size;
+ }
+}
+
+fn classify_arg<Ty, C>(cx: &C, arg: &mut ArgAbi<'_, Ty>, offset: &mut Size)
+where
+ C: HasDataLayout,
+{
+ let dl = cx.data_layout();
+ let size = arg.layout.size;
+ let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
+
+ if arg.layout.is_aggregate() {
+ arg.cast_to(Uniform { unit: Reg::i32(), total: size });
+ if !offset.is_aligned(align) {
+ arg.pad_with(Reg::i32());
+ }
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+
+ *offset = offset.align_to(align) + size.align_to(align);
+}
+
+pub fn compute_abi_info<Ty, C>(cx: &C, fn_abi: &mut FnAbi<'_, Ty>)
+where
+ C: HasDataLayout,
+{
+ let mut offset = Size::ZERO;
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret, &mut offset);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, &mut offset);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/mips64.rs b/compiler/rustc_target/src/abi/call/mips64.rs
new file mode 100644
index 000000000..cd54167aa
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/mips64.rs
@@ -0,0 +1,167 @@
+use crate::abi::call::{
+ ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, PassMode, Reg, Uniform,
+};
+use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};
+
+fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
+ // Always sign extend u32 values on 64-bit mips: the n64 ABI keeps 32-bit values
+ // sign-extended in 64-bit registers, even for unsigned types.
+ if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+ if let abi::Int(i, signed) = scalar.primitive() {
+ if !signed && i.size().bits() == 32 {
+ if let PassMode::Direct(ref mut attrs) = arg.mode {
+ attrs.ext(ArgExtension::Sext);
+ return;
+ }
+ }
+ }
+ }
+
+ arg.extend_integer_width_to(bits);
+}
+
+fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgAbi<'a, Ty>, i: usize) -> Option<Reg>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ match ret.layout.field(cx, i).abi {
+ abi::Abi::Scalar(scalar) => match scalar.primitive() {
+ abi::F32 => Some(Reg::f32()),
+ abi::F64 => Some(Reg::f64()),
+ _ => None,
+ },
+ _ => None,
+ }
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ extend_integer_width_mips(ret, 64);
+ return;
+ }
+
+ let size = ret.layout.size;
+ let bits = size.bits();
+ if bits <= 128 {
+ // Unlike other architectures which return aggregates in registers, MIPS n64 limits the
+ // use of float registers to structures (not unions) containing exactly one or two
+ // float fields.
+
+ if let abi::FieldsShape::Arbitrary { .. } = ret.layout.fields {
+ if ret.layout.fields.count() == 1 {
+ if let Some(reg) = float_reg(cx, ret, 0) {
+ ret.cast_to(reg);
+ return;
+ }
+ } else if ret.layout.fields.count() == 2 {
+ if let Some(reg0) = float_reg(cx, ret, 0) {
+ if let Some(reg1) = float_reg(cx, ret, 1) {
+ ret.cast_to(CastTarget::pair(reg0, reg1));
+ return;
+ }
+ }
+ }
+ }
+
+ // Cast to a uniform int structure
+ ret.cast_to(Uniform { unit: Reg::i64(), total: size });
+ } else {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ extend_integer_width_mips(arg, 64);
+ return;
+ }
+
+ let dl = cx.data_layout();
+ let size = arg.layout.size;
+ let mut prefix = [None; 8];
+ let mut prefix_index = 0;
+
+ match arg.layout.fields {
+ abi::FieldsShape::Primitive => unreachable!(),
+ abi::FieldsShape::Array { .. } => {
+ // Arrays are passed indirectly
+ arg.make_indirect();
+ return;
+ }
+ abi::FieldsShape::Union(_) => {
+ // Unions are always treated as a series of 64-bit integer chunks
+ }
+ abi::FieldsShape::Arbitrary { .. } => {
+ // Structures are split up into a series of 64-bit integer chunks, but any aligned
+ // doubles not part of another aggregate are passed as floats.
+ let mut last_offset = Size::ZERO;
+
+ for i in 0..arg.layout.fields.count() {
+ let field = arg.layout.field(cx, i);
+ let offset = arg.layout.fields.offset(i);
+
+ // We only care about aligned doubles
+ if let abi::Abi::Scalar(scalar) = field.abi {
+ if let abi::F64 = scalar.primitive() {
+ if offset.is_aligned(dl.f64_align.abi) {
+ // Insert enough integers to cover [last_offset, offset)
+ assert!(last_offset.is_aligned(dl.f64_align.abi));
+ for _ in 0..((offset - last_offset).bits() / 64)
+ .min((prefix.len() - prefix_index) as u64)
+ {
+ prefix[prefix_index] = Some(Reg::i64());
+ prefix_index += 1;
+ }
+
+ if prefix_index == prefix.len() {
+ break;
+ }
+
+ prefix[prefix_index] = Some(Reg::f64());
+ prefix_index += 1;
+ last_offset = offset + Reg::f64().size;
+ }
+ }
+ }
+ }
+ }
+ };
+
+ // Extract first 8 chunks as the prefix
+ let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
+ arg.cast_to(CastTarget {
+ prefix,
+ rest: Uniform { unit: Reg::i64(), total: rest_size },
+ attrs: ArgAttributes {
+ regular: ArgAttribute::default(),
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ },
+ });
+}
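+
+// Worked example (illustrative): for `#[repr(C)] struct S { a: f64, b: i32, c: i32 }`
+// (16 bytes) the `f64` at offset 0 is aligned, so the prefix becomes `[Some(f64)]` and
+// `last_offset` advances to 8; the two `i32` fields are not doubles and are left to the
+// `rest` part, giving `rest = Uniform { unit: i64, total: 8 bytes }`, i.e. the struct is
+// passed as `{ f64, i64 }`.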
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
new file mode 100644
index 000000000..577126a95
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -0,0 +1,734 @@
+use crate::abi::{self, Abi, Align, FieldsShape, Size};
+use crate::abi::{HasDataLayout, TyAbiInterface, TyAndLayout};
+use crate::spec::{self, HasTargetSpec};
+use rustc_span::Symbol;
+use std::fmt;
+
+mod aarch64;
+mod amdgpu;
+mod arm;
+mod avr;
+mod bpf;
+mod hexagon;
+mod m68k;
+mod mips;
+mod mips64;
+mod msp430;
+mod nvptx;
+mod nvptx64;
+mod powerpc;
+mod powerpc64;
+mod riscv;
+mod s390x;
+mod sparc;
+mod sparc64;
+mod wasm;
+mod x86;
+mod x86_64;
+mod x86_win64;
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum PassMode {
+ /// Ignore the argument.
+ ///
+ /// The argument is either uninhabited or a ZST.
+ Ignore,
+ /// Pass the argument directly.
+ ///
+ /// The argument has a layout abi of `Scalar`, `Vector` or in rare cases `Aggregate`.
+ Direct(ArgAttributes),
+ /// Pass a pair's elements directly in two arguments.
+ ///
+ /// The argument has a layout abi of `ScalarPair`.
+ Pair(ArgAttributes, ArgAttributes),
+ /// Pass the argument after casting it, to either
+ /// a single uniform or a pair of registers.
+ Cast(CastTarget),
+ /// Pass the argument indirectly via a hidden pointer.
+ /// The `extra_attrs` value, if any, is for the extra data (vtable or length)
+ /// which indicates that it refers to an unsized rvalue.
+ /// `on_stack` defines that the value should be passed at a fixed
+ /// stack offset in accordance with the ABI rather than passed using a
+ /// pointer. This corresponds to the `byval` LLVM argument attribute.
+ Indirect { attrs: ArgAttributes, extra_attrs: Option<ArgAttributes>, on_stack: bool },
+}
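+
+// Rough illustration (not exhaustive): a zero-sized type is `Ignore`; `i32` is
+// `Direct`; a `ScalarPair` layout such as `(i32, f64)` is `Pair`; the per-target code
+// below may rewrite an `Aggregate` argument to `Cast` (e.g. into register-sized chunks)
+// or to `Indirect` when it is too large to pass in registers.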
+
+// Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
+// of this module
+pub use attr_impl::ArgAttribute;
+
+#[allow(non_upper_case_globals)]
+#[allow(unused)]
+mod attr_impl {
+ // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
+ bitflags::bitflags! {
+ #[derive(Default, HashStable_Generic)]
+ pub struct ArgAttribute: u16 {
+ const NoAlias = 1 << 1;
+ const NoCapture = 1 << 2;
+ const NonNull = 1 << 3;
+ const ReadOnly = 1 << 4;
+ const InReg = 1 << 5;
+ // Due to past miscompiles in LLVM, we use a separate attribute for
+ // &mut arguments, so that the codegen backend can decide whether
+ // or not to actually emit the attribute. It can also be controlled
+ // with the `-Zmutable-noalias` debugging option.
+ const NoAliasMutRef = 1 << 6;
+ const NoUndef = 1 << 7;
+ }
+ }
+}
+
+/// Sometimes an ABI requires small integers to be extended to a full or partial register. This enum
+/// defines if this extension should be zero-extension or sign-extension when necessary. When it is
+/// not necessary to extend the argument, this enum is ignored.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum ArgExtension {
+ None,
+ Zext,
+ Sext,
+}
+
+/// A compact representation of LLVM attributes (at least those relevant for this module)
+/// that can be manipulated without interacting with LLVM's Attribute machinery.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct ArgAttributes {
+ pub regular: ArgAttribute,
+ pub arg_ext: ArgExtension,
+ /// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call
+ /// (corresponding to LLVM's dereferenceable and dereferenceable_or_null attributes).
+ pub pointee_size: Size,
+ pub pointee_align: Option<Align>,
+}
+
+impl ArgAttributes {
+ pub fn new() -> Self {
+ ArgAttributes {
+ regular: ArgAttribute::default(),
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ }
+ }
+
+ pub fn ext(&mut self, ext: ArgExtension) -> &mut Self {
+ assert!(
+ self.arg_ext == ArgExtension::None || self.arg_ext == ext,
+ "cannot set {:?} when {:?} is already set",
+ ext,
+ self.arg_ext
+ );
+ self.arg_ext = ext;
+ self
+ }
+
+ pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
+ self.regular |= attr;
+ self
+ }
+
+ pub fn contains(&self, attr: ArgAttribute) -> bool {
+ self.regular.contains(attr)
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum RegKind {
+ Integer,
+ Float,
+ Vector,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct Reg {
+ pub kind: RegKind,
+ pub size: Size,
+}
+
+macro_rules! reg_ctor {
+ ($name:ident, $kind:ident, $bits:expr) => {
+ pub fn $name() -> Reg {
+ Reg { kind: RegKind::$kind, size: Size::from_bits($bits) }
+ }
+ };
+}
+
+impl Reg {
+ reg_ctor!(i8, Integer, 8);
+ reg_ctor!(i16, Integer, 16);
+ reg_ctor!(i32, Integer, 32);
+ reg_ctor!(i64, Integer, 64);
+ reg_ctor!(i128, Integer, 128);
+
+ reg_ctor!(f32, Float, 32);
+ reg_ctor!(f64, Float, 64);
+}
+
+impl Reg {
+ pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
+ let dl = cx.data_layout();
+ match self.kind {
+ RegKind::Integer => match self.size.bits() {
+ 1 => dl.i1_align.abi,
+ 2..=8 => dl.i8_align.abi,
+ 9..=16 => dl.i16_align.abi,
+ 17..=32 => dl.i32_align.abi,
+ 33..=64 => dl.i64_align.abi,
+ 65..=128 => dl.i128_align.abi,
+ _ => panic!("unsupported integer: {:?}", self),
+ },
+ RegKind::Float => match self.size.bits() {
+ 32 => dl.f32_align.abi,
+ 64 => dl.f64_align.abi,
+ _ => panic!("unsupported float: {:?}", self),
+ },
+ RegKind::Vector => dl.vector_align(self.size).abi,
+ }
+ }
+}
+
+/// An argument passed entirely in registers with the
+/// same kind (e.g., HFA / HVA on PPC64 and AArch64).
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct Uniform {
+ pub unit: Reg,
+
+ /// The total size of the argument, which can be:
+ /// * equal to `unit.size` (one scalar/vector),
+ /// * a multiple of `unit.size` (an array of scalar/vectors),
+ /// * if `unit.kind` is `Integer`, the last element
+ /// can be shorter, i.e., `{ i64, i64, i32 }` for
+ /// 64-bit integers with a total size of 20 bytes.
+ pub total: Size,
+}
+
+impl From<Reg> for Uniform {
+ fn from(unit: Reg) -> Uniform {
+ Uniform { unit, total: unit.size }
+ }
+}
+
+impl Uniform {
+ pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
+ self.unit.align(cx)
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct CastTarget {
+ pub prefix: [Option<Reg>; 8],
+ pub rest: Uniform,
+ pub attrs: ArgAttributes,
+}
+
+impl From<Reg> for CastTarget {
+ fn from(unit: Reg) -> CastTarget {
+ CastTarget::from(Uniform::from(unit))
+ }
+}
+
+impl From<Uniform> for CastTarget {
+ fn from(uniform: Uniform) -> CastTarget {
+ CastTarget {
+ prefix: [None; 8],
+ rest: uniform,
+ attrs: ArgAttributes {
+ regular: ArgAttribute::default(),
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ },
+ }
+ }
+}
+
+impl CastTarget {
+ pub fn pair(a: Reg, b: Reg) -> CastTarget {
+ CastTarget {
+ prefix: [Some(a), None, None, None, None, None, None, None],
+ rest: Uniform::from(b),
+ attrs: ArgAttributes {
+ regular: ArgAttribute::default(),
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ },
+ }
+ }
+
+ pub fn size<C: HasDataLayout>(&self, _cx: &C) -> Size {
+ let mut size = self.rest.total;
+ for i in 0..self.prefix.iter().count() {
+ match self.prefix[i] {
+ Some(v) => size += Size { raw: v.size.bytes() },
+ None => {}
+ }
+ }
+ return size;
+ }
+
+ pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
+ self.prefix
+ .iter()
+ .filter_map(|x| x.map(|reg| reg.align(cx)))
+ .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| {
+ acc.max(align)
+ })
+ }
+}
+
+/// Return value from the `homogeneous_aggregate` test function.
+#[derive(Copy, Clone, Debug)]
+pub enum HomogeneousAggregate {
+ /// Yes, all the "leaf fields" of this struct are passed in the
+ /// same way (specified in the `Reg` value).
+ Homogeneous(Reg),
+
+ /// There are no leaf fields at all.
+ NoData,
+}
+
+/// Error from the `homogeneous_aggregate` test function, indicating
+/// there are distinct leaf fields passed in different ways,
+/// or this is uninhabited.
+#[derive(Copy, Clone, Debug)]
+pub struct Heterogeneous;
+
+impl HomogeneousAggregate {
+ /// If this is a homogeneous aggregate, returns the homogeneous
+ /// unit, else `None`.
+ pub fn unit(self) -> Option<Reg> {
+ match self {
+ HomogeneousAggregate::Homogeneous(reg) => Some(reg),
+ HomogeneousAggregate::NoData => None,
+ }
+ }
+
+ /// Try to combine two `HomogeneousAggregate`s, e.g. from two fields in
+ /// the same `struct`. Only succeeds if only one of them has any data,
+ /// or both units are identical.
+ fn merge(self, other: HomogeneousAggregate) -> Result<HomogeneousAggregate, Heterogeneous> {
+ match (self, other) {
+ (x, HomogeneousAggregate::NoData) | (HomogeneousAggregate::NoData, x) => Ok(x),
+
+ (HomogeneousAggregate::Homogeneous(a), HomogeneousAggregate::Homogeneous(b)) => {
+ if a != b {
+ return Err(Heterogeneous);
+ }
+ Ok(self)
+ }
+ }
+ }
+}
+
+impl<'a, Ty> TyAndLayout<'a, Ty> {
+ fn is_aggregate(&self) -> bool {
+ match self.abi {
+ Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } => false,
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => true,
+ }
+ }
+
+ /// Returns `Homogeneous` if this layout is an aggregate containing fields of
+ /// only a single type (e.g., `(u32, u32)`). Such aggregates are often
+ /// special-cased in ABIs.
+ ///
+ /// Note: We generally ignore fields of zero-sized type when computing
+ /// this value (see #56877).
+ ///
+ /// This is public so that it can be used in unit tests, but
+ /// should generally only be relevant to the ABI details of
+ /// specific targets.
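+ ///
+ /// For example, `(f64, f64)` is homogeneous with unit `f64` and `[f32; 3]` is
+ /// homogeneous with unit `f32`, while `(f64, u64)` mixes float and integer
+ /// leaves and is reported as `Heterogeneous`.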
+ pub fn homogeneous_aggregate<C>(&self, cx: &C) -> Result<HomogeneousAggregate, Heterogeneous>
+ where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ {
+ match self.abi {
+ Abi::Uninhabited => Err(Heterogeneous),
+
+ // The primitive for this algorithm.
+ Abi::Scalar(scalar) => {
+ let kind = match scalar.primitive() {
+ abi::Int(..) | abi::Pointer => RegKind::Integer,
+ abi::F32 | abi::F64 => RegKind::Float,
+ };
+ Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))
+ }
+
+ Abi::Vector { .. } => {
+ assert!(!self.is_zst());
+ Ok(HomogeneousAggregate::Homogeneous(Reg {
+ kind: RegKind::Vector,
+ size: self.size,
+ }))
+ }
+
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
+ // Helper for computing `homogeneous_aggregate`, allowing a custom
+ // starting offset (used below for handling variants).
+ let from_fields_at =
+ |layout: Self,
+ start: Size|
+ -> Result<(HomogeneousAggregate, Size), Heterogeneous> {
+ let is_union = match layout.fields {
+ FieldsShape::Primitive => {
+ unreachable!("aggregates can't have `FieldsShape::Primitive`")
+ }
+ FieldsShape::Array { count, .. } => {
+ assert_eq!(start, Size::ZERO);
+
+ let result = if count > 0 {
+ layout.field(cx, 0).homogeneous_aggregate(cx)?
+ } else {
+ HomogeneousAggregate::NoData
+ };
+ return Ok((result, layout.size));
+ }
+ FieldsShape::Union(_) => true,
+ FieldsShape::Arbitrary { .. } => false,
+ };
+
+ let mut result = HomogeneousAggregate::NoData;
+ let mut total = start;
+
+ for i in 0..layout.fields.count() {
+ if !is_union && total != layout.fields.offset(i) {
+ return Err(Heterogeneous);
+ }
+
+ let field = layout.field(cx, i);
+
+ result = result.merge(field.homogeneous_aggregate(cx)?)?;
+
+ // Keep track of the offset (without padding).
+ let size = field.size;
+ if is_union {
+ total = total.max(size);
+ } else {
+ total += size;
+ }
+ }
+
+ Ok((result, total))
+ };
+
+ let (mut result, mut total) = from_fields_at(*self, Size::ZERO)?;
+
+ match &self.variants {
+ abi::Variants::Single { .. } => {}
+ abi::Variants::Multiple { variants, .. } => {
+ // Treat enum variants like union members.
+ // HACK(eddyb) pretend the `enum` field (discriminant)
+ // is at the start of every variant (otherwise the gap
+ // at the start of all variants would disqualify them).
+ //
+ // NB: for all tagged `enum`s (which include all non-C-like
+ // `enum`s with defined FFI representation), this will
+ // match the homogeneous computation on the equivalent
+ // `struct { tag; union { variant1; ... } }` and/or
+ // `union { struct { tag; variant1; } ... }`
+ // (the offsets of variant fields should be identical
+ // between the two for either to be a homogeneous aggregate).
+ let variant_start = total;
+ for variant_idx in variants.indices() {
+ let (variant_result, variant_total) =
+ from_fields_at(self.for_variant(cx, variant_idx), variant_start)?;
+
+ result = result.merge(variant_result)?;
+ total = total.max(variant_total);
+ }
+ }
+ }
+
+ // There needs to be no padding.
+ if total != self.size {
+ Err(Heterogeneous)
+ } else {
+ match result {
+ HomogeneousAggregate::Homogeneous(_) => {
+ assert_ne!(total, Size::ZERO);
+ }
+ HomogeneousAggregate::NoData => {
+ assert_eq!(total, Size::ZERO);
+ }
+ }
+ Ok(result)
+ }
+ }
+ }
+ }
+}
+
+/// Information about how to pass an argument to,
+/// or return a value from, a function, under some ABI.
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct ArgAbi<'a, Ty> {
+ pub layout: TyAndLayout<'a, Ty>,
+
+ /// Dummy argument, which is emitted before the real argument.
+ pub pad: Option<Reg>,
+
+ pub mode: PassMode,
+}
+
+impl<'a, Ty> ArgAbi<'a, Ty> {
+ pub fn new(
+ cx: &impl HasDataLayout,
+ layout: TyAndLayout<'a, Ty>,
+ scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
+ ) -> Self {
+ let mode = match layout.abi {
+ Abi::Uninhabited => PassMode::Ignore,
+ Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
+ Abi::ScalarPair(a, b) => PassMode::Pair(
+ scalar_attrs(&layout, a, Size::ZERO),
+ scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
+ ),
+ Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
+ Abi::Aggregate { .. } => PassMode::Direct(ArgAttributes::new()),
+ };
+ ArgAbi { layout, pad: None, mode }
+ }
+
+ fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
+ let mut attrs = ArgAttributes::new();
+
+ // For non-immediate arguments the callee gets its own copy of
+ // the value on the stack, so there are no aliases. It's also
+ // program-invisible, so the pointer can't possibly be captured.
+ attrs
+ .set(ArgAttribute::NoAlias)
+ .set(ArgAttribute::NoCapture)
+ .set(ArgAttribute::NonNull)
+ .set(ArgAttribute::NoUndef);
+ attrs.pointee_size = layout.size;
+ // FIXME(eddyb) We should be doing this, but at least on
+ // i686-pc-windows-msvc, it results in wrong stack offsets.
+ // attrs.pointee_align = Some(layout.align.abi);
+
+ let extra_attrs = layout.is_unsized().then_some(ArgAttributes::new());
+
+ PassMode::Indirect { attrs, extra_attrs, on_stack: false }
+ }
+
+ pub fn make_indirect(&mut self) {
+ match self.mode {
+ PassMode::Direct(_) | PassMode::Pair(_, _) => {}
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: false } => return,
+ _ => panic!("Tried to make {:?} indirect", self.mode),
+ }
+
+ self.mode = Self::indirect_pass_mode(&self.layout);
+ }
+
+ pub fn make_indirect_byval(&mut self) {
+ self.make_indirect();
+ match self.mode {
+ PassMode::Indirect { attrs: _, extra_attrs: _, ref mut on_stack } => {
+ *on_stack = true;
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ pub fn extend_integer_width_to(&mut self, bits: u64) {
+ // Only integers have signedness
+ if let Abi::Scalar(scalar) = self.layout.abi {
+ if let abi::Int(i, signed) = scalar.primitive() {
+ if i.size().bits() < bits {
+ if let PassMode::Direct(ref mut attrs) = self.mode {
+ if signed {
+ attrs.ext(ArgExtension::Sext)
+ } else {
+ attrs.ext(ArgExtension::Zext)
+ };
+ }
+ }
+ }
+ }
+ }
+
+ pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
+ self.mode = PassMode::Cast(target.into());
+ }
+
+ pub fn pad_with(&mut self, reg: Reg) {
+ self.pad = Some(reg);
+ }
+
+ pub fn is_indirect(&self) -> bool {
+ matches!(self.mode, PassMode::Indirect { .. })
+ }
+
+ pub fn is_sized_indirect(&self) -> bool {
+ matches!(self.mode, PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ })
+ }
+
+ pub fn is_unsized_indirect(&self) -> bool {
+ matches!(self.mode, PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ })
+ }
+
+ pub fn is_ignore(&self) -> bool {
+ matches!(self.mode, PassMode::Ignore)
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum Conv {
+ // General language calling conventions, for which every target
+ // should have its own backend (e.g. LLVM) support.
+ C,
+ Rust,
+
+ /// For things unlikely to be called, where smaller caller codegen is
+ /// preferred over raw speed.
+ /// Stronger than just `#[cold]` because `fn` pointers might be incompatible.
+ RustCold,
+
+ // Target-specific calling conventions.
+ ArmAapcs,
+ CCmseNonSecureCall,
+
+ Msp430Intr,
+
+ PtxKernel,
+
+ X86Fastcall,
+ X86Intr,
+ X86Stdcall,
+ X86ThisCall,
+ X86VectorCall,
+
+ X86_64SysV,
+ X86_64Win64,
+
+ AmdGpuKernel,
+ AvrInterrupt,
+ AvrNonBlockingInterrupt,
+}
+
+/// Metadata describing how the arguments to a native function
+/// should be passed in order to respect the native ABI.
+///
+/// I will do my best to describe this structure, but these
+/// comments are reverse-engineered and may be inaccurate. -NDM
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct FnAbi<'a, Ty> {
+ /// The LLVM types of each argument.
+ pub args: Vec<ArgAbi<'a, Ty>>,
+
+ /// LLVM return type.
+ pub ret: ArgAbi<'a, Ty>,
+
+ pub c_variadic: bool,
+
+ /// The count of non-variadic arguments.
+ ///
+ /// Should only be different from args.len() when c_variadic is true.
+ /// This can be used to know whether an argument is variadic or not.
+ pub fixed_count: usize,
+
+ pub conv: Conv,
+
+ pub can_unwind: bool,
+}
+
+/// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
+#[derive(Copy, Clone, Debug, HashStable_Generic)]
+pub enum AdjustForForeignAbiError {
+ /// Target architecture doesn't support "foreign" (i.e. non-Rust) ABIs.
+ Unsupported { arch: Symbol, abi: spec::abi::Abi },
+}
+
+impl fmt::Display for AdjustForForeignAbiError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Unsupported { arch, abi } => {
+ write!(f, "target architecture {:?} does not support `extern {}` ABI", arch, abi)
+ }
+ }
+ }
+}
+
+impl<'a, Ty> FnAbi<'a, Ty> {
+ pub fn adjust_for_foreign_abi<C>(
+ &mut self,
+ cx: &C,
+ abi: spec::abi::Abi,
+ ) -> Result<(), AdjustForForeignAbiError>
+ where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+ {
+ if abi == spec::abi::Abi::X86Interrupt {
+ if let Some(arg) = self.args.first_mut() {
+ arg.make_indirect_byval();
+ }
+ return Ok(());
+ }
+
+ match &cx.target_spec().arch[..] {
+ "x86" => {
+ let flavor = if let spec::abi::Abi::Fastcall { .. }
+ | spec::abi::Abi::Vectorcall { .. } = abi
+ {
+ x86::Flavor::FastcallOrVectorcall
+ } else {
+ x86::Flavor::General
+ };
+ x86::compute_abi_info(cx, self, flavor);
+ }
+ "x86_64" => match abi {
+ spec::abi::Abi::SysV64 { .. } => x86_64::compute_abi_info(cx, self),
+ spec::abi::Abi::Win64 { .. } => x86_win64::compute_abi_info(self),
+ _ => {
+ if cx.target_spec().is_like_windows {
+ x86_win64::compute_abi_info(self)
+ } else {
+ x86_64::compute_abi_info(cx, self)
+ }
+ }
+ },
+ "aarch64" => aarch64::compute_abi_info(cx, self),
+ "amdgpu" => amdgpu::compute_abi_info(cx, self),
+ "arm" => arm::compute_abi_info(cx, self),
+ "avr" => avr::compute_abi_info(self),
+ "m68k" => m68k::compute_abi_info(self),
+ "mips" => mips::compute_abi_info(cx, self),
+ "mips64" => mips64::compute_abi_info(cx, self),
+ "powerpc" => powerpc::compute_abi_info(self),
+ "powerpc64" => powerpc64::compute_abi_info(cx, self),
+ "s390x" => s390x::compute_abi_info(cx, self),
+ "msp430" => msp430::compute_abi_info(self),
+ "sparc" => sparc::compute_abi_info(cx, self),
+ "sparc64" => sparc64::compute_abi_info(cx, self),
+ "nvptx" => nvptx::compute_abi_info(self),
+ "nvptx64" => {
+ if cx.target_spec().adjust_abi(abi) == spec::abi::Abi::PtxKernel {
+ nvptx64::compute_ptx_kernel_abi_info(cx, self)
+ } else {
+ nvptx64::compute_abi_info(self)
+ }
+ }
+ "hexagon" => hexagon::compute_abi_info(self),
+ "riscv32" | "riscv64" => riscv::compute_abi_info(cx, self),
+ "wasm32" | "wasm64" => {
+ if cx.target_spec().adjust_abi(abi) == spec::abi::Abi::Wasm {
+ wasm::compute_wasm_abi_info(self)
+ } else {
+ wasm::compute_c_abi_info(cx, self)
+ }
+ }
+ "asmjs" => wasm::compute_c_abi_info(cx, self),
+ "bpf" => bpf::compute_abi_info(self),
+ arch => {
+ return Err(AdjustForForeignAbiError::Unsupported {
+ arch: Symbol::intern(arch),
+ abi,
+ });
+ }
+ }
+
+ Ok(())
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/msp430.rs b/compiler/rustc_target/src/abi/call/msp430.rs
new file mode 100644
index 000000000..0ba73657b
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/msp430.rs
@@ -0,0 +1,39 @@
+// Reference: MSP430 Embedded Application Binary Interface
+// https://www.ti.com/lit/an/slaa534a/slaa534a.pdf
+
+use crate::abi::call::{ArgAbi, FnAbi};
+
+// 3.5 Structures or Unions Passed and Returned by Reference
+//
+// "Structures (including classes) and unions larger than 32 bits are passed and
+// returned by reference. To pass a structure or union by reference, the caller
+// places its address in the appropriate location: either in a register or on
+// the stack, according to its position in the argument list. (..)"
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(16);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(16);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/nvptx.rs b/compiler/rustc_target/src/abi/call/nvptx.rs
new file mode 100644
index 000000000..428dd95bb
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/nvptx.rs
@@ -0,0 +1,33 @@
+// Reference: PTX Writer's Guide to Interoperability
+// https://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability
+
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/nvptx64.rs b/compiler/rustc_target/src/abi/call/nvptx64.rs
new file mode 100644
index 000000000..fc16f1c97
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/nvptx64.rs
@@ -0,0 +1,64 @@
+use crate::abi::call::{ArgAbi, FnAbi, PassMode, Reg, Size, Uniform};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
+ arg.make_indirect();
+ }
+}
+
+fn classify_arg_kernel<'a, Ty, C>(_cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if matches!(arg.mode, PassMode::Pair(..)) && (arg.layout.is_adt() || arg.layout.is_tuple()) {
+ let align_bytes = arg.layout.align.abi.bytes();
+
+ let unit = match align_bytes {
+ 1 => Reg::i8(),
+ 2 => Reg::i16(),
+ 4 => Reg::i32(),
+ 8 => Reg::i64(),
+ 16 => Reg::i128(),
+ _ => unreachable!("Align is given as power of 2 no larger than 16 bytes"),
+ };
+ arg.cast_to(Uniform { unit, total: Size::from_bytes(2 * align_bytes) });
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
+
+pub fn compute_ptx_kernel_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.layout.is_unit() && !fn_abi.ret.layout.is_never() {
+ panic!("Kernels should not return anything other than () or !");
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg_kernel(cx, arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/powerpc.rs b/compiler/rustc_target/src/abi/call/powerpc.rs
new file mode 100644
index 000000000..27a5c6d2f
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/powerpc.rs
@@ -0,0 +1,30 @@
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/powerpc64.rs b/compiler/rustc_target/src/abi/call/powerpc64.rs
new file mode 100644
index 000000000..c22ef9c8f
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/powerpc64.rs
@@ -0,0 +1,141 @@
+// FIXME:
+// Alignment of 128 bit types is not currently handled, this will
+// need to be fixed when PowerPC vector support is added.
+
+use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
+use crate::abi::{Endian, HasDataLayout, TyAbiInterface};
+use crate::spec::HasTargetSpec;
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+enum ABI {
+ ELFv1, // original ABI used for powerpc64 (big-endian)
+ ELFv2, // newer ABI used for powerpc64le and musl (both endians)
+}
+use ABI::*;
+
+fn is_homogeneous_aggregate<'a, Ty, C>(
+ cx: &C,
+ arg: &mut ArgAbi<'a, Ty>,
+ abi: ABI,
+) -> Option<Uniform>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
+ // ELFv1 only passes one-member aggregates transparently.
+ // ELFv2 passes up to eight uniquely addressable members.
+ if (abi == ELFv1 && arg.layout.size > unit.size)
+ || arg.layout.size > unit.size.checked_mul(8, cx).unwrap()
+ {
+ return None;
+ }
+
+ let valid_unit = match unit.kind {
+ RegKind::Integer => false,
+ RegKind::Float => true,
+ RegKind::Vector => arg.layout.size.bits() == 128,
+ };
+
+ valid_unit.then_some(Uniform { unit, total: arg.layout.size })
+ })
+}
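+
+// For example (illustrative): `[f64; 8]` (64 bytes) is accepted under ELFv2
+// (eight f64 members, within the eight-member limit) and becomes
+// `Uniform { unit: f64, total: 64 bytes }`, but under ELFv1, which only passes
+// one-member aggregates transparently, this helper returns `None`.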
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, abi: ABI)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(64);
+ return;
+ }
+
+ // The ELFv1 ABI doesn't return aggregates in registers
+ if abi == ELFv1 {
+ ret.make_indirect();
+ return;
+ }
+
+ if let Some(uniform) = is_homogeneous_aggregate(cx, ret, abi) {
+ ret.cast_to(uniform);
+ return;
+ }
+
+ let size = ret.layout.size;
+ let bits = size.bits();
+ if bits <= 128 {
+ let unit = if cx.data_layout().endian == Endian::Big {
+ Reg { kind: RegKind::Integer, size }
+ } else if bits <= 8 {
+ Reg::i8()
+ } else if bits <= 16 {
+ Reg::i16()
+ } else if bits <= 32 {
+ Reg::i32()
+ } else {
+ Reg::i64()
+ };
+
+ ret.cast_to(Uniform { unit, total: size });
+ return;
+ }
+
+ ret.make_indirect();
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, abi: ABI)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ arg.extend_integer_width_to(64);
+ return;
+ }
+
+ if let Some(uniform) = is_homogeneous_aggregate(cx, arg, abi) {
+ arg.cast_to(uniform);
+ return;
+ }
+
+ let size = arg.layout.size;
+ let (unit, total) = if size.bits() <= 64 {
+ // Aggregates smaller than a doubleword should appear in
+ // the least-significant bits of the parameter doubleword.
+ (Reg { kind: RegKind::Integer, size }, size)
+ } else {
+ // Aggregates larger than a doubleword should be padded
+ // at the tail to fill out a whole number of doublewords.
+ let reg_i64 = Reg::i64();
+ (reg_i64, size.align_to(reg_i64.align(cx)))
+ };
+
+ arg.cast_to(Uniform { unit, total });
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ let abi = if cx.target_spec().env == "musl" {
+ ELFv2
+ } else {
+ match cx.data_layout().endian {
+ Endian::Big => ELFv1,
+ Endian::Little => ELFv2,
+ }
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret, abi);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, abi);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/riscv.rs b/compiler/rustc_target/src/abi/call/riscv.rs
new file mode 100644
index 000000000..752b44f64
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/riscv.rs
@@ -0,0 +1,348 @@
+// Reference: RISC-V ELF psABI specification
+// https://github.com/riscv/riscv-elf-psabi-doc
+//
+// Reference: Clang RISC-V ELF psABI lowering code
+// https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773
+
+use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
+use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use crate::spec::HasTargetSpec;
+
+#[derive(Copy, Clone)]
+enum RegPassKind {
+ Float(Reg),
+ Integer(Reg),
+ Unknown,
+}
+
+#[derive(Copy, Clone)]
+enum FloatConv {
+ FloatPair(Reg, Reg),
+ Float(Reg),
+ MixedPair(Reg, Reg),
+}
+
+#[derive(Copy, Clone)]
+struct CannotUseFpConv;
+
+fn is_riscv_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
+ match arg.layout.abi {
+ Abi::Vector { .. } => true,
+ _ => arg.layout.is_aggregate(),
+ }
+}
+
+fn should_use_fp_conv_helper<'a, Ty, C>(
+ cx: &C,
+ arg_layout: &TyAndLayout<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+ field1_kind: &mut RegPassKind,
+ field2_kind: &mut RegPassKind,
+) -> Result<(), CannotUseFpConv>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ match arg_layout.abi {
+ Abi::Scalar(scalar) => match scalar.primitive() {
+ abi::Int(..) | abi::Pointer => {
+ if arg_layout.size.bits() > xlen {
+ return Err(CannotUseFpConv);
+ }
+ match (*field1_kind, *field2_kind) {
+ (RegPassKind::Unknown, _) => {
+ *field1_kind = RegPassKind::Integer(Reg {
+ kind: RegKind::Integer,
+ size: arg_layout.size,
+ });
+ }
+ (RegPassKind::Float(_), RegPassKind::Unknown) => {
+ *field2_kind = RegPassKind::Integer(Reg {
+ kind: RegKind::Integer,
+ size: arg_layout.size,
+ });
+ }
+ _ => return Err(CannotUseFpConv),
+ }
+ }
+ abi::F32 | abi::F64 => {
+ if arg_layout.size.bits() > flen {
+ return Err(CannotUseFpConv);
+ }
+ match (*field1_kind, *field2_kind) {
+ (RegPassKind::Unknown, _) => {
+ *field1_kind =
+ RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
+ }
+ (_, RegPassKind::Unknown) => {
+ *field2_kind =
+ RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
+ }
+ _ => return Err(CannotUseFpConv),
+ }
+ }
+ },
+ Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+ FieldsShape::Primitive => {
+ unreachable!("aggregates can't have `FieldsShape::Primitive`")
+ }
+ FieldsShape::Union(_) => {
+ if !arg_layout.is_zst() {
+ return Err(CannotUseFpConv);
+ }
+ }
+ FieldsShape::Array { count, .. } => {
+ for _ in 0..count {
+ let elem_layout = arg_layout.field(cx, 0);
+ should_use_fp_conv_helper(
+ cx,
+ &elem_layout,
+ xlen,
+ flen,
+ field1_kind,
+ field2_kind,
+ )?;
+ }
+ }
+ FieldsShape::Arbitrary { .. } => {
+ match arg_layout.variants {
+ abi::Variants::Multiple { .. } => return Err(CannotUseFpConv),
+ abi::Variants::Single { .. } => (),
+ }
+ for i in arg_layout.fields.index_by_increasing_offset() {
+ let field = arg_layout.field(cx, i);
+ should_use_fp_conv_helper(cx, &field, xlen, flen, field1_kind, field2_kind)?;
+ }
+ }
+ },
+ }
+ Ok(())
+}
+
+fn should_use_fp_conv<'a, Ty, C>(
+ cx: &C,
+ arg: &TyAndLayout<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+) -> Option<FloatConv>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ let mut field1_kind = RegPassKind::Unknown;
+ let mut field2_kind = RegPassKind::Unknown;
+ if should_use_fp_conv_helper(cx, arg, xlen, flen, &mut field1_kind, &mut field2_kind).is_err() {
+ return None;
+ }
+ match (field1_kind, field2_kind) {
+ (RegPassKind::Integer(l), RegPassKind::Float(r)) => Some(FloatConv::MixedPair(l, r)),
+ (RegPassKind::Float(l), RegPassKind::Integer(r)) => Some(FloatConv::MixedPair(l, r)),
+ (RegPassKind::Float(l), RegPassKind::Float(r)) => Some(FloatConv::FloatPair(l, r)),
+ (RegPassKind::Float(f), RegPassKind::Unknown) => Some(FloatConv::Float(f)),
+ _ => None,
+ }
+}
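+
+// Illustrative examples, assuming an `lp64d` target (xlen = 64, flen = 64):
+// - `struct S { a: f32, b: f32 }` yields `FloatPair(f32, f32)` and can go in two FPRs.
+// - `struct T { a: f64, b: i32 }` yields `MixedPair(f64, i32)`: one FPR plus one GPR.
+// - `struct U { a: f64, b: f64, c: f64 }` has three fields, so the third field hits the
+//   `_ => return Err(CannotUseFpConv)` arm and the struct uses the integer convention.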
+
+fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
+ match conv {
+ FloatConv::Float(f) => {
+ arg.cast_to(f);
+ }
+ FloatConv::FloatPair(l, r) => {
+ arg.cast_to(CastTarget::pair(l, r));
+ }
+ FloatConv::MixedPair(l, r) => {
+ arg.cast_to(CastTarget::pair(l, r));
+ }
+ }
+ return false;
+ }
+
+ let total = arg.layout.size;
+
+ // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
+ // the argument list with the address."
+ // "Aggregates larger than 2✕XLEN bits are passed by reference and are
+ // replaced in the argument list with the address, as are C++ aggregates
+ // with nontrivial copy constructors, destructors, or vtables."
+ if total.bits() > 2 * xlen {
+ // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
+ if is_riscv_aggregate(arg) {
+ arg.make_indirect();
+ }
+ return true;
+ }
+
+ let xlen_reg = match xlen {
+ 32 => Reg::i32(),
+ 64 => Reg::i64(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+ if is_riscv_aggregate(arg) {
+ if total.bits() <= xlen {
+ arg.cast_to(xlen_reg);
+ } else {
+ arg.cast_to(Uniform { unit: xlen_reg, total: Size::from_bits(xlen * 2) });
+ }
+ return false;
+ }
+
+ // "When passed in registers, scalars narrower than XLEN bits are widened
+ // according to the sign of their type up to 32 bits, then sign-extended to
+ // XLEN bits."
+ extend_integer_width(arg, xlen);
+ false
+}
+
+fn classify_arg<'a, Ty, C>(
+ cx: &C,
+ arg: &mut ArgAbi<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+ is_vararg: bool,
+ avail_gprs: &mut u64,
+ avail_fprs: &mut u64,
+) where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ if !is_vararg {
+ match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
+ Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
+ *avail_fprs -= 1;
+ arg.cast_to(f);
+ return;
+ }
+ Some(FloatConv::FloatPair(l, r)) if *avail_fprs >= 2 => {
+ *avail_fprs -= 2;
+ arg.cast_to(CastTarget::pair(l, r));
+ return;
+ }
+ Some(FloatConv::MixedPair(l, r)) if *avail_fprs >= 1 && *avail_gprs >= 1 => {
+ *avail_gprs -= 1;
+ *avail_fprs -= 1;
+ arg.cast_to(CastTarget::pair(l, r));
+ return;
+ }
+ _ => (),
+ }
+ }
+
+ let total = arg.layout.size;
+ let align = arg.layout.align.abi.bits();
+
+ // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
+ // the argument list with the address."
+ // "Aggregates larger than 2✕XLEN bits are passed by reference and are
+ // replaced in the argument list with the address, as are C++ aggregates
+ // with nontrivial copy constructors, destructors, or vtables."
+ if total.bits() > 2 * xlen {
+ // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
+ if is_riscv_aggregate(arg) {
+ arg.make_indirect();
+ }
+ if *avail_gprs >= 1 {
+ *avail_gprs -= 1;
+ }
+ return;
+ }
+
+ let double_xlen_reg = match xlen {
+ 32 => Reg::i64(),
+ 64 => Reg::i128(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+
+ let xlen_reg = match xlen {
+ 32 => Reg::i32(),
+ 64 => Reg::i64(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+
+ if total.bits() > xlen {
+ let align_regs = align > xlen;
+ if is_riscv_aggregate(arg) {
+ arg.cast_to(Uniform {
+ unit: if align_regs { double_xlen_reg } else { xlen_reg },
+ total: Size::from_bits(xlen * 2),
+ });
+ }
+ if align_regs && is_vararg {
+ *avail_gprs -= *avail_gprs % 2;
+ }
+ if *avail_gprs >= 2 {
+ *avail_gprs -= 2;
+ } else {
+ *avail_gprs = 0;
+ }
+ return;
+ } else if is_riscv_aggregate(arg) {
+ arg.cast_to(xlen_reg);
+ if *avail_gprs >= 1 {
+ *avail_gprs -= 1;
+ }
+ return;
+ }
+
+ // "When passed in registers, scalars narrower than XLEN bits are widened
+ // according to the sign of their type up to 32 bits, then sign-extended to
+ // XLEN bits."
+ if *avail_gprs >= 1 {
+ extend_integer_width(arg, xlen);
+ *avail_gprs -= 1;
+ }
+}
+
+fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) {
+ if let Abi::Scalar(scalar) = arg.layout.abi {
+ if let abi::Int(i, _) = scalar.primitive() {
+ // 32-bit integers are always sign-extended
+ if i.size().bits() == 32 && xlen > 32 {
+ if let PassMode::Direct(ref mut attrs) = arg.mode {
+ attrs.ext(ArgExtension::Sext);
+ return;
+ }
+ }
+ }
+ }
+
+ arg.extend_integer_width_to(xlen);
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ let flen = match &cx.target_spec().llvm_abiname[..] {
+ "ilp32f" | "lp64f" => 32,
+ "ilp32d" | "lp64d" => 64,
+ _ => 0,
+ };
+ let xlen = cx.data_layout().pointer_size.bits();
+
+ let mut avail_gprs = 8;
+ let mut avail_fprs = 8;
+
+ if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
+ avail_gprs -= 1;
+ }
+
+ for (i, arg) in fn_abi.args.iter_mut().enumerate() {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(
+ cx,
+ arg,
+ xlen,
+ flen,
+ i >= fn_abi.fixed_count,
+ &mut avail_gprs,
+ &mut avail_fprs,
+ );
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/s390x.rs b/compiler/rustc_target/src/abi/call/s390x.rs
new file mode 100644
index 000000000..13706e8c2
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/s390x.rs
@@ -0,0 +1,57 @@
+// FIXME: This assumes we're using the non-vector ABI, i.e., compiling
+// for a pre-z13 machine or using -mno-vx.
+
+use crate::abi::call::{ArgAbi, FnAbi, Reg};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if !ret.layout.is_aggregate() && ret.layout.size.bits() <= 64 {
+ ret.extend_integer_width_to(64);
+ } else {
+ ret.make_indirect();
+ }
+}
+
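+// Arguments: scalars up to 64 bits ride in registers; a single-float
+// aggregate of 4 or 8 bytes is passed as f32/f64; other aggregates of
+// 1, 2, 4, or 8 bytes are passed as an integer of that size; everything
+// else is passed indirectly.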
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() && arg.layout.size.bits() <= 64 {
+ arg.extend_integer_width_to(64);
+ return;
+ }
+
+ if arg.layout.is_single_fp_element(cx) {
+ match arg.layout.size.bytes() {
+ 4 => arg.cast_to(Reg::f32()),
+ 8 => arg.cast_to(Reg::f64()),
+ _ => arg.make_indirect(),
+ }
+ } else {
+ match arg.layout.size.bytes() {
+ 1 => arg.cast_to(Reg::i8()),
+ 2 => arg.cast_to(Reg::i16()),
+ 4 => arg.cast_to(Reg::i32()),
+ 8 => arg.cast_to(Reg::i64()),
+ _ => arg.make_indirect(),
+ }
+ }
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/sparc.rs b/compiler/rustc_target/src/abi/call/sparc.rs
new file mode 100644
index 000000000..cc4431976
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/sparc.rs
@@ -0,0 +1,51 @@
+use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
+use crate::abi::{HasDataLayout, Size};
+
+fn classify_ret<Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
+where
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(32);
+ } else {
+ ret.make_indirect();
+ *offset += cx.data_layout().pointer_size;
+ }
+}
+
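+// Aggregates are passed as a sequence of 32-bit words, with an i32 pad
+// inserted when the running offset is misaligned for this argument;
+// scalars are widened to 32 bits.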
+fn classify_arg<Ty, C>(cx: &C, arg: &mut ArgAbi<'_, Ty>, offset: &mut Size)
+where
+ C: HasDataLayout,
+{
+ let dl = cx.data_layout();
+ let size = arg.layout.size;
+ let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
+
+ if arg.layout.is_aggregate() {
+ arg.cast_to(Uniform { unit: Reg::i32(), total: size });
+ if !offset.is_aligned(align) {
+ arg.pad_with(Reg::i32());
+ }
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+
+ *offset = offset.align_to(align) + size.align_to(align);
+}
+
+pub fn compute_abi_info<Ty, C>(cx: &C, fn_abi: &mut FnAbi<'_, Ty>)
+where
+ C: HasDataLayout,
+{
+ let mut offset = Size::ZERO;
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret, &mut offset);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, &mut offset);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/sparc64.rs b/compiler/rustc_target/src/abi/call/sparc64.rs
new file mode 100644
index 000000000..cc3a0a699
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/sparc64.rs
@@ -0,0 +1,226 @@
+// FIXME: This needs an audit for correctness and completeness.
+
+use crate::abi::call::{
+ ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, Reg, Uniform,
+};
+use crate::abi::{self, HasDataLayout, Scalar, Size, TyAbiInterface, TyAndLayout};
+
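+// Working state threaded through `parse_structure`: the register prefix
+// assembled so far (at most eight slots), how far into the value it
+// reaches, whether any float has been seen, and the attribute to attach
+// to the resulting cast.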
+#[derive(Clone, Debug)]
+pub struct Sdata {
+ pub prefix: [Option<Reg>; 8],
+ pub prefix_index: usize,
+ pub last_offset: Size,
+ pub has_float: bool,
+ pub arg_attribute: ArgAttribute,
+}
+
+fn arg_scalar<C>(cx: &C, scalar: &Scalar, offset: Size, mut data: Sdata) -> Sdata
+where
+ C: HasDataLayout,
+{
+ let dl = cx.data_layout();
+
+ if !scalar.primitive().is_float() {
+ return data;
+ }
+
+ data.has_float = true;
+
+ if !data.last_offset.is_aligned(dl.f64_align.abi) && data.last_offset < offset {
+ if data.prefix_index == data.prefix.len() {
+ return data;
+ }
+ data.prefix[data.prefix_index] = Some(Reg::i32());
+ data.prefix_index += 1;
+ data.last_offset = data.last_offset + Reg::i32().size;
+ }
+
+ for _ in 0..((offset - data.last_offset).bits() / 64)
+ .min((data.prefix.len() - data.prefix_index) as u64)
+ {
+ data.prefix[data.prefix_index] = Some(Reg::i64());
+ data.prefix_index += 1;
+ data.last_offset = data.last_offset + Reg::i64().size;
+ }
+
+ if data.last_offset < offset {
+ if data.prefix_index == data.prefix.len() {
+ return data;
+ }
+ data.prefix[data.prefix_index] = Some(Reg::i32());
+ data.prefix_index += 1;
+ data.last_offset = data.last_offset + Reg::i32().size;
+ }
+
+ if data.prefix_index == data.prefix.len() {
+ return data;
+ }
+
+ if scalar.primitive() == abi::F32 {
+ data.arg_attribute = ArgAttribute::InReg;
+ data.prefix[data.prefix_index] = Some(Reg::f32());
+ data.last_offset = offset + Reg::f32().size;
+ } else {
+ data.prefix[data.prefix_index] = Some(Reg::f64());
+ data.last_offset = offset + Reg::f64().size;
+ }
+ data.prefix_index += 1;
+ return data;
+}
+
+fn arg_scalar_pair<C>(
+ cx: &C,
+ scalar1: &Scalar,
+ scalar2: &Scalar,
+ mut offset: Size,
+ mut data: Sdata,
+) -> Sdata
+where
+ C: HasDataLayout,
+{
+ data = arg_scalar(cx, &scalar1, offset, data);
+ match (scalar1.primitive(), scalar2.primitive()) {
+ (abi::F32, _) => offset += Reg::f32().size,
+ (_, abi::F64) => offset += Reg::f64().size,
+ (abi::Int(i, _signed), _) => offset += i.size(),
+ (abi::Pointer, _) => offset += Reg::i64().size,
+ _ => {}
+ }
+
+ if (offset.raw % 4) != 0 && scalar2.primitive().is_float() {
+ offset.raw += 4 - (offset.raw % 4);
+ }
+ data = arg_scalar(cx, &scalar2, offset, data);
+ return data;
+}
+
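+// Recursively walk `layout`, recording which registers its float members
+// occupy and filling the gaps between them with integer registers.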
+fn parse_structure<'a, Ty, C>(
+ cx: &C,
+ layout: TyAndLayout<'a, Ty>,
+ mut data: Sdata,
+ mut offset: Size,
+) -> Sdata
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if let abi::FieldsShape::Union(_) = layout.fields {
+ return data;
+ }
+
+ match layout.abi {
+ abi::Abi::Scalar(scalar) => {
+ data = arg_scalar(cx, &scalar, offset, data);
+ }
+ abi::Abi::Aggregate { .. } => {
+ for i in 0..layout.fields.count() {
+ if offset < layout.fields.offset(i) {
+ offset = layout.fields.offset(i);
+ }
+ data = parse_structure(cx, layout.field(cx, i), data.clone(), offset);
+ }
+ }
+ _ => {
+ if let abi::Abi::ScalarPair(scalar1, scalar2) = &layout.abi {
+ data = arg_scalar_pair(cx, scalar1, scalar2, offset, data);
+ }
+ }
+ }
+
+ return data;
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, in_registers_max: Size)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ arg.extend_integer_width_to(64);
+ return;
+ }
+
+ let total = arg.layout.size;
+ if total > in_registers_max {
+ arg.make_indirect();
+ return;
+ }
+
+ match arg.layout.fields {
+ abi::FieldsShape::Primitive => unreachable!(),
+ abi::FieldsShape::Array { .. } => {
+ // Arrays are passed indirectly
+ arg.make_indirect();
+ return;
+ }
+ abi::FieldsShape::Union(_) => {
+ // Unions are always treated as a series of 64-bit integer chunks
+ }
+ abi::FieldsShape::Arbitrary { .. } => {
+ // Structures with floating point numbers need special care.
+
+ let mut data = parse_structure(
+ cx,
+ arg.layout,
+ Sdata {
+ prefix: [None; 8],
+ prefix_index: 0,
+ last_offset: Size::ZERO,
+ has_float: false,
+ arg_attribute: ArgAttribute::default(),
+ },
+ Size { raw: 0 },
+ );
+
+ if data.has_float {
+ // A structure like { float, int, int } must not be lowered as if it
+ // were { float, long int }; the reverse direction is harmless.
+ if data.last_offset < arg.layout.size
+ && (data.last_offset.raw % 8) != 0
+ && data.prefix_index < data.prefix.len()
+ {
+ data.prefix[data.prefix_index] = Some(Reg::i32());
+ data.prefix_index += 1;
+ data.last_offset += Reg::i32().size;
+ }
+
+ let mut rest_size = arg.layout.size - data.last_offset;
+ if (rest_size.raw % 8) != 0 && data.prefix_index < data.prefix.len() {
+ data.prefix[data.prefix_index] = Some(Reg::i32());
+ rest_size = rest_size - Reg::i32().size;
+ }
+
+ arg.cast_to(CastTarget {
+ prefix: data.prefix,
+ rest: Uniform { unit: Reg::i64(), total: rest_size },
+ attrs: ArgAttributes {
+ regular: data.arg_attribute,
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ },
+ });
+ return;
+ }
+ }
+ }
+
+ arg.cast_to(Uniform { unit: Reg::i64(), total });
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_arg(cx, &mut fn_abi.ret, Size { raw: 32 });
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, Size { raw: 16 });
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/wasm.rs b/compiler/rustc_target/src/abi/call/wasm.rs
new file mode 100644
index 000000000..3237cde10
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/wasm.rs
@@ -0,0 +1,83 @@
+use crate::abi::call::{ArgAbi, FnAbi, Uniform};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
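+// An aggregate is "trivial" here if it is a homogeneous aggregate whose
+// single unit covers the entire value; such values are passed directly
+// as that unit instead of indirectly.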
+fn unwrap_trivial_aggregate<'a, Ty, C>(cx: &C, val: &mut ArgAbi<'a, Ty>) -> bool
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if val.layout.is_aggregate() {
+ if let Some(unit) = val.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()) {
+ let size = val.layout.size;
+ if unit.size == size {
+ val.cast_to(Uniform { unit, total: size });
+ return true;
+ }
+ }
+ }
+ false
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ ret.extend_integer_width_to(32);
+ if ret.layout.is_aggregate() && !unwrap_trivial_aggregate(cx, ret) {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.extend_integer_width_to(32);
+ if arg.layout.is_aggregate() && !unwrap_trivial_aggregate(cx, arg) {
+ arg.make_indirect_byval();
+ }
+}
+
+/// The purpose of this ABI is to match the C ABI (aka clang) exactly.
+pub fn compute_c_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
+
+/// The purpose of this ABI is to match the WebAssembly standard. This
+/// intentionally diverges from the C ABI and is specifically crafted to take
+/// advantage of LLVM's support for multiple returns in WebAssembly.
+pub fn compute_wasm_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+
+ fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ ret.extend_integer_width_to(32);
+ }
+
+ fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ arg.extend_integer_width_to(32);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/x86.rs b/compiler/rustc_target/src/abi/call/x86.rs
new file mode 100644
index 000000000..c7d59baf9
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/x86.rs
@@ -0,0 +1,117 @@
+use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+use crate::spec::HasTargetSpec;
+
+#[derive(PartialEq)]
+pub enum Flavor {
+ General,
+ FastcallOrVectorcall,
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, flavor: Flavor)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ if !fn_abi.ret.is_ignore() {
+ if fn_abi.ret.layout.is_aggregate() {
+ // Returning a structure. Most often, this will use
+ // a hidden first argument. On some platforms, though,
+ // small structs are returned as integers.
+ //
+ // Some links:
+ // https://www.angelcode.com/dev/callconv/callconv.html
+ // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
+ let t = cx.target_spec();
+ if t.abi_return_struct_as_int {
+ // According to Clang, everyone but MSVC returns single-element
+ // float aggregates directly in a floating-point register.
+ if !t.is_like_msvc && fn_abi.ret.layout.is_single_fp_element(cx) {
+ match fn_abi.ret.layout.size.bytes() {
+ 4 => fn_abi.ret.cast_to(Reg::f32()),
+ 8 => fn_abi.ret.cast_to(Reg::f64()),
+ _ => fn_abi.ret.make_indirect(),
+ }
+ } else {
+ match fn_abi.ret.layout.size.bytes() {
+ 1 => fn_abi.ret.cast_to(Reg::i8()),
+ 2 => fn_abi.ret.cast_to(Reg::i16()),
+ 4 => fn_abi.ret.cast_to(Reg::i32()),
+ 8 => fn_abi.ret.cast_to(Reg::i64()),
+ _ => fn_abi.ret.make_indirect(),
+ }
+ }
+ } else {
+ fn_abi.ret.make_indirect();
+ }
+ } else {
+ fn_abi.ret.extend_integer_width_to(32);
+ }
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ if arg.layout.is_aggregate() {
+ arg.make_indirect_byval();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+ }
+
+ if flavor == Flavor::FastcallOrVectorcall {
+ // Mark arguments as InReg like clang does it,
+ // so our fastcall/vectorcall is compatible with C/C++ fastcall/vectorcall.
+
+ // Clang reference: lib/CodeGen/TargetInfo.cpp
+ // See X86_32ABIInfo::shouldPrimitiveUseInReg(), X86_32ABIInfo::updateFreeRegs()
+
+ // IsSoftFloatABI is only set to true on ARM platforms,
+ // which in turn can't be x86?
+
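+ // fastcall/vectorcall pass up to two integer arguments in registers
+ // (ECX and EDX), hence the budget of two.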
+ let mut free_regs = 2;
+
+ for arg in &mut fn_abi.args {
+ let attrs = match arg.mode {
+ PassMode::Ignore
+ | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ continue;
+ }
+ PassMode::Direct(ref mut attrs) => attrs,
+ PassMode::Pair(..)
+ | PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ }
+ | PassMode::Cast(_) => {
+ unreachable!("x86 shouldn't be passing arguments by {:?}", arg.mode)
+ }
+ };
+
+ // At this point we know this must be a primitive of sorts.
+ let unit = arg.layout.homogeneous_aggregate(cx).unwrap().unit().unwrap();
+ assert_eq!(unit.size, arg.layout.size);
+ if unit.kind == RegKind::Float {
+ continue;
+ }
+
+ let size_in_regs = (arg.layout.size.bits() + 31) / 32;
+
+ if size_in_regs == 0 {
+ continue;
+ }
+
+ if size_in_regs > free_regs {
+ break;
+ }
+
+ free_regs -= size_in_regs;
+
+ if arg.layout.size.bits() <= 32 && unit.kind == RegKind::Integer {
+ attrs.set(ArgAttribute::InReg);
+ }
+
+ if free_regs == 0 {
+ break;
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/x86_64.rs b/compiler/rustc_target/src/abi/call/x86_64.rs
new file mode 100644
index 000000000..a52e01a49
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/x86_64.rs
@@ -0,0 +1,248 @@
+// The classification code for the x86_64 ABI is taken from the clay language
+// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
+
+use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind};
+use crate::abi::{self, Abi, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+
+/// Classification of "eightbyte" components.
+// N.B., the order of the variants is from general to specific,
+// such that `unify(a, b)` is the "smaller" of `a` and `b`.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
+enum Class {
+ Int,
+ Sse,
+ SseUp,
+}
+
+#[derive(Clone, Copy, Debug)]
+struct Memory;
+
+// Currently supported vector size (AVX-512).
+const LARGEST_VECTOR_SIZE: usize = 512;
+const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
+
+fn classify_arg<'a, Ty, C>(
+ cx: &C,
+ arg: &ArgAbi<'a, Ty>,
+) -> Result<[Option<Class>; MAX_EIGHTBYTES], Memory>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ fn classify<'a, Ty, C>(
+ cx: &C,
+ layout: TyAndLayout<'a, Ty>,
+ cls: &mut [Option<Class>],
+ off: Size,
+ ) -> Result<(), Memory>
+ where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+ {
+ if !off.is_aligned(layout.align.abi) {
+ if !layout.is_zst() {
+ return Err(Memory);
+ }
+ return Ok(());
+ }
+
+ let mut c = match layout.abi {
+ Abi::Uninhabited => return Ok(()),
+
+ Abi::Scalar(scalar) => match scalar.primitive() {
+ abi::Int(..) | abi::Pointer => Class::Int,
+ abi::F32 | abi::F64 => Class::Sse,
+ },
+
+ Abi::Vector { .. } => Class::Sse,
+
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
+ for i in 0..layout.fields.count() {
+ let field_off = off + layout.fields.offset(i);
+ classify(cx, layout.field(cx, i), cls, field_off)?;
+ }
+
+ match &layout.variants {
+ abi::Variants::Single { .. } => {}
+ abi::Variants::Multiple { variants, .. } => {
+ // Treat enum variants like union members.
+ for variant_idx in variants.indices() {
+ classify(cx, layout.for_variant(cx, variant_idx), cls, off)?;
+ }
+ }
+ }
+
+ return Ok(());
+ }
+ };
+
+ // Fill in `cls` for scalars (Int/Sse) and vectors (Sse).
+ let first = (off.bytes() / 8) as usize;
+ let last = ((off.bytes() + layout.size.bytes() - 1) / 8) as usize;
+ for cls in &mut cls[first..=last] {
+ *cls = Some(cls.map_or(c, |old| old.min(c)));
+
+ // Everything after the first Sse "eightbyte"
+ // component is the upper half of a register.
+ if c == Class::Sse {
+ c = Class::SseUp;
+ }
+ }
+
+ Ok(())
+ }
+
+ let n = ((arg.layout.size.bytes() + 7) / 8) as usize;
+ if n > MAX_EIGHTBYTES {
+ return Err(Memory);
+ }
+
+ let mut cls = [None; MAX_EIGHTBYTES];
+ classify(cx, arg.layout, &mut cls, Size::ZERO)?;
+ if n > 2 {
+ if cls[0] != Some(Class::Sse) {
+ return Err(Memory);
+ }
+ if cls[1..n].iter().any(|&c| c != Some(Class::SseUp)) {
+ return Err(Memory);
+ }
+ } else {
+ let mut i = 0;
+ while i < n {
+ if cls[i] == Some(Class::SseUp) {
+ cls[i] = Some(Class::Sse);
+ } else if cls[i] == Some(Class::Sse) {
+ i += 1;
+ while i != n && cls[i] == Some(Class::SseUp) {
+ i += 1;
+ }
+ } else {
+ i += 1;
+ }
+ }
+ }
+
+ Ok(cls)
+}
+
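+// Choose the register for the eightbyte at `cls[*i]`: an Int eightbyte
+// becomes an integer register, a run of Sse/SseUp eightbytes becomes a
+// single float or vector register, and `*i` is advanced past what was
+// consumed.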
+fn reg_component(cls: &[Option<Class>], i: &mut usize, size: Size) -> Option<Reg> {
+ if *i >= cls.len() {
+ return None;
+ }
+
+ match cls[*i] {
+ None => None,
+ Some(Class::Int) => {
+ *i += 1;
+ Some(if size.bytes() < 8 { Reg { kind: RegKind::Integer, size } } else { Reg::i64() })
+ }
+ Some(Class::Sse) => {
+ let vec_len =
+ 1 + cls[*i + 1..].iter().take_while(|&&c| c == Some(Class::SseUp)).count();
+ *i += vec_len;
+ Some(if vec_len == 1 {
+ match size.bytes() {
+ 4 => Reg::f32(),
+ _ => Reg::f64(),
+ }
+ } else {
+ Reg { kind: RegKind::Vector, size: Size::from_bytes(8) * (vec_len as u64) }
+ })
+ }
+ Some(c) => unreachable!("reg_component: unhandled class {:?}", c),
+ }
+}
+
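+// Turn the classified eightbytes into a cast target: one register, or a
+// pair covering the low and high parts of the value.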
+fn cast_target(cls: &[Option<Class>], size: Size) -> CastTarget {
+ let mut i = 0;
+ let lo = reg_component(cls, &mut i, size).unwrap();
+ let offset = Size::from_bytes(8) * (i as u64);
+ let mut target = CastTarget::from(lo);
+ if size > offset {
+ if let Some(hi) = reg_component(cls, &mut i, size - offset) {
+ target = CastTarget::pair(lo, hi);
+ }
+ }
+ assert_eq!(reg_component(cls, &mut i, Size::ZERO), None);
+ target
+}
+
+const MAX_INT_REGS: usize = 6; // RDI, RSI, RDX, RCX, R8, R9
+const MAX_SSE_REGS: usize = 8; // XMM0-7
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ let mut int_regs = MAX_INT_REGS;
+ let mut sse_regs = MAX_SSE_REGS;
+
+ let mut x86_64_arg_or_ret = |arg: &mut ArgAbi<'a, Ty>, is_arg: bool| {
+ let mut cls_or_mem = classify_arg(cx, arg);
+
+ if is_arg {
+ if let Ok(cls) = cls_or_mem {
+ let mut needed_int = 0;
+ let mut needed_sse = 0;
+ for c in cls {
+ match c {
+ Some(Class::Int) => needed_int += 1,
+ Some(Class::Sse) => needed_sse += 1,
+ _ => {}
+ }
+ }
+ match (int_regs.checked_sub(needed_int), sse_regs.checked_sub(needed_sse)) {
+ (Some(left_int), Some(left_sse)) => {
+ int_regs = left_int;
+ sse_regs = left_sse;
+ }
+ _ => {
+ // Not enough registers for this argument, so it will be
+ // passed on the stack, but we only mark aggregates
+ // explicitly as indirect `byval` arguments, as LLVM will
+ // automatically put immediates on the stack itself.
+ if arg.layout.is_aggregate() {
+ cls_or_mem = Err(Memory);
+ }
+ }
+ }
+ }
+ }
+
+ match cls_or_mem {
+ Err(Memory) => {
+ if is_arg {
+ arg.make_indirect_byval();
+ } else {
+ // An `sret` return parameter consumes one integer register.
+ arg.make_indirect();
+ // NOTE(eddyb) return is handled first, so no registers
+ // should've been used yet.
+ assert_eq!(int_regs, MAX_INT_REGS);
+ int_regs -= 1;
+ }
+ }
+ Ok(ref cls) => {
+ // split into sized chunks passed individually
+ if arg.layout.is_aggregate() {
+ let size = arg.layout.size;
+ arg.cast_to(cast_target(cls, size))
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+ }
+ }
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ x86_64_arg_or_ret(&mut fn_abi.ret, false);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ x86_64_arg_or_ret(arg, true);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/x86_win64.rs b/compiler/rustc_target/src/abi/call/x86_win64.rs
new file mode 100644
index 000000000..2aad641b1
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/x86_win64.rs
@@ -0,0 +1,40 @@
+use crate::abi::call::{ArgAbi, FnAbi, Reg};
+use crate::abi::Abi;
+
+// Win64 ABI: https://docs.microsoft.com/en-us/cpp/build/parameter-passing
+
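+// Aggregates of 1, 2, 4, or 8 bytes are passed and returned as an integer
+// of that size; larger aggregates and scalars wider than 8 bytes go
+// indirectly. Vectors are currently left untouched (see the FIXME below).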
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ let fixup = |a: &mut ArgAbi<'_, Ty>| {
+ match a.layout.abi {
+ Abi::Uninhabited => {}
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => match a.layout.size.bits() {
+ 8 => a.cast_to(Reg::i8()),
+ 16 => a.cast_to(Reg::i16()),
+ 32 => a.cast_to(Reg::i32()),
+ 64 => a.cast_to(Reg::i64()),
+ _ => a.make_indirect(),
+ },
+ Abi::Vector { .. } => {
+ // FIXME(eddyb) there should be a size cap here
+ // (probably what clang calls "illegal vectors").
+ }
+ Abi::Scalar(_) => {
+ if a.layout.size.bytes() > 8 {
+ a.make_indirect();
+ } else {
+ a.extend_integer_width_to(32);
+ }
+ }
+ }
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ fixup(&mut fn_abi.ret);
+ }
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ fixup(arg);
+ }
+}