path: root/compiler/rustc_target/src/abi/call
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:32 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:32 +0000
commit     4547b622d8d29df964fa2914213088b148c498fc (patch)
tree       9fc6b25f3c3add6b745be9a2400a6e96140046e9 /compiler/rustc_target/src/abi/call
parent     Releasing progress-linux version 1.66.0+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.67.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_target/src/abi/call')
-rw-r--r--  compiler/rustc_target/src/abi/call/loongarch.rs  342
-rw-r--r--  compiler/rustc_target/src/abi/call/mod.rs         32
-rw-r--r--  compiler/rustc_target/src/abi/call/sparc64.rs     18
3 files changed, 382 insertions, 10 deletions
diff --git a/compiler/rustc_target/src/abi/call/loongarch.rs b/compiler/rustc_target/src/abi/call/loongarch.rs
new file mode 100644
index 000000000..d29b479de
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/loongarch.rs
@@ -0,0 +1,342 @@
+use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
+use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use crate::spec::HasTargetSpec;
+
+#[derive(Copy, Clone)]
+enum RegPassKind {
+ Float(Reg),
+ Integer(Reg),
+ Unknown,
+}
+
+#[derive(Copy, Clone)]
+enum FloatConv {
+ FloatPair(Reg, Reg),
+ Float(Reg),
+ MixedPair(Reg, Reg),
+}
+
+#[derive(Copy, Clone)]
+struct CannotUseFpConv;
+
+fn is_loongarch_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
+ match arg.layout.abi {
+ Abi::Vector { .. } => true,
+ _ => arg.layout.is_aggregate(),
+ }
+}
+
+fn should_use_fp_conv_helper<'a, Ty, C>(
+ cx: &C,
+ arg_layout: &TyAndLayout<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+ field1_kind: &mut RegPassKind,
+ field2_kind: &mut RegPassKind,
+) -> Result<(), CannotUseFpConv>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ match arg_layout.abi {
+ Abi::Scalar(scalar) => match scalar.primitive() {
+ abi::Int(..) | abi::Pointer => {
+ if arg_layout.size.bits() > xlen {
+ return Err(CannotUseFpConv);
+ }
+ match (*field1_kind, *field2_kind) {
+ (RegPassKind::Unknown, _) => {
+ *field1_kind = RegPassKind::Integer(Reg {
+ kind: RegKind::Integer,
+ size: arg_layout.size,
+ });
+ }
+ (RegPassKind::Float(_), RegPassKind::Unknown) => {
+ *field2_kind = RegPassKind::Integer(Reg {
+ kind: RegKind::Integer,
+ size: arg_layout.size,
+ });
+ }
+ _ => return Err(CannotUseFpConv),
+ }
+ }
+ abi::F32 | abi::F64 => {
+ if arg_layout.size.bits() > flen {
+ return Err(CannotUseFpConv);
+ }
+ match (*field1_kind, *field2_kind) {
+ (RegPassKind::Unknown, _) => {
+ *field1_kind =
+ RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
+ }
+ (_, RegPassKind::Unknown) => {
+ *field2_kind =
+ RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
+ }
+ _ => return Err(CannotUseFpConv),
+ }
+ }
+ },
+ Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+ FieldsShape::Primitive => {
+ unreachable!("aggregates can't have `FieldsShape::Primitive`")
+ }
+ FieldsShape::Union(_) => {
+ if !arg_layout.is_zst() {
+ return Err(CannotUseFpConv);
+ }
+ }
+ FieldsShape::Array { count, .. } => {
+ for _ in 0..count {
+ let elem_layout = arg_layout.field(cx, 0);
+ should_use_fp_conv_helper(
+ cx,
+ &elem_layout,
+ xlen,
+ flen,
+ field1_kind,
+ field2_kind,
+ )?;
+ }
+ }
+ FieldsShape::Arbitrary { .. } => {
+ match arg_layout.variants {
+ abi::Variants::Multiple { .. } => return Err(CannotUseFpConv),
+ abi::Variants::Single { .. } => (),
+ }
+ for i in arg_layout.fields.index_by_increasing_offset() {
+ let field = arg_layout.field(cx, i);
+ should_use_fp_conv_helper(cx, &field, xlen, flen, field1_kind, field2_kind)?;
+ }
+ }
+ },
+ }
+ Ok(())
+}
+
+fn should_use_fp_conv<'a, Ty, C>(
+ cx: &C,
+ arg: &TyAndLayout<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+) -> Option<FloatConv>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ let mut field1_kind = RegPassKind::Unknown;
+ let mut field2_kind = RegPassKind::Unknown;
+ if should_use_fp_conv_helper(cx, arg, xlen, flen, &mut field1_kind, &mut field2_kind).is_err() {
+ return None;
+ }
+ match (field1_kind, field2_kind) {
+ (RegPassKind::Integer(l), RegPassKind::Float(r)) => Some(FloatConv::MixedPair(l, r)),
+ (RegPassKind::Float(l), RegPassKind::Integer(r)) => Some(FloatConv::MixedPair(l, r)),
+ (RegPassKind::Float(l), RegPassKind::Float(r)) => Some(FloatConv::FloatPair(l, r)),
+ (RegPassKind::Float(f), RegPassKind::Unknown) => Some(FloatConv::Float(f)),
+ _ => None,
+ }
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
+ match conv {
+ FloatConv::Float(f) => {
+ arg.cast_to(f);
+ }
+ FloatConv::FloatPair(l, r) => {
+ arg.cast_to(CastTarget::pair(l, r));
+ }
+ FloatConv::MixedPair(l, r) => {
+ arg.cast_to(CastTarget::pair(l, r));
+ }
+ }
+ return false;
+ }
+
+ let total = arg.layout.size;
+
+ // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
+ // the argument list with the address."
+ // "Aggregates larger than 2✕XLEN bits are passed by reference and are
+ // replaced in the argument list with the address, as are C++ aggregates
+ // with nontrivial copy constructors, destructors, or vtables."
+ if total.bits() > 2 * xlen {
+ // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
+ if is_loongarch_aggregate(arg) {
+ arg.make_indirect();
+ }
+ return true;
+ }
+
+ let xlen_reg = match xlen {
+ 32 => Reg::i32(),
+ 64 => Reg::i64(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+ if is_loongarch_aggregate(arg) {
+ if total.bits() <= xlen {
+ arg.cast_to(xlen_reg);
+ } else {
+ arg.cast_to(Uniform { unit: xlen_reg, total: Size::from_bits(xlen * 2) });
+ }
+ return false;
+ }
+
+ // "When passed in registers, scalars narrower than XLEN bits are widened
+ // according to the sign of their type up to 32 bits, then sign-extended to
+ // XLEN bits."
+ extend_integer_width(arg, xlen);
+ false
+}
+
+fn classify_arg<'a, Ty, C>(
+ cx: &C,
+ arg: &mut ArgAbi<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+ is_vararg: bool,
+ avail_gprs: &mut u64,
+ avail_fprs: &mut u64,
+) where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ if !is_vararg {
+ match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
+ Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
+ *avail_fprs -= 1;
+ arg.cast_to(f);
+ return;
+ }
+ Some(FloatConv::FloatPair(l, r)) if *avail_fprs >= 2 => {
+ *avail_fprs -= 2;
+ arg.cast_to(CastTarget::pair(l, r));
+ return;
+ }
+ Some(FloatConv::MixedPair(l, r)) if *avail_fprs >= 1 && *avail_gprs >= 1 => {
+ *avail_gprs -= 1;
+ *avail_fprs -= 1;
+ arg.cast_to(CastTarget::pair(l, r));
+ return;
+ }
+ _ => (),
+ }
+ }
+
+ let total = arg.layout.size;
+ let align = arg.layout.align.abi.bits();
+
+ // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
+ // the argument list with the address."
+ // "Aggregates larger than 2✕XLEN bits are passed by reference and are
+ // replaced in the argument list with the address, as are C++ aggregates
+ // with nontrivial copy constructors, destructors, or vtables."
+ if total.bits() > 2 * xlen {
+ // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
+ if is_loongarch_aggregate(arg) {
+ arg.make_indirect();
+ }
+ if *avail_gprs >= 1 {
+ *avail_gprs -= 1;
+ }
+ return;
+ }
+
+ let double_xlen_reg = match xlen {
+ 32 => Reg::i64(),
+ 64 => Reg::i128(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+
+ let xlen_reg = match xlen {
+ 32 => Reg::i32(),
+ 64 => Reg::i64(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+
+ if total.bits() > xlen {
+ let align_regs = align > xlen;
+ if is_loongarch_aggregate(arg) {
+ arg.cast_to(Uniform {
+ unit: if align_regs { double_xlen_reg } else { xlen_reg },
+ total: Size::from_bits(xlen * 2),
+ });
+ }
+ if align_regs && is_vararg {
+ *avail_gprs -= *avail_gprs % 2;
+ }
+ if *avail_gprs >= 2 {
+ *avail_gprs -= 2;
+ } else {
+ *avail_gprs = 0;
+ }
+ return;
+ } else if is_loongarch_aggregate(arg) {
+ arg.cast_to(xlen_reg);
+ if *avail_gprs >= 1 {
+ *avail_gprs -= 1;
+ }
+ return;
+ }
+
+ // "When passed in registers, scalars narrower than XLEN bits are widened
+ // according to the sign of their type up to 32 bits, then sign-extended to
+ // XLEN bits."
+ if *avail_gprs >= 1 {
+ extend_integer_width(arg, xlen);
+ *avail_gprs -= 1;
+ }
+}
+
+fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) {
+ if let Abi::Scalar(scalar) = arg.layout.abi {
+ if let abi::Int(i, _) = scalar.primitive() {
+ // 32-bit integers are always sign-extended
+ if i.size().bits() == 32 && xlen > 32 {
+ if let PassMode::Direct(ref mut attrs) = arg.mode {
+ attrs.ext(ArgExtension::Sext);
+ return;
+ }
+ }
+ }
+ }
+
+ arg.extend_integer_width_to(xlen);
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ let xlen = cx.data_layout().pointer_size.bits();
+ let flen = match &cx.target_spec().llvm_abiname[..] {
+ "ilp32f" | "lp64f" => 32,
+ "ilp32d" | "lp64d" => 64,
+ _ => 0,
+ };
+
+ let mut avail_gprs = 8;
+ let mut avail_fprs = 8;
+
+ if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
+ avail_gprs -= 1;
+ }
+
+ for (i, arg) in fn_abi.args.iter_mut().enumerate() {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(
+ cx,
+ arg,
+ xlen,
+ flen,
+ i >= fn_abi.fixed_count as usize,
+ &mut avail_gprs,
+ &mut avail_fprs,
+ );
+ }
+}
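
Note on the new file: loongarch.rs mirrors the RISC-V hard-float classification. An aggregate that flattens to at most two scalar fields, each fitting in XLEN (integers) or FLEN (floats), is passed in FP registers (FloatConv::Float or FloatConv::FloatPair) or in a GPR/FPR pair (FloatConv::MixedPair); anything else falls back to the integer rules. The following is a minimal, self-contained sketch of that decision only, using hypothetical stand-in types (ScalarKind, FpConv, classify) rather than the rustc_target ones, and skipping the recursion through nested aggregates that the real should_use_fp_conv_helper performs.

// Hypothetical standalone sketch, not the rustc_target API.
#[derive(Copy, Clone, Debug, PartialEq)]
enum ScalarKind {
    Int { bits: u64 },
    Float { bits: u64 },
}

#[derive(Debug, PartialEq)]
enum FpConv {
    Float,     // single float field  -> one FPR
    FloatPair, // two float fields    -> two FPRs
    MixedPair, // one int + one float -> one GPR + one FPR
}

/// Classify the (already flattened) scalar fields of an aggregate. Any field
/// wider than the matching register width disables the FP convention.
fn classify(fields: &[ScalarKind], xlen: u64, flen: u64) -> Option<FpConv> {
    let mut ints = 0;
    let mut floats = 0;
    for f in fields {
        match *f {
            ScalarKind::Int { bits } if bits <= xlen => ints += 1,
            ScalarKind::Float { bits } if bits <= flen => floats += 1,
            _ => return None, // too wide for a single register
        }
    }
    match (floats, ints) {
        (1, 0) => Some(FpConv::Float),
        (2, 0) => Some(FpConv::FloatPair),
        (1, 1) => Some(FpConv::MixedPair),
        _ => None, // integer-only, empty, or more than two fields
    }
}

fn main() {
    // struct { f64, i32 } on lp64d: one FPR + one GPR, like FloatConv::MixedPair.
    let s = [ScalarKind::Float { bits: 64 }, ScalarKind::Int { bits: 32 }];
    assert_eq!(classify(&s, 64, 64), Some(FpConv::MixedPair));
    // struct { f32, f32 }: two FPRs, like FloatConv::FloatPair.
    let p = [ScalarKind::Float { bits: 32 }, ScalarKind::Float { bits: 32 }];
    assert_eq!(classify(&p, 64, 64), Some(FpConv::FloatPair));
    // struct { i64, i64 }: no FP registers involved, integer rules apply.
    let q = [ScalarKind::Int { bits: 64 }, ScalarKind::Int { bits: 64 }];
    assert_eq!(classify(&q, 64, 64), None);
}
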
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 9e5f0e4d1..a5ffaebea 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -3,6 +3,7 @@ use crate::abi::{HasDataLayout, TyAbiInterface, TyAndLayout};
use crate::spec::{self, HasTargetSpec};
use rustc_span::Symbol;
use std::fmt;
+use std::str::FromStr;
mod aarch64;
mod amdgpu;
@@ -10,6 +11,7 @@ mod arm;
mod avr;
mod bpf;
mod hexagon;
+mod loongarch;
mod m68k;
mod mips;
mod mips64;
@@ -260,7 +262,7 @@ impl CastTarget {
let mut size = self.rest.total;
for i in 0..self.prefix.iter().count() {
match self.prefix[i] {
- Some(v) => size += Size { raw: v.size.bytes() },
+ Some(v) => size += v.size,
None => {}
}
}
@@ -696,6 +698,7 @@ impl<'a, Ty> FnAbi<'a, Ty> {
"amdgpu" => amdgpu::compute_abi_info(cx, self),
"arm" => arm::compute_abi_info(cx, self),
"avr" => avr::compute_abi_info(self),
+ "loongarch64" => loongarch::compute_abi_info(cx, self),
"m68k" => m68k::compute_abi_info(self),
"mips" => mips::compute_abi_info(cx, self),
"mips64" => mips64::compute_abi_info(cx, self),
@@ -735,6 +738,33 @@ impl<'a, Ty> FnAbi<'a, Ty> {
}
}
+impl FromStr for Conv {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "C" => Ok(Conv::C),
+ "Rust" => Ok(Conv::Rust),
+ "RustCold" => Ok(Conv::Rust),
+ "ArmAapcs" => Ok(Conv::ArmAapcs),
+ "CCmseNonSecureCall" => Ok(Conv::CCmseNonSecureCall),
+ "Msp430Intr" => Ok(Conv::Msp430Intr),
+ "PtxKernel" => Ok(Conv::PtxKernel),
+ "X86Fastcall" => Ok(Conv::X86Fastcall),
+ "X86Intr" => Ok(Conv::X86Intr),
+ "X86Stdcall" => Ok(Conv::X86Stdcall),
+ "X86ThisCall" => Ok(Conv::X86ThisCall),
+ "X86VectorCall" => Ok(Conv::X86VectorCall),
+ "X86_64SysV" => Ok(Conv::X86_64SysV),
+ "X86_64Win64" => Ok(Conv::X86_64Win64),
+ "AmdGpuKernel" => Ok(Conv::AmdGpuKernel),
+ "AvrInterrupt" => Ok(Conv::AvrInterrupt),
+ "AvrNonBlockingInterrupt" => Ok(Conv::AvrNonBlockingInterrupt),
+ _ => Err(format!("'{}' is not a valid value for entry function call convention.", s)),
+ }
+ }
+}
+
// Some types are used a lot. Make sure they don't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
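
Note on the mod.rs hunks: besides registering the loongarch64 backend and using Size addition directly, they add a FromStr impl for Conv so a calling-convention name taken from a target spec can be parsed with str::parse. Below is a hedged, self-contained illustration of the same pattern; CallConv is a hypothetical stand-in, since the real Conv enum lives inside rustc_target and cannot be used outside the compiler.

// Hypothetical standalone sketch of the FromStr pattern added for Conv.
use std::str::FromStr;

#[derive(Debug, PartialEq)]
enum CallConv {
    C,
    Rust,
    X86_64SysV,
}

impl FromStr for CallConv {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "C" => Ok(CallConv::C),
            "Rust" => Ok(CallConv::Rust),
            "X86_64SysV" => Ok(CallConv::X86_64SysV),
            _ => Err(format!("'{s}' is not a valid calling convention name")),
        }
    }
}

fn main() {
    // str::parse picks up the FromStr impl, which is how a name string from a
    // target spec can be turned into a typed calling-convention value.
    assert_eq!("Rust".parse::<CallConv>(), Ok(CallConv::Rust));
    assert!("bogus".parse::<CallConv>().is_err());
}
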
diff --git a/compiler/rustc_target/src/abi/call/sparc64.rs b/compiler/rustc_target/src/abi/call/sparc64.rs
index 1b74959ad..c8b6ac5ae 100644
--- a/compiler/rustc_target/src/abi/call/sparc64.rs
+++ b/compiler/rustc_target/src/abi/call/sparc64.rs
@@ -78,7 +78,7 @@ fn arg_scalar_pair<C>(
where
C: HasDataLayout,
{
- data = arg_scalar(cx, &scalar1, offset, data);
+ data = arg_scalar(cx, scalar1, offset, data);
match (scalar1.primitive(), scalar2.primitive()) {
(abi::F32, _) => offset += Reg::f32().size,
(_, abi::F64) => offset += Reg::f64().size,
@@ -87,10 +87,10 @@ where
_ => {}
}
- if (offset.raw % 4) != 0 && scalar2.primitive().is_float() {
- offset.raw += 4 - (offset.raw % 4);
+ if (offset.bytes() % 4) != 0 && scalar2.primitive().is_float() {
+ offset += Size::from_bytes(4 - (offset.bytes() % 4));
}
- data = arg_scalar(cx, &scalar2, offset, data);
+ data = arg_scalar(cx, scalar2, offset, data);
return data;
}
@@ -169,14 +169,14 @@ where
has_float: false,
arg_attribute: ArgAttribute::default(),
},
- Size { raw: 0 },
+ Size::ZERO,
);
if data.has_float {
// Structure { float, int, int } doesn't like to be handled like
// { float, long int }. Other way around it doesn't mind.
if data.last_offset < arg.layout.size
- && (data.last_offset.raw % 8) != 0
+ && (data.last_offset.bytes() % 8) != 0
&& data.prefix_index < data.prefix.len()
{
data.prefix[data.prefix_index] = Some(Reg::i32());
@@ -185,7 +185,7 @@ where
}
let mut rest_size = arg.layout.size - data.last_offset;
- if (rest_size.raw % 8) != 0 && data.prefix_index < data.prefix.len() {
+ if (rest_size.bytes() % 8) != 0 && data.prefix_index < data.prefix.len() {
data.prefix[data.prefix_index] = Some(Reg::i32());
rest_size = rest_size - Reg::i32().size;
}
@@ -214,13 +214,13 @@ where
C: HasDataLayout,
{
if !fn_abi.ret.is_ignore() {
- classify_arg(cx, &mut fn_abi.ret, Size { raw: 32 });
+ classify_arg(cx, &mut fn_abi.ret, Size::from_bytes(32));
}
for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
- classify_arg(cx, arg, Size { raw: 16 });
+ classify_arg(cx, arg, Size::from_bytes(16));
}
}
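
Note on the sparc64.rs hunks: they are a cleanup, routing byte offsets through Size's constructors and accessors (Size::from_bytes, .bytes(), Size::ZERO) instead of poking the raw field. A small self-contained sketch of the rounding they perform is below; this Size is a hypothetical stand-in with just enough surface to run, not the real rustc_target type.

// Hypothetical minimal stand-in for rustc_target's Size.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Size {
    raw: u64, // bytes
}

impl Size {
    const ZERO: Size = Size { raw: 0 };

    fn from_bytes(bytes: u64) -> Size {
        Size { raw: bytes }
    }

    fn bytes(self) -> u64 {
        self.raw
    }
}

impl std::ops::AddAssign for Size {
    fn add_assign(&mut self, rhs: Size) {
        self.raw += rhs.raw;
    }
}

fn main() {
    // arg_scalar_pair rounds the running offset up to the next 4-byte boundary
    // before placing a float; the same arithmetic with the accessor methods:
    let mut offset = Size::from_bytes(6);
    if offset.bytes() % 4 != 0 {
        offset += Size::from_bytes(4 - (offset.bytes() % 4));
    }
    assert_eq!(offset, Size::from_bytes(8));
    assert_eq!(Size::ZERO.bytes(), 0);
}
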