Diffstat (limited to 'compiler/rustc_target/src/abi')
-rw-r--r--  compiler/rustc_target/src/abi/call/aarch64.rs   | 43
-rw-r--r--  compiler/rustc_target/src/abi/call/amdgpu.rs    |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/arm.rs       |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/avr.rs       |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/bpf.rs       |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/hexagon.rs   |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/m68k.rs      |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/mips.rs      |  8
-rw-r--r--  compiler/rustc_target/src/abi/call/mips64.rs    |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/mod.rs       | 46
-rw-r--r--  compiler/rustc_target/src/abi/call/msp430.rs    |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/nvptx.rs     | 33
-rw-r--r--  compiler/rustc_target/src/abi/call/nvptx64.rs   |  4
-rw-r--r--  compiler/rustc_target/src/abi/call/powerpc.rs   |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/powerpc64.rs |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/riscv.rs     |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/s390x.rs     |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/sparc.rs     |  8
-rw-r--r--  compiler/rustc_target/src/abi/call/sparc64.rs   |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/wasm.rs      |  4
-rw-r--r--  compiler/rustc_target/src/abi/call/x86.rs       |  6
-rw-r--r--  compiler/rustc_target/src/abi/call/x86_64.rs    |  2
-rw-r--r--  compiler/rustc_target/src/abi/call/x86_win64.rs |  2
-rw-r--r--  compiler/rustc_target/src/abi/mod.rs            | 67
24 files changed, 131 insertions(+), 118 deletions(-)
diff --git a/compiler/rustc_target/src/abi/call/aarch64.rs b/compiler/rustc_target/src/abi/call/aarch64.rs
index 4613a459c..a84988fa7 100644
--- a/compiler/rustc_target/src/abi/call/aarch64.rs
+++ b/compiler/rustc_target/src/abi/call/aarch64.rs
@@ -1,6 +1,27 @@
use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
use crate::abi::{HasDataLayout, TyAbiInterface};
+/// Given integer-types M and register width N (e.g. M=u16 and N=32 bits), the
+/// `ParamExtension` policy specifies how a uM value should be treated when
+/// passed via register or stack-slot of width N. See also rust-lang/rust#97463.
+#[derive(Copy, Clone, PartialEq)]
+pub enum ParamExtension {
+ /// Indicates that when passing an i8/i16, either as a function argument or
+ /// as a return value, it must be sign-extended to 32 bits, and likewise a
+ /// u8/u16 must be zero-extended to 32-bits. (This variant is here to
+ /// accommodate Apple's deviation from the usual AArch64 ABI as defined by
+ /// ARM.)
+ ///
+ /// See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
+ ExtendTo32Bits,
+
+ /// Indicates that no sign- nor zero-extension is performed: if a value of
+ /// type with bitwidth M is passed as function argument or return value,
+ /// then M bits are copied into the least significant M bits, and the
+ /// remaining bits of the register (or word of memory) are untouched.
+ NoExtension,
+}
+
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
where
Ty: TyAbiInterface<'a, C> + Copy,
@@ -24,13 +45,16 @@ where
})
}
-fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, param_policy: ParamExtension)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
{
if !ret.layout.is_aggregate() {
- ret.extend_integer_width_to(32);
+ match param_policy {
+ ParamExtension::ExtendTo32Bits => ret.extend_integer_width_to(32),
+ ParamExtension::NoExtension => {}
+ }
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
@@ -46,13 +70,16 @@ where
ret.make_indirect();
}
-fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, param_policy: ParamExtension)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
{
if !arg.layout.is_aggregate() {
- arg.extend_integer_width_to(32);
+ match param_policy {
+ ParamExtension::ExtendTo32Bits => arg.extend_integer_width_to(32),
+ ParamExtension::NoExtension => {}
+ }
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
@@ -68,19 +95,19 @@ where
arg.make_indirect();
}
-pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, param_policy: ParamExtension)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
{
if !fn_abi.ret.is_ignore() {
- classify_ret(cx, &mut fn_abi.ret);
+ classify_ret(cx, &mut fn_abi.ret, param_policy);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
- classify_arg(cx, arg);
+ classify_arg(cx, arg, param_policy);
}
}
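
The new `ParamExtension` parameter threads the Apple-specific extension requirement through the AArch64 classification. A minimal sketch (not part of this diff; the helper name is hypothetical) of how the policy is selected, mirroring the `is_like_osx` check added to `call/mod.rs` later in this diff:

```rust
use crate::abi::call::aarch64::ParamExtension;

/// Hypothetical helper mirroring the dispatch added in `call/mod.rs`:
/// Apple's AArch64 ABI requires i8/i16 arguments and return values to be
/// extended to 32 bits, while standard AAPCS64 leaves the upper bits alone.
fn aarch64_param_extension(is_like_osx: bool) -> ParamExtension {
    if is_like_osx {
        ParamExtension::ExtendTo32Bits
    } else {
        ParamExtension::NoExtension
    }
}
```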
diff --git a/compiler/rustc_target/src/abi/call/amdgpu.rs b/compiler/rustc_target/src/abi/call/amdgpu.rs
index 9be97476c..e30dead63 100644
--- a/compiler/rustc_target/src/abi/call/amdgpu.rs
+++ b/compiler/rustc_target/src/abi/call/amdgpu.rs
@@ -26,7 +26,7 @@ where
classify_ret(cx, &mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/arm.rs b/compiler/rustc_target/src/abi/call/arm.rs
index e66c2132b..1923ea588 100644
--- a/compiler/rustc_target/src/abi/call/arm.rs
+++ b/compiler/rustc_target/src/abi/call/arm.rs
@@ -88,7 +88,7 @@ where
classify_ret(cx, &mut fn_abi.ret, vfp);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/avr.rs b/compiler/rustc_target/src/abi/call/avr.rs
index c1f7a1e3a..e20f01355 100644
--- a/compiler/rustc_target/src/abi/call/avr.rs
+++ b/compiler/rustc_target/src/abi/call/avr.rs
@@ -49,7 +49,7 @@ pub fn compute_abi_info<Ty>(fty: &mut FnAbi<'_, Ty>) {
classify_ret_ty(&mut fty.ret);
}
- for arg in &mut fty.args {
+ for arg in fty.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/bpf.rs b/compiler/rustc_target/src/abi/call/bpf.rs
index 466c52553..780e7df43 100644
--- a/compiler/rustc_target/src/abi/call/bpf.rs
+++ b/compiler/rustc_target/src/abi/call/bpf.rs
@@ -22,7 +22,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/hexagon.rs b/compiler/rustc_target/src/abi/call/hexagon.rs
index 8028443b8..80a442048 100644
--- a/compiler/rustc_target/src/abi/call/hexagon.rs
+++ b/compiler/rustc_target/src/abi/call/hexagon.rs
@@ -21,7 +21,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/m68k.rs b/compiler/rustc_target/src/abi/call/m68k.rs
index 58fdc00b6..c1e0f54af 100644
--- a/compiler/rustc_target/src/abi/call/m68k.rs
+++ b/compiler/rustc_target/src/abi/call/m68k.rs
@@ -21,7 +21,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/mips.rs b/compiler/rustc_target/src/abi/call/mips.rs
index cc4431976..edcd1bab8 100644
--- a/compiler/rustc_target/src/abi/call/mips.rs
+++ b/compiler/rustc_target/src/abi/call/mips.rs
@@ -22,10 +22,8 @@ where
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
if arg.layout.is_aggregate() {
- arg.cast_to(Uniform { unit: Reg::i32(), total: size });
- if !offset.is_aligned(align) {
- arg.pad_with(Reg::i32());
- }
+ let pad_i32 = !offset.is_aligned(align);
+ arg.cast_to_and_pad_i32(Uniform { unit: Reg::i32(), total: size }, pad_i32);
} else {
arg.extend_integer_width_to(32);
}
@@ -42,7 +40,7 @@ where
classify_ret(cx, &mut fn_abi.ret, &mut offset);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
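
The `cast_to_and_pad_i32` call above folds what used to be two steps (`cast_to` plus `pad_with(Reg::i32())`) into one, recording the padding as the `bool` in `PassMode::Cast`. A hedged sketch (hypothetical backend-side code, not from this diff) of how a consumer of the new pair would emit that padding slot:

```rust
use crate::abi::call::{PassMode, Reg};

/// Hypothetical consumer: if the pad flag is set, emit one dummy `i32`
/// slot before the registers described by the cast target (this used to
/// live in the separate `ArgAbi::pad` field removed in this diff).
fn padded_arg_regs(mode: &PassMode) -> Vec<Reg> {
    let mut regs = Vec::new();
    if let PassMode::Cast(_cast, pad_i32) = mode {
        if *pad_i32 {
            regs.push(Reg::i32());
        }
        // ...followed by the registers described by `_cast` (elided).
    }
    regs
}
```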
diff --git a/compiler/rustc_target/src/abi/call/mips64.rs b/compiler/rustc_target/src/abi/call/mips64.rs
index cd54167aa..2700f67b2 100644
--- a/compiler/rustc_target/src/abi/call/mips64.rs
+++ b/compiler/rustc_target/src/abi/call/mips64.rs
@@ -158,7 +158,7 @@ where
classify_ret(cx, &mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 577126a95..d2fb8c32f 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -14,7 +14,6 @@ mod m68k;
mod mips;
mod mips64;
mod msp430;
-mod nvptx;
mod nvptx64;
mod powerpc;
mod powerpc64;
@@ -27,7 +26,7 @@ mod x86;
mod x86_64;
mod x86_win64;
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum PassMode {
/// Ignore the argument.
///
@@ -41,9 +40,10 @@ pub enum PassMode {
///
/// The argument has a layout abi of `ScalarPair`.
Pair(ArgAttributes, ArgAttributes),
- /// Pass the argument after casting it, to either
- /// a single uniform or a pair of registers.
- Cast(CastTarget),
+ /// Pass the argument after casting it, to either a single uniform or a
+ /// pair of registers. The bool indicates if a `Reg::i32()` dummy argument
+ /// is emitted before the real argument.
+ Cast(Box<CastTarget>, bool),
/// Pass the argument indirectly via a hidden pointer.
/// The `extra_attrs` value, if any, is for the extra data (vtable or length)
/// which indicates that it refers to an unsized rvalue.
@@ -464,10 +464,6 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct ArgAbi<'a, Ty> {
pub layout: TyAndLayout<'a, Ty>,
-
- /// Dummy argument, which is emitted before the real argument.
- pub pad: Option<Reg>,
-
pub mode: PassMode,
}
@@ -487,7 +483,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
Abi::Aggregate { .. } => PassMode::Direct(ArgAttributes::new()),
};
- ArgAbi { layout, pad: None, mode }
+ ArgAbi { layout, mode }
}
fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
@@ -549,11 +545,11 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
}
pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
- self.mode = PassMode::Cast(target.into());
+ self.mode = PassMode::Cast(Box::new(target.into()), false);
}
- pub fn pad_with(&mut self, reg: Reg) {
- self.pad = Some(reg);
+ pub fn cast_to_and_pad_i32<T: Into<CastTarget>>(&mut self, target: T, pad_i32: bool) {
+ self.mode = PassMode::Cast(Box::new(target.into()), pad_i32);
}
pub fn is_indirect(&self) -> bool {
@@ -615,7 +611,7 @@ pub enum Conv {
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct FnAbi<'a, Ty> {
/// The LLVM types of each argument.
- pub args: Vec<ArgAbi<'a, Ty>>,
+ pub args: Box<[ArgAbi<'a, Ty>]>,
/// LLVM return type.
pub ret: ArgAbi<'a, Ty>,
@@ -626,7 +622,7 @@ pub struct FnAbi<'a, Ty> {
///
/// Should only be different from args.len() when c_variadic is true.
/// This can be used to know whether an argument is variadic or not.
- pub fixed_count: usize,
+ pub fixed_count: u32,
pub conv: Conv,
@@ -689,7 +685,14 @@ impl<'a, Ty> FnAbi<'a, Ty> {
}
}
},
- "aarch64" => aarch64::compute_abi_info(cx, self),
+ "aarch64" => {
+ let param_policy = if cx.target_spec().is_like_osx {
+ aarch64::ParamExtension::ExtendTo32Bits
+ } else {
+ aarch64::ParamExtension::NoExtension
+ };
+ aarch64::compute_abi_info(cx, self, param_policy)
+ }
"amdgpu" => amdgpu::compute_abi_info(cx, self),
"arm" => arm::compute_abi_info(cx, self),
"avr" => avr::compute_abi_info(self),
@@ -702,7 +705,6 @@ impl<'a, Ty> FnAbi<'a, Ty> {
"msp430" => msp430::compute_abi_info(self),
"sparc" => sparc::compute_abi_info(cx, self),
"sparc64" => sparc64::compute_abi_info(cx, self),
- "nvptx" => nvptx::compute_abi_info(self),
"nvptx64" => {
if cx.target_spec().adjust_abi(abi) == spec::abi::Abi::PtxKernel {
nvptx64::compute_ptx_kernel_abi_info(cx, self)
@@ -732,3 +734,13 @@ impl<'a, Ty> FnAbi<'a, Ty> {
Ok(())
}
}
+
+// Some types are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ use rustc_data_structures::static_assert_size;
+ // These are in alphabetical order, which is easy to maintain.
+ static_assert_size!(ArgAbi<'_, usize>, 56);
+ static_assert_size!(FnAbi<'_, usize>, 80);
+}
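
Boxing the `CastTarget` payload and turning `args` into a `Box<[_]>` are the changes the new `static_assert_size!` checks lock in. A standalone sketch (illustrative types only, not compiler code) of why boxing a large enum payload shrinks the whole enum:

```rust
use std::mem::size_of;

struct BigPayload([u64; 6]); // stand-in for a large struct such as `CastTarget`

enum Inline {
    Small(u32),
    Big(BigPayload), // the enum is at least as big as its largest variant
}

enum Boxed {
    Small(u32),
    Big(Box<BigPayload>), // payload behind a pointer: pointer + tag only
}

fn main() {
    // On a 64-bit target this prints 56 and 16; the same effect is what
    // keeps `ArgAbi` and `FnAbi` within the asserted 56 and 80 bytes above.
    println!("inline: {}", size_of::<Inline>());
    println!("boxed:  {}", size_of::<Boxed>());
}
```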
diff --git a/compiler/rustc_target/src/abi/call/msp430.rs b/compiler/rustc_target/src/abi/call/msp430.rs
index 0ba73657b..33ef47be0 100644
--- a/compiler/rustc_target/src/abi/call/msp430.rs
+++ b/compiler/rustc_target/src/abi/call/msp430.rs
@@ -30,7 +30,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/nvptx.rs b/compiler/rustc_target/src/abi/call/nvptx.rs
deleted file mode 100644
index 428dd95bb..000000000
--- a/compiler/rustc_target/src/abi/call/nvptx.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-// Reference: PTX Writer's Guide to Interoperability
-// https://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability
-
-use crate::abi::call::{ArgAbi, FnAbi};
-
-fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
- if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
- ret.make_indirect();
- } else {
- ret.extend_integer_width_to(32);
- }
-}
-
-fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
- if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
- arg.make_indirect();
- } else {
- arg.extend_integer_width_to(32);
- }
-}
-
-pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
- if !fn_abi.ret.is_ignore() {
- classify_ret(&mut fn_abi.ret);
- }
-
- for arg in &mut fn_abi.args {
- if arg.is_ignore() {
- continue;
- }
- classify_arg(arg);
- }
-}
diff --git a/compiler/rustc_target/src/abi/call/nvptx64.rs b/compiler/rustc_target/src/abi/call/nvptx64.rs
index fc16f1c97..4abe51cd6 100644
--- a/compiler/rustc_target/src/abi/call/nvptx64.rs
+++ b/compiler/rustc_target/src/abi/call/nvptx64.rs
@@ -38,7 +38,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
@@ -55,7 +55,7 @@ where
panic!("Kernels should not return anything other than () or !");
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/powerpc.rs b/compiler/rustc_target/src/abi/call/powerpc.rs
index 27a5c6d2f..70c32db0a 100644
--- a/compiler/rustc_target/src/abi/call/powerpc.rs
+++ b/compiler/rustc_target/src/abi/call/powerpc.rs
@@ -21,7 +21,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/powerpc64.rs b/compiler/rustc_target/src/abi/call/powerpc64.rs
index c22ef9c8f..359bb8fc0 100644
--- a/compiler/rustc_target/src/abi/call/powerpc64.rs
+++ b/compiler/rustc_target/src/abi/call/powerpc64.rs
@@ -132,7 +132,7 @@ where
classify_ret(cx, &mut fn_abi.ret, abi);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/riscv.rs b/compiler/rustc_target/src/abi/call/riscv.rs
index 752b44f64..1cb360f83 100644
--- a/compiler/rustc_target/src/abi/call/riscv.rs
+++ b/compiler/rustc_target/src/abi/call/riscv.rs
@@ -340,7 +340,7 @@ where
arg,
xlen,
flen,
- i >= fn_abi.fixed_count,
+ i >= fn_abi.fixed_count as usize,
&mut avail_gprs,
&mut avail_fprs,
);
diff --git a/compiler/rustc_target/src/abi/call/s390x.rs b/compiler/rustc_target/src/abi/call/s390x.rs
index 13706e8c2..ea2369281 100644
--- a/compiler/rustc_target/src/abi/call/s390x.rs
+++ b/compiler/rustc_target/src/abi/call/s390x.rs
@@ -48,7 +48,7 @@ where
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/sparc.rs b/compiler/rustc_target/src/abi/call/sparc.rs
index cc4431976..edcd1bab8 100644
--- a/compiler/rustc_target/src/abi/call/sparc.rs
+++ b/compiler/rustc_target/src/abi/call/sparc.rs
@@ -22,10 +22,8 @@ where
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
if arg.layout.is_aggregate() {
- arg.cast_to(Uniform { unit: Reg::i32(), total: size });
- if !offset.is_aligned(align) {
- arg.pad_with(Reg::i32());
- }
+ let pad_i32 = !offset.is_aligned(align);
+ arg.cast_to_and_pad_i32(Uniform { unit: Reg::i32(), total: size }, pad_i32);
} else {
arg.extend_integer_width_to(32);
}
@@ -42,7 +40,7 @@ where
classify_ret(cx, &mut fn_abi.ret, &mut offset);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/sparc64.rs b/compiler/rustc_target/src/abi/call/sparc64.rs
index cc3a0a699..1b74959ad 100644
--- a/compiler/rustc_target/src/abi/call/sparc64.rs
+++ b/compiler/rustc_target/src/abi/call/sparc64.rs
@@ -217,7 +217,7 @@ where
classify_arg(cx, &mut fn_abi.ret, Size { raw: 32 });
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/wasm.rs b/compiler/rustc_target/src/abi/call/wasm.rs
index 3237cde10..44427ee53 100644
--- a/compiler/rustc_target/src/abi/call/wasm.rs
+++ b/compiler/rustc_target/src/abi/call/wasm.rs
@@ -50,7 +50,7 @@ where
classify_ret(cx, &mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
@@ -66,7 +66,7 @@ pub fn compute_wasm_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/x86.rs b/compiler/rustc_target/src/abi/call/x86.rs
index c7d59baf9..7c26335dc 100644
--- a/compiler/rustc_target/src/abi/call/x86.rs
+++ b/compiler/rustc_target/src/abi/call/x86.rs
@@ -49,7 +49,7 @@ where
}
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
@@ -72,7 +72,7 @@ where
let mut free_regs = 2;
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
let attrs = match arg.mode {
PassMode::Ignore
| PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
@@ -81,7 +81,7 @@ where
PassMode::Direct(ref mut attrs) => attrs,
PassMode::Pair(..)
| PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ }
- | PassMode::Cast(_) => {
+ | PassMode::Cast(..) => {
unreachable!("x86 shouldn't be passing arguments by {:?}", arg.mode)
}
};
diff --git a/compiler/rustc_target/src/abi/call/x86_64.rs b/compiler/rustc_target/src/abi/call/x86_64.rs
index a52e01a49..c0c071a61 100644
--- a/compiler/rustc_target/src/abi/call/x86_64.rs
+++ b/compiler/rustc_target/src/abi/call/x86_64.rs
@@ -239,7 +239,7 @@ where
x86_64_arg_or_ret(&mut fn_abi.ret, false);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/x86_win64.rs b/compiler/rustc_target/src/abi/call/x86_win64.rs
index 2aad641b1..1aaf0e511 100644
--- a/compiler/rustc_target/src/abi/call/x86_win64.rs
+++ b/compiler/rustc_target/src/abi/call/x86_win64.rs
@@ -31,7 +31,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
if !fn_abi.ret.is_ignore() {
fixup(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index 92ce4d91d..ec334e588 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -7,7 +7,7 @@ use crate::spec::Target;
use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::iter::Step;
-use std::num::NonZeroUsize;
+use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
use std::str::FromStr;
@@ -69,34 +69,46 @@ impl Default for TargetDataLayout {
}
}
+pub enum TargetDataLayoutErrors<'a> {
+ InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
+ InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
+ MissingAlignment { cause: &'a str },
+ InvalidAlignment { cause: &'a str, err: String },
+ InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
+ InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
+ InvalidBitsSize { err: String },
+}
+
impl TargetDataLayout {
- pub fn parse(target: &Target) -> Result<TargetDataLayout, String> {
+ pub fn parse<'a>(target: &'a Target) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
// Parse an address space index from a string.
- let parse_address_space = |s: &str, cause: &str| {
+ let parse_address_space = |s: &'a str, cause: &'a str| {
s.parse::<u32>().map(AddressSpace).map_err(|err| {
- format!("invalid address space `{}` for `{}` in \"data-layout\": {}", s, cause, err)
+ TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
})
};
// Parse a bit count from a string.
- let parse_bits = |s: &str, kind: &str, cause: &str| {
- s.parse::<u64>().map_err(|err| {
- format!("invalid {} `{}` for `{}` in \"data-layout\": {}", kind, s, cause, err)
+ let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
+ s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
+ kind,
+ bit: s,
+ cause,
+ err,
})
};
// Parse a size string.
- let size = |s: &str, cause: &str| parse_bits(s, "size", cause).map(Size::from_bits);
+ let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
// Parse an alignment string.
- let align = |s: &[&str], cause: &str| {
+ let align = |s: &[&'a str], cause: &'a str| {
if s.is_empty() {
- return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
+ return Err(TargetDataLayoutErrors::MissingAlignment { cause });
}
let align_from_bits = |bits| {
- Align::from_bits(bits).map_err(|err| {
- format!("invalid alignment for `{}` in \"data-layout\": {}", cause, err)
- })
+ Align::from_bits(bits)
+ .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
};
let abi = parse_bits(s[0], "alignment", cause)?;
let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
@@ -158,25 +170,24 @@ impl TargetDataLayout {
// Perform consistency checks against the Target information.
if dl.endian != target.endian {
- return Err(format!(
- "inconsistent target specification: \"data-layout\" claims \
- architecture is {}-endian, while \"target-endian\" is `{}`",
- dl.endian.as_str(),
- target.endian.as_str(),
- ));
+ return Err(TargetDataLayoutErrors::InconsistentTargetArchitecture {
+ dl: dl.endian.as_str(),
+ target: target.endian.as_str(),
+ });
}
let target_pointer_width: u64 = target.pointer_width.into();
if dl.pointer_size.bits() != target_pointer_width {
- return Err(format!(
- "inconsistent target specification: \"data-layout\" claims \
- pointers are {}-bit, while \"target-pointer-width\" is `{}`",
- dl.pointer_size.bits(),
- target.pointer_width
- ));
+ return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth {
+ pointer_size: dl.pointer_size.bits(),
+ target: target.pointer_width,
+ });
}
- dl.c_enum_min_size = Integer::from_size(Size::from_bits(target.c_enum_min_bits))?;
+ dl.c_enum_min_size = match Integer::from_size(Size::from_bits(target.c_enum_min_bits)) {
+ Ok(bits) => bits,
+ Err(err) => return Err(TargetDataLayoutErrors::InvalidBitsSize { err }),
+ };
Ok(dl)
}
@@ -1130,7 +1141,7 @@ pub enum TagEncoding {
/// Niche (values invalid for a type) encoding the discriminant:
/// Discriminant and variant index coincide.
- /// The variant `dataful_variant` contains a niche at an arbitrary
+ /// The variant `untagged_variant` contains a niche at an arbitrary
/// offset (field `tag_field` of the enum), which for a variant with
/// discriminant `d` is set to
/// `(d - niche_variants.start).wrapping_add(niche_start)`.
@@ -1139,7 +1150,7 @@ pub enum TagEncoding {
/// `None` has a null pointer for the second tuple field, and
/// `Some` is the identity function (with a non-null reference).
Niche {
- dataful_variant: VariantIdx,
+ untagged_variant: VariantIdx,
niche_variants: RangeInclusive<VariantIdx>,
niche_start: u128,
},
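
Earlier in this file, `TargetDataLayout::parse` switches from `String` errors to the structured `TargetDataLayoutErrors` enum. A minimal sketch (hypothetical helper; in rustc itself the rendering is handled by the diagnostics machinery) of how a caller could map the new variants back to messages like the old `format!` strings:

```rust
use crate::abi::TargetDataLayoutErrors;

/// Hypothetical renderer for the structured data-layout errors.
fn render(err: &TargetDataLayoutErrors<'_>) -> String {
    use TargetDataLayoutErrors::*;
    match err {
        InvalidAddressSpace { addr_space, cause, err } => format!(
            "invalid address space `{addr_space}` for `{cause}` in \"data-layout\": {err}"
        ),
        MissingAlignment { cause } => {
            format!("missing alignment for `{cause}` in \"data-layout\"")
        }
        InconsistentTargetPointerWidth { pointer_size, target } => format!(
            "inconsistent target specification: \"data-layout\" claims pointers \
             are {pointer_size}-bit, while \"target-pointer-width\" is `{target}`"
        ),
        // The remaining variants are rendered analogously.
        _ => String::from("invalid \"data-layout\""),
    }
}
```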