path: root/compiler/rustc_target
Diffstat
-rw-r--r--  compiler/rustc_target/Cargo.toml | 1
-rw-r--r--  compiler/rustc_target/src/abi/call/loongarch.rs | 342
-rw-r--r--  compiler/rustc_target/src/abi/call/mod.rs | 32
-rw-r--r--  compiler/rustc_target/src/abi/call/sparc64.rs | 18
-rw-r--r--  compiler/rustc_target/src/abi/mod.rs | 1330
-rw-r--r--  compiler/rustc_target/src/json.rs | 25
-rw-r--r--  compiler/rustc_target/src/lib.rs | 5
-rw-r--r--  compiler/rustc_target/src/spec/aarch64_apple_darwin.rs | 19
-rw-r--r--  compiler/rustc_target/src/spec/aarch64_apple_ios.rs | 20
-rw-r--r--  compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs | 7
-rw-r--r--  compiler/rustc_target/src/spec/aarch64_apple_ios_sim.rs | 22
-rw-r--r--  compiler/rustc_target/src/spec/aarch64_apple_tvos.rs | 7
-rw-r--r--  compiler/rustc_target/src/spec/aarch64_apple_watchos_sim.rs | 22
-rw-r--r--  compiler/rustc_target/src/spec/aarch64_unknown_nto_qnx_710.rs | 28
-rw-r--r--  compiler/rustc_target/src/spec/abi.rs | 22
-rw-r--r--  compiler/rustc_target/src/spec/aix_base.rs | 32
-rw-r--r--  compiler/rustc_target/src/spec/apple/tests.rs | 35
-rw-r--r--  compiler/rustc_target/src/spec/apple_base.rs | 157
-rw-r--r--  compiler/rustc_target/src/spec/apple_sdk_base.rs | 72
-rw-r--r--  compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs | 4
-rw-r--r--  compiler/rustc_target/src/spec/armv7_apple_ios.rs | 15
-rw-r--r--  compiler/rustc_target/src/spec/armv7k_apple_watchos.rs | 10
-rw-r--r--  compiler/rustc_target/src/spec/armv7s_apple_ios.rs | 7
-rw-r--r--  compiler/rustc_target/src/spec/i386_apple_ios.rs | 16
-rw-r--r--  compiler/rustc_target/src/spec/i686_apple_darwin.rs | 23
-rw-r--r--  compiler/rustc_target/src/spec/linux_kernel_base.rs | 18
-rw-r--r--  compiler/rustc_target/src/spec/mipsel_sony_psx.rs | 37
-rw-r--r--  compiler/rustc_target/src/spec/mod.rs | 92
-rw-r--r--  compiler/rustc_target/src/spec/nto_qnx_base.rs | 19
-rw-r--r--  compiler/rustc_target/src/spec/powerpc64_ibm_aix.rs | 23
-rw-r--r--  compiler/rustc_target/src/spec/riscv64gc_unknown_freebsd.rs | 2
-rw-r--r--  compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs | 2
-rw-r--r--  compiler/rustc_target/src/spec/riscv64gc_unknown_linux_musl.rs | 2
-rw-r--r--  compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs | 2
-rw-r--r--  compiler/rustc_target/src/spec/riscv64gc_unknown_openbsd.rs | 2
-rw-r--r--  compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs | 2
-rw-r--r--  compiler/rustc_target/src/spec/tests/tests_impl.rs | 9
-rw-r--r--  compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs | 7
-rw-r--r--  compiler/rustc_target/src/spec/wasm32_wasi.rs | 10
-rw-r--r--  compiler/rustc_target/src/spec/wasm_base.rs | 4
-rw-r--r--  compiler/rustc_target/src/spec/windows_gnullvm_base.rs | 8
-rw-r--r--  compiler/rustc_target/src/spec/x86_64_apple_darwin.rs | 21
-rw-r--r--  compiler/rustc_target/src/spec/x86_64_apple_ios.rs | 12
-rw-r--r--  compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs | 7
-rw-r--r--  compiler/rustc_target/src/spec/x86_64_apple_tvos.rs | 8
-rw-r--r--  compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs | 14
-rw-r--r--  compiler/rustc_target/src/spec/x86_64_pc_nto_qnx710.rs | 21
-rw-r--r--  compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs | 28
48 files changed, 981 insertions(+), 1640 deletions(-)
diff --git a/compiler/rustc_target/Cargo.toml b/compiler/rustc_target/Cargo.toml
index fc37fdb1c..568c916a1 100644
--- a/compiler/rustc_target/Cargo.toml
+++ b/compiler/rustc_target/Cargo.toml
@@ -7,6 +7,7 @@ edition = "2021"
bitflags = "1.2.1"
tracing = "0.1"
serde_json = "1.0.59"
+rustc_abi = { path = "../rustc_abi" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_feature = { path = "../rustc_feature" }
rustc_index = { path = "../rustc_index" }
diff --git a/compiler/rustc_target/src/abi/call/loongarch.rs b/compiler/rustc_target/src/abi/call/loongarch.rs
new file mode 100644
index 000000000..d29b479de
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/loongarch.rs
@@ -0,0 +1,342 @@
+use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
+use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use crate::spec::HasTargetSpec;
+
+#[derive(Copy, Clone)]
+enum RegPassKind {
+ Float(Reg),
+ Integer(Reg),
+ Unknown,
+}
+
+#[derive(Copy, Clone)]
+enum FloatConv {
+ FloatPair(Reg, Reg),
+ Float(Reg),
+ MixedPair(Reg, Reg),
+}
+
+#[derive(Copy, Clone)]
+struct CannotUseFpConv;
+
+fn is_loongarch_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
+ match arg.layout.abi {
+ Abi::Vector { .. } => true,
+ _ => arg.layout.is_aggregate(),
+ }
+}
+
+fn should_use_fp_conv_helper<'a, Ty, C>(
+ cx: &C,
+ arg_layout: &TyAndLayout<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+ field1_kind: &mut RegPassKind,
+ field2_kind: &mut RegPassKind,
+) -> Result<(), CannotUseFpConv>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ match arg_layout.abi {
+ Abi::Scalar(scalar) => match scalar.primitive() {
+ abi::Int(..) | abi::Pointer => {
+ if arg_layout.size.bits() > xlen {
+ return Err(CannotUseFpConv);
+ }
+ match (*field1_kind, *field2_kind) {
+ (RegPassKind::Unknown, _) => {
+ *field1_kind = RegPassKind::Integer(Reg {
+ kind: RegKind::Integer,
+ size: arg_layout.size,
+ });
+ }
+ (RegPassKind::Float(_), RegPassKind::Unknown) => {
+ *field2_kind = RegPassKind::Integer(Reg {
+ kind: RegKind::Integer,
+ size: arg_layout.size,
+ });
+ }
+ _ => return Err(CannotUseFpConv),
+ }
+ }
+ abi::F32 | abi::F64 => {
+ if arg_layout.size.bits() > flen {
+ return Err(CannotUseFpConv);
+ }
+ match (*field1_kind, *field2_kind) {
+ (RegPassKind::Unknown, _) => {
+ *field1_kind =
+ RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
+ }
+ (_, RegPassKind::Unknown) => {
+ *field2_kind =
+ RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
+ }
+ _ => return Err(CannotUseFpConv),
+ }
+ }
+ },
+ Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+ FieldsShape::Primitive => {
+ unreachable!("aggregates can't have `FieldsShape::Primitive`")
+ }
+ FieldsShape::Union(_) => {
+ if !arg_layout.is_zst() {
+ return Err(CannotUseFpConv);
+ }
+ }
+ FieldsShape::Array { count, .. } => {
+ for _ in 0..count {
+ let elem_layout = arg_layout.field(cx, 0);
+ should_use_fp_conv_helper(
+ cx,
+ &elem_layout,
+ xlen,
+ flen,
+ field1_kind,
+ field2_kind,
+ )?;
+ }
+ }
+ FieldsShape::Arbitrary { .. } => {
+ match arg_layout.variants {
+ abi::Variants::Multiple { .. } => return Err(CannotUseFpConv),
+ abi::Variants::Single { .. } => (),
+ }
+ for i in arg_layout.fields.index_by_increasing_offset() {
+ let field = arg_layout.field(cx, i);
+ should_use_fp_conv_helper(cx, &field, xlen, flen, field1_kind, field2_kind)?;
+ }
+ }
+ },
+ }
+ Ok(())
+}
+
+fn should_use_fp_conv<'a, Ty, C>(
+ cx: &C,
+ arg: &TyAndLayout<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+) -> Option<FloatConv>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ let mut field1_kind = RegPassKind::Unknown;
+ let mut field2_kind = RegPassKind::Unknown;
+ if should_use_fp_conv_helper(cx, arg, xlen, flen, &mut field1_kind, &mut field2_kind).is_err() {
+ return None;
+ }
+ match (field1_kind, field2_kind) {
+ (RegPassKind::Integer(l), RegPassKind::Float(r)) => Some(FloatConv::MixedPair(l, r)),
+ (RegPassKind::Float(l), RegPassKind::Integer(r)) => Some(FloatConv::MixedPair(l, r)),
+ (RegPassKind::Float(l), RegPassKind::Float(r)) => Some(FloatConv::FloatPair(l, r)),
+ (RegPassKind::Float(f), RegPassKind::Unknown) => Some(FloatConv::Float(f)),
+ _ => None,
+ }
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
+ match conv {
+ FloatConv::Float(f) => {
+ arg.cast_to(f);
+ }
+ FloatConv::FloatPair(l, r) => {
+ arg.cast_to(CastTarget::pair(l, r));
+ }
+ FloatConv::MixedPair(l, r) => {
+ arg.cast_to(CastTarget::pair(l, r));
+ }
+ }
+ return false;
+ }
+
+ let total = arg.layout.size;
+
+ // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
+ // the argument list with the address."
+ // "Aggregates larger than 2✕XLEN bits are passed by reference and are
+ // replaced in the argument list with the address, as are C++ aggregates
+ // with nontrivial copy constructors, destructors, or vtables."
+ if total.bits() > 2 * xlen {
+ // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
+ if is_loongarch_aggregate(arg) {
+ arg.make_indirect();
+ }
+ return true;
+ }
+
+ let xlen_reg = match xlen {
+ 32 => Reg::i32(),
+ 64 => Reg::i64(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+ if is_loongarch_aggregate(arg) {
+ if total.bits() <= xlen {
+ arg.cast_to(xlen_reg);
+ } else {
+ arg.cast_to(Uniform { unit: xlen_reg, total: Size::from_bits(xlen * 2) });
+ }
+ return false;
+ }
+
+ // "When passed in registers, scalars narrower than XLEN bits are widened
+ // according to the sign of their type up to 32 bits, then sign-extended to
+ // XLEN bits."
+ extend_integer_width(arg, xlen);
+ false
+}
+
+fn classify_arg<'a, Ty, C>(
+ cx: &C,
+ arg: &mut ArgAbi<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+ is_vararg: bool,
+ avail_gprs: &mut u64,
+ avail_fprs: &mut u64,
+) where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ if !is_vararg {
+ match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
+ Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
+ *avail_fprs -= 1;
+ arg.cast_to(f);
+ return;
+ }
+ Some(FloatConv::FloatPair(l, r)) if *avail_fprs >= 2 => {
+ *avail_fprs -= 2;
+ arg.cast_to(CastTarget::pair(l, r));
+ return;
+ }
+ Some(FloatConv::MixedPair(l, r)) if *avail_fprs >= 1 && *avail_gprs >= 1 => {
+ *avail_gprs -= 1;
+ *avail_fprs -= 1;
+ arg.cast_to(CastTarget::pair(l, r));
+ return;
+ }
+ _ => (),
+ }
+ }
+
+ let total = arg.layout.size;
+ let align = arg.layout.align.abi.bits();
+
+ // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
+ // the argument list with the address."
+ // "Aggregates larger than 2✕XLEN bits are passed by reference and are
+ // replaced in the argument list with the address, as are C++ aggregates
+ // with nontrivial copy constructors, destructors, or vtables."
+ if total.bits() > 2 * xlen {
+ // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
+ if is_loongarch_aggregate(arg) {
+ arg.make_indirect();
+ }
+ if *avail_gprs >= 1 {
+ *avail_gprs -= 1;
+ }
+ return;
+ }
+
+ let double_xlen_reg = match xlen {
+ 32 => Reg::i64(),
+ 64 => Reg::i128(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+
+ let xlen_reg = match xlen {
+ 32 => Reg::i32(),
+ 64 => Reg::i64(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+
+ if total.bits() > xlen {
+ let align_regs = align > xlen;
+ if is_loongarch_aggregate(arg) {
+ arg.cast_to(Uniform {
+ unit: if align_regs { double_xlen_reg } else { xlen_reg },
+ total: Size::from_bits(xlen * 2),
+ });
+ }
+ if align_regs && is_vararg {
+ *avail_gprs -= *avail_gprs % 2;
+ }
+ if *avail_gprs >= 2 {
+ *avail_gprs -= 2;
+ } else {
+ *avail_gprs = 0;
+ }
+ return;
+ } else if is_loongarch_aggregate(arg) {
+ arg.cast_to(xlen_reg);
+ if *avail_gprs >= 1 {
+ *avail_gprs -= 1;
+ }
+ return;
+ }
+
+ // "When passed in registers, scalars narrower than XLEN bits are widened
+ // according to the sign of their type up to 32 bits, then sign-extended to
+ // XLEN bits."
+ if *avail_gprs >= 1 {
+ extend_integer_width(arg, xlen);
+ *avail_gprs -= 1;
+ }
+}
+
+fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) {
+ if let Abi::Scalar(scalar) = arg.layout.abi {
+ if let abi::Int(i, _) = scalar.primitive() {
+ // 32-bit integers are always sign-extended
+ if i.size().bits() == 32 && xlen > 32 {
+ if let PassMode::Direct(ref mut attrs) = arg.mode {
+ attrs.ext(ArgExtension::Sext);
+ return;
+ }
+ }
+ }
+ }
+
+ arg.extend_integer_width_to(xlen);
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ let xlen = cx.data_layout().pointer_size.bits();
+ let flen = match &cx.target_spec().llvm_abiname[..] {
+ "ilp32f" | "lp64f" => 32,
+ "ilp32d" | "lp64d" => 64,
+ _ => 0,
+ };
+
+ let mut avail_gprs = 8;
+ let mut avail_fprs = 8;
+
+ if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
+ avail_gprs -= 1;
+ }
+
+ for (i, arg) in fn_abi.args.iter_mut().enumerate() {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(
+ cx,
+ arg,
+ xlen,
+ flen,
+ i >= fn_abi.fixed_count as usize,
+ &mut avail_gprs,
+ &mut avail_fprs,
+ );
+ }
+}
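
Note (editorial, not part of the patch): the psABI rules quoted in the comments above can be illustrated with a few concrete types. This is a rough sketch assuming a 64-bit LoongArch target where XLEN = 64 and FLEN = 64 (the `lp64d` ABI):

// Illustrative only: how the classification above treats some #[repr(C)]
// structs when XLEN = 64 and FLEN = 64.
#[repr(C)]
struct TwoFloats { a: f64, b: f64 } // two float fields   -> FloatConv::FloatPair (two FPRs)
#[repr(C)]
struct Mixed { a: i64, b: f64 }     // integer + float    -> FloatConv::MixedPair (one GPR + one FPR)
#[repr(C)]
struct Big { a: u64, b: u64, c: u64 } // 192 bits > 2*XLEN -> passed indirectly (by reference)

fn main() {
    // Sizes only; the actual register assignment is done by compute_abi_info.
    println!(
        "{} {} {}",
        std::mem::size_of::<TwoFloats>(),
        std::mem::size_of::<Mixed>(),
        std::mem::size_of::<Big>(),
    );
}
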
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 9e5f0e4d1..a5ffaebea 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -3,6 +3,7 @@ use crate::abi::{HasDataLayout, TyAbiInterface, TyAndLayout};
use crate::spec::{self, HasTargetSpec};
use rustc_span::Symbol;
use std::fmt;
+use std::str::FromStr;
mod aarch64;
mod amdgpu;
@@ -10,6 +11,7 @@ mod arm;
mod avr;
mod bpf;
mod hexagon;
+mod loongarch;
mod m68k;
mod mips;
mod mips64;
@@ -260,7 +262,7 @@ impl CastTarget {
let mut size = self.rest.total;
for i in 0..self.prefix.iter().count() {
match self.prefix[i] {
- Some(v) => size += Size { raw: v.size.bytes() },
+ Some(v) => size += v.size,
None => {}
}
}
@@ -696,6 +698,7 @@ impl<'a, Ty> FnAbi<'a, Ty> {
"amdgpu" => amdgpu::compute_abi_info(cx, self),
"arm" => arm::compute_abi_info(cx, self),
"avr" => avr::compute_abi_info(self),
+ "loongarch64" => loongarch::compute_abi_info(cx, self),
"m68k" => m68k::compute_abi_info(self),
"mips" => mips::compute_abi_info(cx, self),
"mips64" => mips64::compute_abi_info(cx, self),
@@ -735,6 +738,33 @@ impl<'a, Ty> FnAbi<'a, Ty> {
}
}
+impl FromStr for Conv {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "C" => Ok(Conv::C),
+ "Rust" => Ok(Conv::Rust),
+ "RustCold" => Ok(Conv::Rust),
+ "ArmAapcs" => Ok(Conv::ArmAapcs),
+ "CCmseNonSecureCall" => Ok(Conv::CCmseNonSecureCall),
+ "Msp430Intr" => Ok(Conv::Msp430Intr),
+ "PtxKernel" => Ok(Conv::PtxKernel),
+ "X86Fastcall" => Ok(Conv::X86Fastcall),
+ "X86Intr" => Ok(Conv::X86Intr),
+ "X86Stdcall" => Ok(Conv::X86Stdcall),
+ "X86ThisCall" => Ok(Conv::X86ThisCall),
+ "X86VectorCall" => Ok(Conv::X86VectorCall),
+ "X86_64SysV" => Ok(Conv::X86_64SysV),
+ "X86_64Win64" => Ok(Conv::X86_64Win64),
+ "AmdGpuKernel" => Ok(Conv::AmdGpuKernel),
+ "AvrInterrupt" => Ok(Conv::AvrInterrupt),
+ "AvrNonBlockingInterrupt" => Ok(Conv::AvrNonBlockingInterrupt),
+ _ => Err(format!("'{}' is not a valid value for entry function call convention.", s)),
+ }
+ }
+}
+
// Some types are used a lot. Make sure they don't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
diff --git a/compiler/rustc_target/src/abi/call/sparc64.rs b/compiler/rustc_target/src/abi/call/sparc64.rs
index 1b74959ad..c8b6ac5ae 100644
--- a/compiler/rustc_target/src/abi/call/sparc64.rs
+++ b/compiler/rustc_target/src/abi/call/sparc64.rs
@@ -78,7 +78,7 @@ fn arg_scalar_pair<C>(
where
C: HasDataLayout,
{
- data = arg_scalar(cx, &scalar1, offset, data);
+ data = arg_scalar(cx, scalar1, offset, data);
match (scalar1.primitive(), scalar2.primitive()) {
(abi::F32, _) => offset += Reg::f32().size,
(_, abi::F64) => offset += Reg::f64().size,
@@ -87,10 +87,10 @@ where
_ => {}
}
- if (offset.raw % 4) != 0 && scalar2.primitive().is_float() {
- offset.raw += 4 - (offset.raw % 4);
+ if (offset.bytes() % 4) != 0 && scalar2.primitive().is_float() {
+ offset += Size::from_bytes(4 - (offset.bytes() % 4));
}
- data = arg_scalar(cx, &scalar2, offset, data);
+ data = arg_scalar(cx, scalar2, offset, data);
return data;
}
@@ -169,14 +169,14 @@ where
has_float: false,
arg_attribute: ArgAttribute::default(),
},
- Size { raw: 0 },
+ Size::ZERO,
);
if data.has_float {
// Structure { float, int, int } doesn't like to be handled like
// { float, long int }. Other way around it doesn't mind.
if data.last_offset < arg.layout.size
- && (data.last_offset.raw % 8) != 0
+ && (data.last_offset.bytes() % 8) != 0
&& data.prefix_index < data.prefix.len()
{
data.prefix[data.prefix_index] = Some(Reg::i32());
@@ -185,7 +185,7 @@ where
}
let mut rest_size = arg.layout.size - data.last_offset;
- if (rest_size.raw % 8) != 0 && data.prefix_index < data.prefix.len() {
+ if (rest_size.bytes() % 8) != 0 && data.prefix_index < data.prefix.len() {
data.prefix[data.prefix_index] = Some(Reg::i32());
rest_size = rest_size - Reg::i32().size;
}
@@ -214,13 +214,13 @@ where
C: HasDataLayout,
{
if !fn_abi.ret.is_ignore() {
- classify_arg(cx, &mut fn_abi.ret, Size { raw: 32 });
+ classify_arg(cx, &mut fn_abi.ret, Size::from_bytes(32));
}
for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
- classify_arg(cx, arg, Size { raw: 16 });
+ classify_arg(cx, arg, Size::from_bytes(16));
}
}
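
Note (editorial, not part of the patch): the sparc64 changes above replace direct construction and inspection of `Size { raw: ... }` with the public constructors and accessors (`Size::ZERO`, `Size::from_bytes`, `.bytes()`), which is needed now that `Size` lives in the separate `rustc_abi` crate and its private `raw` field is no longer reachable from here. As a minimal sketch, assuming `rustc_abi::Size` is in scope, the 4-byte padding step from `arg_scalar_pair` reads like this against the public API:

use rustc_abi::Size;

// Round `offset` up to the next 4-byte boundary, as arg_scalar_pair does
// before placing a float scalar.
fn pad_to_4(mut offset: Size) -> Size {
    if offset.bytes() % 4 != 0 {
        offset += Size::from_bytes(4 - (offset.bytes() % 4));
    }
    offset
}

For example, `pad_to_4(Size::from_bytes(6))` yields an 8-byte offset, matching the alignment expected for the following float.
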
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index 7171ca7bf..53c9878ab 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -2,284 +2,16 @@ pub use Integer::*;
pub use Primitive::*;
use crate::json::{Json, ToJson};
-use crate::spec::Target;
-use std::convert::{TryFrom, TryInto};
use std::fmt;
-use std::iter::Step;
-use std::num::{NonZeroUsize, ParseIntError};
-use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
-use std::str::FromStr;
+use std::ops::Deref;
use rustc_data_structures::intern::Interned;
-use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable_Generic;
pub mod call;
-/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
-/// for a target, which contains everything needed to compute layouts.
-pub struct TargetDataLayout {
- pub endian: Endian,
- pub i1_align: AbiAndPrefAlign,
- pub i8_align: AbiAndPrefAlign,
- pub i16_align: AbiAndPrefAlign,
- pub i32_align: AbiAndPrefAlign,
- pub i64_align: AbiAndPrefAlign,
- pub i128_align: AbiAndPrefAlign,
- pub f32_align: AbiAndPrefAlign,
- pub f64_align: AbiAndPrefAlign,
- pub pointer_size: Size,
- pub pointer_align: AbiAndPrefAlign,
- pub aggregate_align: AbiAndPrefAlign,
-
- /// Alignments for vector types.
- pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
-
- pub instruction_address_space: AddressSpace,
-
- /// Minimum size of #[repr(C)] enums (default I32 bits)
- pub c_enum_min_size: Integer,
-}
-
-impl Default for TargetDataLayout {
- /// Creates an instance of `TargetDataLayout`.
- fn default() -> TargetDataLayout {
- let align = |bits| Align::from_bits(bits).unwrap();
- TargetDataLayout {
- endian: Endian::Big,
- i1_align: AbiAndPrefAlign::new(align(8)),
- i8_align: AbiAndPrefAlign::new(align(8)),
- i16_align: AbiAndPrefAlign::new(align(16)),
- i32_align: AbiAndPrefAlign::new(align(32)),
- i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
- i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
- f32_align: AbiAndPrefAlign::new(align(32)),
- f64_align: AbiAndPrefAlign::new(align(64)),
- pointer_size: Size::from_bits(64),
- pointer_align: AbiAndPrefAlign::new(align(64)),
- aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
- vector_align: vec![
- (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
- (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
- ],
- instruction_address_space: AddressSpace::DATA,
- c_enum_min_size: Integer::I32,
- }
- }
-}
-
-pub enum TargetDataLayoutErrors<'a> {
- InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
- InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
- MissingAlignment { cause: &'a str },
- InvalidAlignment { cause: &'a str, err: String },
- InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
- InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
- InvalidBitsSize { err: String },
-}
-
-impl TargetDataLayout {
- pub fn parse<'a>(target: &'a Target) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
- // Parse an address space index from a string.
- let parse_address_space = |s: &'a str, cause: &'a str| {
- s.parse::<u32>().map(AddressSpace).map_err(|err| {
- TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
- })
- };
-
- // Parse a bit count from a string.
- let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
- s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
- kind,
- bit: s,
- cause,
- err,
- })
- };
-
- // Parse a size string.
- let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
-
- // Parse an alignment string.
- let align = |s: &[&'a str], cause: &'a str| {
- if s.is_empty() {
- return Err(TargetDataLayoutErrors::MissingAlignment { cause });
- }
- let align_from_bits = |bits| {
- Align::from_bits(bits)
- .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
- };
- let abi = parse_bits(s[0], "alignment", cause)?;
- let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
- Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
- };
-
- let mut dl = TargetDataLayout::default();
- let mut i128_align_src = 64;
- for spec in target.data_layout.split('-') {
- let spec_parts = spec.split(':').collect::<Vec<_>>();
-
- match &*spec_parts {
- ["e"] => dl.endian = Endian::Little,
- ["E"] => dl.endian = Endian::Big,
- [p] if p.starts_with('P') => {
- dl.instruction_address_space = parse_address_space(&p[1..], "P")?
- }
- ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
- ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
- ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
- [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
- dl.pointer_size = size(s, p)?;
- dl.pointer_align = align(a, p)?;
- }
- [s, ref a @ ..] if s.starts_with('i') => {
- let Ok(bits) = s[1..].parse::<u64>() else {
- size(&s[1..], "i")?; // For the user error.
- continue;
- };
- let a = align(a, s)?;
- match bits {
- 1 => dl.i1_align = a,
- 8 => dl.i8_align = a,
- 16 => dl.i16_align = a,
- 32 => dl.i32_align = a,
- 64 => dl.i64_align = a,
- _ => {}
- }
- if bits >= i128_align_src && bits <= 128 {
- // Default alignment for i128 is decided by taking the alignment of
- // largest-sized i{64..=128}.
- i128_align_src = bits;
- dl.i128_align = a;
- }
- }
- [s, ref a @ ..] if s.starts_with('v') => {
- let v_size = size(&s[1..], "v")?;
- let a = align(a, s)?;
- if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
- v.1 = a;
- continue;
- }
- // No existing entry, add a new one.
- dl.vector_align.push((v_size, a));
- }
- _ => {} // Ignore everything else.
- }
- }
-
- // Perform consistency checks against the Target information.
- if dl.endian != target.endian {
- return Err(TargetDataLayoutErrors::InconsistentTargetArchitecture {
- dl: dl.endian.as_str(),
- target: target.endian.as_str(),
- });
- }
-
- let target_pointer_width: u64 = target.pointer_width.into();
- if dl.pointer_size.bits() != target_pointer_width {
- return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth {
- pointer_size: dl.pointer_size.bits(),
- target: target.pointer_width,
- });
- }
-
- dl.c_enum_min_size = match Integer::from_size(Size::from_bits(target.c_enum_min_bits)) {
- Ok(bits) => bits,
- Err(err) => return Err(TargetDataLayoutErrors::InvalidBitsSize { err }),
- };
-
- Ok(dl)
- }
-
- /// Returns exclusive upper bound on object size.
- ///
- /// The theoretical maximum object size is defined as the maximum positive `isize` value.
- /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
- /// index every address within an object along with one byte past the end, along with allowing
- /// `isize` to store the difference between any two pointers into an object.
- ///
- /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
- /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
- /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
- /// address space on 64-bit ARMv8 and x86_64.
- #[inline]
- pub fn obj_size_bound(&self) -> u64 {
- match self.pointer_size.bits() {
- 16 => 1 << 15,
- 32 => 1 << 31,
- 64 => 1 << 47,
- bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
- }
- }
-
- #[inline]
- pub fn ptr_sized_integer(&self) -> Integer {
- match self.pointer_size.bits() {
- 16 => I16,
- 32 => I32,
- 64 => I64,
- bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
- }
- }
-
- #[inline]
- pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
- for &(size, align) in &self.vector_align {
- if size == vec_size {
- return align;
- }
- }
- // Default to natural alignment, which is what LLVM does.
- // That is, use the size, rounded up to a power of 2.
- AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
- }
-}
-
-pub trait HasDataLayout {
- fn data_layout(&self) -> &TargetDataLayout;
-}
-
-impl HasDataLayout for TargetDataLayout {
- #[inline]
- fn data_layout(&self) -> &TargetDataLayout {
- self
- }
-}
-
-/// Endianness of the target, which must match cfg(target-endian).
-#[derive(Copy, Clone, PartialEq)]
-pub enum Endian {
- Little,
- Big,
-}
-
-impl Endian {
- pub fn as_str(&self) -> &'static str {
- match self {
- Self::Little => "little",
- Self::Big => "big",
- }
- }
-}
-
-impl fmt::Debug for Endian {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str(self.as_str())
- }
-}
-
-impl FromStr for Endian {
- type Err = String;
-
- fn from_str(s: &str) -> Result<Self, Self::Err> {
- match s {
- "little" => Ok(Self::Little),
- "big" => Ok(Self::Big),
- _ => Err(format!(r#"unknown endian: "{}""#, s)),
- }
- }
-}
+pub use rustc_abi::*;
impl ToJson for Endian {
fn to_json(&self) -> Json {
@@ -287,1025 +19,15 @@ impl ToJson for Endian {
}
}
-/// Size of a type in bytes.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
-#[derive(HashStable_Generic)]
-pub struct Size {
- raw: u64,
-}
-
-// This is debug-printed a lot in larger structs, don't waste too much space there
-impl fmt::Debug for Size {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "Size({} bytes)", self.bytes())
- }
-}
-
-impl Size {
- pub const ZERO: Size = Size { raw: 0 };
-
- /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
- /// not a multiple of 8.
- pub fn from_bits(bits: impl TryInto<u64>) -> Size {
- let bits = bits.try_into().ok().unwrap();
- // Avoid potential overflow from `bits + 7`.
- Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
- }
-
- #[inline]
- pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
- let bytes: u64 = bytes.try_into().ok().unwrap();
- Size { raw: bytes }
- }
-
- #[inline]
- pub fn bytes(self) -> u64 {
- self.raw
- }
-
- #[inline]
- pub fn bytes_usize(self) -> usize {
- self.bytes().try_into().unwrap()
- }
-
- #[inline]
- pub fn bits(self) -> u64 {
- #[cold]
- fn overflow(bytes: u64) -> ! {
- panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
- }
-
- self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
- }
-
- #[inline]
- pub fn bits_usize(self) -> usize {
- self.bits().try_into().unwrap()
- }
-
- #[inline]
- pub fn align_to(self, align: Align) -> Size {
- let mask = align.bytes() - 1;
- Size::from_bytes((self.bytes() + mask) & !mask)
- }
-
- #[inline]
- pub fn is_aligned(self, align: Align) -> bool {
- let mask = align.bytes() - 1;
- self.bytes() & mask == 0
- }
-
- #[inline]
- pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
- let dl = cx.data_layout();
-
- let bytes = self.bytes().checked_add(offset.bytes())?;
-
- if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
- }
-
- #[inline]
- pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
- let dl = cx.data_layout();
-
- let bytes = self.bytes().checked_mul(count)?;
- if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
- }
-
- /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
- /// (i.e., if it is negative, fill with 1's on the left).
- #[inline]
- pub fn sign_extend(self, value: u128) -> u128 {
- let size = self.bits();
- if size == 0 {
- // Truncated until nothing is left.
- return 0;
- }
- // Sign-extend it.
- let shift = 128 - size;
- // Shift the unsigned value to the left, then shift back to the right as signed
- // (essentially fills with sign bit on the left).
- (((value << shift) as i128) >> shift) as u128
- }
-
- /// Truncates `value` to `self` bits.
- #[inline]
- pub fn truncate(self, value: u128) -> u128 {
- let size = self.bits();
- if size == 0 {
- // Truncated until nothing is left.
- return 0;
- }
- let shift = 128 - size;
- // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
- (value << shift) >> shift
- }
-
- #[inline]
- pub fn signed_int_min(&self) -> i128 {
- self.sign_extend(1_u128 << (self.bits() - 1)) as i128
- }
-
- #[inline]
- pub fn signed_int_max(&self) -> i128 {
- i128::MAX >> (128 - self.bits())
- }
-
- #[inline]
- pub fn unsigned_int_max(&self) -> u128 {
- u128::MAX >> (128 - self.bits())
- }
-}
-
-// Panicking addition, subtraction and multiplication for convenience.
-// Avoid during layout computation, return `LayoutError` instead.
-
-impl Add for Size {
- type Output = Size;
- #[inline]
- fn add(self, other: Size) -> Size {
- Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
- panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
- }))
- }
-}
-
-impl Sub for Size {
- type Output = Size;
- #[inline]
- fn sub(self, other: Size) -> Size {
- Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
- panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
- }))
- }
-}
-
-impl Mul<Size> for u64 {
- type Output = Size;
- #[inline]
- fn mul(self, size: Size) -> Size {
- size * self
- }
-}
-
-impl Mul<u64> for Size {
- type Output = Size;
- #[inline]
- fn mul(self, count: u64) -> Size {
- match self.bytes().checked_mul(count) {
- Some(bytes) => Size::from_bytes(bytes),
- None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
- }
- }
-}
-
-impl AddAssign for Size {
- #[inline]
- fn add_assign(&mut self, other: Size) {
- *self = *self + other;
- }
-}
-
-impl Step for Size {
- #[inline]
- fn steps_between(start: &Self, end: &Self) -> Option<usize> {
- u64::steps_between(&start.bytes(), &end.bytes())
- }
-
- #[inline]
- fn forward_checked(start: Self, count: usize) -> Option<Self> {
- u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
- }
-
- #[inline]
- fn forward(start: Self, count: usize) -> Self {
- Self::from_bytes(u64::forward(start.bytes(), count))
- }
-
- #[inline]
- unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
- Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
- }
-
- #[inline]
- fn backward_checked(start: Self, count: usize) -> Option<Self> {
- u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
- }
-
- #[inline]
- fn backward(start: Self, count: usize) -> Self {
- Self::from_bytes(u64::backward(start.bytes(), count))
- }
-
- #[inline]
- unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
- Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
- }
-}
-
-/// Alignment of a type in bytes (always a power of two).
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
-#[derive(HashStable_Generic)]
-pub struct Align {
- pow2: u8,
-}
-
-// This is debug-printed a lot in larger structs, don't waste too much space there
-impl fmt::Debug for Align {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "Align({} bytes)", self.bytes())
- }
-}
-
-impl Align {
- pub const ONE: Align = Align { pow2: 0 };
- pub const MAX: Align = Align { pow2: 29 };
-
- #[inline]
- pub fn from_bits(bits: u64) -> Result<Align, String> {
- Align::from_bytes(Size::from_bits(bits).bytes())
- }
-
- #[inline]
- pub fn from_bytes(align: u64) -> Result<Align, String> {
- // Treat an alignment of 0 bytes like 1-byte alignment.
- if align == 0 {
- return Ok(Align::ONE);
- }
-
- #[cold]
- fn not_power_of_2(align: u64) -> String {
- format!("`{}` is not a power of 2", align)
- }
-
- #[cold]
- fn too_large(align: u64) -> String {
- format!("`{}` is too large", align)
- }
-
- let mut bytes = align;
- let mut pow2: u8 = 0;
- while (bytes & 1) == 0 {
- pow2 += 1;
- bytes >>= 1;
- }
- if bytes != 1 {
- return Err(not_power_of_2(align));
- }
- if pow2 > Self::MAX.pow2 {
- return Err(too_large(align));
- }
-
- Ok(Align { pow2 })
- }
-
- #[inline]
- pub fn bytes(self) -> u64 {
- 1 << self.pow2
- }
-
- #[inline]
- pub fn bits(self) -> u64 {
- self.bytes() * 8
- }
-
- /// Computes the best alignment possible for the given offset
- /// (the largest power of two that the offset is a multiple of).
- ///
- /// N.B., for an offset of `0`, this happens to return `2^64`.
- #[inline]
- pub fn max_for_offset(offset: Size) -> Align {
- Align { pow2: offset.bytes().trailing_zeros() as u8 }
- }
-
- /// Lower the alignment, if necessary, such that the given offset
- /// is aligned to it (the offset is a multiple of the alignment).
- #[inline]
- pub fn restrict_for_offset(self, offset: Size) -> Align {
- self.min(Align::max_for_offset(offset))
- }
-}
-
-/// A pair of alignments, ABI-mandated and preferred.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-#[derive(HashStable_Generic)]
-pub struct AbiAndPrefAlign {
- pub abi: Align,
- pub pref: Align,
-}
-
-impl AbiAndPrefAlign {
- #[inline]
- pub fn new(align: Align) -> AbiAndPrefAlign {
- AbiAndPrefAlign { abi: align, pref: align }
- }
-
- #[inline]
- pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
- AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
- }
-
- #[inline]
- pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
- AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
- }
-}
-
-/// Integers, also used for enum discriminants.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
-pub enum Integer {
- I8,
- I16,
- I32,
- I64,
- I128,
-}
-
-impl Integer {
- #[inline]
- pub fn size(self) -> Size {
- match self {
- I8 => Size::from_bytes(1),
- I16 => Size::from_bytes(2),
- I32 => Size::from_bytes(4),
- I64 => Size::from_bytes(8),
- I128 => Size::from_bytes(16),
- }
- }
-
- pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
- let dl = cx.data_layout();
-
- match self {
- I8 => dl.i8_align,
- I16 => dl.i16_align,
- I32 => dl.i32_align,
- I64 => dl.i64_align,
- I128 => dl.i128_align,
- }
- }
-
- /// Finds the smallest Integer type which can represent the signed value.
- #[inline]
- pub fn fit_signed(x: i128) -> Integer {
- match x {
- -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
- -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
- -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
- -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
- _ => I128,
- }
- }
-
- /// Finds the smallest Integer type which can represent the unsigned value.
- #[inline]
- pub fn fit_unsigned(x: u128) -> Integer {
- match x {
- 0..=0x0000_0000_0000_00ff => I8,
- 0..=0x0000_0000_0000_ffff => I16,
- 0..=0x0000_0000_ffff_ffff => I32,
- 0..=0xffff_ffff_ffff_ffff => I64,
- _ => I128,
- }
- }
-
- /// Finds the smallest integer with the given alignment.
- pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
- let dl = cx.data_layout();
-
- for candidate in [I8, I16, I32, I64, I128] {
- if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
- return Some(candidate);
- }
- }
- None
- }
-
- /// Find the largest integer with the given alignment or less.
- pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
- let dl = cx.data_layout();
-
- // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
- for candidate in [I64, I32, I16] {
- if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
- return candidate;
- }
- }
- I8
- }
-
- // FIXME(eddyb) consolidate this and other methods that find the appropriate
- // `Integer` given some requirements.
- #[inline]
- fn from_size(size: Size) -> Result<Self, String> {
- match size.bits() {
- 8 => Ok(Integer::I8),
- 16 => Ok(Integer::I16),
- 32 => Ok(Integer::I32),
- 64 => Ok(Integer::I64),
- 128 => Ok(Integer::I128),
- _ => Err(format!("rust does not support integers with {} bits", size.bits())),
- }
- }
-}
-
-/// Fundamental unit of memory access and layout.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum Primitive {
- /// The `bool` is the signedness of the `Integer` type.
- ///
- /// One would think we would not care about such details this low down,
- /// but some ABIs are described in terms of C types and ISAs where the
- /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
- /// a negative integer passed by zero-extension will appear positive in
- /// the callee, and most operations on it will produce the wrong values.
- Int(Integer, bool),
- F32,
- F64,
- Pointer,
-}
-
-impl Primitive {
- pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
- let dl = cx.data_layout();
-
- match self {
- Int(i, _) => i.size(),
- F32 => Size::from_bits(32),
- F64 => Size::from_bits(64),
- Pointer => dl.pointer_size,
- }
- }
-
- pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
- let dl = cx.data_layout();
-
- match self {
- Int(i, _) => i.align(dl),
- F32 => dl.f32_align,
- F64 => dl.f64_align,
- Pointer => dl.pointer_align,
- }
- }
-
- // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
- #[inline]
- pub fn is_float(self) -> bool {
- matches!(self, F32 | F64)
- }
-
- // FIXME(eddyb) remove, it's completely unused.
- #[inline]
- pub fn is_int(self) -> bool {
- matches!(self, Int(..))
- }
-
- #[inline]
- pub fn is_ptr(self) -> bool {
- matches!(self, Pointer)
- }
-}
-
-/// Inclusive wrap-around range of valid values, that is, if
-/// start > end, it represents `start..=MAX`,
-/// followed by `0..=end`.
-///
-/// That is, for an i8 primitive, a range of `254..=2` means following
-/// sequence:
-///
-/// 254 (-2), 255 (-1), 0, 1, 2
-///
-/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-#[derive(HashStable_Generic)]
-pub struct WrappingRange {
- pub start: u128,
- pub end: u128,
-}
-
-impl WrappingRange {
- pub fn full(size: Size) -> Self {
- Self { start: 0, end: size.unsigned_int_max() }
- }
-
- /// Returns `true` if `v` is contained in the range.
- #[inline(always)]
- pub fn contains(&self, v: u128) -> bool {
- if self.start <= self.end {
- self.start <= v && v <= self.end
- } else {
- self.start <= v || v <= self.end
- }
- }
-
- /// Returns `self` with replaced `start`
- #[inline(always)]
- pub fn with_start(mut self, start: u128) -> Self {
- self.start = start;
- self
- }
-
- /// Returns `self` with replaced `end`
- #[inline(always)]
- pub fn with_end(mut self, end: u128) -> Self {
- self.end = end;
- self
- }
-
- /// Returns `true` if `size` completely fills the range.
- #[inline]
- pub fn is_full_for(&self, size: Size) -> bool {
- let max_value = size.unsigned_int_max();
- debug_assert!(self.start <= max_value && self.end <= max_value);
- self.start == (self.end.wrapping_add(1) & max_value)
- }
-}
-
-impl fmt::Debug for WrappingRange {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- if self.start > self.end {
- write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
- } else {
- write!(fmt, "{}..={}", self.start, self.end)?;
- }
- Ok(())
- }
-}
-
-/// Information about one scalar component of a Rust type.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-#[derive(HashStable_Generic)]
-pub enum Scalar {
- Initialized {
- value: Primitive,
-
- // FIXME(eddyb) always use the shortest range, e.g., by finding
- // the largest space between two consecutive valid values and
- // taking everything else as the (shortest) valid range.
- valid_range: WrappingRange,
- },
- Union {
- /// Even for unions, we need to use the correct registers for the kind of
- /// values inside the union, so we keep the `Primitive` type around. We
- /// also use it to compute the size of the scalar.
- /// However, unions never have niches and even allow undef,
- /// so there is no `valid_range`.
- value: Primitive,
- },
-}
-
-impl Scalar {
- #[inline]
- pub fn is_bool(&self) -> bool {
- matches!(
- self,
- Scalar::Initialized {
- value: Int(I8, false),
- valid_range: WrappingRange { start: 0, end: 1 }
- }
- )
- }
-
- /// Get the primitive representation of this type, ignoring the valid range and whether the
- /// value is allowed to be undefined (due to being a union).
- pub fn primitive(&self) -> Primitive {
- match *self {
- Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
- }
- }
-
- pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
- self.primitive().align(cx)
- }
-
- pub fn size(self, cx: &impl HasDataLayout) -> Size {
- self.primitive().size(cx)
- }
-
- #[inline]
- pub fn to_union(&self) -> Self {
- Self::Union { value: self.primitive() }
- }
-
- #[inline]
- pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
- match *self {
- Scalar::Initialized { valid_range, .. } => valid_range,
- Scalar::Union { value } => WrappingRange::full(value.size(cx)),
- }
- }
-
- #[inline]
- /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
- pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
- match self {
- Scalar::Initialized { valid_range, .. } => valid_range,
- Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
- }
- }
-
- /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole layout
- #[inline]
- pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
- match *self {
- Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
- Scalar::Union { .. } => true,
- }
- }
-
- /// Returns `true` if this type can be left uninit.
- #[inline]
- pub fn is_uninit_valid(&self) -> bool {
- match *self {
- Scalar::Initialized { .. } => false,
- Scalar::Union { .. } => true,
- }
- }
-}
-
-/// Describes how the fields of a type are located in memory.
-#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum FieldsShape {
- /// Scalar primitives and `!`, which never have fields.
- Primitive,
-
- /// All fields start at no offset. The `usize` is the field count.
- Union(NonZeroUsize),
-
- /// Array/vector-like placement, with all fields of identical types.
- Array { stride: Size, count: u64 },
-
- /// Struct-like placement, with precomputed offsets.
- ///
- /// Fields are guaranteed to not overlap, but note that gaps
- /// before, between and after all the fields are NOT always
- /// padding, and as such their contents may not be discarded.
- /// For example, enum variants leave a gap at the start,
- /// where the discriminant field in the enum layout goes.
- Arbitrary {
- /// Offsets for the first byte of each field,
- /// ordered to match the source definition order.
- /// This vector does not go in increasing order.
- // FIXME(eddyb) use small vector optimization for the common case.
- offsets: Vec<Size>,
-
- /// Maps source order field indices to memory order indices,
- /// depending on how the fields were reordered (if at all).
- /// This is a permutation, with both the source order and the
- /// memory order using the same (0..n) index ranges.
- ///
- /// Note that during computation of `memory_index`, sometimes
- /// it is easier to operate on the inverse mapping (that is,
- /// from memory order to source order), and that is usually
- /// named `inverse_memory_index`.
- ///
- // FIXME(eddyb) build a better abstraction for permutations, if possible.
- // FIXME(camlorn) also consider small vector optimization here.
- memory_index: Vec<u32>,
- },
-}
-
-impl FieldsShape {
- #[inline]
- pub fn count(&self) -> usize {
- match *self {
- FieldsShape::Primitive => 0,
- FieldsShape::Union(count) => count.get(),
- FieldsShape::Array { count, .. } => count.try_into().unwrap(),
- FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
- }
- }
-
- #[inline]
- pub fn offset(&self, i: usize) -> Size {
- match *self {
- FieldsShape::Primitive => {
- unreachable!("FieldsShape::offset: `Primitive`s have no fields")
- }
- FieldsShape::Union(count) => {
- assert!(
- i < count.get(),
- "tried to access field {} of union with {} fields",
- i,
- count
- );
- Size::ZERO
- }
- FieldsShape::Array { stride, count } => {
- let i = u64::try_from(i).unwrap();
- assert!(i < count);
- stride * i
- }
- FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
- }
- }
-
- #[inline]
- pub fn memory_index(&self, i: usize) -> usize {
- match *self {
- FieldsShape::Primitive => {
- unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
- }
- FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
- FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
- }
- }
-
- /// Gets source indices of the fields by increasing offsets.
- #[inline]
- pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
- let mut inverse_small = [0u8; 64];
- let mut inverse_big = vec![];
- let use_small = self.count() <= inverse_small.len();
-
- // We have to write this logic twice in order to keep the array small.
- if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
- if use_small {
- for i in 0..self.count() {
- inverse_small[memory_index[i] as usize] = i as u8;
- }
- } else {
- inverse_big = vec![0; self.count()];
- for i in 0..self.count() {
- inverse_big[memory_index[i] as usize] = i as u32;
- }
- }
- }
-
- (0..self.count()).map(move |i| match *self {
- FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
- FieldsShape::Arbitrary { .. } => {
- if use_small {
- inverse_small[i] as usize
- } else {
- inverse_big[i] as usize
- }
- }
- })
- }
-}
-
-/// An identifier that specifies the address space that some operation
-/// should operate on. Special address spaces have an effect on code generation,
-/// depending on the target and the address spaces it implements.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub struct AddressSpace(pub u32);
-
-impl AddressSpace {
- /// The default address space, corresponding to data space.
- pub const DATA: Self = AddressSpace(0);
-}
-
-/// Describes how values of the type are passed by target ABIs,
-/// in terms of categories of C types there are ABI rules for.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum Abi {
- Uninhabited,
- Scalar(Scalar),
- ScalarPair(Scalar, Scalar),
- Vector {
- element: Scalar,
- count: u64,
- },
- Aggregate {
- /// If true, the size is exact, otherwise it's only a lower bound.
- sized: bool,
- },
-}
-
-impl Abi {
- /// Returns `true` if the layout corresponds to an unsized type.
- #[inline]
- pub fn is_unsized(&self) -> bool {
- match *self {
- Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
- Abi::Aggregate { sized } => !sized,
- }
- }
-
- /// Returns `true` if this is a single signed integer scalar
- #[inline]
- pub fn is_signed(&self) -> bool {
- match self {
- Abi::Scalar(scal) => match scal.primitive() {
- Primitive::Int(_, signed) => signed,
- _ => false,
- },
- _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
- }
- }
-
- /// Returns `true` if this is an uninhabited type
- #[inline]
- pub fn is_uninhabited(&self) -> bool {
- matches!(*self, Abi::Uninhabited)
- }
-
- /// Returns `true` is this is a scalar type
- #[inline]
- pub fn is_scalar(&self) -> bool {
- matches!(*self, Abi::Scalar(_))
- }
-}
-
rustc_index::newtype_index! {
pub struct VariantIdx {
derive [HashStable_Generic]
}
}
-#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum Variants<'a> {
- /// Single enum variants, structs/tuples, unions, and all non-ADTs.
- Single { index: VariantIdx },
-
- /// Enum-likes with more than one inhabited variant: each variant comes with
- /// a *discriminant* (usually the same as the variant index but the user can
- /// assign explicit discriminant values). That discriminant is encoded
- /// as a *tag* on the machine. The layout of each variant is
- /// a struct, and they all have space reserved for the tag.
- /// For enums, the tag is the sole field of the layout.
- Multiple {
- tag: Scalar,
- tag_encoding: TagEncoding,
- tag_field: usize,
- variants: IndexVec<VariantIdx, Layout<'a>>,
- },
-}
-
-#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum TagEncoding {
- /// The tag directly stores the discriminant, but possibly with a smaller layout
- /// (so converting the tag to the discriminant can require sign extension).
- Direct,
-
- /// Niche (values invalid for a type) encoding the discriminant:
- /// Discriminant and variant index coincide.
- /// The variant `untagged_variant` contains a niche at an arbitrary
- /// offset (field `tag_field` of the enum), which for a variant with
- /// discriminant `d` is set to
- /// `(d - niche_variants.start).wrapping_add(niche_start)`.
- ///
- /// For example, `Option<(usize, &T)>` is represented such that
- /// `None` has a null pointer for the second tuple field, and
- /// `Some` is the identity function (with a non-null reference).
- Niche {
- untagged_variant: VariantIdx,
- niche_variants: RangeInclusive<VariantIdx>,
- niche_start: u128,
- },
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub struct Niche {
- pub offset: Size,
- pub value: Primitive,
- pub valid_range: WrappingRange,
-}
-
-impl Niche {
- pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
- let Scalar::Initialized { value, valid_range } = scalar else { return None };
- let niche = Niche { offset, value, valid_range };
- if niche.available(cx) > 0 { Some(niche) } else { None }
- }
-
- pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
- let Self { value, valid_range: v, .. } = *self;
- let size = value.size(cx);
- assert!(size.bits() <= 128);
- let max_value = size.unsigned_int_max();
-
- // Find out how many values are outside the valid range.
- let niche = v.end.wrapping_add(1)..v.start;
- niche.end.wrapping_sub(niche.start) & max_value
- }
-
- pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
- assert!(count > 0);
-
- let Self { value, valid_range: v, .. } = *self;
- let size = value.size(cx);
- assert!(size.bits() <= 128);
- let max_value = size.unsigned_int_max();
-
- let niche = v.end.wrapping_add(1)..v.start;
- let available = niche.end.wrapping_sub(niche.start) & max_value;
- if count > available {
- return None;
- }
-
- // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
- // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
- // This is accomplished by preferring enums with 2 variants(`count==1`) and always taking the shortest path to niche zero.
- // Having `None` in niche zero can enable some special optimizations.
- //
- // Bound selection criteria:
- // 1. Select closest to zero given wrapping semantics.
- // 2. Avoid moving past zero if possible.
- //
- // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
- // If niche zero is already reserved, the selection of bounds are of little interest.
- let move_start = |v: WrappingRange| {
- let start = v.start.wrapping_sub(count) & max_value;
- Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
- };
- let move_end = |v: WrappingRange| {
- let start = v.end.wrapping_add(1) & max_value;
- let end = v.end.wrapping_add(count) & max_value;
- Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
- };
- let distance_end_zero = max_value - v.end;
- if v.start > v.end {
- // zero is unavailable because wrapping occurs
- move_end(v)
- } else if v.start <= distance_end_zero {
- if count <= v.start {
- move_start(v)
- } else {
- // moved past zero, use other bound
- move_end(v)
- }
- } else {
- let end = v.end.wrapping_add(count) & max_value;
- let overshot_zero = (1..=v.end).contains(&end);
- if overshot_zero {
- // moved past zero, use other bound
- move_start(v)
- } else {
- move_end(v)
- }
- }
- }
-}
-
-#[derive(PartialEq, Eq, Hash, HashStable_Generic)]
-pub struct LayoutS<'a> {
- /// Says where the fields are located within the layout.
- pub fields: FieldsShape,
-
- /// Encodes information about multi-variant layouts.
- /// Even with `Multiple` variants, a layout still has its own fields! Those are then
- /// shared between all variants. One of them will be the discriminant,
- /// but e.g. generators can have more.
- ///
- /// To access all fields of this layout, both `fields` and the fields of the active variant
- /// must be taken into account.
- pub variants: Variants<'a>,
-
- /// The `abi` defines how this data is passed between functions, and it defines
- /// value restrictions via `valid_range`.
- ///
- /// Note that this is entirely orthogonal to the recursive structure defined by
- /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
- /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
- /// have to be taken into account to find all fields of this layout.
- pub abi: Abi,
-
- /// The leaf scalar with the largest number of invalid values
- /// (i.e. outside of its `valid_range`), if it exists.
- pub largest_niche: Option<Niche>,
-
- pub align: AbiAndPrefAlign,
- pub size: Size,
-}
-
-impl<'a> LayoutS<'a> {
- pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
- let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
- let size = scalar.size(cx);
- let align = scalar.align(cx);
- LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Primitive,
- abi: Abi::Scalar(scalar),
- largest_niche,
- size,
- align,
- }
- }
-}
-
-impl<'a> fmt::Debug for LayoutS<'a> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- // This is how `Layout` used to print before it become
- // `Interned<LayoutS>`. We print it like this to avoid having to update
- // expected output in a lot of tests.
- let LayoutS { size, align, abi, fields, largest_niche, variants } = self;
- f.debug_struct("Layout")
- .field("size", size)
- .field("align", align)
- .field("abi", abi)
- .field("fields", fields)
- .field("largest_niche", largest_niche)
- .field("variants", variants)
- .finish()
- }
-}
-
#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
#[rustc_pass_by_value]
-pub struct Layout<'a>(pub Interned<'a, LayoutS<'a>>);
+pub struct Layout<'a>(pub Interned<'a, LayoutS<VariantIdx>>);
impl<'a> fmt::Debug for Layout<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -1319,7 +41,7 @@ impl<'a> Layout<'a> {
&self.0.0.fields
}
- pub fn variants(self) -> &'a Variants<'a> {
+ pub fn variants(self) -> &'a Variants<VariantIdx> {
&self.0.0.variants
}
@@ -1354,47 +76,12 @@ pub struct TyAndLayout<'a, Ty> {
}
impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
- type Target = &'a LayoutS<'a>;
- fn deref(&self) -> &&'a LayoutS<'a> {
+ type Target = &'a LayoutS<VariantIdx>;
+ fn deref(&self) -> &&'a LayoutS<VariantIdx> {
&self.layout.0.0
}
}
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum PointerKind {
- /// Most general case, we know no restrictions to tell LLVM.
- SharedMutable,
-
- /// `&T` where `T` contains no `UnsafeCell`, is `dereferenceable`, `noalias` and `readonly`.
- Frozen,
-
- /// `&mut T` which is `dereferenceable` and `noalias` but not `readonly`.
- UniqueBorrowed,
-
- /// `&mut !Unpin`, which is `dereferenceable` but neither `noalias` nor `readonly`.
- UniqueBorrowedPinned,
-
- /// `Box<T>`, which is `noalias` (even on return types, unlike the above) but neither `readonly`
- /// nor `dereferenceable`.
- UniqueOwned,
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct PointeeInfo {
- pub size: Size,
- pub align: Align,
- pub safe: Option<PointerKind>,
- pub address_space: AddressSpace,
-}
-
-/// Used in `might_permit_raw_init` to indicate the kind of initialisation
-/// that is checked to be valid
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum InitKind {
- Zero,
- UninitMitigated0x01Fill,
-}
-
/// Trait that needs to be implemented by the higher-level type representation
/// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
pub trait TyAbiInterface<'a, C>: Sized {
@@ -1490,6 +177,11 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
self.abi.is_unsized()
}
+ #[inline]
+ pub fn is_sized(&self) -> bool {
+ self.abi.is_sized()
+ }
+
/// Returns `true` if the type is a ZST and not unsized.
pub fn is_zst(&self) -> bool {
match self.abi {
diff --git a/compiler/rustc_target/src/json.rs b/compiler/rustc_target/src/json.rs
index b5d926352..75bb76a9d 100644
--- a/compiler/rustc_target/src/json.rs
+++ b/compiler/rustc_target/src/json.rs
@@ -89,3 +89,28 @@ impl<A: ToJson> ToJson for Option<A> {
}
}
}
+
+impl ToJson for crate::abi::call::Conv {
+ fn to_json(&self) -> Json {
+ let s = match self {
+ Self::C => "C",
+ Self::Rust => "Rust",
+ Self::RustCold => "RustCold",
+ Self::ArmAapcs => "ArmAapcs",
+ Self::CCmseNonSecureCall => "CCmseNonSecureCall",
+ Self::Msp430Intr => "Msp430Intr",
+ Self::PtxKernel => "PtxKernel",
+ Self::X86Fastcall => "X86Fastcall",
+ Self::X86Intr => "X86Intr",
+ Self::X86Stdcall => "X86Stdcall",
+ Self::X86ThisCall => "X86ThisCall",
+ Self::X86VectorCall => "X86VectorCall",
+ Self::X86_64SysV => "X86_64SysV",
+ Self::X86_64Win64 => "X86_64Win64",
+ Self::AmdGpuKernel => "AmdGpuKernel",
+ Self::AvrInterrupt => "AvrInterrupt",
+ Self::AvrNonBlockingInterrupt => "AvrNonBlockingInterrupt",
+ };
+ Json::String(s.to_owned())
+ }
+}
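
A minimal sketch of how this new `ToJson` impl could be exercised, assuming `Json` is the `serde_json::Value` alias used elsewhere in this module; this is a hypothetical test, not part of the patch:

    #[cfg(test)]
    mod conv_json_tests {
        use crate::abi::call::Conv;
        use crate::json::{Json, ToJson};

        #[test]
        fn conv_serializes_to_its_variant_name() {
            // Each calling convention maps to the plain string of its variant name.
            assert_eq!(Conv::X86_64SysV.to_json(), Json::String("X86_64SysV".into()));
            assert_eq!(Conv::Rust.to_json(), Json::String("Rust".into()));
        }
    }
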
diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs
index aaba0d7f0..b69a0a645 100644
--- a/compiler/rustc_target/src/lib.rs
+++ b/compiler/rustc_target/src/lib.rs
@@ -35,10 +35,7 @@ pub mod spec;
#[cfg(test)]
mod tests;
-/// Requirements for a `StableHashingContext` to be used in this crate.
-/// This is a hack to allow using the `HashStable_Generic` derive macro
-/// instead of implementing everything in `rustc_middle`.
-pub trait HashStableContext {}
+pub use rustc_abi::HashStableContext;
/// The name of rustc's own place to organize libraries.
///
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs b/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs
index 6d919a4c2..e72cab629 100644
--- a/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs
+++ b/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs
@@ -1,26 +1,23 @@
+use super::apple_base::{macos_llvm_target, opts, Arch};
use crate::spec::{FramePointer, SanitizerSet, Target, TargetOptions};
pub fn target() -> Target {
- let arch = "arm64";
- let mut base = super::apple_base::opts("macos", arch, "");
+ let arch = Arch::Arm64;
+ let mut base = opts("macos", arch);
base.cpu = "apple-a14".into();
base.max_atomic_width = Some(128);
// FIXME: The leak sanitizer currently fails the tests, see #88132.
base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::THREAD;
- base.link_env_remove.to_mut().extend(super::apple_base::macos_link_env_remove());
-
- // Clang automatically chooses a more specific target based on
- // MACOSX_DEPLOYMENT_TARGET. To enable cross-language LTO to work
- // correctly, we do too.
- let llvm_target = super::apple_base::macos_llvm_target(arch);
-
Target {
- llvm_target: llvm_target.into(),
+ // Clang automatically chooses a more specific target based on
+ // MACOSX_DEPLOYMENT_TARGET. To enable cross-language LTO to work
+ // correctly, we do too.
+ llvm_target: macos_llvm_target(arch).into(),
pointer_width: 64,
data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
- arch: "aarch64".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
mcount: "\u{1}mcount".into(),
frame_pointer: FramePointer::NonLeaf,
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_ios.rs b/compiler/rustc_target/src/spec/aarch64_apple_ios.rs
index beb904239..b5f9eb125 100644
--- a/compiler/rustc_target/src/spec/aarch64_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/aarch64_apple_ios.rs
@@ -1,19 +1,17 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{ios_llvm_target, opts, Arch};
use crate::spec::{FramePointer, Target, TargetOptions};
pub fn target() -> Target {
- // Clang automatically chooses a more specific target based on
- // IPHONEOS_DEPLOYMENT_TARGET.
- // This is required for the target to pick the right
- // MACH-O commands, so we do too.
- let arch = "arm64";
- let llvm_target = super::apple_base::ios_llvm_target(arch);
-
+ let arch = Arch::Arm64;
Target {
- llvm_target: llvm_target.into(),
+ // Clang automatically chooses a more specific target based on
+ // IPHONEOS_DEPLOYMENT_TARGET.
+ // This is required for the target to pick the right
+ // MACH-O commands, so we do too.
+ llvm_target: ios_llvm_target(arch).into(),
pointer_width: 64,
data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
- arch: "aarch64".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
features: "+neon,+fp-armv8,+apple-a7".into(),
max_atomic_width: Some(128),
@@ -30,7 +28,7 @@ pub fn target() -> Target {
darwinpcs\0\
-Os\0"
.into(),
- ..opts("ios", Arch::Arm64)
+ ..opts("ios", arch)
},
}
}
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs b/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
index 2d2671549..0009972cf 100644
--- a/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
+++ b/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
@@ -1,17 +1,18 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{opts, Arch};
use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, Target, TargetOptions};
pub fn target() -> Target {
let llvm_target = "arm64-apple-ios14.0-macabi";
- let mut base = opts("ios", Arch::Arm64_macabi);
+ let arch = Arch::Arm64_macabi;
+ let mut base = opts("ios", arch);
base.add_pre_link_args(LinkerFlavor::Darwin(Cc::Yes, Lld::No), &["-target", llvm_target]);
Target {
llvm_target: llvm_target.into(),
pointer_width: 64,
data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
- arch: "aarch64".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
features: "+neon,+fp-armv8,+apple-a12".into(),
max_atomic_width: Some(128),
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_ios_sim.rs b/compiler/rustc_target/src/spec/aarch64_apple_ios_sim.rs
index b4e135f66..3374755e2 100644
--- a/compiler/rustc_target/src/spec/aarch64_apple_ios_sim.rs
+++ b/compiler/rustc_target/src/spec/aarch64_apple_ios_sim.rs
@@ -1,21 +1,17 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{ios_sim_llvm_target, opts, Arch};
use crate::spec::{FramePointer, Target, TargetOptions};
pub fn target() -> Target {
- let base = opts("ios", Arch::Arm64_sim);
-
- // Clang automatically chooses a more specific target based on
- // IPHONEOS_DEPLOYMENT_TARGET.
- // This is required for the simulator target to pick the right
- // MACH-O commands, so we do too.
- let arch = "arm64";
- let llvm_target = super::apple_base::ios_sim_llvm_target(arch);
-
+ let arch = Arch::Arm64_sim;
Target {
- llvm_target: llvm_target.into(),
+ // Clang automatically chooses a more specific target based on
+ // IPHONEOS_DEPLOYMENT_TARGET.
+ // This is required for the simulator target to pick the right
+ // MACH-O commands, so we do too.
+ llvm_target: ios_sim_llvm_target(arch).into(),
pointer_width: 64,
data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
- arch: "aarch64".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
features: "+neon,+fp-armv8,+apple-a7".into(),
max_atomic_width: Some(128),
@@ -32,7 +28,7 @@ pub fn target() -> Target {
darwinpcs\0\
-Os\0"
.into(),
- ..base
+ ..opts("ios", arch)
},
}
}
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_tvos.rs b/compiler/rustc_target/src/spec/aarch64_apple_tvos.rs
index 2e31d16dc..bb7c39ff2 100644
--- a/compiler/rustc_target/src/spec/aarch64_apple_tvos.rs
+++ b/compiler/rustc_target/src/spec/aarch64_apple_tvos.rs
@@ -1,18 +1,19 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{opts, Arch};
use crate::spec::{FramePointer, Target, TargetOptions};
pub fn target() -> Target {
+ let arch = Arch::Arm64;
Target {
llvm_target: "arm64-apple-tvos".into(),
pointer_width: 64,
data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
- arch: "aarch64".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
features: "+neon,+fp-armv8,+apple-a7".into(),
max_atomic_width: Some(128),
forces_embed_bitcode: true,
frame_pointer: FramePointer::NonLeaf,
- ..opts("tvos", Arch::Arm64)
+ ..opts("tvos", arch)
},
}
}
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_watchos_sim.rs b/compiler/rustc_target/src/spec/aarch64_apple_watchos_sim.rs
index 3059f4214..e4af4127c 100644
--- a/compiler/rustc_target/src/spec/aarch64_apple_watchos_sim.rs
+++ b/compiler/rustc_target/src/spec/aarch64_apple_watchos_sim.rs
@@ -1,21 +1,17 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{opts, watchos_sim_llvm_target, Arch};
use crate::spec::{FramePointer, Target, TargetOptions};
pub fn target() -> Target {
- let base = opts("watchos", Arch::Arm64_sim);
-
- // Clang automatically chooses a more specific target based on
- // WATCHOS_DEPLOYMENT_TARGET.
- // This is required for the simulator target to pick the right
- // MACH-O commands, so we do too.
- let arch = "arm64";
- let llvm_target = super::apple_base::watchos_sim_llvm_target(arch);
-
+ let arch = Arch::Arm64_sim;
Target {
- llvm_target: llvm_target.into(),
+ // Clang automatically chooses a more specific target based on
+ // WATCHOS_DEPLOYMENT_TARGET.
+ // This is required for the simulator target to pick the right
+ // MACH-O commands, so we do too.
+ llvm_target: watchos_sim_llvm_target(arch).into(),
pointer_width: 64,
data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
- arch: "aarch64".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
features: "+neon,+fp-armv8,+apple-a7".into(),
max_atomic_width: Some(128),
@@ -32,7 +28,7 @@ pub fn target() -> Target {
darwinpcs\0\
-Os\0"
.into(),
- ..base
+ ..opts("watchos", arch)
},
}
}
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_nto_qnx_710.rs b/compiler/rustc_target/src/spec/aarch64_unknown_nto_qnx_710.rs
new file mode 100644
index 000000000..916b6137b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_nto_qnx_710.rs
@@ -0,0 +1,28 @@
+use super::nto_qnx_base;
+use crate::spec::{Cc, LinkerFlavor, Lld, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "aarch64-unknown-unknown".into(),
+ pointer_width: 64,
+ // from: https://llvm.org/docs/LangRef.html#data-layout
+ // e = little endian
+ // m:e = ELF mangling: Private symbols get a .L prefix
+ // i8:8:32 = 8-bit-integer, minimum_alignment=8, preferred_alignment=32
+ // i16:16:32 = 16-bit-integer, minimum_alignment=16, preferred_alignment=32
+ // i64:64 = 64-bit-integer, minimum_alignment=64, preferred_alignment=64
+ // i128:128 = 128-bit-integer, minimum_alignment=128, preferred_alignment=128
+ // n32:64 = 32 and 64 are native integer widths; Elements of this set are considered to support most general arithmetic operations efficiently.
+ // S128 = 128 bits are the natural alignment of the stack in bits.
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ max_atomic_width: Some(128),
+ pre_link_args: TargetOptions::link_args(
+ LinkerFlavor::Gnu(Cc::Yes, Lld::No),
+ &["-Vgcc_ntoaarch64le_cxx"],
+ ),
+ ..nto_qnx_base::opts()
+ },
+ }
+}
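
The comments above decode the data-layout string field by field. As a rough cross-check, the same string can be fed through the parser that this series also wires up via `Target::parse_data_layout` later in the diff; the function below is only an illustrative sketch, not a test in the tree:

    use crate::abi::{Endian, TargetDataLayout};

    fn check_qnx_aarch64_layout() {
        let dl = match TargetDataLayout::parse_from_llvm_datalayout_string(
            "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128",
        ) {
            Ok(dl) => dl,
            Err(_) => panic!("the annotated layout string should parse"),
        };
        assert!(dl.endian == Endian::Little); // "e" = little endian
        assert!(dl.pointer_size.bits() == 64); // matches pointer_width above
    }
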
diff --git a/compiler/rustc_target/src/spec/abi.rs b/compiler/rustc_target/src/spec/abi.rs
index ce45fa139..cb2a0c04c 100644
--- a/compiler/rustc_target/src/spec/abi.rs
+++ b/compiler/rustc_target/src/spec/abi.rs
@@ -40,6 +40,28 @@ pub enum Abi {
RustCold,
}
+impl Abi {
+ pub fn supports_varargs(self) -> bool {
+ // * C and Cdecl obviously support varargs.
+ // * C can be based on SysV64 or Win64, so they must support varargs.
+ // * EfiApi is based on Win64 or C, so it also supports it.
+ //
+ // * Stdcall does not, because it would be impossible for the callee to clean
+ // up the arguments. (the callee doesn't know how many arguments there are)
+ // * Same for Fastcall, Vectorcall and Thiscall.
+ // * System can become Stdcall, so is also a no-no.
+ // * Other calling conventions are related to hardware or the compiler itself.
+ match self {
+ Self::C { .. }
+ | Self::Cdecl { .. }
+ | Self::Win64 { .. }
+ | Self::SysV64 { .. }
+ | Self::EfiApi => true,
+ _ => false,
+ }
+ }
+}
+
#[derive(Copy, Clone)]
pub struct AbiData {
abi: Abi,
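
To make the rule described in those comments concrete, here is a hedged sketch of how a caller might consult `supports_varargs`; the helper name and error message are illustrative only:

    use crate::spec::abi::Abi;

    // Sketch: reject C-variadic declarations on ABIs that cannot support them.
    fn check_varargs_decl(abi: Abi, is_c_variadic: bool) -> Result<(), &'static str> {
        if is_c_variadic && !abi.supports_varargs() {
            // e.g. Abi::Stdcall { unwind: false }: the callee pops its own arguments,
            // so it must know their exact number at compile time.
            return Err("C-variadic functions are not supported on this ABI");
        }
        Ok(())
    }
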
diff --git a/compiler/rustc_target/src/spec/aix_base.rs b/compiler/rustc_target/src/spec/aix_base.rs
new file mode 100644
index 000000000..c71c4ba2c
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aix_base.rs
@@ -0,0 +1,32 @@
+use crate::abi::Endian;
+use crate::spec::{crt_objects, cvs, Cc, CodeModel, LinkOutputKind, LinkerFlavor, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ abi: "vec-extabi".into(),
+ code_model: Some(CodeModel::Small),
+ cpu: "pwr7".into(),
+ os: "aix".into(),
+ vendor: "ibm".into(),
+ dynamic_linking: true,
+ endian: Endian::Big,
+ executables: true,
+ archive_format: "aix_big".into(),
+ families: cvs!["unix"],
+ has_rpath: false,
+ has_thread_local: true,
+ crt_static_respected: true,
+ linker_flavor: LinkerFlavor::Unix(Cc::No),
+ linker: Some("ld".into()),
+ eh_frame_header: false,
+ is_like_aix: true,
+ default_dwarf_version: 3,
+ function_sections: true,
+ pre_link_objects: crt_objects::new(&[
+ (LinkOutputKind::DynamicNoPicExe, &["/usr/lib/crt0_64.o", "/usr/lib/crti_64.o"]),
+ (LinkOutputKind::DynamicPicExe, &["/usr/lib/crt0_64.o", "/usr/lib/crti_64.o"]),
+ ]),
+ dll_suffix: ".a".into(),
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/apple/tests.rs b/compiler/rustc_target/src/spec/apple/tests.rs
new file mode 100644
index 000000000..3c90a5e7e
--- /dev/null
+++ b/compiler/rustc_target/src/spec/apple/tests.rs
@@ -0,0 +1,35 @@
+use crate::spec::{
+ aarch64_apple_darwin, aarch64_apple_ios_sim, aarch64_apple_watchos_sim, i686_apple_darwin,
+ x86_64_apple_darwin, x86_64_apple_ios, x86_64_apple_tvos, x86_64_apple_watchos_sim,
+};
+
+#[test]
+fn simulator_targets_set_abi() {
+ let all_sim_targets = [
+ x86_64_apple_ios::target(),
+ x86_64_apple_tvos::target(),
+ x86_64_apple_watchos_sim::target(),
+ aarch64_apple_ios_sim::target(),
+ // Note: There is currently no ARM64 tvOS simulator target
+ aarch64_apple_watchos_sim::target(),
+ ];
+
+ for target in all_sim_targets {
+ assert_eq!(target.abi, "sim")
+ }
+}
+
+#[test]
+fn macos_link_environment_unmodified() {
+ let all_macos_targets = [
+ aarch64_apple_darwin::target(),
+ i686_apple_darwin::target(),
+ x86_64_apple_darwin::target(),
+ ];
+
+ for target in all_macos_targets {
+ // macOS targets should only remove information for cross-compiling, but never
+ // for the host.
+ assert_eq!(target.link_env_remove, crate::spec::cvs!["IPHONEOS_DEPLOYMENT_TARGET"]);
+ }
+}
diff --git a/compiler/rustc_target/src/spec/apple_base.rs b/compiler/rustc_target/src/spec/apple_base.rs
index 40bc59ca1..7f8160b5d 100644
--- a/compiler/rustc_target/src/spec/apple_base.rs
+++ b/compiler/rustc_target/src/spec/apple_base.rs
@@ -3,7 +3,78 @@ use std::{borrow::Cow, env};
use crate::spec::{cvs, Cc, DebuginfoKind, FramePointer, LinkArgs};
use crate::spec::{LinkerFlavor, Lld, SplitDebuginfo, StaticCow, TargetOptions};
-fn pre_link_args(os: &'static str, arch: &'static str, abi: &'static str) -> LinkArgs {
+#[cfg(test)]
+#[path = "apple/tests.rs"]
+mod tests;
+
+use Arch::*;
+#[allow(non_camel_case_types)]
+#[derive(Copy, Clone)]
+pub enum Arch {
+ Armv7,
+ Armv7k,
+ Armv7s,
+ Arm64,
+ Arm64_32,
+ I386,
+ I686,
+ X86_64,
+ X86_64_sim,
+ X86_64_macabi,
+ Arm64_macabi,
+ Arm64_sim,
+}
+
+impl Arch {
+ pub fn target_name(self) -> &'static str {
+ match self {
+ Armv7 => "armv7",
+ Armv7k => "armv7k",
+ Armv7s => "armv7s",
+ Arm64 | Arm64_macabi | Arm64_sim => "arm64",
+ Arm64_32 => "arm64_32",
+ I386 => "i386",
+ I686 => "i686",
+ X86_64 | X86_64_sim | X86_64_macabi => "x86_64",
+ }
+ }
+
+ pub fn target_arch(self) -> Cow<'static, str> {
+ Cow::Borrowed(match self {
+ Armv7 | Armv7k | Armv7s => "arm",
+ Arm64 | Arm64_32 | Arm64_macabi | Arm64_sim => "aarch64",
+ I386 | I686 => "x86",
+ X86_64 | X86_64_sim | X86_64_macabi => "x86_64",
+ })
+ }
+
+ fn target_abi(self) -> &'static str {
+ match self {
+ Armv7 | Armv7k | Armv7s | Arm64 | Arm64_32 | I386 | I686 | X86_64 => "",
+ X86_64_macabi | Arm64_macabi => "macabi",
+ // x86_64-apple-ios is a simulator target, even though it isn't
+ // declared that way in the target like the other ones...
+ Arm64_sim | X86_64_sim => "sim",
+ }
+ }
+
+ fn target_cpu(self) -> &'static str {
+ match self {
+ Armv7 => "cortex-a8", // iOS7 is supported on iPhone 4 and higher
+ Armv7k => "cortex-a8",
+ Armv7s => "cortex-a9",
+ Arm64 => "apple-a7",
+ Arm64_32 => "apple-s4",
+ I386 | I686 => "yonah",
+ X86_64 | X86_64_sim => "core2",
+ X86_64_macabi => "core2",
+ Arm64_macabi => "apple-a12",
+ Arm64_sim => "apple-a12",
+ }
+ }
+}
+
+fn pre_link_args(os: &'static str, arch: Arch, abi: &'static str) -> LinkArgs {
let platform_name: StaticCow<str> = match abi {
"sim" => format!("{}-simulator", os).into(),
"macabi" => "mac-catalyst".into(),
@@ -19,6 +90,8 @@ fn pre_link_args(os: &'static str, arch: &'static str, abi: &'static str) -> Lin
}
.into();
+ let arch = arch.target_name();
+
let mut args = TargetOptions::link_args(
LinkerFlavor::Darwin(Cc::No, Lld::No),
&["-arch", arch, "-platform_version"],
@@ -35,24 +108,29 @@ fn pre_link_args(os: &'static str, arch: &'static str, abi: &'static str) -> Lin
args
}
-pub fn opts(os: &'static str, arch: &'static str, abi: &'static str) -> TargetOptions {
- // ELF TLS is only available in macOS 10.7+. If you try to compile for 10.6
+pub fn opts(os: &'static str, arch: Arch) -> TargetOptions {
+ // Static TLS is only available in macOS 10.7+. If you try to compile for 10.6
// either the linker will complain if it is used or the binary will end up
// segfaulting at runtime when run on 10.6. Rust by default supports macOS
// 10.7+, but there is a standard environment variable,
// MACOSX_DEPLOYMENT_TARGET, which is used to signal targeting older
// versions of macOS. For example compiling on 10.10 with
// MACOSX_DEPLOYMENT_TARGET set to 10.6 will cause the linker to generate
- // warnings about the usage of ELF TLS.
+ // warnings about the usage of static TLS.
//
- // Here we detect what version is being requested, defaulting to 10.7. ELF
+ // Here we detect what version is being requested, defaulting to 10.7. Static
// TLS is flagged as enabled if it looks to be supported. The architecture
// only matters for default deployment target which is 11.0 for ARM64 and
// 10.7 for everything else.
- let has_thread_local = macos_deployment_target("x86_64") >= (10, 7);
+ let has_thread_local = os == "macos" && macos_deployment_target(Arch::X86_64) >= (10, 7);
+
+ let abi = arch.target_abi();
TargetOptions {
+ abi: abi.into(),
os: os.into(),
+ cpu: arch.target_cpu().into(),
+ link_env_remove: link_env_remove(arch, os),
vendor: "apple".into(),
linker_flavor: LinkerFlavor::Darwin(Cc::Yes, Lld::No),
// macOS has -dead_strip, which doesn't rely on function_sections
@@ -103,46 +181,65 @@ fn deployment_target(var_name: &str) -> Option<(u32, u32)> {
.and_then(|(a, b)| a.parse::<u32>().and_then(|a| b.parse::<u32>().map(|b| (a, b))).ok())
}
-fn macos_default_deployment_target(arch: &str) -> (u32, u32) {
- if arch == "arm64" { (11, 0) } else { (10, 7) }
+fn macos_default_deployment_target(arch: Arch) -> (u32, u32) {
+ // Note: Arm64_sim is not included since macOS has no simulator.
+ if matches!(arch, Arm64 | Arm64_macabi) { (11, 0) } else { (10, 7) }
}
-fn macos_deployment_target(arch: &str) -> (u32, u32) {
+fn macos_deployment_target(arch: Arch) -> (u32, u32) {
deployment_target("MACOSX_DEPLOYMENT_TARGET")
.unwrap_or_else(|| macos_default_deployment_target(arch))
}
-fn macos_lld_platform_version(arch: &str) -> String {
+fn macos_lld_platform_version(arch: Arch) -> String {
let (major, minor) = macos_deployment_target(arch);
format!("{}.{}", major, minor)
}
-pub fn macos_llvm_target(arch: &str) -> String {
+pub fn macos_llvm_target(arch: Arch) -> String {
let (major, minor) = macos_deployment_target(arch);
- format!("{}-apple-macosx{}.{}.0", arch, major, minor)
+ format!("{}-apple-macosx{}.{}.0", arch.target_name(), major, minor)
}
-pub fn macos_link_env_remove() -> Vec<StaticCow<str>> {
- let mut env_remove = Vec::with_capacity(2);
- // Remove the `SDKROOT` environment variable if it's clearly set for the wrong platform, which
- // may occur when we're linking a custom build script while targeting iOS for example.
- if let Ok(sdkroot) = env::var("SDKROOT") {
- if sdkroot.contains("iPhoneOS.platform") || sdkroot.contains("iPhoneSimulator.platform") {
- env_remove.push("SDKROOT".into())
+fn link_env_remove(arch: Arch, os: &'static str) -> StaticCow<[StaticCow<str>]> {
+ // Apple platforms only officially support macOS as a host for any compilation.
+ //
+ // If building for macOS, we go ahead and remove any erroneous environment state
+ // that's only applicable to cross-OS compilation. Always leave anything for the
+ // host OS alone though.
+ if os == "macos" {
+ let mut env_remove = Vec::with_capacity(2);
+ // Remove the `SDKROOT` environment variable if it's clearly set for the wrong platform, which
+ // may occur when we're linking a custom build script while targeting iOS for example.
+ if let Ok(sdkroot) = env::var("SDKROOT") {
+ if sdkroot.contains("iPhoneOS.platform") || sdkroot.contains("iPhoneSimulator.platform")
+ {
+ env_remove.push("SDKROOT".into())
+ }
+ }
+ // Additionally, `IPHONEOS_DEPLOYMENT_TARGET` must not be set when using the Xcode linker at
+ // "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/ld",
+ // although this is apparently ignored when using the linker at "/usr/bin/ld".
+ env_remove.push("IPHONEOS_DEPLOYMENT_TARGET".into());
+ env_remove.into()
+ } else {
+ // Otherwise if cross-compiling for a different OS/SDK, remove any part
+ // of the linking environment that's wrong and reversed.
+ match arch {
+ Armv7 | Armv7k | Armv7s | Arm64 | Arm64_32 | I386 | I686 | X86_64 | X86_64_sim
+ | Arm64_sim => {
+ cvs!["MACOSX_DEPLOYMENT_TARGET"]
+ }
+ X86_64_macabi | Arm64_macabi => cvs!["IPHONEOS_DEPLOYMENT_TARGET"],
}
}
- // Additionally, `IPHONEOS_DEPLOYMENT_TARGET` must not be set when using the Xcode linker at
- // "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/ld",
- // although this is apparently ignored when using the linker at "/usr/bin/ld".
- env_remove.push("IPHONEOS_DEPLOYMENT_TARGET".into());
- env_remove
}
fn ios_deployment_target() -> (u32, u32) {
deployment_target("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or((7, 0))
}
-pub fn ios_llvm_target(arch: &str) -> String {
+pub fn ios_llvm_target(arch: Arch) -> String {
// Modern iOS tooling extracts information about deployment target
// from LC_BUILD_VERSION. This load command will only be emitted when
// we build with a version specific `llvm_target`, with the version
@@ -150,7 +247,7 @@ pub fn ios_llvm_target(arch: &str) -> String {
// to pick it up (since std and core are still built with the fallback
// of version 7.0 and hence emit the old LC_IPHONE_MIN_VERSION).
let (major, minor) = ios_deployment_target();
- format!("{}-apple-ios{}.{}.0", arch, major, minor)
+ format!("{}-apple-ios{}.{}.0", arch.target_name(), major, minor)
}
fn ios_lld_platform_version() -> String {
@@ -158,9 +255,9 @@ fn ios_lld_platform_version() -> String {
format!("{}.{}", major, minor)
}
-pub fn ios_sim_llvm_target(arch: &str) -> String {
+pub fn ios_sim_llvm_target(arch: Arch) -> String {
let (major, minor) = ios_deployment_target();
- format!("{}-apple-ios{}.{}.0-simulator", arch, major, minor)
+ format!("{}-apple-ios{}.{}.0-simulator", arch.target_name(), major, minor)
}
fn tvos_deployment_target() -> (u32, u32) {
@@ -181,7 +278,7 @@ fn watchos_lld_platform_version() -> String {
format!("{}.{}", major, minor)
}
-pub fn watchos_sim_llvm_target(arch: &str) -> String {
+pub fn watchos_sim_llvm_target(arch: Arch) -> String {
let (major, minor) = watchos_deployment_target();
- format!("{}-apple-watchos{}.{}.0-simulator", arch, major, minor)
+ format!("{}-apple-watchos{}.{}.0-simulator", arch.target_name(), major, minor)
}
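
A rough sketch of how the consolidated `Arch` helpers fit together for one target, assuming `MACOSX_DEPLOYMENT_TARGET` is unset so the 11.0/10.7 defaults described above apply (illustrative only):

    use super::apple_base::{macos_llvm_target, opts, Arch};

    fn sketch() {
        let arch = Arch::Arm64;
        // With MACOSX_DEPLOYMENT_TARGET unset, arm64 defaults to 11.0, giving
        // "arm64-apple-macosx11.0.0".
        let _llvm_target = macos_llvm_target(arch);

        // Per-arch CPU and ABI defaults now come from `Arch` itself.
        let base = opts("macos", arch);
        assert_eq!(base.cpu, "apple-a7");
        assert_eq!(base.abi, "");
    }
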
diff --git a/compiler/rustc_target/src/spec/apple_sdk_base.rs b/compiler/rustc_target/src/spec/apple_sdk_base.rs
deleted file mode 100644
index 49e302676..000000000
--- a/compiler/rustc_target/src/spec/apple_sdk_base.rs
+++ /dev/null
@@ -1,72 +0,0 @@
-use crate::spec::{cvs, TargetOptions};
-use std::borrow::Cow;
-
-use Arch::*;
-#[allow(non_camel_case_types)]
-#[derive(Copy, Clone)]
-pub enum Arch {
- Armv7,
- Armv7k,
- Armv7s,
- Arm64,
- Arm64_32,
- I386,
- X86_64,
- X86_64_macabi,
- Arm64_macabi,
- Arm64_sim,
-}
-
-fn target_arch_name(arch: Arch) -> &'static str {
- match arch {
- Armv7 => "armv7",
- Armv7k => "armv7k",
- Armv7s => "armv7s",
- Arm64 | Arm64_macabi | Arm64_sim => "arm64",
- Arm64_32 => "arm64_32",
- I386 => "i386",
- X86_64 | X86_64_macabi => "x86_64",
- }
-}
-
-fn target_abi(arch: Arch) -> &'static str {
- match arch {
- Armv7 | Armv7k | Armv7s | Arm64 | Arm64_32 | I386 | X86_64 => "",
- X86_64_macabi | Arm64_macabi => "macabi",
- Arm64_sim => "sim",
- }
-}
-
-fn target_cpu(arch: Arch) -> &'static str {
- match arch {
- Armv7 => "cortex-a8", // iOS7 is supported on iPhone 4 and higher
- Armv7k => "cortex-a8",
- Armv7s => "cortex-a9",
- Arm64 => "apple-a7",
- Arm64_32 => "apple-s4",
- I386 => "yonah",
- X86_64 => "core2",
- X86_64_macabi => "core2",
- Arm64_macabi => "apple-a12",
- Arm64_sim => "apple-a12",
- }
-}
-
-fn link_env_remove(arch: Arch) -> Cow<'static, [Cow<'static, str>]> {
- match arch {
- Armv7 | Armv7k | Armv7s | Arm64 | Arm64_32 | I386 | X86_64 | Arm64_sim => {
- cvs!["MACOSX_DEPLOYMENT_TARGET"]
- }
- X86_64_macabi | Arm64_macabi => cvs!["IPHONEOS_DEPLOYMENT_TARGET"],
- }
-}
-
-pub fn opts(os: &'static str, arch: Arch) -> TargetOptions {
- TargetOptions {
- abi: target_abi(arch).into(),
- cpu: target_cpu(arch).into(),
- link_env_remove: link_env_remove(arch),
- has_thread_local: false,
- ..super::apple_base::opts(os, target_arch_name(arch), target_abi(arch))
- }
-}
diff --git a/compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs b/compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs
index cb7f5f2a5..52ee68e75 100644
--- a/compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs
+++ b/compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs
@@ -1,4 +1,4 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{opts, Arch};
use crate::spec::{Target, TargetOptions};
pub fn target() -> Target {
@@ -12,6 +12,8 @@ pub fn target() -> Target {
features: "+neon,+fp-armv8,+apple-a7".into(),
max_atomic_width: Some(128),
forces_embed_bitcode: true,
+ dynamic_linking: false,
+ position_independent_executables: true,
// These arguments are not actually invoked - they just have
// to look right to pass App Store validation.
bitcode_llvm_cmdline: "-triple\0\
diff --git a/compiler/rustc_target/src/spec/armv7_apple_ios.rs b/compiler/rustc_target/src/spec/armv7_apple_ios.rs
index 57fd74a36..3259c8547 100644
--- a/compiler/rustc_target/src/spec/armv7_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/armv7_apple_ios.rs
@@ -1,18 +1,21 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{ios_llvm_target, opts, Arch};
use crate::spec::{Target, TargetOptions};
pub fn target() -> Target {
- let llvm_target = super::apple_base::ios_llvm_target("armv7");
-
+ let arch = Arch::Armv7;
Target {
- llvm_target: llvm_target.into(),
+ // Clang automatically chooses a more specific target based on
+ // IPHONEOS_DEPLOYMENT_TARGET.
+ // This is required for the target to pick the right
+ // MACH-O commands, so we do too.
+ llvm_target: ios_llvm_target(arch).into(),
pointer_width: 32,
data_layout: "e-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".into(),
- arch: "arm".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
features: "+v7,+vfp3,+neon".into(),
max_atomic_width: Some(64),
- ..opts("ios", Arch::Armv7)
+ ..opts("ios", arch)
},
}
}
diff --git a/compiler/rustc_target/src/spec/armv7k_apple_watchos.rs b/compiler/rustc_target/src/spec/armv7k_apple_watchos.rs
index af5d1c2ff..6e1d00d1f 100644
--- a/compiler/rustc_target/src/spec/armv7k_apple_watchos.rs
+++ b/compiler/rustc_target/src/spec/armv7k_apple_watchos.rs
@@ -1,17 +1,19 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{opts, Arch};
use crate::spec::{Target, TargetOptions};
pub fn target() -> Target {
- let base = opts("watchos", Arch::Armv7k);
+ let arch = Arch::Armv7k;
Target {
llvm_target: "armv7k-apple-watchos".into(),
pointer_width: 32,
data_layout: "e-m:o-p:32:32-Fi8-i64:64-a:0:32-n32-S128".into(),
- arch: "arm".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
features: "+v7,+vfp4,+neon".into(),
max_atomic_width: Some(64),
forces_embed_bitcode: true,
+ dynamic_linking: false,
+ position_independent_executables: true,
// These arguments are not actually invoked - they just have
// to look right to pass App Store validation.
bitcode_llvm_cmdline: "-triple\0\
@@ -22,7 +24,7 @@ pub fn target() -> Target {
darwinpcs\0\
-Os\0"
.into(),
- ..base
+ ..opts("watchos", arch)
},
}
}
diff --git a/compiler/rustc_target/src/spec/armv7s_apple_ios.rs b/compiler/rustc_target/src/spec/armv7s_apple_ios.rs
index cc17265b2..be4bc6758 100644
--- a/compiler/rustc_target/src/spec/armv7s_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/armv7s_apple_ios.rs
@@ -1,16 +1,17 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{opts, Arch};
use crate::spec::{Target, TargetOptions};
pub fn target() -> Target {
+ let arch = Arch::Armv7s;
Target {
llvm_target: "armv7s-apple-ios".into(),
pointer_width: 32,
data_layout: "e-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".into(),
- arch: "arm".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
features: "+v7,+vfp4,+neon".into(),
max_atomic_width: Some(64),
- ..opts("ios", Arch::Armv7s)
+ ..opts("ios", arch)
},
}
}
diff --git a/compiler/rustc_target/src/spec/i386_apple_ios.rs b/compiler/rustc_target/src/spec/i386_apple_ios.rs
index b85214a9c..581998161 100644
--- a/compiler/rustc_target/src/spec/i386_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/i386_apple_ios.rs
@@ -1,21 +1,23 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{ios_sim_llvm_target, opts, Arch};
use crate::spec::{StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
- let base = opts("ios", Arch::I386);
- let llvm_target = super::apple_base::ios_sim_llvm_target("i386");
-
+ let arch = Arch::I386;
Target {
- llvm_target: llvm_target.into(),
+ // Clang automatically chooses a more specific target based on
+ // IPHONEOS_DEPLOYMENT_TARGET.
+ // This is required for the target to pick the right
+ // MACH-O commands, so we do too.
+ llvm_target: ios_sim_llvm_target(arch).into(),
pointer_width: 32,
data_layout: "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
f64:32:64-f80:128-n8:16:32-S128"
.into(),
- arch: "x86".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
max_atomic_width: Some(64),
stack_probes: StackProbeType::X86,
- ..base
+ ..opts("ios", arch)
},
}
}
diff --git a/compiler/rustc_target/src/spec/i686_apple_darwin.rs b/compiler/rustc_target/src/spec/i686_apple_darwin.rs
index 15607c12e..ad22467ba 100644
--- a/compiler/rustc_target/src/spec/i686_apple_darwin.rs
+++ b/compiler/rustc_target/src/spec/i686_apple_darwin.rs
@@ -1,28 +1,27 @@
+use super::apple_base::{macos_llvm_target, opts, Arch};
use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
- // ld64 only understand i386 and not i686
- let mut base = super::apple_base::opts("macos", "i386", "");
- base.cpu = "yonah".into();
+ // ld64 only understands i386 and not i686
+ let arch = Arch::I386;
+ let mut base = opts("macos", arch);
base.max_atomic_width = Some(64);
base.add_pre_link_args(LinkerFlavor::Darwin(Cc::Yes, Lld::No), &["-m32"]);
- base.link_env_remove.to_mut().extend(super::apple_base::macos_link_env_remove());
base.stack_probes = StackProbeType::X86;
base.frame_pointer = FramePointer::Always;
- // Clang automatically chooses a more specific target based on
- // MACOSX_DEPLOYMENT_TARGET. To enable cross-language LTO to work
- // correctly, we do too.
- let arch = "i686";
- let llvm_target = super::apple_base::macos_llvm_target(&arch);
-
Target {
- llvm_target: llvm_target.into(),
+ // Clang automatically chooses a more specific target based on
+ // MACOSX_DEPLOYMENT_TARGET. To enable cross-language LTO to work
+ // correctly, we do too.
+ //
+ // While ld64 doesn't understand i686, LLVM does.
+ llvm_target: macos_llvm_target(Arch::I686).into(),
pointer_width: 32,
data_layout: "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
f64:32:64-f80:128-n8:16:32-S128"
.into(),
- arch: "x86".into(),
+ arch: arch.target_arch(),
options: TargetOptions { mcount: "\u{1}mcount".into(), ..base },
}
}
diff --git a/compiler/rustc_target/src/spec/linux_kernel_base.rs b/compiler/rustc_target/src/spec/linux_kernel_base.rs
deleted file mode 100644
index f41533a95..000000000
--- a/compiler/rustc_target/src/spec/linux_kernel_base.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-use crate::spec::TargetOptions;
-use crate::spec::{FramePointer, PanicStrategy, RelocModel, RelroLevel, StackProbeType};
-
-pub fn opts() -> TargetOptions {
- TargetOptions {
- env: "gnu".into(),
- disable_redzone: true,
- panic_strategy: PanicStrategy::Abort,
- stack_probes: StackProbeType::X86,
- frame_pointer: FramePointer::Always,
- position_independent_executables: true,
- needs_plt: true,
- relro_level: RelroLevel::Full,
- relocation_model: RelocModel::Static,
-
- ..Default::default()
- }
-}
diff --git a/compiler/rustc_target/src/spec/mipsel_sony_psx.rs b/compiler/rustc_target/src/spec/mipsel_sony_psx.rs
new file mode 100644
index 000000000..12a66efdd
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mipsel_sony_psx.rs
@@ -0,0 +1,37 @@
+use crate::spec::{cvs, Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "mipsel-sony-psx".into(),
+ pointer_width: 32,
+ data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".into(),
+ arch: "mips".into(),
+
+ options: TargetOptions {
+ os: "none".into(),
+ env: "psx".into(),
+ vendor: "sony".into(),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
+ cpu: "mips1".into(),
+ executables: true,
+ linker: Some("rust-lld".into()),
+ relocation_model: RelocModel::Static,
+ exe_suffix: ".exe".into(),
+
+ // PSX doesn't natively support floats.
+ features: "+soft-float".into(),
+
+ // This should be 16 bits, but LLVM incorrectly tries emitting MIPS-II SYNC instructions
+ // for atomic loads and stores. This crashes rustc so we have to disable the Atomic* API
+ // until this is fixed upstream. See https://reviews.llvm.org/D122427#3420144 for more
+ // info.
+ max_atomic_width: Some(0),
+
+ // PSX does not support trap-on-condition instructions.
+ llvm_args: cvs!["-mno-check-zero-division"],
+ llvm_abiname: "o32".into(),
+ panic_strategy: PanicStrategy::Abort,
+ ..Default::default()
+ },
+ }
+}
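
As a quick illustration of the two workarounds described in the comments above (software floats and disabled atomics), a hypothetical test over the generated spec, placed somewhere under `spec` like the existing target tests:

    #[test]
    fn psx_spec_sketch() {
        let target = crate::spec::mipsel_sony_psx::target();
        // No hardware FPU, and the Atomic* API is disabled until the LLVM bug is fixed.
        assert!(target.features.contains("+soft-float"));
        assert_eq!(target.max_atomic_width, Some(0));
    }
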
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index 8909cf33a..d05b8aa42 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -34,7 +34,8 @@
//! the target's settings, though `target-feature` and `link-args` will *add*
//! to the list specified by the target, rather than replace.
-use crate::abi::Endian;
+use crate::abi::call::Conv;
+use crate::abi::{Endian, Integer, Size, TargetDataLayout, TargetDataLayoutErrors};
use crate::json::{Json, ToJson};
use crate::spec::abi::{lookup as lookup_abi, Abi};
use crate::spec::crt_objects::{CrtObjects, LinkSelfContainedDefault};
@@ -57,9 +58,9 @@ use rustc_macros::HashStable_Generic;
pub mod abi;
pub mod crt_objects;
+mod aix_base;
mod android_base;
mod apple_base;
-mod apple_sdk_base;
mod avr_gnu_base;
mod bpf_base;
mod dragonfly_base;
@@ -71,11 +72,11 @@ mod illumos_base;
mod l4re_base;
mod linux_base;
mod linux_gnu_base;
-mod linux_kernel_base;
mod linux_musl_base;
mod linux_uclibc_base;
mod msvc_base;
mod netbsd_base;
+mod nto_qnx_base;
mod openbsd_base;
mod redox_base;
mod solaris_base;
@@ -115,7 +116,7 @@ pub enum Lld {
/// relevant now.
///
/// The second goal is to keep the number of flavors to the minimum if possible.
-/// LLD somewhat forces our hand here because that linker is self-sufficent only if its executable
+/// LLD somewhat forces our hand here because that linker is self-sufficient only if its executable
/// (`argv[0]`) is named in specific way, otherwise it doesn't work and requires a
/// `-flavor LLD_FLAVOR` argument to choose which logic to use. Our shipped `rust-lld` in
/// particular is not named in such specific way, so it needs the flavor option, so we make our
@@ -1003,7 +1004,7 @@ macro_rules! supported_targets {
$(
#[test] // `#[test]`
fn $module() {
- tests_impl::test_target(super::$module::target(), $triple);
+ tests_impl::test_target(super::$module::target());
}
)+
}
@@ -1027,6 +1028,7 @@ supported_targets! {
("powerpc-unknown-linux-gnu", powerpc_unknown_linux_gnu),
("powerpc-unknown-linux-gnuspe", powerpc_unknown_linux_gnuspe),
("powerpc-unknown-linux-musl", powerpc_unknown_linux_musl),
+ ("powerpc64-ibm-aix", powerpc64_ibm_aix),
("powerpc64-unknown-linux-gnu", powerpc64_unknown_linux_gnu),
("powerpc64-unknown-linux-musl", powerpc64_unknown_linux_musl),
("powerpc64le-unknown-linux-gnu", powerpc64le_unknown_linux_gnu),
@@ -1071,8 +1073,6 @@ supported_targets! {
("thumbv7neon-linux-androideabi", thumbv7neon_linux_androideabi),
("aarch64-linux-android", aarch64_linux_android),
- ("x86_64-unknown-none-linuxkernel", x86_64_unknown_none_linuxkernel),
-
("aarch64-unknown-freebsd", aarch64_unknown_freebsd),
("armv6-unknown-freebsd", armv6_unknown_freebsd),
("armv7-unknown-freebsd", armv7_unknown_freebsd),
@@ -1222,6 +1222,7 @@ supported_targets! {
("armv7a-kmc-solid_asp3-eabihf", armv7a_kmc_solid_asp3_eabihf),
("mipsel-sony-psp", mipsel_sony_psp),
+ ("mipsel-sony-psx", mipsel_sony_psx),
("mipsel-unknown-none", mipsel_unknown_none),
("thumbv4t-none-eabi", thumbv4t_none_eabi),
("armv4t-none-eabi", armv4t_none_eabi),
@@ -1245,6 +1246,9 @@ supported_targets! {
("x86_64-unknown-none", x86_64_unknown_none),
("mips64-openwrt-linux-musl", mips64_openwrt_linux_musl),
+
+ ("aarch64-unknown-nto-qnx710", aarch64_unknown_nto_qnx_710),
+ ("x86_64-pc-nto-qnx710", x86_64_pc_nto_qnx710),
}
/// Cow-Vec-Str: Cow<'static, [Cow<'static, str>]>
@@ -1313,6 +1317,35 @@ pub struct Target {
pub options: TargetOptions,
}
+impl Target {
+ pub fn parse_data_layout<'a>(&'a self) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
+ let mut dl = TargetDataLayout::parse_from_llvm_datalayout_string(&self.data_layout)?;
+
+ // Perform consistency checks against the Target information.
+ if dl.endian != self.endian {
+ return Err(TargetDataLayoutErrors::InconsistentTargetArchitecture {
+ dl: dl.endian.as_str(),
+ target: self.endian.as_str(),
+ });
+ }
+
+ let target_pointer_width: u64 = self.pointer_width.into();
+ if dl.pointer_size.bits() != target_pointer_width {
+ return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth {
+ pointer_size: dl.pointer_size.bits(),
+ target: self.pointer_width,
+ });
+ }
+
+ dl.c_enum_min_size = match Integer::from_size(Size::from_bits(self.c_enum_min_bits)) {
+ Ok(bits) => bits,
+ Err(err) => return Err(TargetDataLayoutErrors::InvalidBitsSize { err }),
+ };
+
+ Ok(dl)
+ }
+}
+
pub trait HasTargetSpec {
fn target_spec(&self) -> &Target;
}
@@ -1452,6 +1485,9 @@ pub struct TargetOptions {
pub families: StaticCow<[StaticCow<str>]>,
/// Whether the target toolchain's ABI supports returning small structs as an integer.
pub abi_return_struct_as_int: bool,
+ /// Whether the target toolchain is like AIX's. Linker options on AIX are special and it uses
+ /// XCOFF as its binary format. Defaults to false.
+ pub is_like_aix: bool,
/// Whether the target toolchain is like macOS's. Only useful for compiling against iOS/macOS,
/// in particular running dsymutil and some other stuff like `-dead_strip`. Defaults to false.
/// Also indicates whether to use Apple-specific ABI changes, such as extending function
@@ -1526,9 +1562,9 @@ pub struct TargetOptions {
/// Flag indicating whether #[thread_local] is available for this target.
pub has_thread_local: bool,
- // This is mainly for easy compatibility with emscripten.
- // If we give emcc .o files that are actually .bc files it
- // will 'just work'.
+ /// This is mainly for easy compatibility with emscripten.
+ /// If we give emcc .o files that are actually .bc files it
+ /// will 'just work'.
pub obj_is_bitcode: bool,
/// Whether the target requires that emitted object code includes bitcode.
pub forces_embed_bitcode: bool,
@@ -1667,6 +1703,14 @@ pub struct TargetOptions {
/// Whether the target supports stack canary checks. `true` by default,
/// since this is most common among tier 1 and tier 2 targets.
pub supports_stack_protector: bool,
+
+ /// The name of the entry function.
+ /// Default value is "main".
+ pub entry_name: StaticCow<str>,
+
+ /// The ABI of the entry function.
+ /// Default value is `Conv::C`, i.e. the C calling convention.
+ pub entry_abi: Conv,
}
/// Add arguments for the given flavor and also for its "twin" flavors
@@ -1807,6 +1851,7 @@ impl Default for TargetOptions {
staticlib_suffix: ".a".into(),
families: cvs![],
abi_return_struct_as_int: false,
+ is_like_aix: false,
is_like_osx: false,
is_like_solaris: false,
is_like_windows: false,
@@ -1883,6 +1928,8 @@ impl Default for TargetOptions {
c_enum_min_bits: 32,
generate_arange_section: true,
supports_stack_protector: true,
+ entry_name: "main".into(),
+ entry_abi: Conv::C,
}
}
}
@@ -1914,6 +1961,7 @@ impl Target {
Abi::Stdcall { unwind }
}
Abi::System { unwind } => Abi::C { unwind },
+ Abi::EfiApi if self.arch == "arm" => Abi::Aapcs { unwind: false },
Abi::EfiApi if self.arch == "x86_64" => Abi::Win64 { unwind: false },
Abi::EfiApi => Abi::C { unwind: false },
@@ -1940,8 +1988,10 @@ impl Target {
| PlatformIntrinsic
| Unadjusted
| Cdecl { .. }
- | EfiApi
| RustCold => true,
+ EfiApi => {
+ ["arm", "aarch64", "riscv32", "riscv64", "x86", "x86_64"].contains(&&self.arch[..])
+ }
X86Interrupt => ["x86", "x86_64"].contains(&&self.arch[..]),
Aapcs { .. } => "arm" == self.arch,
CCmseNonSecureCall => ["arm", "aarch64"].contains(&&self.arch[..]),
@@ -2400,6 +2450,18 @@ impl Target {
}
}
} );
+ ($key_name:ident, Conv) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ match Conv::from_str(s) {
+ Ok(c) => {
+ base.$key_name = c;
+ Some(Ok(()))
+ }
+ Err(e) => Some(Err(e))
+ }
+ })).unwrap_or(Ok(()))
+ } );
}
if let Some(j) = obj.remove("target-endian") {
@@ -2461,6 +2523,7 @@ impl Target {
key!(staticlib_suffix);
key!(families, TargetFamilies);
key!(abi_return_struct_as_int, bool);
+ key!(is_like_aix, bool);
key!(is_like_osx, bool);
key!(is_like_solaris, bool);
key!(is_like_windows, bool);
@@ -2519,6 +2582,8 @@ impl Target {
key!(c_enum_min_bits, u64);
key!(generate_arange_section, bool);
key!(supports_stack_protector, bool);
+ key!(entry_name);
+ key!(entry_abi, Conv)?;
if base.is_builtin {
// This can cause unfortunate ICEs later down the line.
@@ -2593,7 +2658,7 @@ impl Target {
// Additionally look in the sysroot under `lib/rustlib/<triple>/target.json`
// as a fallback.
- let rustlib_path = crate::target_rustlib_path(&sysroot, &target_triple);
+ let rustlib_path = crate::target_rustlib_path(sysroot, target_triple);
let p = PathBuf::from_iter([
Path::new(sysroot),
Path::new(&rustlib_path),
@@ -2712,6 +2777,7 @@ impl ToJson for Target {
target_option_val!(staticlib_suffix);
target_option_val!(families, "target-family");
target_option_val!(abi_return_struct_as_int);
+ target_option_val!(is_like_aix);
target_option_val!(is_like_osx);
target_option_val!(is_like_solaris);
target_option_val!(is_like_windows);
@@ -2769,6 +2835,8 @@ impl ToJson for Target {
target_option_val!(c_enum_min_bits);
target_option_val!(generate_arange_section);
target_option_val!(supports_stack_protector);
+ target_option_val!(entry_name);
+ target_option_val!(entry_abi);
if let Some(abi) = self.default_adjusted_cabi {
d.insert("default-adjusted-cabi".into(), Abi::name(abi).to_json());
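
A small sketch of the new `parse_data_layout` consistency checks in action, using one of the built-in targets added in this series as an example (illustrative, not an existing test). Custom target JSON can now also set the matching `entry-name` and `entry-abi` keys handled by the `key!` macro above:

    fn validate_aix_layout() {
        let target = crate::spec::powerpc64_ibm_aix::target();
        match target.parse_data_layout() {
            Ok(dl) => {
                // parse_data_layout already enforces these; shown here for clarity.
                assert!(dl.endian == target.endian);
                assert!(dl.pointer_size.bits() == u64::from(target.pointer_width));
            }
            Err(_) => panic!("powerpc64-ibm-aix data layout should be self-consistent"),
        }
    }
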
diff --git a/compiler/rustc_target/src/spec/nto_qnx_base.rs b/compiler/rustc_target/src/spec/nto_qnx_base.rs
new file mode 100644
index 000000000..6fb581ef5
--- /dev/null
+++ b/compiler/rustc_target/src/spec/nto_qnx_base.rs
@@ -0,0 +1,19 @@
+use crate::spec::{cvs, RelroLevel, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ crt_static_respected: true,
+ dynamic_linking: true,
+ env: "nto71".into(),
+ executables: true,
+ families: cvs!["unix"],
+ has_rpath: true,
+ has_thread_local: false,
+ linker: Some("qcc".into()),
+ os: "nto".into(),
+ position_independent_executables: true,
+ static_position_independent_executables: true,
+ relro_level: RelroLevel::Full,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc64_ibm_aix.rs b/compiler/rustc_target/src/spec/powerpc64_ibm_aix.rs
new file mode 100644
index 000000000..e3eb9bccd
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc64_ibm_aix.rs
@@ -0,0 +1,23 @@
+use crate::spec::{Cc, LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut base = super::aix_base::opts();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(
+ LinkerFlavor::Unix(Cc::No),
+ &[
+ "-b64".into(),
+ "-bpT:0x100000000".into(),
+ "-bpD:0x110000000".into(),
+ "-bcdtors:all:0:s".into(),
+ ],
+ );
+
+ Target {
+ llvm_target: "powerpc64-ibm-aix".into(),
+ pointer_width: 64,
+ data_layout: "E-m:a-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
+ arch: "powerpc64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_freebsd.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_freebsd.rs
index 0539eca6c..8281bac10 100644
--- a/compiler/rustc_target/src/spec/riscv64gc_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_freebsd.rs
@@ -4,7 +4,7 @@ pub fn target() -> Target {
Target {
llvm_target: "riscv64-unknown-freebsd".into(),
pointer_width: 64,
- data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".into(),
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128".into(),
arch: "riscv64".into(),
options: TargetOptions {
code_model: Some(CodeModel::Medium),
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs
index 7d1bf228c..90dccb280 100644
--- a/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs
@@ -4,7 +4,7 @@ pub fn target() -> Target {
Target {
llvm_target: "riscv64-unknown-linux-gnu".into(),
pointer_width: 64,
- data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".into(),
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128".into(),
arch: "riscv64".into(),
options: TargetOptions {
code_model: Some(CodeModel::Medium),
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_musl.rs
index f04f8a48b..1a56c78e6 100644
--- a/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_musl.rs
@@ -4,7 +4,7 @@ pub fn target() -> Target {
Target {
llvm_target: "riscv64-unknown-linux-musl".into(),
pointer_width: 64,
- data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".into(),
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128".into(),
arch: "riscv64".into(),
options: TargetOptions {
code_model: Some(CodeModel::Medium),
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
index 67806d578..409b0b269 100644
--- a/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
@@ -3,7 +3,7 @@ use crate::spec::{RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
- data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".into(),
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128".into(),
llvm_target: "riscv64".into(),
pointer_width: 64,
arch: "riscv64".into(),
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_openbsd.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_openbsd.rs
index cd10f3afa..ade9d7762 100644
--- a/compiler/rustc_target/src/spec/riscv64gc_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_openbsd.rs
@@ -4,7 +4,7 @@ pub fn target() -> Target {
Target {
llvm_target: "riscv64-unknown-openbsd".into(),
pointer_width: 64,
- data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".into(),
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128".into(),
arch: "riscv64".into(),
options: TargetOptions {
code_model: Some(CodeModel::Medium),
diff --git a/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs
index f371e09be..87aba9171 100644
--- a/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs
@@ -3,7 +3,7 @@ use crate::spec::{RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
- data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".into(),
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128".into(),
llvm_target: "riscv64".into(),
pointer_width: 64,
arch: "riscv64".into(),
diff --git a/compiler/rustc_target/src/spec/tests/tests_impl.rs b/compiler/rustc_target/src/spec/tests/tests_impl.rs
index 172da0ed5..e0ecf8037 100644
--- a/compiler/rustc_target/src/spec/tests/tests_impl.rs
+++ b/compiler/rustc_target/src/spec/tests/tests_impl.rs
@@ -2,15 +2,15 @@ use super::super::*;
use std::assert_matches::assert_matches;
// Test target self-consistency and JSON encoding/decoding roundtrip.
-pub(super) fn test_target(mut target: Target, triple: &str) {
+pub(super) fn test_target(mut target: Target) {
let recycled_target = Target::from_json(target.to_json()).map(|(j, _)| j);
target.update_to_cli();
- target.check_consistency(triple);
+ target.check_consistency();
assert_eq!(recycled_target, Ok(target));
}
impl Target {
- fn check_consistency(&self, triple: &str) {
+ fn check_consistency(&self) {
assert_eq!(self.is_like_osx, self.vendor == "apple");
assert_eq!(self.is_like_solaris, self.os == "solaris" || self.os == "illumos");
assert_eq!(self.is_like_windows, self.os == "windows" || self.os == "uefi");
@@ -129,8 +129,7 @@ impl Target {
if self.dynamic_linking && !(self.is_like_wasm && self.os != "emscripten") {
assert_eq!(self.relocation_model, RelocModel::Pic);
}
- // PIEs are supported but not enabled by default with linuxkernel target.
- if self.position_independent_executables && !triple.ends_with("-linuxkernel") {
+ if self.position_independent_executables {
assert_eq!(self.relocation_model, RelocModel::Pic);
}
// The UEFI targets do not support dynamic linking but still require PIC (#101377).
diff --git a/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs b/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs
index 8dad941b5..06529c2e4 100644
--- a/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs
+++ b/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs
@@ -33,12 +33,6 @@ pub fn target() -> Target {
// For now this target just never has an entry symbol no matter the output
// type, so unconditionally pass this.
"--no-entry",
- // Rust really needs a way for users to specify exports and imports in
- // the source code. --export-dynamic isn't the right tool for this job,
- // however it does have the side effect of automatically exporting a lot
- // of symbols, which approximates what people want when compiling for
- // wasm32-unknown-unknown expect, so use it for now.
- "--export-dynamic",
],
);
options.add_pre_link_args(
@@ -48,7 +42,6 @@ pub fn target() -> Target {
// otherwise
"--target=wasm32-unknown-unknown",
"-Wl,--no-entry",
- "-Wl,--export-dynamic",
],
);
diff --git a/compiler/rustc_target/src/spec/wasm32_wasi.rs b/compiler/rustc_target/src/spec/wasm32_wasi.rs
index 93a956403..a0476d542 100644
--- a/compiler/rustc_target/src/spec/wasm32_wasi.rs
+++ b/compiler/rustc_target/src/spec/wasm32_wasi.rs
@@ -72,7 +72,8 @@
//! best we can with this target. Don't start relying on too much here unless
//! you know what you're getting in to!
-use super::{crt_objects, wasm_base, Cc, LinkerFlavor, Target};
+use super::crt_objects::{self, LinkSelfContainedDefault};
+use super::{wasm_base, Cc, LinkerFlavor, Target};
pub fn target() -> Target {
let mut options = wasm_base::options();
@@ -83,6 +84,9 @@ pub fn target() -> Target {
options.pre_link_objects_self_contained = crt_objects::pre_wasi_self_contained();
options.post_link_objects_self_contained = crt_objects::post_wasi_self_contained();
+ // FIXME: Figure out cases in which WASM needs to link with a native toolchain.
+ options.link_self_contained = LinkSelfContainedDefault::True;
+
// Right now this is a bit of a workaround but we're currently saying that
// the target by default has a static crt which we're taking as a signal
// for "use the bundled crt". If that's turned off then the system's crt
@@ -100,6 +104,10 @@ pub fn target() -> Target {
// `args::args()` makes the WASI API calls itself.
options.main_needs_argc_argv = false;
+ // And, WASI mangles the name of "main" to distinguish between different
+ // signatures.
+ options.entry_name = "__main_void".into();
+
Target {
llvm_target: "wasm32-wasi".into(),
pointer_width: 32,
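
A hedged check of the two behavioural points described above (the WASI entry symbol and argc/argv handling), written as a hypothetical test rather than one present in the tree:

    #[test]
    fn wasi_entry_sketch() {
        let target = crate::spec::wasm32_wasi::target();
        // WASI distinguishes `main` signatures; the no-argument entry is `__main_void`.
        assert_eq!(target.entry_name, "__main_void");
        // `args::args()` fetches argc/argv through the WASI API itself.
        assert!(!target.main_needs_argc_argv);
    }
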
diff --git a/compiler/rustc_target/src/spec/wasm_base.rs b/compiler/rustc_target/src/spec/wasm_base.rs
index 528a84a8b..625d3b37c 100644
--- a/compiler/rustc_target/src/spec/wasm_base.rs
+++ b/compiler/rustc_target/src/spec/wasm_base.rs
@@ -1,4 +1,3 @@
-use super::crt_objects::LinkSelfContainedDefault;
use super::{cvs, Cc, LinkerFlavor, PanicStrategy, RelocModel, TargetOptions, TlsModel};
pub fn options() -> TargetOptions {
@@ -95,9 +94,6 @@ pub fn options() -> TargetOptions {
pre_link_args,
- // FIXME: Figure out cases in which WASM needs to link with a native toolchain.
- link_self_contained: LinkSelfContainedDefault::True,
-
// This has no effect in LLVM 8 or prior, but in LLVM 9 and later when
// PIC code is implemented this has quite a drastic effect if it stays
// at the default, `pic`. In an effort to keep wasm binaries as minimal
diff --git a/compiler/rustc_target/src/spec/windows_gnullvm_base.rs b/compiler/rustc_target/src/spec/windows_gnullvm_base.rs
index 58210c75a..cada28652 100644
--- a/compiler/rustc_target/src/spec/windows_gnullvm_base.rs
+++ b/compiler/rustc_target/src/spec/windows_gnullvm_base.rs
@@ -1,4 +1,5 @@
-use crate::spec::{cvs, Cc, LinkerFlavor, Lld, TargetOptions};
+use crate::spec::{cvs, Cc, DebuginfoKind, LinkerFlavor, Lld, SplitDebuginfo, TargetOptions};
+use std::borrow::Cow;
pub fn opts() -> TargetOptions {
// We cannot use `-nodefaultlibs` because compiler-rt has to be passed
@@ -36,7 +37,10 @@ pub fn opts() -> TargetOptions {
eh_frame_header: false,
no_default_libraries: false,
has_thread_local: true,
-
+ // FIXME(davidtwco): Support Split DWARF on Windows GNU - may require LLVM changes to
+ // output DWO; despite using DWARF, Windows GNU doesn't use ELF.
+ debuginfo_kind: DebuginfoKind::Pdb,
+ supported_split_debuginfo: Cow::Borrowed(&[SplitDebuginfo::Off]),
..Default::default()
}
}
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
index 087be1b95..9a3e7a805 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
@@ -1,29 +1,26 @@
+use super::apple_base::{macos_llvm_target, opts, Arch};
use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, SanitizerSet};
use crate::spec::{StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
- let arch = "x86_64";
- let mut base = super::apple_base::opts("macos", arch, "");
- base.cpu = "core2".into();
- base.max_atomic_width = Some(128); // core2 support cmpxchg16b
+ let arch = Arch::X86_64;
+ let mut base = opts("macos", arch);
+ base.max_atomic_width = Some(128); // core2 supports cmpxchg16b
base.frame_pointer = FramePointer::Always;
base.add_pre_link_args(LinkerFlavor::Darwin(Cc::Yes, Lld::No), &["-m64"]);
- base.link_env_remove.to_mut().extend(super::apple_base::macos_link_env_remove());
base.stack_probes = StackProbeType::X86;
base.supported_sanitizers =
SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::LEAK | SanitizerSet::THREAD;
- // Clang automatically chooses a more specific target based on
- // MACOSX_DEPLOYMENT_TARGET. To enable cross-language LTO to work
- // correctly, we do too.
- let llvm_target = super::apple_base::macos_llvm_target(&arch);
-
Target {
- llvm_target: llvm_target.into(),
+ // Clang automatically chooses a more specific target based on
+ // MACOSX_DEPLOYMENT_TARGET. To enable cross-language LTO to work
+ // correctly, we do too.
+ llvm_target: macos_llvm_target(arch).into(),
pointer_width: 64,
data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
.into(),
- arch: arch.into(),
+ arch: arch.target_arch(),
options: TargetOptions { mcount: "\u{1}mcount".into(), ..base },
}
}
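These Apple specs now share an `Arch` enum and triple builders in apple_base. That module is outside this hunk, so the following is a hypothetical reconstruction of the pattern: the names `Arch`, `target_arch`, and `macos_llvm_target` come from the diff, while everything else, including the deployment version, is an assumption.

#[allow(non_camel_case_types, dead_code)]
#[derive(Clone, Copy)]
enum Arch {
    Arm64,
    X86_64,
    X86_64_sim,
}

impl Arch {
    // One shared mapping from the Apple arch to rustc's generic arch string,
    // instead of each target file repeating `"x86_64".into()`.
    fn target_arch(self) -> &'static str {
        match self {
            Arch::Arm64 => "aarch64",
            Arch::X86_64 | Arch::X86_64_sim => "x86_64",
        }
    }

    // Apple's triples spell the 64-bit ARM arch "arm64", not "aarch64".
    fn llvm_arch(self) -> &'static str {
        match self {
            Arch::Arm64 => "arm64",
            Arch::X86_64 | Arch::X86_64_sim => "x86_64",
        }
    }
}

// Clang picks a versioned triple from MACOSX_DEPLOYMENT_TARGET; building the
// same triple here keeps cross-language LTO consistent. The version below is
// a placeholder, not the compiler's real lookup.
fn macos_llvm_target(arch: Arch) -> String {
    let (major, minor) = (10, 12);
    format!("{}-apple-macosx{}.{}.0", arch.llvm_arch(), major, minor)
}

fn main() {
    assert_eq!(macos_llvm_target(Arch::X86_64), "x86_64-apple-macosx10.12.0");
}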
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_ios.rs b/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
index e6143025d..fbd3ebd4d 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
@@ -1,20 +1,18 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{ios_sim_llvm_target, opts, Arch};
use crate::spec::{StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
- let base = opts("ios", Arch::X86_64);
- let llvm_target = super::apple_base::ios_sim_llvm_target("x86_64");
-
+ let arch = Arch::X86_64_sim;
Target {
- llvm_target: llvm_target.into(),
+ llvm_target: ios_sim_llvm_target(arch).into(),
pointer_width: 64,
data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
.into(),
- arch: "x86_64".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
max_atomic_width: Some(64),
stack_probes: StackProbeType::X86,
- ..base
+ ..opts("ios", arch)
},
}
}
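Note the arch also changes from `X86_64` to `X86_64_sim`. A hypothetical sketch of what that distinction buys (a `-simulator` suffix on the versioned LLVM triple; the iOS version string is a placeholder):

#[allow(non_camel_case_types, dead_code)]
#[derive(Clone, Copy)]
enum Arch {
    X86_64,
    X86_64_sim,
}

fn ios_sim_llvm_target(arch: Arch) -> String {
    // Both variants lower to the same architecture component.
    let base = "x86_64-apple-ios10.0.0";
    match arch {
        Arch::X86_64 => base.to_string(),
        Arch::X86_64_sim => format!("{base}-simulator"),
    }
}

fn main() {
    assert!(ios_sim_llvm_target(Arch::X86_64_sim).ends_with("-simulator"));
}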
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs b/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
index 13259205a..0f3f85199 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
@@ -1,10 +1,11 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{opts, Arch};
use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let llvm_target = "x86_64-apple-ios13.0-macabi";
- let mut base = opts("ios", Arch::X86_64_macabi);
+ let arch = Arch::X86_64_macabi;
+ let mut base = opts("ios", arch);
base.add_pre_link_args(LinkerFlavor::Darwin(Cc::Yes, Lld::No), &["-target", llvm_target]);
Target {
@@ -12,7 +13,7 @@ pub fn target() -> Target {
pointer_width: 64,
data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
.into(),
- arch: "x86_64".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
max_atomic_width: Some(64),
stack_probes: StackProbeType::X86,
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs b/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
index 3d54da086..550ce0b9c 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
@@ -1,17 +1,17 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{opts, Arch};
use crate::spec::{StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
- let base = opts("tvos", Arch::X86_64);
+ let arch = Arch::X86_64_sim;
Target {
llvm_target: "x86_64-apple-tvos".into(),
pointer_width: 64,
data_layout: "e-m:o-i64:64-f80:128-n8:16:32:64-S128".into(),
- arch: "x86_64".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
max_atomic_width: Some(64),
stack_probes: StackProbeType::X86,
- ..base
+ ..opts("tvos", arch)
},
}
}
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs b/compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs
index e499b1985..75ce02cba 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs
@@ -1,18 +1,14 @@
-use super::apple_sdk_base::{opts, Arch};
+use super::apple_base::{opts, watchos_sim_llvm_target, Arch};
use crate::spec::{StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
- let base = opts("watchos", Arch::X86_64);
-
- let arch = "x86_64";
- let llvm_target = super::apple_base::watchos_sim_llvm_target(arch);
-
+ let arch = Arch::X86_64_sim;
Target {
- llvm_target: llvm_target.into(),
+ llvm_target: watchos_sim_llvm_target(arch).into(),
pointer_width: 64,
data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
.into(),
- arch: "x86_64".into(),
+ arch: arch.target_arch(),
options: TargetOptions {
max_atomic_width: Some(64),
stack_probes: StackProbeType::X86,
@@ -28,7 +24,7 @@ pub fn target() -> Target {
darwinpcs\0\
-Os\0"
.into(),
- ..base
+ ..opts("watchos", arch)
},
}
}
diff --git a/compiler/rustc_target/src/spec/x86_64_pc_nto_qnx710.rs b/compiler/rustc_target/src/spec/x86_64_pc_nto_qnx710.rs
new file mode 100644
index 000000000..e9b3acee2
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_pc_nto_qnx710.rs
@@ -0,0 +1,21 @@
+use super::nto_qnx_base;
+use crate::spec::{Cc, LinkerFlavor, Lld, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "x86_64-pc-unknown".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: TargetOptions {
+ cpu: "x86-64".into(),
+ max_atomic_width: Some(64),
+ pre_link_args: TargetOptions::link_args(
+ LinkerFlavor::Gnu(Cc::Yes, Lld::No),
+ &["-Vgcc_ntox86_64_cxx"],
+ ),
+ ..nto_qnx_base::opts()
+ },
+ }
+}
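This new QNX Neutrino spec mostly delegates to nto_qnx_base (not shown here) and adds a qcc toolchain-selection flag via the `link_args` helper. As a hedged illustration of what that helper does, with simplified types rather than the real rustc_target signatures:

// Rough sketch of the `TargetOptions::link_args` helper used above; the real
// map is keyed by LinkerFlavor and stores Cow<'static, str> values, so the
// types here are simplified assumptions.
use std::collections::BTreeMap;

#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum LinkerFlavor {
    Gnu,
    Darwin,
}

type LinkArgs = BTreeMap<LinkerFlavor, Vec<String>>;

fn link_args(flavor: LinkerFlavor, args: &[&str]) -> LinkArgs {
    let mut map = LinkArgs::new();
    map.insert(flavor, args.iter().map(|a| a.to_string()).collect());
    map
}

fn main() {
    // `-Vgcc_ntox86_64_cxx` tells QNX's qcc driver which toolchain variant to
    // use when it performs the link for this target.
    let pre_link_args = link_args(LinkerFlavor::Gnu, &["-Vgcc_ntox86_64_cxx"]);
    println!("{pre_link_args:?}");
}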
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs b/compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs
deleted file mode 100644
index ebd9636ff..000000000
--- a/compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-// This defines the amd64 target for the Linux Kernel. See the linux-kernel-base module for
-// generic Linux kernel options.
-
-use crate::spec::{Cc, CodeModel, LinkerFlavor, Lld, Target};
-
-pub fn target() -> Target {
- let mut base = super::linux_kernel_base::opts();
- base.cpu = "x86-64".into();
- base.max_atomic_width = Some(64);
- base.features =
- "-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2,+soft-float".into();
- base.code_model = Some(CodeModel::Kernel);
- base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
-
- Target {
- // FIXME: There is some dispute; the linux-on-clang folks think this should use
- // "Linux". We disagree because running *on* Linux is nothing like running
- // *as* Linux, and historically the "os" component has always been used to
- // mean the "on" part.
- llvm_target: "x86_64-unknown-none-elf".into(),
- pointer_width: 64,
- data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
- .into(),
- arch: "x86_64".into(),
-
- options: base,
- }
-}