Diffstat (limited to 'compiler/rustc_codegen_llvm/src/abi.rs')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs  99
1 file changed, 75 insertions(+), 24 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index c6a7dc95d..9e834b83d 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -211,7 +211,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
} else if self.is_unsized_indirect() {
bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
- } else if let PassMode::Cast(cast, _) = &self.mode {
+ } else if let PassMode::Cast { cast, pad_i32: _ } = &self.mode {
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
@@ -274,12 +274,12 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
PassMode::Pair(..) => {
OperandValue::Pair(next(), next()).store(bx, dst);
}
- PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
}
PassMode::Direct(_)
- | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
- | PassMode::Cast(..) => {
+ | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
+ | PassMode::Cast { .. } => {
let next_arg = next();
self.store(bx, next_arg, dst);
}
@@ -332,7 +332,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
let llreturn_ty = match &self.ret.mode {
PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
- PassMode::Cast(cast, _) => cast.llvm_type(cx),
+ PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx),
PassMode::Indirect { .. } => {
llargument_tys.push(cx.type_ptr());
cx.type_void()
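
(A hedged aside, not part of the patch: the return-type selection above typically plays out as follows for concrete Rust signatures; the exact LLVM types depend on the target and on layout.)

// Hypothetical illustrations of the match above (Rust ABI, typical targets):
fn ignored() {}                    // PassMode::Ignore   -> void
fn direct() -> u64 { 0 }           // PassMode::Direct   -> i64
fn pair() -> (u32, u32) { (0, 0) } // PassMode::Pair     -> { i32, i32 }
fn indirect() -> [u8; 1024] {      // PassMode::Indirect -> void, with a
    [0; 1024]                      // pointer to the return slot pushed as
}                                  // the first LLVM argument
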
@@ -340,29 +340,78 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
};
for arg in args {
+ // Note that the exact number of arguments pushed here is carefully synchronized with
+ // code all over the place, both in the codegen_llvm and codegen_ssa crates. That's how
+ // other code then knows which LLVM argument(s) correspond to the n-th Rust argument.
let llarg_ty = match &arg.mode {
PassMode::Ignore => continue,
- PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
+ PassMode::Direct(_) => {
+ // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+ // and for Scalar ABIs the LLVM type is fully determined by `layout.abi`,
+ // guaranteeing that we generate ABI-compatible LLVM IR. Things get tricky for
+ // aggregates...
+ if matches!(arg.layout.abi, abi::Abi::Aggregate { .. }) {
+ assert!(
+ arg.layout.is_sized(),
+ "`PassMode::Direct` for unsized type: {}",
+ arg.layout.ty
+ );
+ // This really shouldn't happen, since `immediate_llvm_type` will use
+ // `layout.fields` to turn this Rust type into an LLVM type. This means all
+ // sorts of Rust type details leak into the ABI. However wasm sadly *does*
+ // currently use this mode so we have to allow it -- but we absolutely
+ // shouldn't let any more targets do that.
+ // (Also see <https://github.com/rust-lang/rust/issues/115666>.)
+ assert!(
+ matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64"),
+ "`PassMode::Direct` for aggregates only allowed on wasm targets\nProblematic type: {:#?}",
+ arg.layout,
+ );
+ }
+ arg.layout.immediate_llvm_type(cx)
+ }
PassMode::Pair(..) => {
+ // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+ // so for ScalarPair we can easily be sure that we are generating ABI-compatible
+ // LLVM IR.
+ assert!(
+ matches!(arg.layout.abi, abi::Abi::ScalarPair(..)),
+ "PassMode::Pair for type {}",
+ arg.layout.ty
+ );
llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
continue;
}
- PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack } => {
+ // `Indirect` with metadata is only for unsized types, and doesn't work with
+ // on-stack passing.
+ assert!(arg.layout.is_unsized() && !on_stack);
+ // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
+ // Any two ABI-compatible unsized types have the same metadata type and
+ // moreover the same metadata value leads to the same dynamic size and
+ // alignment, so this respects ABI compatibility.
let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
let ptr_layout = cx.layout_of(ptr_ty);
llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
continue;
}
- PassMode::Cast(cast, pad_i32) => {
+ PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
+ assert!(arg.layout.is_sized());
+ cx.type_ptr()
+ }
+ PassMode::Cast { cast, pad_i32 } => {
+ // `Cast` means "transmute to `CastType`"; that only makes sense for sized types.
+ assert!(arg.layout.is_sized());
// add padding
if *pad_i32 {
llargument_tys.push(Reg::i32().llvm_type(cx));
}
+ // Compute the LLVM type we use for this function from the cast type.
+ // We assume here that ABI-compatible Rust types have the same cast type.
cast.llvm_type(cx)
}
- PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => cx.type_ptr(),
};
llargument_tys.push(llarg_ty);
}
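
(A hedged aside, not part of the patch: the new comment's invariant, that each Rust-level argument expands to a fixed number of LLVM-level arguments which every consumer must count the same way, can be summarized in a standalone sketch. The toy enum below is illustrative and is not rustc's `PassMode`.)

// Minimal sketch of how many LLVM arguments each mode contributes,
// mirroring the match arms above:
enum ToyPassMode {
    Ignore,                 // 0 args: e.g. zero-sized types
    Direct,                 // 1 arg: the immediate value
    Pair,                   // 2 args: both halves of a ScalarPair
    IndirectSized,          // 1 arg: a thin pointer to the value
    IndirectUnsized,        // 2 args: data pointer + metadata (wide pointer)
    Cast { pad_i32: bool }, // 1 or 2 args: optional i32 pad, then cast value
}

fn llvm_arg_count(mode: &ToyPassMode) -> usize {
    match mode {
        ToyPassMode::Ignore => 0,
        ToyPassMode::Direct | ToyPassMode::IndirectSized => 1,
        ToyPassMode::Pair | ToyPassMode::IndirectUnsized => 2,
        ToyPassMode::Cast { pad_i32 } => 1 + usize::from(*pad_i32),
    }
}
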
@@ -405,13 +454,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Direct(attrs) => {
attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
}
- PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+ PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(attrs);
let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
}
- PassMode::Cast(cast, _) => {
+ PassMode::Cast { cast, pad_i32: _ } => {
cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
}
_ => {}
@@ -419,25 +468,25 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
for arg in self.args.iter() {
match &arg.mode {
PassMode::Ignore => {}
- PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+ PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
let i = apply(attrs);
let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
}
PassMode::Direct(attrs)
- | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+ | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
apply(attrs);
}
- PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+ PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
assert!(!on_stack);
apply(attrs);
- apply(extra_attrs);
+ apply(meta_attrs);
}
PassMode::Pair(a, b) => {
apply(a);
apply(b);
}
- PassMode::Cast(cast, pad_i32) => {
+ PassMode::Cast { cast, pad_i32 } => {
if *pad_i32 {
apply(&ArgAttributes::new());
}
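
(A hedged aside, not part of the patch: the `byval` and `sret` attributes applied above typically surface in lowered signatures like the ones sketched below; x86_64 SysV is assumed, and other targets classify aggregates differently.)

// Hypothetical example. On x86_64 SysV a large aggregate is both passed
// and returned indirectly:
#[repr(C)]
pub struct Big([u8; 64]);

// Roughly: declare void @takes_big(ptr byval(%Big) %0)
pub extern "C" fn takes_big(_x: Big) {}

// Roughly: declare void @makes_big(ptr sret(%Big) %0)
pub extern "C" fn makes_big() -> Big {
    Big([0; 64])
}
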
@@ -467,13 +516,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Direct(attrs) => {
attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
}
- PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+ PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(bx.cx, attrs);
let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
}
- PassMode::Cast(cast, _) => {
+ PassMode::Cast { cast, pad_i32: _ } => {
cast.attrs.apply_attrs_to_callsite(
llvm::AttributePlace::ReturnValue,
&bx.cx,
@@ -495,7 +544,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
for arg in self.args.iter() {
match &arg.mode {
PassMode::Ignore => {}
- PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+ PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
let i = apply(bx.cx, attrs);
let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
attributes::apply_to_callsite(
@@ -505,18 +554,18 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
);
}
PassMode::Direct(attrs)
- | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+ | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
apply(bx.cx, attrs);
}
- PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack: _ } => {
+ PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack: _ } => {
apply(bx.cx, attrs);
- apply(bx.cx, extra_attrs);
+ apply(bx.cx, meta_attrs);
}
PassMode::Pair(a, b) => {
apply(bx.cx, a);
apply(bx.cx, b);
}
- PassMode::Cast(cast, pad_i32) => {
+ PassMode::Cast { cast, pad_i32 } => {
if *pad_i32 {
apply(bx.cx, &ArgAttributes::new());
}
@@ -571,7 +620,9 @@ impl From<Conv> for llvm::CallConv {
Conv::C | Conv::Rust | Conv::CCmseNonSecureCall | Conv::RiscvInterrupt { .. } => {
llvm::CCallConv
}
- Conv::RustCold => llvm::ColdCallConv,
+ Conv::Cold => llvm::ColdCallConv,
+ Conv::PreserveMost => llvm::PreserveMost,
+ Conv::PreserveAll => llvm::PreserveAll,
Conv::AmdGpuKernel => llvm::AmdGpuKernel,
Conv::AvrInterrupt => llvm::AvrInterrupt,
Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
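
(A hedged aside, not part of the patch: the renamed `Conv::Cold` and the new `Conv::PreserveMost`/`Conv::PreserveAll` select LLVM's `coldcc`, `preserve_mostcc`, and `preserve_allcc` conventions. The sketch below assumes the numeric IDs from LLVM's `CallingConv.h` and is not rustc's actual binding.)

// Standalone sketch of the LLVM-side IDs these variants select:
#[repr(u32)]
enum ToyLlvmCallConv {
    CCallConv = 0,     // Conv::C, Conv::Rust, ...  -> default `ccc`
    ColdCallConv = 9,  // Conv::Cold (was RustCold) -> `coldcc`
    PreserveMost = 14, // Conv::PreserveMost        -> `preserve_mostcc`
    PreserveAll = 15,  // Conv::PreserveAll         -> `preserve_allcc`
}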