Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs | 12
-rw-r--r--  compiler/rustc_codegen_llvm/src/allocator.rs | 14
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/attributes.rs | 15
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/archive.rs | 6
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs | 8
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs | 63
-rw-r--r--  compiler/rustc_codegen_llvm/src/callee.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs | 27
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs | 12
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 78
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs | 8
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs | 7
-rw-r--r--  compiler/rustc_codegen_llvm/src/declare.rs | 7
-rw-r--r--  compiler/rustc_codegen_llvm/src/errors.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 559
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 15
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_.rs | 15
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs | 84
23 files changed, 541 insertions, 417 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index a6fd2a7de..28be6d033 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -34,13 +34,6 @@ pub trait ArgAttributesExt {
);
}
-fn should_use_mutable_noalias(cx: &CodegenCx<'_, '_>) -> bool {
- // LLVM prior to version 12 had known miscompiles in the presence of
- // noalias attributes (see #54878), but we don't support earlier
- // versions at all anymore. We now enable mutable noalias by default.
- cx.tcx.sess.opts.unstable_opts.mutable_noalias.unwrap_or(true)
-}
-
const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] =
[(ArgAttribute::InReg, llvm::AttributeKind::InReg)];
@@ -88,9 +81,6 @@ fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'
attrs.push(llattr.create_attr(cx.llcx));
}
}
- if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
- attrs.push(llvm::AttributeKind::NoAlias.create_attr(cx.llcx));
- }
} else if cx.tcx.sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::MEMORY) {
// If we're not optimising, *but* memory sanitizer is on, emit noundef, since it affects
// memory sanitizer's behavior.
@@ -231,7 +221,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
bx.store(val, cast_dst, self.layout.align.abi);
} else {
// The actual return type is a struct, but the ABI
- // adaptation code has cast it into some scalar type.  The
+ // adaptation code has cast it into some scalar type. The
// code that follows is the only reliable way I have
// found to do a transform like i64 -> {i32,i32}.
// Basically we dump the data onto the stack then memcpy it.
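
The comment above describes the ABI-cast path for aggregate returns: the ABI may have cast a struct return into a single scalar, and the only fully general way back is to spill that scalar to a stack slot and memcpy it into the destination. A plain-Rust illustration of the same go-through-memory idea (a standalone sketch, not the builder API):

// Sketch: reinterpret a scalar as a two-field aggregate by going through
// memory, the same idea as the store + memcpy described above.
fn main() {
    let abi_value: u64 = 0x1111_2222_3333_4444;

    // "Dump the data onto the stack": take the scalar's raw bytes...
    let bytes = abi_value.to_ne_bytes();

    // ...then "memcpy it" into the real return type, here {i32, i32}.
    let lo = u32::from_ne_bytes(bytes[0..4].try_into().unwrap());
    let hi = u32::from_ne_bytes(bytes[4..8].try_into().unwrap());

    // Reassembling through the same byte order round-trips the value.
    let mut roundtrip = [0u8; 8];
    roundtrip[0..4].copy_from_slice(&lo.to_ne_bytes());
    roundtrip[4..8].copy_from_slice(&hi.to_ne_bytes());
    assert_eq!(u64::from_ne_bytes(roundtrip), abi_value);
    println!("{lo:#x} {hi:#x}");
}
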
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index fed56cdd4..668d92927 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -88,7 +88,8 @@ pub(crate) unsafe fn codegen(
callee,
args.as_ptr(),
args.len() as c_uint,
- None,
+ [].as_ptr(),
+ 0 as c_uint,
);
llvm::LLVMSetTailCall(ret, True);
if output.is_some() {
@@ -132,8 +133,15 @@ pub(crate) unsafe fn codegen(
.enumerate()
.map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
.collect::<Vec<_>>();
- let ret =
- llvm::LLVMRustBuildCall(llbuilder, ty, callee, args.as_ptr(), args.len() as c_uint, None);
+ let ret = llvm::LLVMRustBuildCall(
+ llbuilder,
+ ty,
+ callee,
+ args.as_ptr(),
+ args.len() as c_uint,
+ [].as_ptr(),
+ 0 as c_uint,
+ );
llvm::LLVMSetTailCall(ret, True);
llvm::LLVMBuildRetVoid(llbuilder);
llvm::LLVMDisposeBuilder(llbuilder);
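
Both calls in the allocator shim switch from passing a single Option<&OperandBundleDef> to a pointer-plus-count pair, the new LLVMRustBuildCall shape that lets one call carry several operand bundles (see builder.rs and llvm/ffi.rs below); with no bundles it is just an empty array and a zero count. A standalone sketch of that calling convention, where Bundle and build_call are stand-ins for the real FFI types:

// Sketch only: `Bundle` and `build_call` stand in for `OperandBundleDef`
// and `LLVMRustBuildCall`; the point is the pointer-plus-count shape used
// for an optional, variable-length bundle list.
struct Bundle(&'static str);

unsafe fn build_call(bundles: *const Option<&Bundle>, num_bundles: usize) {
    // On the C++ side this reconstructs a slice and skips the `None`s.
    let bundles = std::slice::from_raw_parts(bundles, num_bundles);
    for b in bundles.iter().flatten() {
        println!("bundle: {}", b.0);
    }
}

fn main() {
    // No bundles: the empty-array / zero-length form used in allocator.rs.
    unsafe { build_call([].as_ptr(), 0) };

    // One or more bundles: the form used in builder.rs once the `None`s
    // have been dropped with `retain`.
    let funclet = Bundle("funclet");
    let kcfi = Bundle("kcfi");
    let mut bundles = vec![Some(&funclet), Some(&kcfi)];
    bundles.retain(|b| b.is_some());
    unsafe { build_call(bundles.as_ptr(), bundles.len()) };
}
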
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 219a4f8fa..52c8b5179 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -144,7 +144,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
// We prefer the latter because it matches the behavior of
// Clang.
if late && matches!(reg, InlineAsmRegOrRegClass::Reg(_)) {
- constraints.push(format!("{}", reg_to_llvm(reg, Some(&in_value.layout))));
+ constraints.push(reg_to_llvm(reg, Some(&in_value.layout)).to_string());
} else {
constraints.push(format!("{}", op_idx[&idx]));
}
@@ -445,7 +445,7 @@ pub(crate) fn inline_asm_call<'ll>(
};
// Store mark in a metadata node so we can map LLVM errors
- // back to source locations.  See #17552.
+ // back to source locations. See #17552.
let key = "srcloc";
let kind = llvm::LLVMGetMDKindIDInContext(
bx.llcx,
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index f3bdacf60..95baa95b0 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -102,10 +102,10 @@ pub fn uwtable_attr(llcx: &llvm::Context) -> &Attribute {
pub fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
let mut fp = cx.sess().target.frame_pointer;
+ let opts = &cx.sess().opts;
// "mcount" function relies on stack pointer.
// See <https://sourceware.org/binutils/docs/gprof/Implementation.html>.
- if cx.sess().instrument_mcount() || matches!(cx.sess().opts.cg.force_frame_pointers, Some(true))
- {
+ if opts.unstable_opts.instrument_mcount || matches!(opts.cg.force_frame_pointers, Some(true)) {
fp = FramePointer::Always;
}
let attr_value = match fp {
@@ -119,7 +119,7 @@ pub fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attr
/// Tell LLVM what instrument function to insert.
#[inline]
fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
- if cx.sess().instrument_mcount() {
+ if cx.sess().opts.unstable_opts.instrument_mcount {
// Similar to `clang -pg` behavior. Handled by the
// `post-inline-ee-instrument` LLVM pass.
@@ -137,6 +137,14 @@ fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribu
}
}
+fn nojumptables_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+ if !cx.sess().opts.unstable_opts.no_jump_tables {
+ return None;
+ }
+
+ Some(llvm::CreateAttrStringValue(cx.llcx, "no-jump-tables", "true"))
+}
+
fn probestack_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
// Currently stack probes seem somewhat incompatible with the address
// sanitizer and thread sanitizer. With asan we're already protected from
@@ -293,6 +301,7 @@ pub fn from_fn_attrs<'ll, 'tcx>(
// FIXME: none of these three functions interact with source level attributes.
to_add.extend(frame_pointer_type_attr(cx));
to_add.extend(instrument_function_attr(cx));
+ to_add.extend(nojumptables_attr(cx));
to_add.extend(probestack_attr(cx));
to_add.extend(stackprotector_attr(cx));
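
nojumptables_attr follows the same pattern as the surrounding helpers: return None unless the corresponding option is set, and let from_fn_attrs flatten the Options with extend. A standalone sketch of that pattern, with plain strings standing in for llvm::Attribute:

// Sketch of the Option-returning attribute-helper pattern used by
// from_fn_attrs: each helper yields zero or one attributes, and `extend`
// flattens the `Option`s into the final list.
fn nojumptables_attr(no_jump_tables: bool) -> Option<String> {
    // Mirrors the diff: only emit the attribute when -Zno-jump-tables is set.
    no_jump_tables.then(|| r#""no-jump-tables"="true""#.to_string())
}

fn frame_pointer_attr(force_frame_pointers: bool) -> Option<String> {
    force_frame_pointers.then(|| r#""frame-pointer"="all""#.to_string())
}

fn main() {
    let mut to_add: Vec<String> = Vec::new();
    to_add.extend(frame_pointer_attr(false));
    to_add.extend(nojumptables_attr(true));
    // Only attributes whose helpers returned Some end up on the function.
    assert_eq!(to_add, vec![r#""no-jump-tables"="true""#.to_string()]);
    println!("{to_add:?}");
}
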
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index 2cf2f585f..58ca87524 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -153,7 +153,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
// The binutils linker used on -windows-gnu targets cannot read the import
// libraries generated by LLVM: in our attempts, the linker produced an .EXE
// that loaded but crashed with an AV upon calling one of the imported
- // functions.  Therefore, use binutils to create the import library instead,
+ // functions. Therefore, use binutils to create the import library instead,
// by writing a .DEF file to the temp dir and calling binutils's dlltool.
let def_file_path =
tmpdir.join(format!("{}{}", lib_name, name_suffix)).with_extension("def");
@@ -227,7 +227,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
// All import names are Rust identifiers and therefore cannot contain \0 characters.
// FIXME: when support for #[link_name] is implemented, ensure that the import names
- // still don't contain any \0 characters.  Also need to check that the names don't
+ // still don't contain any \0 characters. Also need to check that the names don't
// contain substrings like " @" or "NONAME" that are keywords or otherwise reserved
// in definition files.
let cstring_import_name_and_ordinal_vector: Vec<(CString, Option<u16>)> =
@@ -441,7 +441,7 @@ fn find_binutils_dlltool(sess: &Session) -> OsString {
}
// The user didn't specify the location of the dlltool binary, and we weren't able
- // to find the appropriate one on the PATH.  Just return the name of the tool
+ // to find the appropriate one on the PATH. Just return the name of the tool
// and let the invocation fail with a hopefully useful error message.
tool_name
}
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 3fa21355b..6c0faf37a 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -133,6 +133,10 @@ fn prepare_lto(
}
}
+ // __llvm_profile_counter_bias is pulled in at link time by an undefined reference to
+ // __llvm_profile_runtime, therefore we won't know until link time if this symbol
+ // should have default visibility.
+ symbols_below_threshold.push(CString::new("__llvm_profile_counter_bias").unwrap());
Ok((symbols_below_threshold, upstream_modules))
}
@@ -206,7 +210,7 @@ pub(crate) fn run_thin(
}
pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBuffer) {
- let name = module.name.clone();
+ let name = module.name;
let buffer = ThinBuffer::new(module.module_llvm.llmod(), true);
(name, buffer)
}
@@ -421,7 +425,7 @@ fn thin_lto(
info!("going for that thin, thin LTO");
let green_modules: FxHashMap<_, _> =
- cached_modules.iter().map(|&(_, ref wp)| (wp.cgu_name.clone(), wp.clone())).collect();
+ cached_modules.iter().map(|(_, wp)| (wp.cgu_name.clone(), wp.clone())).collect();
let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
let mut thin_buffers = Vec::with_capacity(modules.len());
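
The new __llvm_profile_counter_bias entry joins symbols_below_threshold, the list of names kept visible through LTO internalization because (as the comment above says) their required visibility is only known at link time. A toy sketch of what such a preserved-symbols list does, not the real LLVM pass:

use std::ffi::CString;

// Sketch: names on the preserved list survive internalization, everything
// else may be made local to the LTO unit.
fn internalize(all_globals: &[&str], preserved: &[CString]) -> Vec<String> {
    all_globals
        .iter()
        .filter(|g| !preserved.iter().any(|p| p.to_str().map_or(false, |s| s == **g)))
        .map(|g| g.to_string())
        .collect()
}

fn main() {
    let mut preserved = vec![CString::new("main").unwrap()];
    // The entry added in this diff: it must stay resolvable at link time.
    preserved.push(CString::new("__llvm_profile_counter_bias").unwrap());

    let made_local =
        internalize(&["main", "__llvm_profile_counter_bias", "helper"], &preserved);
    assert_eq!(made_local, vec!["helper".to_string()]);
}
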
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 97d0de47b..b2af9f31e 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -203,7 +203,7 @@ pub fn target_machine_factory(
sess.opts.unstable_opts.trap_unreachable.unwrap_or(sess.target.trap_unreachable);
let emit_stack_size_section = sess.opts.unstable_opts.emit_stack_sizes;
- let asm_comments = sess.asm_comments();
+ let asm_comments = sess.opts.unstable_opts.asm_comments;
let relax_elf_relocations =
sess.opts.unstable_opts.relax_elf_relocations.unwrap_or(sess.target.relax_elf_relocations);
@@ -909,7 +909,7 @@ unsafe fn embed_bitcode(
// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
// This is required to satisfy `dllimport` references to static data in .rlibs
- // when using MSVC linker.  We do this only for data, as linker can fix up
+// when using MSVC linker. We do this only for data, as linker can fix up
// code references on its own.
// See #26591, #27438
fn create_msvc_imps(
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 77dd15ef4..5e98deae4 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -20,6 +20,7 @@ use rustc_middle::ty::layout::{
};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::Span;
+use rustc_symbol_mangling::typeid::kcfi_typeid_for_fnabi;
use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
use rustc_target::spec::{HasTargetSpec, Target};
use std::borrow::Cow;
@@ -225,9 +226,25 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
debug!("invoke {:?} with args ({:?})", llfn, args);
let args = self.check_call("invoke", llty, llfn, args);
- let bundle = funclet.map(|funclet| funclet.bundle());
- let bundle = bundle.as_ref().map(|b| &*b.raw);
+ let funclet_bundle = funclet.map(|funclet| funclet.bundle());
+ let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
+ let mut bundles = vec![funclet_bundle];
+
+ // Set KCFI operand bundle
+ let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
+ let kcfi_bundle =
+ if self.tcx.sess.is_sanitizer_kcfi_enabled() && let Some(fn_abi) = fn_abi && is_indirect_call {
+ let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi);
+ Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
+ } else {
+ None
+ };
+ if kcfi_bundle.is_some() {
+ let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
+ bundles.push(kcfi_bundle);
+ }
+ bundles.retain(|bundle| bundle.is_some());
let invoke = unsafe {
llvm::LLVMRustBuildInvoke(
self.llbuilder,
@@ -237,7 +254,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
args.len() as c_uint,
then,
catch,
- bundle,
+ bundles.as_ptr(),
+ bundles.len() as c_uint,
UNNAMED,
)
};
@@ -483,7 +501,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
layout: TyAndLayout<'tcx>,
offset: Size,
) {
- if !scalar.is_always_valid(bx) {
+ if !scalar.is_uninit_valid() {
bx.noundef_metadata(load);
}
@@ -961,15 +979,20 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
- fn cleanup_landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value) -> &'ll Value {
+ fn cleanup_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
+ let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
let landing_pad = self.landing_pad(ty, pers_fn, 1 /* FIXME should this be 0? */);
unsafe {
llvm::LLVMSetCleanup(landing_pad, llvm::True);
}
- landing_pad
+ (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
}
- fn resume(&mut self, exn: &'ll Value) {
+ fn resume(&mut self, exn0: &'ll Value, exn1: &'ll Value) {
+ let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+ let mut exn = self.const_undef(ty);
+ exn = self.insert_value(exn, exn0, 0);
+ exn = self.insert_value(exn, exn1, 1);
unsafe {
llvm::LLVMBuildResume(self.llbuilder, exn);
}
@@ -1143,7 +1166,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
llfn,
args.as_ptr() as *const &llvm::Value,
args.len() as c_uint,
- None,
+ [].as_ptr(),
+ 0 as c_uint,
);
}
}
@@ -1159,9 +1183,25 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
debug!("call {:?} with args ({:?})", llfn, args);
let args = self.check_call("call", llty, llfn, args);
- let bundle = funclet.map(|funclet| funclet.bundle());
- let bundle = bundle.as_ref().map(|b| &*b.raw);
+ let funclet_bundle = funclet.map(|funclet| funclet.bundle());
+ let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
+ let mut bundles = vec![funclet_bundle];
+
+ // Set KCFI operand bundle
+ let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
+ let kcfi_bundle =
+ if self.tcx.sess.is_sanitizer_kcfi_enabled() && fn_abi.is_some() && is_indirect_call {
+ let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi.unwrap());
+ Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
+ } else {
+ None
+ };
+ if kcfi_bundle.is_some() {
+ let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
+ bundles.push(kcfi_bundle);
+ }
+ bundles.retain(|bundle| bundle.is_some());
let call = unsafe {
llvm::LLVMRustBuildCall(
self.llbuilder,
@@ -1169,7 +1209,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
llfn,
args.as_ptr() as *const &llvm::Value,
args.len() as c_uint,
- bundle,
+ bundles.as_ptr(),
+ bundles.len() as c_uint,
)
};
if let Some(fn_abi) = fn_abi {
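
call and invoke now assemble the same optional bundle list: the funclet bundle, if any, plus a kcfi bundle carrying the callee's type id, and the kcfi bundle is only attached for indirect calls, i.e. when LLVMIsAFunction reports that the callee value is not a plain function. A condensed standalone sketch of that decision, with FnAbi, Bundle and kcfi_typeid_for_fnabi stubbed out so only the control flow mirrors the diff:

// Sketch of the bundle-selection logic shared by `call` and `invoke`.
// `FnAbi`, `Bundle`, and `kcfi_typeid_for_fnabi` are stand-ins.
struct FnAbi;
#[derive(Debug)]
enum Bundle { Funclet, Kcfi(u32) }

fn kcfi_typeid_for_fnabi(_fn_abi: &FnAbi) -> u32 { 0xdead_beef }

fn operand_bundles(
    kcfi_enabled: bool,
    fn_abi: Option<&FnAbi>,
    callee_is_direct_fn: bool,
    funclet: Option<Bundle>,
) -> Vec<Bundle> {
    let mut bundles = Vec::new();
    bundles.extend(funclet);
    // kcfi checks only make sense for indirect calls through a function
    // pointer; a direct call to a known function needs no check.
    if kcfi_enabled && !callee_is_direct_fn {
        if let Some(fn_abi) = fn_abi {
            bundles.push(Bundle::Kcfi(kcfi_typeid_for_fnabi(fn_abi)));
        }
    }
    bundles
}

fn main() {
    // Indirect call with kcfi enabled: no funclet here, one kcfi bundle.
    println!("{:?}", operand_bundles(true, Some(&FnAbi), false, None));
    // Direct call: no kcfi bundle even with the sanitizer enabled.
    println!("{:?}", operand_bundles(true, Some(&FnAbi), true, Some(Bundle::Funclet)));
}
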
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 70ff5c961..f1d01a460 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -49,8 +49,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
let llptrty = fn_abi.ptr_to_llvm_type(cx);
// This is subtle and surprising, but sometimes we have to bitcast
- // the resulting fn pointer.  The reason has to do with external
- // functions.  If you have two crates that both bind the same C
+ // the resulting fn pointer. The reason has to do with external
+ // functions. If you have two crates that both bind the same C
// library, they may not use precisely the same types: for
// example, they will probably each declare their own structs,
// which are distinct types from LLVM's point of view (nominal
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 3626aa901..16467b614 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -140,7 +140,7 @@ pub fn codegen_static_initializer<'ll, 'tcx>(
fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {
// The target may require greater alignment for globals than the type does.
// Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
- // which can force it to be smaller.  Rust doesn't support this yet.
+ // which can force it to be smaller. Rust doesn't support this yet.
if let Some(min) = cx.sess().target.min_global_align {
match Align::from_bits(min) {
Ok(min) => align = align.max(min),
@@ -171,7 +171,7 @@ fn check_and_apply_linkage<'ll, 'tcx>(
llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));
// Declare an internal global `extern_with_linkage_foo` which
- // is initialized with the address of `foo`.  If `foo` is
+ // is initialized with the address of `foo`. If `foo` is
// discarded during linking (for example, if `foo` has weak
// linkage and there are no definitions), then
// `extern_with_linkage_foo` will instead be initialized to
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 4dcc7cd54..d9ccba07a 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -3,7 +3,6 @@ use crate::back::write::to_llvm_code_model;
use crate::callee::get_fn;
use crate::coverageinfo;
use crate::debuginfo;
-use crate::errors::BranchProtectionRequiresAArch64;
use crate::llvm;
use crate::llvm_util;
use crate::type_::Type;
@@ -250,6 +249,11 @@ pub unsafe fn create_module<'ll>(
);
}
+ if sess.is_sanitizer_kcfi_enabled() {
+ let kcfi = "kcfi\0".as_ptr().cast();
+ llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1);
+ }
+
// Control Flow Guard is currently only supported by the MSVC linker on Windows.
if sess.target.is_like_msvc {
match sess.opts.cg.control_flow_guard {
@@ -276,34 +280,43 @@ pub unsafe fn create_module<'ll>(
}
if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection {
- if sess.target.arch != "aarch64" {
- sess.emit_err(BranchProtectionRequiresAArch64);
+ let behavior = if llvm_version >= (15, 0, 0) {
+ llvm::LLVMModFlagBehavior::Min
} else {
+ llvm::LLVMModFlagBehavior::Error
+ };
+
+ if sess.target.arch == "aarch64" {
llvm::LLVMRustAddModuleFlag(
llmod,
- llvm::LLVMModFlagBehavior::Error,
+ behavior,
"branch-target-enforcement\0".as_ptr().cast(),
bti.into(),
);
llvm::LLVMRustAddModuleFlag(
llmod,
- llvm::LLVMModFlagBehavior::Error,
+ behavior,
"sign-return-address\0".as_ptr().cast(),
pac_ret.is_some().into(),
);
let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A });
llvm::LLVMRustAddModuleFlag(
llmod,
- llvm::LLVMModFlagBehavior::Error,
+ behavior,
"sign-return-address-all\0".as_ptr().cast(),
pac_opts.leaf.into(),
);
llvm::LLVMRustAddModuleFlag(
llmod,
- llvm::LLVMModFlagBehavior::Error,
+ behavior,
"sign-return-address-with-bkey\0".as_ptr().cast(),
u32::from(pac_opts.key == PAuthKey::B),
);
+ } else {
+ bug!(
+ "branch-protection used on non-AArch64 target; \
+ this should be checked in rustc_session."
+ );
}
}
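
Two module-flag changes land here: a kcfi flag added with Override whenever the sanitizer is enabled, and the AArch64 branch-protection flags, which keep the Error merge behavior on older LLVM but use Min on LLVM 15 and later (Min resolves a mismatch between linked modules to the smaller value, so differently configured modules can still be combined). A small sketch of the version-gated choice, with a local stand-in for the FFI enum:

// Sketch of the version-gated choice of module-flag merge behavior;
// `LLVMModFlagBehavior` here is a local stand-in for the FFI enum.
#[derive(Debug, PartialEq)]
enum LLVMModFlagBehavior { Error, Min }

fn branch_protection_behavior(llvm_version: (u32, u32, u32)) -> LLVMModFlagBehavior {
    // Only LLVM 15+ is given Min here; older versions fall back to Error,
    // which rejects mismatched flag values outright.
    if llvm_version >= (15, 0, 0) {
        LLVMModFlagBehavior::Min
    } else {
        LLVMModFlagBehavior::Error
    }
}

fn main() {
    assert_eq!(branch_protection_behavior((14, 0, 6)), LLVMModFlagBehavior::Error);
    assert_eq!(branch_protection_behavior((15, 0, 7)), LLVMModFlagBehavior::Min);
}
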
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 86580d05d..22c61248b 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -8,7 +8,7 @@ use rustc_codegen_ssa::coverageinfo::map::{Counter, CounterExpression};
use rustc_codegen_ssa::traits::{ConstMethods, CoverageInfoMethods};
use rustc_data_structures::fx::FxIndexSet;
use rustc_hir::def::DefKind;
-use rustc_hir::def_id::DefIdSet;
+use rustc_hir::def_id::DefId;
use rustc_llvm::RustString;
use rustc_middle::bug;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
@@ -30,7 +30,7 @@ use std::ffi::CString;
/// implementing this Rust version, and though the format documentation is very explicit and
/// detailed, some undocumented details in Clang's implementation (that may or may not be important)
/// were also replicated for Rust's Coverage Map.
-pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
+pub fn finalize(cx: &CodegenCx<'_, '_>) {
let tcx = cx.tcx;
// Ensure the installed version of LLVM supports at least Coverage Map
@@ -284,14 +284,14 @@ fn save_function_record(
/// "code coverage dead code cgu" during the partitioning process. This prevents us from generating
/// code regions for the same function more than once which can lead to linker errors regarding
/// duplicate symbols.
-fn add_unused_functions<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
+fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
assert!(cx.codegen_unit.is_code_coverage_dead_code_cgu());
let tcx = cx.tcx;
let ignore_unused_generics = tcx.sess.instrument_coverage_except_unused_generics();
- let eligible_def_ids: DefIdSet = tcx
+ let eligible_def_ids: Vec<DefId> = tcx
.mir_keys(())
.iter()
.filter_map(|local_def_id| {
@@ -317,7 +317,9 @@ fn add_unused_functions<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
let codegenned_def_ids = tcx.codegened_and_inlined_items(());
- for &non_codegenned_def_id in eligible_def_ids.difference(codegenned_def_ids) {
+ for non_codegenned_def_id in
+ eligible_def_ids.into_iter().filter(|id| !codegenned_def_ids.contains(id))
+ {
let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
// If a function is marked `#[no_coverage]`, then skip generating a
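
Collecting eligible_def_ids into a Vec and filtering with contains picks out exactly the same def-ids as the old DefIdSet::difference, but iterates them in the order they were collected rather than in hash order. A tiny standalone sketch of the two forms, with integers standing in for DefIds:

use std::collections::HashSet;

// Sketch: the old form (set difference) and the new form (Vec + filter)
// select the same elements; plain u32s stand in for DefIds.
fn main() {
    let eligible: Vec<u32> = vec![1, 2, 3, 4, 5];
    let codegenned: HashSet<u32> = [2, 4].into_iter().collect();

    // Old shape: eligible as a set, iterate the difference (unordered).
    let eligible_set: HashSet<u32> = eligible.iter().copied().collect();
    let mut old: Vec<u32> = eligible_set.difference(&codegenned).copied().collect();
    old.sort();

    // New shape: keep the Vec and filter against `contains`, preserving
    // the original collection order.
    let new: Vec<u32> =
        eligible.into_iter().filter(|id| !codegenned.contains(id)).collect();

    assert_eq!(old, new); // both are [1, 3, 5]
    println!("{new:?}");
}
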
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index d87117dff..b6eb5ee18 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -27,9 +27,7 @@ use rustc_codegen_ssa::traits::*;
use rustc_fs_util::path_to_c_string;
use rustc_hir::def::CtorKind;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
-use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::bug;
-use rustc_middle::mir::{self, GeneratorLayout};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::subst::GenericArgKind;
use rustc_middle::ty::{
@@ -113,7 +111,7 @@ macro_rules! return_if_di_node_created_in_meantime {
/// Extract size and alignment from a TyAndLayout.
#[inline]
-fn size_and_align_of<'tcx>(ty_and_layout: TyAndLayout<'tcx>) -> (Size, Align) {
+fn size_and_align_of(ty_and_layout: TyAndLayout<'_>) -> (Size, Align) {
(ty_and_layout.size, ty_and_layout.align.abi)
}
@@ -784,10 +782,10 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
codegen_unit_name: &str,
debug_context: &CodegenUnitDebugContext<'ll, 'tcx>,
) -> &'ll DIDescriptor {
- let mut name_in_debuginfo = match tcx.sess.local_crate_source_file {
- Some(ref path) => path.clone(),
- None => PathBuf::from(tcx.crate_name(LOCAL_CRATE).as_str()),
- };
+ let mut name_in_debuginfo = tcx
+ .sess
+ .local_crate_source_file()
+ .unwrap_or_else(|| PathBuf::from(tcx.crate_name(LOCAL_CRATE).as_str()));
// To avoid breaking split DWARF, we need to ensure that each codegen unit
// has a unique `DW_AT_name`. This is because there's a remote chance that
@@ -1026,33 +1024,6 @@ fn build_struct_type_di_node<'ll, 'tcx>(
// Tuples
//=-----------------------------------------------------------------------------
-/// Returns names of captured upvars for closures and generators.
-///
-/// Here are some examples:
-/// - `name__field1__field2` when the upvar is captured by value.
-/// - `_ref__name__field` when the upvar is captured by reference.
-///
-/// For generators this only contains upvars that are shared by all states.
-fn closure_saved_names_of_captured_variables(tcx: TyCtxt<'_>, def_id: DefId) -> SmallVec<String> {
- let body = tcx.optimized_mir(def_id);
-
- body.var_debug_info
- .iter()
- .filter_map(|var| {
- let is_ref = match var.value {
- mir::VarDebugInfoContents::Place(place) if place.local == mir::Local::new(1) => {
- // The projection is either `[.., Field, Deref]` or `[.., Field]`. It
- // implies whether the variable is captured by value or by reference.
- matches!(place.projection.last().unwrap(), mir::ProjectionElem::Deref)
- }
- _ => return None,
- };
- let prefix = if is_ref { "_ref__" } else { "" };
- Some(prefix.to_owned() + var.name.as_str())
- })
- .collect()
-}
-
/// Builds the DW_TAG_member debuginfo nodes for the upvars of a closure or generator.
/// For a generator, this will handle upvars shared by all states.
fn build_upvar_field_di_nodes<'ll, 'tcx>(
@@ -1083,7 +1054,7 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
.all(|&t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
);
- let capture_names = closure_saved_names_of_captured_variables(cx.tcx, def_id);
+ let capture_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);
let layout = cx.layout_of(closure_or_generator_ty);
up_var_tys
@@ -1229,43 +1200,6 @@ fn build_union_type_di_node<'ll, 'tcx>(
)
}
-// FIXME(eddyb) maybe precompute this? Right now it's computed once
-// per generator monomorphization, but it doesn't depend on substs.
-fn generator_layout_and_saved_local_names<'tcx>(
- tcx: TyCtxt<'tcx>,
- def_id: DefId,
-) -> (&'tcx GeneratorLayout<'tcx>, IndexVec<mir::GeneratorSavedLocal, Option<Symbol>>) {
- let body = tcx.optimized_mir(def_id);
- let generator_layout = body.generator_layout().unwrap();
- let mut generator_saved_local_names = IndexVec::from_elem(None, &generator_layout.field_tys);
-
- let state_arg = mir::Local::new(1);
- for var in &body.var_debug_info {
- let mir::VarDebugInfoContents::Place(place) = &var.value else { continue };
- if place.local != state_arg {
- continue;
- }
- match place.projection[..] {
- [
- // Deref of the `Pin<&mut Self>` state argument.
- mir::ProjectionElem::Field(..),
- mir::ProjectionElem::Deref,
- // Field of a variant of the state.
- mir::ProjectionElem::Downcast(_, variant),
- mir::ProjectionElem::Field(field, _),
- ] => {
- let name = &mut generator_saved_local_names
- [generator_layout.variant_fields[variant][field]];
- if name.is_none() {
- name.replace(var.name);
- }
- }
- _ => {}
- }
- }
- (generator_layout, generator_saved_local_names)
-}
-
/// Computes the type parameters for a type, if any, for the given metadata.
fn build_generic_type_param_di_nodes<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
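
closure_saved_names_of_captured_variables and generator_layout_and_saved_local_names are only moved, not dropped: the call sites in the enum debuginfo files below switch to TyCtxt methods of the same names. For reference, the naming scheme the removed doc comment describes is just a _ref__ prefix on by-reference captures, as in this toy sketch:

// Sketch of the upvar naming scheme from the removed doc comment:
// by-value captures keep their name, by-reference captures get "_ref__".
fn saved_name(is_ref: bool, name: &str) -> String {
    let prefix = if is_ref { "_ref__" } else { "" };
    format!("{prefix}{name}")
}

fn main() {
    assert_eq!(saved_name(false, "name__field1__field2"), "name__field1__field2");
    assert_eq!(saved_name(true, "name__field"), "_ref__name__field");
}
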
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
index 53e8a291d..69443b9b8 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -22,9 +22,9 @@ use crate::{
common::CodegenCx,
debuginfo::{
metadata::{
- build_field_di_node, closure_saved_names_of_captured_variables,
+ build_field_di_node,
enums::{tag_base_type, DiscrResult},
- file_metadata, generator_layout_and_saved_local_names, size_and_align_of, type_di_node,
+ file_metadata, size_and_align_of, type_di_node,
type_map::{self, Stub, UniqueTypeId},
unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS, NO_SCOPE_METADATA,
UNKNOWN_LINE_NUMBER,
@@ -677,9 +677,9 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
};
let (generator_layout, state_specific_upvar_names) =
- generator_layout_and_saved_local_names(cx.tcx, generator_def_id);
+ cx.tcx.generator_layout_and_saved_local_names(generator_def_id);
- let common_upvar_names = closure_saved_names_of_captured_variables(cx.tcx, generator_def_id);
+ let common_upvar_names = cx.tcx.closure_saved_names_of_captured_variables(generator_def_id);
let variant_range = generator_substs.variant_range(generator_def_id, cx.tcx);
let variant_count = (variant_range.start.as_u32()..variant_range.end.as_u32()).len();
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
index becbccc43..93419d27a 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -4,9 +4,8 @@ use crate::{
common::CodegenCx,
debuginfo::{
metadata::{
- closure_saved_names_of_captured_variables,
enums::tag_base_type,
- file_metadata, generator_layout_and_saved_local_names, size_and_align_of, type_di_node,
+ file_metadata, size_and_align_of, type_di_node,
type_map::{self, Stub, StubInfo, UniqueTypeId},
unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS,
UNKNOWN_LINE_NUMBER,
@@ -157,7 +156,7 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
),
|cx, generator_type_di_node| {
let (generator_layout, state_specific_upvar_names) =
- generator_layout_and_saved_local_names(cx.tcx, generator_def_id);
+ cx.tcx.generator_layout_and_saved_local_names(generator_def_id);
let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } = generator_type_and_layout.variants else {
bug!(
@@ -167,7 +166,7 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
};
let common_upvar_names =
- closure_saved_names_of_captured_variables(cx.tcx, generator_def_id);
+ cx.tcx.closure_saved_names_of_captured_variables(generator_def_id);
// Build variant struct types
let variant_struct_type_di_nodes: SmallVec<_> = variants
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index dc21a02ce..6a575095f 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -20,7 +20,7 @@ use crate::type_::Type;
use crate::value::Value;
use rustc_codegen_ssa::traits::TypeMembershipMethods;
use rustc_middle::ty::Ty;
-use rustc_symbol_mangling::typeid::typeid_for_fnabi;
+use rustc_symbol_mangling::typeid::{kcfi_typeid_for_fnabi, typeid_for_fnabi};
use smallvec::SmallVec;
/// Declare a function.
@@ -136,6 +136,11 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
self.set_type_metadata(llfn, typeid);
}
+ if self.tcx.sess.is_sanitizer_kcfi_enabled() {
+ let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi);
+ self.set_kcfi_type_metadata(llfn, kcfi_typeid);
+ }
+
llfn
}
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index af9f31fc3..b46209972 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -52,10 +52,6 @@ pub(crate) struct SymbolAlreadyDefined<'a> {
}
#[derive(Diagnostic)]
-#[diag(codegen_llvm_branch_protection_requires_aarch64)]
-pub(crate) struct BranchProtectionRequiresAArch64;
-
-#[derive(Diagnostic)]
#[diag(codegen_llvm_invalid_minimum_alignment)]
pub(crate) struct InvalidMinimumAlignment {
pub err: String,
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 2f5dd519b..a6a75eff9 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -8,8 +8,8 @@ use crate::va_arg::emit_va_arg;
use crate::value::Value;
use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
-use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
+use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
use rustc_codegen_ssa::mir::operand::OperandRef;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
@@ -284,15 +284,11 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
_ => bug!(),
},
None => {
- span_invalid_monomorphization_error(
- tcx.sess,
+ tcx.sess.emit_err(InvalidMonomorphization::BasicIntegerType {
span,
- &format!(
- "invalid monomorphization of `{}` intrinsic: \
- expected basic integer type, found `{}`",
- name, ty
- ),
- );
+ name,
+ ty,
+ });
return;
}
}
@@ -424,7 +420,9 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
typeid: &'ll Value,
) -> Self::Value {
let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
- self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid])
+ let type_checked_load =
+ self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid]);
+ self.extract_value(type_checked_load, 0)
}
fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
@@ -565,7 +563,7 @@ fn codegen_msvc_try<'ll>(
// module.
//
// When modifying, make sure that the type_name string exactly matches
- // the one used in src/libpanic_unwind/seh.rs.
+ // the one used in library/panic_unwind/src/seh.rs.
let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
let type_name = bx.const_bytes(b"rust_panic\0");
let type_info =
@@ -656,7 +654,7 @@ fn codegen_gnu_try<'ll>(
// Type indicator for the exception being thrown.
//
// The first value in this tuple is a pointer to the exception object
- // being thrown.  The second value is a "selector" indicating which of
+ // being thrown. The second value is a "selector" indicating which of
// the landing pad clauses the exception's type had been matched to.
// rust_try ignores the selector.
bx.switch_to_block(catch);
@@ -720,7 +718,7 @@ fn codegen_emcc_try<'ll>(
// Type indicator for the exception being thrown.
//
// The first value in this tuple is a pointer to the exception object
- // being thrown.  The second value is a "selector" indicating which of
+ // being thrown. The second value is a "selector" indicating which of
// the landing pad clauses the exception's type had been matched to.
bx.switch_to_block(catch);
let tydesc = bx.eh_catch_typeinfo();
@@ -836,40 +834,24 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
llret_ty: &'ll Type,
span: Span,
) -> Result<&'ll Value, ()> {
- // macros for error handling:
- #[allow(unused_macro_rules)]
- macro_rules! emit_error {
- ($msg: tt) => {
- emit_error!($msg, )
- };
- ($msg: tt, $($fmt: tt)*) => {
- span_invalid_monomorphization_error(
- bx.sess(), span,
- &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
- name, $($fmt)*));
- }
- }
-
macro_rules! return_error {
- ($($fmt: tt)*) => {
- {
- emit_error!($($fmt)*);
- return Err(());
- }
- }
+ ($diag: expr) => {{
+ bx.sess().emit_err($diag);
+ return Err(());
+ }};
}
macro_rules! require {
- ($cond: expr, $($fmt: tt)*) => {
+ ($cond: expr, $diag: expr) => {
if !$cond {
- return_error!($($fmt)*);
+ return_error!($diag);
}
};
}
macro_rules! require_simd {
- ($ty: expr, $position: expr) => {
- require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
+ ($ty: expr, $diag: expr) => {
+ require!($ty.is_simd(), $diag)
};
}
@@ -879,7 +861,11 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let arg_tys = sig.inputs();
if name == sym::simd_select_bitmask {
- require_simd!(arg_tys[1], "argument");
+ require_simd!(
+ arg_tys[1],
+ InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
+ );
+
let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
let expected_int_bits = (len.max(8) - 1).next_power_of_two();
@@ -900,12 +886,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
bx.load(int_ty, ptr, Align::ONE)
}
- _ => return_error!(
- "invalid bitmask `{}`, expected `u{}` or `[u8; {}]`",
+ _ => return_error!(InvalidMonomorphization::InvalidBitmask {
+ span,
+ name,
mask_ty,
expected_int_bits,
expected_bytes
- ),
+ }),
};
let i1 = bx.type_i1();
@@ -917,7 +904,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
// every intrinsic below takes a SIMD vector as its first argument
- require_simd!(arg_tys[0], "input");
+ require_simd!(arg_tys[0], InvalidMonomorphization::SimdInput { span, name, ty: arg_tys[0] });
let in_ty = arg_tys[0];
let comparison = match name {
@@ -932,23 +919,24 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
if let Some(cmp_op) = comparison {
- require_simd!(ret_ty, "return");
+ require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+
require!(
in_len == out_len,
- "expected return type with length {} (same as input type `{}`), \
- found `{}` with length {}",
- in_len,
- in_ty,
- ret_ty,
- out_len
+ InvalidMonomorphization::ReturnLengthInputType {
+ span,
+ name,
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ }
);
require!(
bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
- "expected return type with integer elements, found `{}` with non-integer `{}`",
- ret_ty,
- out_ty
+ InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
);
return Ok(compare_simd_types(
@@ -973,10 +961,11 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
span_bug!(span, "could not evaluate shuffle index array length")
})
}
- _ => return_error!(
- "simd_shuffle index must be an array of `u32`, got `{}`",
- args[2].layout.ty
- ),
+ _ => return_error!(InvalidMonomorphization::SimdShuffle {
+ span,
+ name,
+ ty: args[2].layout.ty
+ }),
}
} else {
stripped.parse().unwrap_or_else(|_| {
@@ -984,23 +973,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
})
};
- require_simd!(ret_ty, "return");
+ require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
require!(
out_len == n,
- "expected return type of length {}, found `{}` with length {}",
- n,
- ret_ty,
- out_len
+ InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
);
require!(
in_elem == out_ty,
- "expected return element type `{}` (element of input `{}`), \
- found `{}` with element type `{}`",
- in_elem,
- in_ty,
- ret_ty,
- out_ty
+ InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
);
let total_len = u128::from(in_len) * 2;
@@ -1013,15 +994,20 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let val = bx.const_get_elt(vector, i as u64);
match bx.const_to_opt_u128(val, true) {
None => {
- emit_error!("shuffle index #{} is not a constant", arg_idx);
+ bx.sess().emit_err(InvalidMonomorphization::ShuffleIndexNotConstant {
+ span,
+ name,
+ arg_idx,
+ });
None
}
Some(idx) if idx >= total_len => {
- emit_error!(
- "shuffle index #{} is out of bounds (limit {})",
+ bx.sess().emit_err(InvalidMonomorphization::ShuffleIndexOutOfBounds {
+ span,
+ name,
arg_idx,
- total_len
- );
+ total_len,
+ });
None
}
Some(idx) => Some(bx.const_i32(idx as i32)),
@@ -1042,10 +1028,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
if name == sym::simd_insert {
require!(
in_elem == arg_tys[2],
- "expected inserted type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- arg_tys[2]
+ InvalidMonomorphization::InsertedType {
+ span,
+ name,
+ in_elem,
+ in_ty,
+ out_ty: arg_tys[2]
+ }
);
return Ok(bx.insert_element(
args[0].immediate(),
@@ -1056,10 +1045,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
if name == sym::simd_extract {
require!(
ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
+ InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
}
@@ -1067,17 +1053,18 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
if name == sym::simd_select {
let m_elem_ty = in_elem;
let m_len = in_len;
- require_simd!(arg_tys[1], "argument");
+ require_simd!(
+ arg_tys[1],
+ InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
+ );
let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
require!(
m_len == v_len,
- "mismatched lengths: mask length `{}` != other vector length `{}`",
- m_len,
- v_len
+ InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
);
match m_elem_ty.kind() {
ty::Int(_) => {}
- _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
+ _ => return_error!(InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty }),
}
// truncate the mask to a vector of i1s
let i1 = bx.type_i1();
@@ -1109,11 +1096,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
args[0].immediate(),
i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
),
- _ => return_error!(
- "vector argument `{}`'s element type `{}`, expected integer element type",
+ _ => return_error!(InvalidMonomorphization::VectorArgument {
+ span,
+ name,
in_ty,
in_elem
- ),
+ }),
};
// Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
@@ -1148,12 +1136,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));
return Ok(bx.load(array_ty, ptr, Align::ONE));
}
- _ => return_error!(
- "cannot return `{}`, expected `u{}` or `[u8; {}]`",
+ _ => return_error!(InvalidMonomorphization::CannotReturn {
+ span,
+ name,
ret_ty,
expected_int_bits,
expected_bytes
- ),
+ }),
}
}
@@ -1166,25 +1155,11 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
span: Span,
args: &[OperandRef<'tcx, &'ll Value>],
) -> Result<&'ll Value, ()> {
- #[allow(unused_macro_rules)]
- macro_rules! emit_error {
- ($msg: tt) => {
- emit_error!($msg, )
- };
- ($msg: tt, $($fmt: tt)*) => {
- span_invalid_monomorphization_error(
- bx.sess(), span,
- &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
- name, $($fmt)*));
- }
- }
macro_rules! return_error {
- ($($fmt: tt)*) => {
- {
- emit_error!($($fmt)*);
- return Err(());
- }
- }
+ ($diag: expr) => {{
+ bx.sess().emit_err($diag);
+ return Err(());
+ }};
}
let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
@@ -1192,16 +1167,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
match f.bit_width() {
32 => ("f32", elem_ty),
64 => ("f64", elem_ty),
- _ => {
- return_error!(
- "unsupported element type `{}` of floating-point vector `{}`",
- f.name_str(),
- in_ty
- );
- }
+ _ => return_error!(InvalidMonomorphization::FloatingPointVector {
+ span,
+ name,
+ f_ty: *f,
+ in_ty,
+ }),
}
} else {
- return_error!("`{}` is not a floating-point type", in_ty);
+ return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
};
let vec_ty = bx.type_vector(elem_ty, in_len);
@@ -1223,7 +1197,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
- _ => return_error!("unrecognized intrinsic `{}`", name),
+ _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
};
let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
@@ -1317,37 +1291,48 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
- require_simd!(in_ty, "first");
- require_simd!(arg_tys[1], "second");
- require_simd!(arg_tys[2], "third");
- require_simd!(ret_ty, "return");
+ require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
+ require_simd!(
+ arg_tys[1],
+ InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
+ );
+ require_simd!(
+ arg_tys[2],
+ InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
+ );
+ require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
// Of the same length:
let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
- "expected {} argument with length {} (same as input type `{}`), \
- found `{}` with length {}",
- "second",
- in_len,
- in_ty,
- arg_tys[1],
- out_len
+ InvalidMonomorphization::SecondArgumentLength {
+ span,
+ name,
+ in_len,
+ in_ty,
+ arg_ty: arg_tys[1],
+ out_len
+ }
);
require!(
in_len == out_len2,
- "expected {} argument with length {} (same as input type `{}`), \
- found `{}` with length {}",
- "third",
- in_len,
- in_ty,
- arg_tys[2],
- out_len2
+ InvalidMonomorphization::ThirdArgumentLength {
+ span,
+ name,
+ in_len,
+ in_ty,
+ arg_ty: arg_tys[2],
+ out_len: out_len2
+ }
);
// The return type must match the first argument type
- require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
+ require!(
+ ret_ty == in_ty,
+ InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
+ );
// This counts how many pointers
fn ptr_count(t: Ty<'_>) -> usize {
@@ -1374,15 +1359,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
_ => {
require!(
false,
- "expected element type `{}` of second argument `{}` \
- to be a pointer to the element type `{}` of the first \
- argument `{}`, found `{}` != `*_ {}`",
- element_ty1,
- arg_tys[1],
- in_elem,
- in_ty,
- element_ty1,
- in_elem
+ InvalidMonomorphization::ExpectedElementType {
+ span,
+ name,
+ expected_element: element_ty1,
+ second_arg: arg_tys[1],
+ in_elem,
+ in_ty,
+ mutability: ExpectedPointerMutability::Not,
+ }
);
unreachable!();
}
@@ -1398,10 +1383,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
_ => {
require!(
false,
- "expected element type `{}` of third argument `{}` \
- to be a signed integer type",
- element_ty2,
- arg_tys[2]
+ InvalidMonomorphization::ThirdArgElementType {
+ span,
+ name,
+ expected_element: element_ty2,
+ third_arg: arg_tys[2]
+ }
);
}
}
@@ -1450,32 +1437,40 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
- require_simd!(in_ty, "first");
- require_simd!(arg_tys[1], "second");
- require_simd!(arg_tys[2], "third");
+ require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
+ require_simd!(
+ arg_tys[1],
+ InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
+ );
+ require_simd!(
+ arg_tys[2],
+ InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
+ );
// Of the same length:
let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
require!(
in_len == element_len1,
- "expected {} argument with length {} (same as input type `{}`), \
- found `{}` with length {}",
- "second",
- in_len,
- in_ty,
- arg_tys[1],
- element_len1
+ InvalidMonomorphization::SecondArgumentLength {
+ span,
+ name,
+ in_len,
+ in_ty,
+ arg_ty: arg_tys[1],
+ out_len: element_len1
+ }
);
require!(
in_len == element_len2,
- "expected {} argument with length {} (same as input type `{}`), \
- found `{}` with length {}",
- "third",
- in_len,
- in_ty,
- arg_tys[2],
- element_len2
+ InvalidMonomorphization::ThirdArgumentLength {
+ span,
+ name,
+ in_len,
+ in_ty,
+ arg_ty: arg_tys[2],
+ out_len: element_len2
+ }
);
// This counts how many pointers
@@ -1506,15 +1501,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
_ => {
require!(
false,
- "expected element type `{}` of second argument `{}` \
- to be a pointer to the element type `{}` of the first \
- argument `{}`, found `{}` != `*mut {}`",
- element_ty1,
- arg_tys[1],
- in_elem,
- in_ty,
- element_ty1,
- in_elem
+ InvalidMonomorphization::ExpectedElementType {
+ span,
+ name,
+ expected_element: element_ty1,
+ second_arg: arg_tys[1],
+ in_elem,
+ in_ty,
+ mutability: ExpectedPointerMutability::Mut,
+ }
);
unreachable!();
}
@@ -1529,10 +1524,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
_ => {
require!(
false,
- "expected element type `{}` of third argument `{}` \
- be a signed integer type",
- element_ty2,
- arg_tys[2]
+ InvalidMonomorphization::ThirdArgElementType {
+ span,
+ name,
+ expected_element: element_ty2,
+ third_arg: arg_tys[2]
+ }
);
}
}
@@ -1579,10 +1576,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
if name == sym::$name {
require!(
ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
+ InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
@@ -1605,25 +1599,28 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
32 => bx.const_real(bx.type_f32(), $identity),
64 => bx.const_real(bx.type_f64(), $identity),
v => return_error!(
- r#"
-unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
- sym::$name,
- in_ty,
- in_elem,
- v,
- ret_ty
+ InvalidMonomorphization::UnsupportedSymbolOfSize {
+ span,
+ name,
+ symbol: sym::$name,
+ in_ty,
+ in_elem,
+ size: v,
+ ret_ty
+ }
),
}
};
Ok(bx.$float_reduce(acc, args[0].immediate()))
}
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
+ _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+ span,
+ name,
+ symbol: sym::$name,
in_ty,
in_elem,
ret_ty
- ),
+ }),
};
}
};
@@ -1651,22 +1648,20 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
if name == sym::$name {
require!(
ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
+ InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
return match in_elem.kind() {
ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
+ _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+ span,
+ name,
+ symbol: sym::$name,
in_ty,
in_elem,
ret_ty
- ),
+ }),
};
}
};
@@ -1684,22 +1679,20 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
let input = if !$boolean {
require!(
ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
+ InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
args[0].immediate()
} else {
match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {}
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
+ _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+ span,
+ name,
+ symbol: sym::$name,
in_ty,
in_elem,
ret_ty
- ),
+ }),
}
// boolean reductions operate on vectors of i1s:
@@ -1712,13 +1705,14 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
let r = bx.$red(input);
Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
}
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
+ _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+ span,
+ name,
+ symbol: sym::$name,
in_ty,
in_elem,
ret_ty
- ),
+ }),
};
}
};
@@ -1731,16 +1725,18 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
bitwise_red!(simd_reduce_any: vector_reduce_or, true);
if name == sym::simd_cast_ptr {
- require_simd!(ret_ty, "return");
+ require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
- "expected return type with length {} (same as input type `{}`), \
- found `{}` with length {}",
- in_len,
- in_ty,
- ret_ty,
- out_len
+ InvalidMonomorphization::ReturnLengthInputType {
+ span,
+ name,
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ }
);
match in_elem.kind() {
@@ -1749,9 +1745,14 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
});
assert!(!check_sized); // we are in codegen, so we shouldn't see these types
- require!(metadata.is_unit(), "cannot cast fat pointer `{}`", in_elem)
+ require!(
+ metadata.is_unit(),
+ InvalidMonomorphization::CastFatPointer { span, name, ty: in_elem }
+ );
+ }
+ _ => {
+ return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
}
- _ => return_error!("expected pointer, got `{}`", in_elem),
}
match out_elem.kind() {
ty::RawPtr(p) => {
@@ -1759,9 +1760,14 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
});
assert!(!check_sized); // we are in codegen, so we shouldn't see these types
- require!(metadata.is_unit(), "cannot cast to fat pointer `{}`", out_elem)
+ require!(
+ metadata.is_unit(),
+ InvalidMonomorphization::CastFatPointer { span, name, ty: out_elem }
+ );
+ }
+ _ => {
+ return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
}
- _ => return_error!("expected pointer, got `{}`", out_elem),
}
if in_elem == out_elem {
@@ -1772,66 +1778,76 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
}
if name == sym::simd_expose_addr {
- require_simd!(ret_ty, "return");
+ require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
- "expected return type with length {} (same as input type `{}`), \
- found `{}` with length {}",
- in_len,
- in_ty,
- ret_ty,
- out_len
+ InvalidMonomorphization::ReturnLengthInputType {
+ span,
+ name,
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ }
);
match in_elem.kind() {
ty::RawPtr(_) => {}
- _ => return_error!("expected pointer, got `{}`", in_elem),
+ _ => {
+ return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
+ }
}
match out_elem.kind() {
ty::Uint(ty::UintTy::Usize) => {}
- _ => return_error!("expected `usize`, got `{}`", out_elem),
+ _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
}
return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
}
if name == sym::simd_from_exposed_addr {
- require_simd!(ret_ty, "return");
+ require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
- "expected return type with length {} (same as input type `{}`), \
- found `{}` with length {}",
- in_len,
- in_ty,
- ret_ty,
- out_len
+ InvalidMonomorphization::ReturnLengthInputType {
+ span,
+ name,
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ }
);
match in_elem.kind() {
ty::Uint(ty::UintTy::Usize) => {}
- _ => return_error!("expected `usize`, got `{}`", in_elem),
+ _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
}
match out_elem.kind() {
ty::RawPtr(_) => {}
- _ => return_error!("expected pointer, got `{}`", out_elem),
+ _ => {
+ return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
+ }
}
return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
}
if name == sym::simd_cast || name == sym::simd_as {
- require_simd!(ret_ty, "return");
+ require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
- "expected return type with length {} (same as input type `{}`), \
- found `{}` with length {}",
- in_len,
- in_ty,
- ret_ty,
- out_len
+ InvalidMonomorphization::ReturnLengthInputType {
+ span,
+ name,
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ }
);
// casting cares about nominal type, not just structural type
if in_elem == out_elem {
@@ -1910,11 +1926,14 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
}
require!(
false,
- "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
- in_ty,
- in_elem,
- ret_ty,
- out_elem
+ InvalidMonomorphization::UnsupportedCast {
+ span,
+ name,
+ in_ty,
+ in_elem,
+ ret_ty,
+ out_elem
+ }
);
}
macro_rules! arith_binary {
@@ -1926,10 +1945,10 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
})*
_ => {},
}
- require!(false,
- "unsupported operation on `{}` with element `{}`",
- in_ty,
- in_elem)
+ require!(
+ false,
+ InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
+ );
})*
}
}
@@ -1957,10 +1976,10 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
})*
_ => {},
}
- require!(false,
- "unsupported operation on `{}` with element `{}`",
- in_ty,
- in_elem)
+ require!(
+ false,
+ InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
+ );
})*
}
}
@@ -1998,12 +2017,12 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
_ => {
- return_error!(
- "expected element type `{}` of vector type `{}` \
- to be a signed or unsigned integer type",
- arg_tys[0].simd_size_and_type(bx.tcx()).1,
- arg_tys[0]
- );
+ return_error!(InvalidMonomorphization::ExpectedVectorElementType {
+ span,
+ name,
+ expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1,
+ vector_type: arg_tys[0]
+ });
}
};
let llvm_intrinsic = &format!(
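Note on the hunks above: the old ad-hoc format-string errors in the SIMD intrinsic checks are replaced with structured InvalidMonomorphization variants, so each failure carries its span, the intrinsic name, and the offending types as typed fields rather than pre-rendered strings. A minimal sketch of how one such variant might be declared with the diagnostic derive; the slug, field names, and module placement here are assumptions for illustration, not the patch's actual definitions:

    use rustc_macros::Diagnostic;
    use rustc_middle::ty::Ty;
    use rustc_span::{Span, Symbol};

    // Illustrative only: one structured variant per former format-string error.
    #[derive(Diagnostic)]
    pub enum InvalidMonomorphization<'tcx> {
        // E0511 is the long-standing "invalid monomorphization of intrinsic" code.
        #[diag(codegen_llvm_invalid_monomorphization_expected_pointer, code = "E0511")]
        ExpectedPointer {
            #[primary_span]
            span: Span,
            name: Symbol,
            ty: Ty<'tcx>,
        },
        // ...ExpectedUsize, ReturnLengthInputType, UnsupportedCast,
        // UnsupportedOperation, ExpectedVectorElementType, and so on.
    }

With variants like these, the require!/return_error! macros only need to hand the value to the diagnostic emitter instead of formatting a message at every call site.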
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 8a9392255..8b4861962 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -79,6 +79,7 @@ pub enum LLVMModFlagBehavior {
Append = 5,
AppendUnique = 6,
Max = 7,
+ Min = 8,
}
// Consts for the LLVM CallConv type, pre-cast to usize.
@@ -427,6 +428,7 @@ pub enum MetadataType {
MD_type = 19,
MD_vcall_visibility = 28,
MD_noundef = 29,
+ MD_kcfi_type = 36,
}
/// LLVMRustAsmDialect
@@ -1063,6 +1065,7 @@ extern "C" {
pub fn LLVMGlobalSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
pub fn LLVMRustGlobalAddMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
pub fn LLVMValueAsMetadata(Node: &Value) -> &Metadata;
+ pub fn LLVMIsAFunction(Val: &Value) -> Option<&Value>;
// Operations on constants of any type
pub fn LLVMConstNull(Ty: &Type) -> &Value;
@@ -1273,7 +1276,8 @@ extern "C" {
NumArgs: c_uint,
Then: &'a BasicBlock,
Catch: &'a BasicBlock,
- Bundle: Option<&OperandBundleDef<'a>>,
+ OpBundles: *const Option<&OperandBundleDef<'a>>,
+ NumOpBundles: c_uint,
Name: *const c_char,
) -> &'a Value;
pub fn LLVMBuildLandingPad<'a>(
@@ -1643,7 +1647,8 @@ extern "C" {
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
- Bundle: Option<&OperandBundleDef<'a>>,
+ OpBundles: *const Option<&OperandBundleDef<'a>>,
+ NumOpBundles: c_uint,
) -> &'a Value;
pub fn LLVMRustBuildMemCpy<'a>(
B: &Builder<'a>,
@@ -2385,11 +2390,11 @@ extern "C" {
pub fn LLVMRustSetDataLayoutFromTargetMachine<'a>(M: &'a Module, TM: &'a TargetMachine);
- pub fn LLVMRustBuildOperandBundleDef<'a>(
+ pub fn LLVMRustBuildOperandBundleDef(
Name: *const c_char,
- Inputs: *const &'a Value,
+ Inputs: *const &'_ Value,
NumInputs: c_uint,
- ) -> &'a mut OperandBundleDef<'a>;
+ ) -> &mut OperandBundleDef<'_>;
pub fn LLVMRustFreeOperandBundleDef<'a>(Bundle: &'a mut OperandBundleDef<'a>);
pub fn LLVMRustPositionBuilderAtStart<'a>(B: &Builder<'a>, BB: &'a BasicBlock);
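The call and invoke builders above switch from taking at most one optional operand bundle to taking a pointer-plus-count pair, so a single call site can carry several bundles at once (for example a funclet bundle together with a kcfi bundle). A hedged sketch of how the Rust side might now gather them; the helper and variable names are illustrative, not taken from this patch:

    use smallvec::SmallVec;

    use crate::llvm::OperandBundleDef; // assuming the crate's FFI wrapper type

    // Illustrative only: collect whichever bundles this call site actually has.
    fn collect_bundles<'a>(
        funclet: Option<&'a OperandBundleDef<'a>>,
        kcfi: Option<&'a OperandBundleDef<'a>>,
    ) -> SmallVec<[Option<&'a OperandBundleDef<'a>>; 2]> {
        let mut bundles = SmallVec::new();
        if funclet.is_some() {
            bundles.push(funclet);
        }
        if kcfi.is_some() {
            bundles.push(kcfi);
        }
        bundles
    }

    // bundles.as_ptr() and bundles.len() as c_uint then become the new
    // OpBundles / NumOpBundles arguments of the call and invoke builders.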
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 2fa602520..79b243f73 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -81,10 +81,10 @@ unsafe fn configure_llvm(sess: &Session) {
};
// Set the llvm "program name" to make usage and invalid argument messages more clear.
add("rustc -Cllvm-args=\"...\" with", true);
- if sess.time_llvm_passes() {
+ if sess.opts.unstable_opts.time_llvm_passes {
add("-time-passes", false);
}
- if sess.print_llvm_passes() {
+ if sess.opts.unstable_opts.print_llvm_passes {
add("-debug-pass=Structure", false);
}
if sess.target.generate_arange_section
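For reference, the two fields read here are the unstable options behind the -Z time-llvm-passes and -Z print-llvm-passes command-line flags; the change reads them directly from sess.opts.unstable_opts rather than through the previous Session accessor methods, with the resulting -time-passes and -debug-pass=Structure arguments still being forwarded to LLVM as before.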
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 5772b7e1d..ff111d96f 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -316,4 +316,19 @@ impl<'ll, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'ll, 'tcx> {
)
}
}
+
+ fn set_kcfi_type_metadata(&self, function: &'ll Value, kcfi_typeid: u32) {
+ let kcfi_type_metadata = self.const_u32(kcfi_typeid);
+ unsafe {
+ llvm::LLVMGlobalSetMetadata(
+ function,
+ llvm::MD_kcfi_type as c_uint,
+ llvm::LLVMMDNodeInContext2(
+ self.llcx,
+ &llvm::LLVMValueAsMetadata(kcfi_type_metadata),
+ 1,
+ ),
+ )
+ }
+ }
}
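The new set_kcfi_type_metadata attaches a !kcfi_type metadata node (kind id MD_kcfi_type = 36, added in the ffi changes above) holding the 32-bit KCFI type id to a function. A hedged sketch of what a declaration-time caller could look like; is_sanitizer_kcfi_enabled, kcfi_typeid_for_fnabi, and the surrounding plumbing are assumptions modelled on the existing CFI type-id helpers, not part of this hunk:

    use rustc_middle::ty::Ty;
    use rustc_target::abi::call::FnAbi;

    use crate::context::CodegenCx;
    use crate::llvm::Value;

    // Illustrative only: attach the KCFI type id when the sanitizer is enabled.
    fn attach_kcfi_typeid<'ll, 'tcx>(
        cx: &CodegenCx<'ll, 'tcx>,
        llfn: &'ll Value,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
    ) {
        if cx.tcx.sess.is_sanitizer_kcfi_enabled() {
            // Assumed helper: hashes the function's ABI signature down to the
            // u32 that kernel control-flow integrity checks at indirect calls.
            let kcfi_typeid = kcfi_typeid_for_fnabi(cx.tcx, fn_abi);
            cx.set_kcfi_type_metadata(llfn, kcfi_typeid);
        }
    }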
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 182adf817..75cd5df97 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -352,10 +352,10 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
let scalar = [a, b][index];
// Make sure to return the same type `immediate_llvm_type` would when
- // dealing with an immediate pair. This means that `(bool, bool)` is
+ // dealing with an immediate pair. This means that `(bool, bool)` is
// effectively represented as `{i8, i8}` in memory and two `i1`s as an
// immediate, just like `bool` is typically `i8` in memory and only `i1`
- // when immediate. We need to load/store `bool` as `i8` to avoid
+ // when immediate. We need to load/store `bool` as `i8` to avoid
// crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
if immediate && scalar.is_bool() {
return cx.type_i1();
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index ceb3d5a84..b19398e68 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -175,6 +175,89 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
val
}
+fn emit_s390x_va_arg<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ list: OperandRef<'tcx, &'ll Value>,
+ target_ty: Ty<'tcx>,
+) -> &'ll Value {
+ // Implementation of the s390x ELF ABI calling convention for va_args; see
+ // https://github.com/IBM/s390x-abi (chapter 1.2.4)
+ let va_list_addr = list.immediate();
+ let va_list_layout = list.deref(bx.cx).layout;
+ let va_list_ty = va_list_layout.llvm_type(bx);
+ let layout = bx.cx.layout_of(target_ty);
+
+ let in_reg = bx.append_sibling_block("va_arg.in_reg");
+ let in_mem = bx.append_sibling_block("va_arg.in_mem");
+ let end = bx.append_sibling_block("va_arg.end");
+
+ // FIXME: vector ABI not yet supported.
+ let target_ty_size = bx.cx.size_of(target_ty).bytes();
+ let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
+ let unpadded_size = if indirect { 8 } else { target_ty_size };
+ let padded_size = 8;
+ let padding = padded_size - unpadded_size;
+
+ let gpr_type = indirect || !layout.is_single_fp_element(bx.cx);
+ let (max_regs, reg_count_field, reg_save_index, reg_padding) =
+ if gpr_type { (5, 0, 2, padding) } else { (4, 1, 16, 0) };
+
+ // Check whether the value was passed in a register or in memory.
+ let reg_count = bx.struct_gep(
+ va_list_ty,
+ va_list_addr,
+ va_list_layout.llvm_field_index(bx.cx, reg_count_field),
+ );
+ let reg_count_v = bx.load(bx.type_i64(), reg_count, Align::from_bytes(8).unwrap());
+ let use_regs = bx.icmp(IntPredicate::IntULT, reg_count_v, bx.const_u64(max_regs));
+ bx.cond_br(use_regs, in_reg, in_mem);
+
+ // Emit code to load the value if it was passed in a register.
+ bx.switch_to_block(in_reg);
+
+ // Work out the address of the value in the register save area.
+ let reg_ptr =
+ bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3));
+ let reg_ptr_v = bx.load(bx.type_i8p(), reg_ptr, bx.tcx().data_layout.pointer_align.abi);
+ let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
+ let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
+ let reg_addr = bx.gep(bx.type_i8(), reg_ptr_v, &[reg_off]);
+
+ // Update the register count.
+ let new_reg_count_v = bx.add(reg_count_v, bx.const_u64(1));
+ bx.store(new_reg_count_v, reg_count, Align::from_bytes(8).unwrap());
+ bx.br(end);
+
+ // Emit code to load the value if it was passed in memory.
+ bx.switch_to_block(in_mem);
+
+ // Work out the address of the value in the argument overflow area.
+ let arg_ptr =
+ bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 2));
+ let arg_ptr_v = bx.load(bx.type_i8p(), arg_ptr, bx.tcx().data_layout.pointer_align.abi);
+ let arg_off = bx.const_u64(padding);
+ let mem_addr = bx.gep(bx.type_i8(), arg_ptr_v, &[arg_off]);
+
+ // Update the argument overflow area pointer.
+ let arg_size = bx.cx().const_u64(padded_size);
+ let new_arg_ptr_v = bx.inbounds_gep(bx.type_i8(), arg_ptr_v, &[arg_size]);
+ bx.store(new_arg_ptr_v, arg_ptr, bx.tcx().data_layout.pointer_align.abi);
+ bx.br(end);
+
+ // Return the appropriate result.
+ bx.switch_to_block(end);
+ let val_addr = bx.phi(bx.type_i8p(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
+ let val_type = layout.llvm_type(bx);
+ let val_addr = if indirect {
+ let ptr_type = bx.cx.type_ptr_to(val_type);
+ let ptr_addr = bx.bitcast(val_addr, bx.cx.type_ptr_to(ptr_type));
+ bx.load(ptr_type, ptr_addr, bx.tcx().data_layout.pointer_align.abi)
+ } else {
+ bx.bitcast(val_addr, bx.cx.type_ptr_to(val_type))
+ };
+ bx.load(val_type, val_addr, layout.align.abi)
+}
+
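The field indices 0 through 3 passed to struct_gep above follow the s390x ELF va_list layout from the ABI document cited in the code: at most five GPR arguments (r2..r6, starting at offset 16 in the register save area) and four FPR arguments (f0/f2/f4/f6, starting at offset 128) are passed in registers, and everything else goes to the overflow area. For orientation, a transcription of that layout as a #[repr(C)] struct; this struct is a hedged aid, not part of the patch:

    // Illustrative transcription of the C `va_list` this code indexes into
    // (see https://github.com/IBM/s390x-abi); not part of the patch itself.
    #[repr(C)]
    struct S390xVaList {
        __gpr: i64,                   // field 0: GPR arguments consumed so far
        __fpr: i64,                   // field 1: FPR arguments consumed so far
        __overflow_arg_area: *mut u8, // field 2: next argument passed in memory
        __reg_save_area: *mut u8,     // field 3: base of the register save area
    }

In the function above, reg_count_field selects between __gpr and __fpr, and reg_save_index * 8 + reg_padding is the byte offset of the next register slot inside __reg_save_area.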
pub(super) fn emit_va_arg<'ll, 'tcx>(
bx: &mut Builder<'_, 'll, 'tcx>,
addr: OperandRef<'tcx, &'ll Value>,
@@ -200,6 +283,7 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
}
"aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
+ "s390x" => emit_s390x_va_arg(bx, addr, target_ty),
// Windows x86_64
"x86_64" if target.is_like_windows => {
let target_ty_size = bx.cx.size_of(target_ty).bytes();