Diffstat (limited to 'compiler/rustc_codegen_llvm')
-rw-r--r--  compiler/rustc_codegen_llvm/Cargo.toml | 36
-rw-r--r--  compiler/rustc_codegen_llvm/README.md | 7
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs | 599
-rw-r--r--  compiler/rustc_codegen_llvm/src/allocator.rs | 157
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs | 1037
-rw-r--r--  compiler/rustc_codegen_llvm/src/attributes.rs | 449
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/archive.rs | 383
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs | 936
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/profiling.rs | 58
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs | 1212
-rw-r--r--  compiler/rustc_codegen_llvm/src/base.rs | 173
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs | 1508
-rw-r--r--  compiler/rustc_codegen_llvm/src/callee.rs | 194
-rw-r--r--  compiler/rustc_codegen_llvm/src/common.rs | 359
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs | 577
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs | 1014
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs | 334
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs | 385
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs | 126
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/doc.md | 131
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs | 120
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 1618
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs | 514
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs | 437
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs | 441
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs | 267
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 614
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs | 48
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/utils.rs | 99
-rw-r--r--  compiler/rustc_codegen_llvm/src/declare.rs | 146
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 1924
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs | 442
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs | 105
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs | 213
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 2547
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/mod.rs | 318
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs | 562
-rw-r--r--  compiler/rustc_codegen_llvm/src/mono_item.rs | 150
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_.rs | 319
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs | 418
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs | 214
-rw-r--r--  compiler/rustc_codegen_llvm/src/value.rs | 32
42 files changed, 21223 insertions(+), 0 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/Cargo.toml b/compiler/rustc_codegen_llvm/Cargo.toml
new file mode 100644
index 000000000..f9a5463ef
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/Cargo.toml
@@ -0,0 +1,36 @@
+[package]
+name = "rustc_codegen_llvm"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+test = false
+doctest = false
+
+[dependencies]
+bitflags = "1.0"
+cstr = "0.2"
+libc = "0.2"
+libloading = "0.7.1"
+measureme = "10.0.0"
+tracing = "0.1"
+rustc_middle = { path = "../rustc_middle" }
+rustc-demangle = "0.1.21"
+rustc_attr = { path = "../rustc_attr" }
+rustc_codegen_ssa = { path = "../rustc_codegen_ssa" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_fs_util = { path = "../rustc_fs_util" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_llvm = { path = "../rustc_llvm" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_metadata = { path = "../rustc_metadata" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_session = { path = "../rustc_session" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_symbol_mangling = { path = "../rustc_symbol_mangling" }
+rustc_target = { path = "../rustc_target" }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_codegen_llvm/README.md b/compiler/rustc_codegen_llvm/README.md
new file mode 100644
index 000000000..afec60d01
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/README.md
@@ -0,0 +1,7 @@
+The `codegen` crate contains the code to convert from MIR into LLVM IR,
+and then from LLVM IR into machine code. In general it contains code
+that runs towards the end of the compilation process.
+
+For more information about how codegen works, see the [rustc dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/backend/codegen.html
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
new file mode 100644
index 000000000..9eb3574e7
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -0,0 +1,599 @@
+use crate::attributes;
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::llvm::{self, Attribute, AttributePlace};
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+
+use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::MemFlags;
+use rustc_middle::bug;
+use rustc_middle::ty::layout::LayoutOf;
+pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
+use rustc_middle::ty::Ty;
+use rustc_session::config;
+use rustc_target::abi::call::ArgAbi;
+pub use rustc_target::abi::call::*;
+use rustc_target::abi::{self, HasDataLayout, Int};
+pub use rustc_target::spec::abi::Abi;
+
+use libc::c_uint;
+use smallvec::SmallVec;
+
+pub trait ArgAttributesExt {
+ fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
+ fn apply_attrs_to_callsite(
+ &self,
+ idx: AttributePlace,
+ cx: &CodegenCx<'_, '_>,
+ callsite: &Value,
+ );
+}
+
+fn should_use_mutable_noalias(cx: &CodegenCx<'_, '_>) -> bool {
+ // LLVM prior to version 12 had known miscompiles in the presence of
+ // noalias attributes (see #54878), but we don't support earlier
+ // versions at all anymore. We now enable mutable noalias by default.
+ cx.tcx.sess.opts.unstable_opts.mutable_noalias.unwrap_or(true)
+}
+
+const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] =
+ [(ArgAttribute::InReg, llvm::AttributeKind::InReg)];
+
+const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 5] = [
+ (ArgAttribute::NoAlias, llvm::AttributeKind::NoAlias),
+ (ArgAttribute::NoCapture, llvm::AttributeKind::NoCapture),
+ (ArgAttribute::NonNull, llvm::AttributeKind::NonNull),
+ (ArgAttribute::ReadOnly, llvm::AttributeKind::ReadOnly),
+ (ArgAttribute::NoUndef, llvm::AttributeKind::NoUndef),
+];
+
+fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 8]> {
+ let mut regular = this.regular;
+
+ let mut attrs = SmallVec::new();
+
+ // ABI-affecting attributes must always be applied
+ for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
+ if regular.contains(attr) {
+ attrs.push(llattr.create_attr(cx.llcx));
+ }
+ }
+ if let Some(align) = this.pointee_align {
+ attrs.push(llvm::CreateAlignmentAttr(cx.llcx, align.bytes()));
+ }
+ match this.arg_ext {
+ ArgExtension::None => {}
+ ArgExtension::Zext => attrs.push(llvm::AttributeKind::ZExt.create_attr(cx.llcx)),
+ ArgExtension::Sext => attrs.push(llvm::AttributeKind::SExt.create_attr(cx.llcx)),
+ }
+
+ // Only apply remaining attributes when optimizing
+ if cx.sess().opts.optimize != config::OptLevel::No {
+ let deref = this.pointee_size.bytes();
+ if deref != 0 {
+ if regular.contains(ArgAttribute::NonNull) {
+ attrs.push(llvm::CreateDereferenceableAttr(cx.llcx, deref));
+ } else {
+ attrs.push(llvm::CreateDereferenceableOrNullAttr(cx.llcx, deref));
+ }
+ regular -= ArgAttribute::NonNull;
+ }
+ for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
+ if regular.contains(attr) {
+ attrs.push(llattr.create_attr(cx.llcx));
+ }
+ }
+ if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
+ attrs.push(llvm::AttributeKind::NoAlias.create_attr(cx.llcx));
+ }
+ }
+
+ attrs
+}
+
+impl ArgAttributesExt for ArgAttributes {
+ fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
+ let attrs = get_attrs(self, cx);
+ attributes::apply_to_llfn(llfn, idx, &attrs);
+ }
+
+ fn apply_attrs_to_callsite(
+ &self,
+ idx: AttributePlace,
+ cx: &CodegenCx<'_, '_>,
+ callsite: &Value,
+ ) {
+ let attrs = get_attrs(self, cx);
+ attributes::apply_to_callsite(callsite, idx, &attrs);
+ }
+}
+
+pub trait LlvmType {
+ fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
+}
+
+impl LlvmType for Reg {
+ fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
+ match self.kind {
+ RegKind::Integer => cx.type_ix(self.size.bits()),
+ RegKind::Float => match self.size.bits() {
+ 32 => cx.type_f32(),
+ 64 => cx.type_f64(),
+ _ => bug!("unsupported float: {:?}", self),
+ },
+ RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
+ }
+ }
+}
+
+impl LlvmType for CastTarget {
+ fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
+ let rest_ll_unit = self.rest.unit.llvm_type(cx);
+ let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
+ (0, 0)
+ } else {
+ (
+ self.rest.total.bytes() / self.rest.unit.size.bytes(),
+ self.rest.total.bytes() % self.rest.unit.size.bytes(),
+ )
+ };
+
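+        // Illustrative example (not part of the original comments): with a
+        // 4-byte `i32` unit, a 12-byte `rest.total` gives `rest_count = 3,
+        // rem_bytes = 0`, which lowers to `[3 x i32]` below; a 10-byte total
+        // gives `rest_count = 2, rem_bytes = 2`, producing `{ i32, i32, i16 }`.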
+ if self.prefix.iter().all(|x| x.is_none()) {
+ // Simplify to a single unit when there is no prefix and size <= unit size
+ if self.rest.total <= self.rest.unit.size {
+ return rest_ll_unit;
+ }
+
+ // Simplify to array when all chunks are the same size and type
+ if rem_bytes == 0 {
+ return cx.type_array(rest_ll_unit, rest_count);
+ }
+ }
+
+ // Create list of fields in the main structure
+ let mut args: Vec<_> = self
+ .prefix
+ .iter()
+ .flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)))
+ .chain((0..rest_count).map(|_| rest_ll_unit))
+ .collect();
+
+ // Append final integer
+ if rem_bytes != 0 {
+            // Only integers can really be split further.
+ assert_eq!(self.rest.unit.kind, RegKind::Integer);
+ args.push(cx.type_ix(rem_bytes * 8));
+ }
+
+ cx.type_struct(&args, false)
+ }
+}
+
+pub trait ArgAbiExt<'ll, 'tcx> {
+ fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+ fn store(
+ &self,
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ val: &'ll Value,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ );
+ fn store_fn_arg(
+ &self,
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ idx: &mut usize,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ );
+}
+
+impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+ /// Gets the LLVM type for a place of the original Rust type of
+ /// this argument/return, i.e., the result of `type_of::type_of`.
+ fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+ self.layout.llvm_type(cx)
+ }
+
+ /// Stores a direct/indirect value described by this ArgAbi into a
+ /// place for the original Rust type of this argument/return.
+    /// Can be used both for storing formal arguments into Rust variables
+    /// and for storing the results of call/invoke instructions into their
+    /// destinations.
+ fn store(
+ &self,
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ val: &'ll Value,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ ) {
+ if self.is_ignore() {
+ return;
+ }
+ if self.is_sized_indirect() {
+ OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
+ } else if self.is_unsized_indirect() {
+ bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
+ } else if let PassMode::Cast(cast) = self.mode {
+ // FIXME(eddyb): Figure out when the simpler Store is safe, clang
+ // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
+ let can_store_through_cast_ptr = false;
+ if can_store_through_cast_ptr {
+ let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
+ let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
+ bx.store(val, cast_dst, self.layout.align.abi);
+ } else {
+ // The actual return type is a struct, but the ABI
+ // adaptation code has cast it into some scalar type. The
+ // code that follows is the only reliable way I have
+ // found to do a transform like i64 -> {i32,i32}.
+ // Basically we dump the data onto the stack then memcpy it.
+ //
+ // Other approaches I tried:
+ // - Casting rust ret pointer to the foreign type and using Store
+ // is (a) unsafe if size of foreign type > size of rust type and
+ // (b) runs afoul of strict aliasing rules, yielding invalid
+ // assembly under -O (specifically, the store gets removed).
+ // - Truncating foreign type to correct integral type and then
+ // bitcasting to the struct type yields invalid cast errors.
+
+            // Instead, we allocate some scratch space...
+ let scratch_size = cast.size(bx);
+ let scratch_align = cast.align(bx);
+ let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
+ bx.lifetime_start(llscratch, scratch_size);
+
+ // ... where we first store the value...
+ bx.store(val, llscratch, scratch_align);
+
+ // ... and then memcpy it to the intended destination.
+ bx.memcpy(
+ dst.llval,
+ self.layout.align.abi,
+ llscratch,
+ scratch_align,
+ bx.const_usize(self.layout.size.bytes()),
+ MemFlags::empty(),
+ );
+
+ bx.lifetime_end(llscratch, scratch_size);
+ }
+ } else {
+ OperandValue::Immediate(val).store(bx, dst);
+ }
+ }
+
+ fn store_fn_arg(
+ &self,
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ idx: &mut usize,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ ) {
+ let mut next = || {
+ let val = llvm::get_param(bx.llfn(), *idx as c_uint);
+ *idx += 1;
+ val
+ };
+ match self.mode {
+ PassMode::Ignore => {}
+ PassMode::Pair(..) => {
+ OperandValue::Pair(next(), next()).store(bx, dst);
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+ }
+ PassMode::Direct(_)
+ | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
+ | PassMode::Cast(_) => {
+ let next_arg = next();
+ self.store(bx, next_arg, dst);
+ }
+ }
+ }
+}
+
+impl<'ll, 'tcx> ArgAbiMethods<'tcx> for Builder<'_, 'll, 'tcx> {
+ fn store_fn_arg(
+ &mut self,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ idx: &mut usize,
+ dst: PlaceRef<'tcx, Self::Value>,
+ ) {
+ arg_abi.store_fn_arg(self, idx, dst)
+ }
+ fn store_arg(
+ &mut self,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ val: &'ll Value,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ ) {
+ arg_abi.store(self, val, dst)
+ }
+ fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
+ arg_abi.memory_ty(self)
+ }
+}
+
+pub trait FnAbiLlvmExt<'ll, 'tcx> {
+ fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+ fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+ fn llvm_cconv(&self) -> llvm::CallConv;
+ fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
+ fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value);
+}
+
+impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
+ fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+ // Ignore "extra" args from the call site for C variadic functions.
+ // Only the "fixed" args are part of the LLVM function signature.
+ let args = if self.c_variadic { &self.args[..self.fixed_count] } else { &self.args };
+
+ let args_capacity: usize = args.iter().map(|arg|
+ if arg.pad.is_some() { 1 } else { 0 } +
+ if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
+ ).sum();
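+        // Each argument with padding takes one extra LLVM parameter, and an
+        // argument in `PassMode::Pair` (e.g. a fat pointer split into data
+        // pointer and metadata) takes two.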
+ let mut llargument_tys = Vec::with_capacity(
+ if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 } + args_capacity,
+ );
+
+ let llreturn_ty = match self.ret.mode {
+ PassMode::Ignore => cx.type_void(),
+ PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
+ PassMode::Cast(cast) => cast.llvm_type(cx),
+ PassMode::Indirect { .. } => {
+ llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+ cx.type_void()
+ }
+ };
+
+ for arg in args {
+ // add padding
+ if let Some(ty) = arg.pad {
+ llargument_tys.push(ty.llvm_type(cx));
+ }
+
+ let llarg_ty = match arg.mode {
+ PassMode::Ignore => continue,
+ PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
+ PassMode::Pair(..) => {
+ llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
+ llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
+ continue;
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
+ let ptr_layout = cx.layout_of(ptr_ty);
+ llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
+ llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
+ continue;
+ }
+ PassMode::Cast(cast) => cast.llvm_type(cx),
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ cx.type_ptr_to(arg.memory_ty(cx))
+ }
+ };
+ llargument_tys.push(llarg_ty);
+ }
+
+ if self.c_variadic {
+ cx.type_variadic_func(&llargument_tys, llreturn_ty)
+ } else {
+ cx.type_func(&llargument_tys, llreturn_ty)
+ }
+ }
+
+ fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+ unsafe {
+ llvm::LLVMPointerType(
+ self.llvm_type(cx),
+ cx.data_layout().instruction_address_space.0 as c_uint,
+ )
+ }
+ }
+
+ fn llvm_cconv(&self) -> llvm::CallConv {
+ match self.conv {
+ Conv::C | Conv::Rust | Conv::CCmseNonSecureCall => llvm::CCallConv,
+ Conv::RustCold => llvm::ColdCallConv,
+ Conv::AmdGpuKernel => llvm::AmdGpuKernel,
+ Conv::AvrInterrupt => llvm::AvrInterrupt,
+ Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
+ Conv::ArmAapcs => llvm::ArmAapcsCallConv,
+ Conv::Msp430Intr => llvm::Msp430Intr,
+ Conv::PtxKernel => llvm::PtxKernel,
+ Conv::X86Fastcall => llvm::X86FastcallCallConv,
+ Conv::X86Intr => llvm::X86_Intr,
+ Conv::X86Stdcall => llvm::X86StdcallCallConv,
+ Conv::X86ThisCall => llvm::X86_ThisCall,
+ Conv::X86VectorCall => llvm::X86_VectorCall,
+ Conv::X86_64SysV => llvm::X86_64_SysV,
+ Conv::X86_64Win64 => llvm::X86_64_Win64,
+ }
+ }
+
+ fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
+ let mut func_attrs = SmallVec::<[_; 2]>::new();
+ if self.ret.layout.abi.is_uninhabited() {
+ func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
+ }
+ if !self.can_unwind {
+ func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
+ }
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });
+
+ let mut i = 0;
+ let mut apply = |attrs: &ArgAttributes| {
+ attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), cx, llfn);
+ i += 1;
+ i - 1
+ };
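+        // `apply` hands back the argument index it consumed, so ABI
+        // attributes that need a type operand (`sret` and `byval` below) can
+        // be attached to the same argument slot.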
+ match self.ret.mode {
+ PassMode::Direct(ref attrs) => {
+ attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
+ }
+ PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
+ assert!(!on_stack);
+ let i = apply(attrs);
+ let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
+ }
+ PassMode::Cast(cast) => {
+ cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
+ }
+ _ => {}
+ }
+ for arg in &self.args {
+ if arg.pad.is_some() {
+ apply(&ArgAttributes::new());
+ }
+ match arg.mode {
+ PassMode::Ignore => {}
+ PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
+ let i = apply(attrs);
+ let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
+ }
+ PassMode::Direct(ref attrs)
+ | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
+ apply(attrs);
+ }
+ PassMode::Indirect { ref attrs, extra_attrs: Some(ref extra_attrs), on_stack } => {
+ assert!(!on_stack);
+ apply(attrs);
+ apply(extra_attrs);
+ }
+ PassMode::Pair(ref a, ref b) => {
+ apply(a);
+ apply(b);
+ }
+ PassMode::Cast(cast) => {
+ apply(&cast.attrs);
+ }
+ }
+ }
+ }
+
+ fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) {
+ let mut func_attrs = SmallVec::<[_; 2]>::new();
+ if self.ret.layout.abi.is_uninhabited() {
+ func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx));
+ }
+ if !self.can_unwind {
+ func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(bx.cx.llcx));
+ }
+ attributes::apply_to_callsite(callsite, llvm::AttributePlace::Function, &{ func_attrs });
+
+ let mut i = 0;
+ let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
+ attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), cx, callsite);
+ i += 1;
+ i - 1
+ };
+ match self.ret.mode {
+ PassMode::Direct(ref attrs) => {
+ attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
+ }
+ PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
+ assert!(!on_stack);
+ let i = apply(bx.cx, attrs);
+ let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
+ attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
+ }
+ PassMode::Cast(cast) => {
+ cast.attrs.apply_attrs_to_callsite(
+ llvm::AttributePlace::ReturnValue,
+ &bx.cx,
+ callsite,
+ );
+ }
+ _ => {}
+ }
+ if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
+ // If the value is a boolean, the range is 0..2 and that ultimately
+        // becomes 0..0 when the type becomes i1, which would be rejected
+ // by the LLVM verifier.
+ if let Int(..) = scalar.primitive() {
+ if !scalar.is_bool() && !scalar.is_always_valid(bx) {
+ bx.range_metadata(callsite, scalar.valid_range(bx));
+ }
+ }
+ }
+ for arg in &self.args {
+ if arg.pad.is_some() {
+ apply(bx.cx, &ArgAttributes::new());
+ }
+ match arg.mode {
+ PassMode::Ignore => {}
+ PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
+ let i = apply(bx.cx, attrs);
+ let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
+ attributes::apply_to_callsite(
+ callsite,
+ llvm::AttributePlace::Argument(i),
+ &[byval],
+ );
+ }
+ PassMode::Direct(ref attrs)
+ | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
+ apply(bx.cx, attrs);
+ }
+ PassMode::Indirect {
+ ref attrs,
+ extra_attrs: Some(ref extra_attrs),
+ on_stack: _,
+ } => {
+ apply(bx.cx, attrs);
+ apply(bx.cx, extra_attrs);
+ }
+ PassMode::Pair(ref a, ref b) => {
+ apply(bx.cx, a);
+ apply(bx.cx, b);
+ }
+ PassMode::Cast(cast) => {
+ apply(bx.cx, &cast.attrs);
+ }
+ }
+ }
+
+ let cconv = self.llvm_cconv();
+ if cconv != llvm::CCallConv {
+ llvm::SetInstructionCallConv(callsite, cconv);
+ }
+
+ if self.conv == Conv::CCmseNonSecureCall {
+ // This will probably get ignored on all targets but those supporting the TrustZone-M
+ // extension (thumbv8m targets).
+ let cmse_nonsecure_call = llvm::CreateAttrString(bx.cx.llcx, "cmse_nonsecure_call");
+ attributes::apply_to_callsite(
+ callsite,
+ llvm::AttributePlace::Function,
+ &[cmse_nonsecure_call],
+ );
+ }
+
+ // Some intrinsics require that an elementtype attribute (with the pointee type of a
+        // pointer argument) be added to the callsite.
+ let element_type_index = unsafe { llvm::LLVMRustGetElementTypeArgIndex(callsite) };
+ if element_type_index >= 0 {
+ let arg_ty = self.args[element_type_index as usize].layout.ty;
+ let pointee_ty = arg_ty.builtin_deref(true).expect("Must be pointer argument").ty;
+ let element_type_attr = unsafe {
+ llvm::LLVMRustCreateElementTypeAttr(bx.llcx, bx.layout_of(pointee_ty).llvm_type(bx))
+ };
+ attributes::apply_to_callsite(
+ callsite,
+ llvm::AttributePlace::Argument(element_type_index as u32),
+ &[element_type_attr],
+ );
+ }
+ }
+}
+
+impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
+ fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
+ fn_abi.apply_attrs_callsite(self, callsite)
+ }
+
+ fn get_param(&mut self, index: usize) -> Self::Value {
+ llvm::get_param(self.llfn(), index as c_uint)
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
new file mode 100644
index 000000000..72961ae88
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -0,0 +1,157 @@
+use crate::attributes;
+use libc::c_uint;
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{DebugInfo, OomStrategy};
+use rustc_span::symbol::sym;
+
+use crate::debuginfo;
+use crate::llvm::{self, False, True};
+use crate::ModuleLlvm;
+
+pub(crate) unsafe fn codegen(
+ tcx: TyCtxt<'_>,
+ module_llvm: &mut ModuleLlvm,
+ module_name: &str,
+ kind: AllocatorKind,
+ has_alloc_error_handler: bool,
+) {
+ let llcx = &*module_llvm.llcx;
+ let llmod = module_llvm.llmod();
+ let usize = match tcx.sess.target.pointer_width {
+ 16 => llvm::LLVMInt16TypeInContext(llcx),
+ 32 => llvm::LLVMInt32TypeInContext(llcx),
+ 64 => llvm::LLVMInt64TypeInContext(llcx),
+ tws => bug!("Unsupported target word size for int: {}", tws),
+ };
+ let i8 = llvm::LLVMInt8TypeInContext(llcx);
+ let i8p = llvm::LLVMPointerType(i8, 0);
+ let void = llvm::LLVMVoidTypeInContext(llcx);
+
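+    // Each allocator method gets a small forwarding shim; conceptually
+    // (the callee name depends on `kind`, e.g. `__rg_alloc` when a
+    // `#[global_allocator]` is present):
+    //
+    //     fn __rust_alloc(size: usize, align: usize) -> *mut u8 {
+    //         __rg_alloc(size, align) // emitted as a tail call
+    //     }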
+ for method in ALLOCATOR_METHODS {
+ let mut args = Vec::with_capacity(method.inputs.len());
+ for ty in method.inputs.iter() {
+ match *ty {
+ AllocatorTy::Layout => {
+ args.push(usize); // size
+ args.push(usize); // align
+ }
+ AllocatorTy::Ptr => args.push(i8p),
+ AllocatorTy::Usize => args.push(usize),
+
+ AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+ }
+ }
+ let output = match method.output {
+ AllocatorTy::ResultPtr => Some(i8p),
+ AllocatorTy::Unit => None,
+
+ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+ panic!("invalid allocator output")
+ }
+ };
+ let ty = llvm::LLVMFunctionType(
+ output.unwrap_or(void),
+ args.as_ptr(),
+ args.len() as c_uint,
+ False,
+ );
+ let name = format!("__rust_{}", method.name);
+ let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
+
+ if tcx.sess.target.default_hidden_visibility {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ if tcx.sess.must_emit_unwind_tables() {
+ let uwtable = attributes::uwtable_attr(llcx);
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
+ }
+
+ let callee = kind.fn_name(method.name);
+ let callee =
+ llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
+ llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+ let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
+
+ let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
+ llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
+ let args = args
+ .iter()
+ .enumerate()
+ .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
+ .collect::<Vec<_>>();
+ let ret = llvm::LLVMRustBuildCall(
+ llbuilder,
+ ty,
+ callee,
+ args.as_ptr(),
+ args.len() as c_uint,
+ None,
+ );
+ llvm::LLVMSetTailCall(ret, True);
+ if output.is_some() {
+ llvm::LLVMBuildRet(llbuilder, ret);
+ } else {
+ llvm::LLVMBuildRetVoid(llbuilder);
+ }
+ llvm::LLVMDisposeBuilder(llbuilder);
+ }
+
+ // rust alloc error handler
+ let args = [usize, usize]; // size, align
+
+ let ty = llvm::LLVMFunctionType(void, args.as_ptr(), args.len() as c_uint, False);
+ let name = "__rust_alloc_error_handler";
+ let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
+ // -> ! DIFlagNoReturn
+ let no_return = llvm::AttributeKind::NoReturn.create_attr(llcx);
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]);
+
+ if tcx.sess.target.default_hidden_visibility {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ if tcx.sess.must_emit_unwind_tables() {
+ let uwtable = attributes::uwtable_attr(llcx);
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
+ }
+
+ let kind = if has_alloc_error_handler { AllocatorKind::Global } else { AllocatorKind::Default };
+ let callee = kind.fn_name(sym::oom);
+ let callee = llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
+ // -> ! DIFlagNoReturn
+ attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
+ llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+ let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
+
+ let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
+ llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
+ let args = args
+ .iter()
+ .enumerate()
+ .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
+ .collect::<Vec<_>>();
+ let ret =
+ llvm::LLVMRustBuildCall(llbuilder, ty, callee, args.as_ptr(), args.len() as c_uint, None);
+ llvm::LLVMSetTailCall(ret, True);
+ llvm::LLVMBuildRetVoid(llbuilder);
+ llvm::LLVMDisposeBuilder(llbuilder);
+
+ // __rust_alloc_error_handler_should_panic
+ let name = OomStrategy::SYMBOL;
+ let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8);
+ if tcx.sess.target.default_hidden_visibility {
+ llvm::LLVMRustSetVisibility(ll_g, llvm::Visibility::Hidden);
+ }
+ let val = tcx.sess.opts.unstable_opts.oom.should_panic();
+ let llval = llvm::LLVMConstInt(i8, val as u64, False);
+ llvm::LLVMSetInitializer(ll_g, llval);
+
+ if tcx.sess.opts.debuginfo != DebugInfo::None {
+ let dbg_cx = debuginfo::CodegenUnitDebugContext::new(llmod);
+ debuginfo::metadata::build_compile_unit_di_node(tcx, module_name, &dbg_cx);
+ dbg_cx.finalize(tcx.sess);
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
new file mode 100644
index 000000000..a53946995
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -0,0 +1,1037 @@
+use crate::attributes;
+use crate::builder::Builder;
+use crate::common::Funclet;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::{bug, span_bug, ty::Instance};
+use rustc_span::{Pos, Span};
+use rustc_target::abi::*;
+use rustc_target::asm::*;
+
+use libc::{c_char, c_uint};
+use smallvec::SmallVec;
+use tracing::debug;
+
+impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
+ fn codegen_inline_asm(
+ &mut self,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperandRef<'tcx, Self>],
+ options: InlineAsmOptions,
+ line_spans: &[Span],
+ instance: Instance<'_>,
+ dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>,
+ ) {
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+
+ // Collect the types of output operands
+ let mut constraints = vec![];
+ let mut clobbers = vec![];
+ let mut output_types = vec![];
+ let mut op_idx = FxHashMap::default();
+ let mut clobbered_x87 = false;
+ for (idx, op) in operands.iter().enumerate() {
+ match *op {
+ InlineAsmOperandRef::Out { reg, late, place } => {
+ let is_target_supported = |reg_class: InlineAsmRegClass| {
+ for &(_, feature) in reg_class.supported_types(asm_arch) {
+ if let Some(feature) = feature {
+ let codegen_fn_attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+ if self.tcx.sess.target_features.contains(&feature)
+ || codegen_fn_attrs.target_features.contains(&feature)
+ {
+ return true;
+ }
+ } else {
+ // Register class is unconditionally supported
+ return true;
+ }
+ }
+ false
+ };
+
+ let mut layout = None;
+ let ty = if let Some(ref place) = place {
+ layout = Some(&place.layout);
+ llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
+ } else if matches!(
+ reg.reg_class(),
+ InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::x87_reg
+ )
+ ) {
+ // Special handling for x87/mmx registers: we always
+ // clobber the whole set if one register is marked as
+ // clobbered. This is due to the way LLVM handles the
+ // FP stack in inline assembly.
+ if !clobbered_x87 {
+ clobbered_x87 = true;
+ clobbers.push("~{st}".to_string());
+ for i in 1..=7 {
+ clobbers.push(format!("~{{st({})}}", i));
+ }
+ }
+ continue;
+ } else if !is_target_supported(reg.reg_class())
+ || reg.reg_class().is_clobber_only(asm_arch)
+ {
+ // We turn discarded outputs into clobber constraints
+ // if the target feature needed by the register class is
+                        // disabled. This is necessary, as otherwise LLVM will
+                        // try
+ // to actually allocate a register for the dummy output.
+ assert!(matches!(reg, InlineAsmRegOrRegClass::Reg(_)));
+ clobbers.push(format!("~{}", reg_to_llvm(reg, None)));
+ continue;
+ } else {
+ // If the output is discarded, we don't really care what
+ // type is used. We're just using this to tell LLVM to
+ // reserve the register.
+ dummy_output_type(self.cx, reg.reg_class())
+ };
+ output_types.push(ty);
+ op_idx.insert(idx, constraints.len());
+ let prefix = if late { "=" } else { "=&" };
+ constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, layout)));
+ }
+ InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+ let layout = if let Some(ref out_place) = out_place {
+ &out_place.layout
+ } else {
+                        // LLVM requires tied operands to have the same type,
+ // so we just use the type of the input.
+ &in_value.layout
+ };
+ let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout);
+ output_types.push(ty);
+ op_idx.insert(idx, constraints.len());
+ let prefix = if late { "=" } else { "=&" };
+ constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
+ }
+ _ => {}
+ }
+ }
+
+ // Collect input operands
+ let mut inputs = vec![];
+ for (idx, op) in operands.iter().enumerate() {
+ match *op {
+ InlineAsmOperandRef::In { reg, value } => {
+ let llval =
+ llvm_fixup_input(self, value.immediate(), reg.reg_class(), &value.layout);
+ inputs.push(llval);
+ op_idx.insert(idx, constraints.len());
+ constraints.push(reg_to_llvm(reg, Some(&value.layout)));
+ }
+ InlineAsmOperandRef::InOut { reg, late: _, in_value, out_place: _ } => {
+ let value = llvm_fixup_input(
+ self,
+ in_value.immediate(),
+ reg.reg_class(),
+ &in_value.layout,
+ );
+ inputs.push(value);
+ constraints.push(format!("{}", op_idx[&idx]));
+ }
+ InlineAsmOperandRef::SymFn { instance } => {
+ inputs.push(self.cx.get_fn(instance));
+ op_idx.insert(idx, constraints.len());
+ constraints.push("s".to_string());
+ }
+ InlineAsmOperandRef::SymStatic { def_id } => {
+ inputs.push(self.cx.get_static(def_id));
+ op_idx.insert(idx, constraints.len());
+ constraints.push("s".to_string());
+ }
+ _ => {}
+ }
+ }
+
+ // Build the template string
+ let mut template_str = String::new();
+ for piece in template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref s) => {
+ if s.contains('$') {
+ for c in s.chars() {
+ if c == '$' {
+ template_str.push_str("$$");
+ } else {
+ template_str.push(c);
+ }
+ }
+ } else {
+ template_str.push_str(s)
+ }
+ }
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
+ match operands[operand_idx] {
+ InlineAsmOperandRef::In { reg, .. }
+ | InlineAsmOperandRef::Out { reg, .. }
+ | InlineAsmOperandRef::InOut { reg, .. } => {
+ let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
+ if let Some(modifier) = modifier {
+ template_str.push_str(&format!(
+ "${{{}:{}}}",
+ op_idx[&operand_idx], modifier
+ ));
+ } else {
+ template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx]));
+ }
+ }
+ InlineAsmOperandRef::Const { ref string } => {
+ // Const operands get injected directly into the template
+ template_str.push_str(string);
+ }
+ InlineAsmOperandRef::SymFn { .. }
+ | InlineAsmOperandRef::SymStatic { .. } => {
+ // Only emit the raw symbol name
+ template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
+ }
+ }
+ }
+ }
+ }
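+        // A Rust-level template such as `mov {0}, {1}` has now been rewritten
+        // into LLVM's `$`-based syntax, e.g. `mov ${0}, ${1}` (or `${0:k}`
+        // when a modifier applies), with literal `$` escaped as `$$`.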
+
+ constraints.append(&mut clobbers);
+ if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
+ match asm_arch {
+ InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
+ constraints.push("~{cc}".to_string());
+ }
+ InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
+ constraints.extend_from_slice(&[
+ "~{dirflag}".to_string(),
+ "~{fpsr}".to_string(),
+ "~{flags}".to_string(),
+ ]);
+ }
+ InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
+ constraints.extend_from_slice(&[
+ "~{vtype}".to_string(),
+ "~{vl}".to_string(),
+ "~{vxsat}".to_string(),
+ "~{vxrm}".to_string(),
+ ]);
+ }
+ InlineAsmArch::Avr => {
+ constraints.push("~{sreg}".to_string());
+ }
+ InlineAsmArch::Nvptx64 => {}
+ InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {}
+ InlineAsmArch::Hexagon => {}
+ InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
+ InlineAsmArch::S390x => {}
+ InlineAsmArch::SpirV => {}
+ InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {}
+ InlineAsmArch::Bpf => {}
+ InlineAsmArch::Msp430 => {
+ constraints.push("~{sr}".to_string());
+ }
+ }
+ }
+ if !options.contains(InlineAsmOptions::NOMEM) {
+ // This is actually ignored by LLVM, but it's probably best to keep
+ // it just in case. LLVM instead uses the ReadOnly/ReadNone
+ // attributes on the call instruction to optimize.
+ constraints.push("~{memory}".to_string());
+ }
+ let volatile = !options.contains(InlineAsmOptions::PURE);
+ let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
+ let output_type = match &output_types[..] {
+ [] => self.type_void(),
+ [ty] => ty,
+ tys => self.type_struct(tys, false),
+ };
+ let dialect = match asm_arch {
+ InlineAsmArch::X86 | InlineAsmArch::X86_64
+ if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
+ {
+ llvm::AsmDialect::Intel
+ }
+ _ => llvm::AsmDialect::Att,
+ };
+ let result = inline_asm_call(
+ self,
+ &template_str,
+ &constraints.join(","),
+ &inputs,
+ output_type,
+ volatile,
+ alignstack,
+ dialect,
+ line_spans,
+ options.contains(InlineAsmOptions::MAY_UNWIND),
+ dest_catch_funclet,
+ )
+ .unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));
+
+ let mut attrs = SmallVec::<[_; 2]>::new();
+ if options.contains(InlineAsmOptions::PURE) {
+ if options.contains(InlineAsmOptions::NOMEM) {
+ attrs.push(llvm::AttributeKind::ReadNone.create_attr(self.cx.llcx));
+ } else if options.contains(InlineAsmOptions::READONLY) {
+ attrs.push(llvm::AttributeKind::ReadOnly.create_attr(self.cx.llcx));
+ }
+ attrs.push(llvm::AttributeKind::WillReturn.create_attr(self.cx.llcx));
+ } else if options.contains(InlineAsmOptions::NOMEM) {
+ attrs.push(llvm::AttributeKind::InaccessibleMemOnly.create_attr(self.cx.llcx));
+ } else {
+ // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
+ }
+ attributes::apply_to_callsite(result, llvm::AttributePlace::Function, &{ attrs });
+
+ // Switch to the 'normal' basic block if we did an `invoke` instead of a `call`
+ if let Some((dest, _, _)) = dest_catch_funclet {
+ self.switch_to_block(dest);
+ }
+
+ // Write results to outputs
+ for (idx, op) in operands.iter().enumerate() {
+ if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
+ | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
+ {
+ let value = if output_types.len() == 1 {
+ result
+ } else {
+ self.extract_value(result, op_idx[&idx] as u64)
+ };
+ let value = llvm_fixup_output(self, value, reg.reg_class(), &place.layout);
+ OperandValue::Immediate(value).store(self, place);
+ }
+ }
+ }
+}
+
+impl<'tcx> AsmMethods<'tcx> for CodegenCx<'_, 'tcx> {
+ fn codegen_global_asm(
+ &self,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[GlobalAsmOperandRef<'tcx>],
+ options: InlineAsmOptions,
+ _line_spans: &[Span],
+ ) {
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+
+ // Default to Intel syntax on x86
+ let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
+ && !options.contains(InlineAsmOptions::ATT_SYNTAX);
+
+ // Build the template string
+ let mut template_str = String::new();
+ if intel_syntax {
+ template_str.push_str(".intel_syntax\n");
+ }
+ for piece in template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s),
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
+ match operands[operand_idx] {
+ GlobalAsmOperandRef::Const { ref string } => {
+ // Const operands get injected directly into the
+ // template. Note that we don't need to escape $
+ // here unlike normal inline assembly.
+ template_str.push_str(string);
+ }
+ GlobalAsmOperandRef::SymFn { instance } => {
+ let llval = self.get_fn(instance);
+ self.add_compiler_used_global(llval);
+ let symbol = llvm::build_string(|s| unsafe {
+ llvm::LLVMRustGetMangledName(llval, s);
+ })
+ .expect("symbol is not valid UTF-8");
+ template_str.push_str(&symbol);
+ }
+ GlobalAsmOperandRef::SymStatic { def_id } => {
+ let llval = self
+ .renamed_statics
+ .borrow()
+ .get(&def_id)
+ .copied()
+ .unwrap_or_else(|| self.get_static(def_id));
+ self.add_compiler_used_global(llval);
+ let symbol = llvm::build_string(|s| unsafe {
+ llvm::LLVMRustGetMangledName(llval, s);
+ })
+ .expect("symbol is not valid UTF-8");
+ template_str.push_str(&symbol);
+ }
+ }
+ }
+ }
+ }
+ if intel_syntax {
+ template_str.push_str("\n.att_syntax\n");
+ }
+
+ unsafe {
+ llvm::LLVMRustAppendModuleInlineAsm(
+ self.llmod,
+ template_str.as_ptr().cast(),
+ template_str.len(),
+ );
+ }
+ }
+}
+
+pub(crate) fn inline_asm_call<'ll>(
+ bx: &mut Builder<'_, 'll, '_>,
+ asm: &str,
+ cons: &str,
+ inputs: &[&'ll Value],
+ output: &'ll llvm::Type,
+ volatile: bool,
+ alignstack: bool,
+ dia: llvm::AsmDialect,
+ line_spans: &[Span],
+ unwind: bool,
+ dest_catch_funclet: Option<(
+ &'ll llvm::BasicBlock,
+ &'ll llvm::BasicBlock,
+ Option<&Funclet<'ll>>,
+ )>,
+) -> Option<&'ll Value> {
+ let volatile = if volatile { llvm::True } else { llvm::False };
+ let alignstack = if alignstack { llvm::True } else { llvm::False };
+ let can_throw = if unwind { llvm::True } else { llvm::False };
+
+ let argtys = inputs
+ .iter()
+ .map(|v| {
+ debug!("Asm Input Type: {:?}", *v);
+ bx.cx.val_ty(*v)
+ })
+ .collect::<Vec<_>>();
+
+ debug!("Asm Output Type: {:?}", output);
+ let fty = bx.cx.type_func(&argtys, output);
+ unsafe {
+ // Ask LLVM to verify that the constraints are well-formed.
+ let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
+ debug!("constraint verification result: {:?}", constraints_ok);
+ if constraints_ok {
+ if unwind && llvm_util::get_version() < (13, 0, 0) {
+ bx.cx.sess().span_fatal(
+ line_spans[0],
+ "unwinding from inline assembly is only supported on llvm >= 13.",
+ );
+ }
+
+ let v = llvm::LLVMRustInlineAsm(
+ fty,
+ asm.as_ptr().cast(),
+ asm.len(),
+ cons.as_ptr().cast(),
+ cons.len(),
+ volatile,
+ alignstack,
+ dia,
+ can_throw,
+ );
+
+ let call = if let Some((dest, catch, funclet)) = dest_catch_funclet {
+ bx.invoke(fty, v, inputs, dest, catch, funclet)
+ } else {
+ bx.call(fty, v, inputs, None)
+ };
+
+            // Store a marker in a metadata node so we can map LLVM errors
+ // back to source locations. See #17552.
+ let key = "srcloc";
+ let kind = llvm::LLVMGetMDKindIDInContext(
+ bx.llcx,
+ key.as_ptr() as *const c_char,
+ key.len() as c_uint,
+ );
+
+ // srcloc contains one integer for each line of assembly code.
+ // Unfortunately this isn't enough to encode a full span so instead
+ // we just encode the start position of each line.
+ // FIXME: Figure out a way to pass the entire line spans.
+ let mut srcloc = vec![];
+ if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 {
+ // LLVM inserts an extra line to add the ".intel_syntax", so add
+ // a dummy srcloc entry for it.
+ //
+ // Don't do this if we only have 1 line span since that may be
+ // due to the asm template string coming from a macro. LLVM will
+ // default to the first srcloc for lines that don't have an
+ // associated srcloc.
+ srcloc.push(bx.const_i32(0));
+ }
+ srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
+ let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
+ llvm::LLVMSetMetadata(call, kind, md);
+
+ Some(call)
+ } else {
+ // LLVM has detected an issue with our constraints, bail out
+ None
+ }
+ }
+}
+
+/// If the register is an xmm/ymm/zmm register then return its index.
+fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
+ match reg {
+ InlineAsmReg::X86(reg)
+ if reg as u32 >= X86InlineAsmReg::xmm0 as u32
+ && reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
+ {
+ Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
+ }
+ InlineAsmReg::X86(reg)
+ if reg as u32 >= X86InlineAsmReg::ymm0 as u32
+ && reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
+ {
+ Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
+ }
+ InlineAsmReg::X86(reg)
+ if reg as u32 >= X86InlineAsmReg::zmm0 as u32
+ && reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
+ {
+ Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
+ }
+ _ => None,
+ }
+}
+
+/// If the register is an AArch64 vector register then return its index.
+fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
+ match reg {
+ InlineAsmReg::AArch64(reg)
+ if reg as u32 >= AArch64InlineAsmReg::v0 as u32
+ && reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
+ {
+ Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
+ }
+ _ => None,
+ }
+}
+
+/// Converts a register class to an LLVM constraint code.
+fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> String {
+ match reg {
+ // For vector registers LLVM wants the register name to match the type size.
+ InlineAsmRegOrRegClass::Reg(reg) => {
+ if let Some(idx) = xmm_reg_index(reg) {
+ let class = if let Some(layout) = layout {
+ match layout.size.bytes() {
+ 64 => 'z',
+ 32 => 'y',
+ _ => 'x',
+ }
+ } else {
+ // We use f32 as the type for discarded outputs
+ 'x'
+ };
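+            // e.g. `xmm0` holding a 32-byte value is emitted as "{ymm0}".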
+ format!("{{{}mm{}}}", class, idx)
+ } else if let Some(idx) = a64_vreg_index(reg) {
+ let class = if let Some(layout) = layout {
+ match layout.size.bytes() {
+ 16 => 'q',
+ 8 => 'd',
+ 4 => 's',
+ 2 => 'h',
+                        1 => 'd', // We fix up i8 to i8x8
+ _ => unreachable!(),
+ }
+ } else {
+ // We use i64x2 as the type for discarded outputs
+ 'q'
+ };
+ format!("{{{}{}}}", class, idx)
+ } else if reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
+ // LLVM doesn't recognize x30
+ "{lr}".to_string()
+ } else if reg == InlineAsmReg::Arm(ArmInlineAsmReg::r14) {
+ // LLVM doesn't recognize r14
+ "{lr}".to_string()
+ } else {
+ format!("{{{}}}", reg.name())
+ }
+ }
+ InlineAsmRegOrRegClass::RegClass(reg) => match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
+ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+ | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
+ InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::x87_reg
+ | X86InlineAsmRegClass::mmx_reg
+ | X86InlineAsmRegClass::kreg0
+ | X86InlineAsmRegClass::tmm_reg,
+ ) => unreachable!("clobber-only"),
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
+ InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
+ InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ }
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+ .to_string(),
+ }
+}
+
+/// Converts a modifier into LLVM's equivalent modifier.
+fn modifier_to_llvm(
+ arch: InlineAsmArch,
+ reg: InlineAsmRegClass,
+ modifier: Option<char>,
+) -> Option<char> {
+ match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+ if modifier == Some('v') { None } else { modifier }
+ }
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None,
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+ if modifier.is_none() {
+ Some('q')
+ } else {
+ modifier
+ }
+ }
+ InlineAsmRegClass::Hexagon(_) => None,
+ InlineAsmRegClass::Mips(_) => None,
+ InlineAsmRegClass::Nvptx(_) => None,
+ InlineAsmRegClass::PowerPC(_) => None,
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
+ | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
+ None if arch == InlineAsmArch::X86_64 => Some('q'),
+ None => Some('k'),
+ Some('l') => Some('b'),
+ Some('h') => Some('h'),
+ Some('x') => Some('w'),
+ Some('e') => Some('k'),
+ Some('r') => Some('q'),
+ _ => unreachable!(),
+ },
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
+ InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
+ | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
+ (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
+ (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
+ (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
+ (_, Some('x')) => Some('x'),
+ (_, Some('y')) => Some('t'),
+ (_, Some('z')) => Some('g'),
+ _ => unreachable!(),
+ },
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
+ InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::x87_reg
+ | X86InlineAsmRegClass::mmx_reg
+ | X86InlineAsmRegClass::kreg0
+ | X86InlineAsmRegClass::tmm_reg,
+ ) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
+ InlineAsmRegClass::Bpf(_) => None,
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair)
+ | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw)
+ | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
+ Some('h') => Some('B'),
+ Some('l') => Some('A'),
+ _ => None,
+ },
+ InlineAsmRegClass::Avr(_) => None,
+ InlineAsmRegClass::S390x(_) => None,
+ InlineAsmRegClass::Msp430(_) => None,
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ }
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+}
+
+/// Type to use for outputs that are discarded. It doesn't really matter what
+/// the type is, as long as it is valid for the constraint code.
+fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'ll Type {
+ match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+ cx.type_vector(cx.type_i64(), 2)
+ }
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+ cx.type_vector(cx.type_i64(), 2)
+ }
+ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+ | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+ InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::x87_reg
+ | X86InlineAsmRegClass::mmx_reg
+ | X86InlineAsmRegClass::kreg0
+ | X86InlineAsmRegClass::tmm_reg,
+ ) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
+ InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
+ InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
+ InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ }
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+}
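+
+// Illustrative example (a sketch, not from the original source): discarded
+// outputs still need a typed LLVM value for the constraint string, and the
+// table above supplies a cheap default. E.g.:
+//
+//     asm!("rdtsc", out("eax") _, out("edx") _); // dummies: i32 (X86 `reg`)
+//     asm!("xorps xmm0, xmm0", out("xmm0") _);   // dummy: f32 (X86 `xmm_reg`)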
+
+/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
+/// the equivalent integer type.
+fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
+ match scalar.primitive() {
+ Primitive::Int(Integer::I8, _) => cx.type_i8(),
+ Primitive::Int(Integer::I16, _) => cx.type_i16(),
+ Primitive::Int(Integer::I32, _) => cx.type_i32(),
+ Primitive::Int(Integer::I64, _) => cx.type_i64(),
+ Primitive::F32 => cx.type_f32(),
+ Primitive::F64 => cx.type_f64(),
+ Primitive::Pointer => cx.type_isize(),
+ _ => unreachable!(),
+ }
+}
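+
+// E.g. on x86_64, a `*mut u8` operand (`Primitive::Pointer`) maps to the LLVM
+// `i64` returned by `cx.type_isize()`, so pointer operands can share the
+// integer code paths in the fixups below.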
+
+/// Fix up an input value to work around LLVM bugs.
+fn llvm_fixup_input<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ mut value: &'ll Value,
+ reg: InlineAsmRegClass,
+ layout: &TyAndLayout<'tcx>,
+) -> &'ll Value {
+ match (reg, layout.abi) {
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+ if let Primitive::Int(Integer::I8, _) = s.primitive() {
+ let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
+ bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
+ } else {
+ value
+ }
+ }
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+ let elem_ty = llvm_asm_scalar_type(bx.cx, s);
+ let count = 16 / layout.size.bytes();
+ let vec_ty = bx.cx.type_vector(elem_ty, count);
+ if let Primitive::Pointer = s.primitive() {
+ value = bx.ptrtoint(value, bx.cx.type_isize());
+ }
+ bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
+ }
+ (
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
+ Abi::Vector { element, count },
+ ) if layout.size.bytes() == 8 => {
+ let elem_ty = llvm_asm_scalar_type(bx.cx, element);
+ let vec_ty = bx.cx.type_vector(elem_ty, count);
+ let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
+ bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
+ }
+ (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+ if s.primitive() == Primitive::F64 =>
+ {
+ bx.bitcast(value, bx.cx.type_i64())
+ }
+ (
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+ Abi::Vector { .. },
+ ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
+ (
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I32, _) = s.primitive() {
+ bx.bitcast(value, bx.cx.type_f32())
+ } else {
+ value
+ }
+ }
+ (
+ InlineAsmRegClass::Arm(
+ ArmInlineAsmRegClass::dreg
+ | ArmInlineAsmRegClass::dreg_low8
+ | ArmInlineAsmRegClass::dreg_low16,
+ ),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I64, _) = s.primitive() {
+ bx.bitcast(value, bx.cx.type_f64())
+ } else {
+ value
+ }
+ }
+ (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+ match s.primitive() {
+ // MIPS only supports register-length arithmetic.
+ Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
+ Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
+ Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
+ _ => value,
+ }
+ }
+ _ => value,
+ }
+}
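+
+// Illustrative example of the first arm above: an `i8` passed via an AArch64
+// `vreg` is wrapped into an `<8 x i8>` vector with the value in lane 0,
+// roughly `insertelement <8 x i8> undef, i8 %value, i32 0`, because LLVM
+// expects SIMD registers to carry vector types.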
+
+/// Fix up an output value to work around LLVM bugs.
+fn llvm_fixup_output<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ mut value: &'ll Value,
+ reg: InlineAsmRegClass,
+ layout: &TyAndLayout<'tcx>,
+) -> &'ll Value {
+ match (reg, layout.abi) {
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+ if let Primitive::Int(Integer::I8, _) = s.primitive() {
+ bx.extract_element(value, bx.const_i32(0))
+ } else {
+ value
+ }
+ }
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+ value = bx.extract_element(value, bx.const_i32(0));
+ if let Primitive::Pointer = s.primitive() {
+ value = bx.inttoptr(value, layout.llvm_type(bx.cx));
+ }
+ value
+ }
+ (
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
+ Abi::Vector { element, count },
+ ) if layout.size.bytes() == 8 => {
+ let elem_ty = llvm_asm_scalar_type(bx.cx, element);
+ let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
+ let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
+ bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
+ }
+ (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+ if s.primitive() == Primitive::F64 =>
+ {
+ bx.bitcast(value, bx.cx.type_f64())
+ }
+ (
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+ Abi::Vector { .. },
+ ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
+ (
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I32, _) = s.primitive() {
+ bx.bitcast(value, bx.cx.type_i32())
+ } else {
+ value
+ }
+ }
+ (
+ InlineAsmRegClass::Arm(
+ ArmInlineAsmRegClass::dreg
+ | ArmInlineAsmRegClass::dreg_low8
+ | ArmInlineAsmRegClass::dreg_low16,
+ ),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I64, _) = s.primitive() {
+ bx.bitcast(value, bx.cx.type_i64())
+ } else {
+ value
+ }
+ }
+ (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+ match s.primitive() {
+ // MIPS only supports register-length arithmetic.
+ Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
+ Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
+ Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
+ Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
+ _ => value,
+ }
+ }
+ _ => value,
+ }
+}
+
+/// Output type to use for `llvm_fixup_output`.
+fn llvm_fixup_output_type<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ reg: InlineAsmRegClass,
+ layout: &TyAndLayout<'tcx>,
+) -> &'ll Type {
+ match (reg, layout.abi) {
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+ if let Primitive::Int(Integer::I8, _) = s.primitive() {
+ cx.type_vector(cx.type_i8(), 8)
+ } else {
+ layout.llvm_type(cx)
+ }
+ }
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+ let elem_ty = llvm_asm_scalar_type(cx, s);
+ let count = 16 / layout.size.bytes();
+ cx.type_vector(elem_ty, count)
+ }
+ (
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
+ Abi::Vector { element, count },
+ ) if layout.size.bytes() == 8 => {
+ let elem_ty = llvm_asm_scalar_type(cx, element);
+ cx.type_vector(elem_ty, count * 2)
+ }
+ (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+ if s.primitive() == Primitive::F64 =>
+ {
+ cx.type_i64()
+ }
+ (
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+ Abi::Vector { .. },
+ ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
+ (
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I32, _) = s.primitive() {
+ cx.type_f32()
+ } else {
+ layout.llvm_type(cx)
+ }
+ }
+ (
+ InlineAsmRegClass::Arm(
+ ArmInlineAsmRegClass::dreg
+ | ArmInlineAsmRegClass::dreg_low8
+ | ArmInlineAsmRegClass::dreg_low16,
+ ),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I64, _) = s.primitive() {
+ cx.type_f64()
+ } else {
+ layout.llvm_type(cx)
+ }
+ }
+ (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+ match s.primitive() {
+ // MIPS only supports register-length arithmetic.
+ Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
+ Primitive::F32 => cx.type_i32(),
+ Primitive::F64 => cx.type_i64(),
+ _ => layout.llvm_type(cx),
+ }
+ }
+ _ => layout.llvm_type(cx),
+ }
+}
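+
+// Taken together (a sketch of the expected call pattern, not code from this
+// file): for each operand the caller pairs these helpers up, e.g.
+//
+//     let ty = llvm_fixup_output_type(cx, reg, &layout);   // type LLVM sees
+//     let arg = llvm_fixup_input(bx, value, reg, &layout); // value LLVM sees
+//     // ... emit the asm call with `ty` / `arg` ...
+//     let out = llvm_fixup_output(bx, ret, reg, &layout);  // back to Rust's view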
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
new file mode 100644
index 000000000..aabbe8ac2
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -0,0 +1,449 @@
+//! Set and unset common attributes on LLVM values.
+
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::small_str::SmallStr;
+use rustc_hir::def_id::DefId;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_session::config::OptLevel;
+use rustc_span::symbol::sym;
+use rustc_target::spec::abi::Abi;
+use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtector};
+use smallvec::SmallVec;
+
+use crate::attributes;
+use crate::llvm::AttributePlace::Function;
+use crate::llvm::{self, AllocKindFlags, Attribute, AttributeKind, AttributePlace};
+use crate::llvm_util;
+pub use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr};
+
+use crate::context::CodegenCx;
+use crate::value::Value;
+
+pub fn apply_to_llfn(llfn: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
+ if !attrs.is_empty() {
+ llvm::AddFunctionAttributes(llfn, idx, attrs);
+ }
+}
+
+pub fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
+ if !attrs.is_empty() {
+ llvm::AddCallSiteAttributes(callsite, idx, attrs);
+ }
+}
+
+/// Get LLVM attribute for the provided inline heuristic.
+#[inline]
+fn inline_attr<'ll>(cx: &CodegenCx<'ll, '_>, inline: InlineAttr) -> Option<&'ll Attribute> {
+ match inline {
+ InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)),
+ InlineAttr::Always => Some(AttributeKind::AlwaysInline.create_attr(cx.llcx)),
+ InlineAttr::Never => {
+ if cx.sess().target.arch != "amdgpu" {
+ Some(AttributeKind::NoInline.create_attr(cx.llcx))
+ } else {
+ None
+ }
+ }
+ InlineAttr::None => None,
+ }
+}
+
+/// Get LLVM sanitize attributes.
+#[inline]
+pub fn sanitize_attrs<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ no_sanitize: SanitizerSet,
+) -> SmallVec<[&'ll Attribute; 4]> {
+ let mut attrs = SmallVec::new();
+ let enabled = cx.tcx.sess.opts.unstable_opts.sanitizer - no_sanitize;
+ if enabled.contains(SanitizerSet::ADDRESS) {
+ attrs.push(llvm::AttributeKind::SanitizeAddress.create_attr(cx.llcx));
+ }
+ if enabled.contains(SanitizerSet::MEMORY) {
+ attrs.push(llvm::AttributeKind::SanitizeMemory.create_attr(cx.llcx));
+ }
+ if enabled.contains(SanitizerSet::THREAD) {
+ attrs.push(llvm::AttributeKind::SanitizeThread.create_attr(cx.llcx));
+ }
+ if enabled.contains(SanitizerSet::HWADDRESS) {
+ attrs.push(llvm::AttributeKind::SanitizeHWAddress.create_attr(cx.llcx));
+ }
+ if enabled.contains(SanitizerSet::SHADOWCALLSTACK) {
+ attrs.push(llvm::AttributeKind::ShadowCallStack.create_attr(cx.llcx));
+ }
+ if enabled.contains(SanitizerSet::MEMTAG) {
+ // Check to make sure the mte target feature is actually enabled.
+ let features = cx.tcx.global_backend_features(());
+ let mte_feature =
+ features.iter().map(|s| &s[..]).rfind(|n| ["+mte", "-mte"].contains(&&n[..]));
+ if let None | Some("-mte") = mte_feature {
+ cx.tcx.sess.err("`-Zsanitizer=memtag` requires `-Ctarget-feature=+mte`");
+ }
+
+ attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx));
+ }
+ attrs
+}
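+
+// For example (illustrative): when compiling with `-Zsanitizer=address`, a
+// function annotated `#[no_sanitize(address)]` arrives here with
+// `SanitizerSet::ADDRESS` in `no_sanitize`, so the subtraction above leaves
+// `enabled` empty and no `sanitize_address` attribute is attached to it.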
+
+/// Tell LLVM whether to emit the information necessary to unwind the stack for the function.
+#[inline]
+pub fn uwtable_attr(llcx: &llvm::Context) -> &Attribute {
+ // NOTE: We should determine if we even need async unwind tables, as they
+ // have more overhead, and if we can use sync unwind tables we probably
+ // should.
+ llvm::CreateUWTableAttr(llcx, true)
+}
+
+pub fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+ let mut fp = cx.sess().target.frame_pointer;
+ // "mcount" function relies on stack pointer.
+ // See <https://sourceware.org/binutils/docs/gprof/Implementation.html>.
+ if cx.sess().instrument_mcount() || matches!(cx.sess().opts.cg.force_frame_pointers, Some(true))
+ {
+ fp = FramePointer::Always;
+ }
+ let attr_value = match fp {
+ FramePointer::Always => "all",
+ FramePointer::NonLeaf => "non-leaf",
+ FramePointer::MayOmit => return None,
+ };
+ Some(llvm::CreateAttrStringValue(cx.llcx, "frame-pointer", attr_value))
+}
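+
+// E.g. a target spec with `"frame-pointer": "non-leaf"` yields the LLVM
+// attribute `"frame-pointer"="non-leaf"`, while `MayOmit` targets get no
+// attribute at all unless mcount instrumentation forces `Always` ("all").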
+
+/// Tell LLVM which instrumentation function to insert.
+#[inline]
+fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+ if cx.sess().instrument_mcount() {
+ // Similar to `clang -pg` behavior. Handled by the
+ // `post-inline-ee-instrument` LLVM pass.
+
+ // The function name varies across platforms.
+ // See test/CodeGen/mcount.c in clang.
+ let mcount_name = cx.sess().target.mcount.as_ref();
+
+ Some(llvm::CreateAttrStringValue(
+ cx.llcx,
+ "instrument-function-entry-inlined",
+ &mcount_name,
+ ))
+ } else {
+ None
+ }
+}
+
+fn probestack_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+ // Currently stack probes seem somewhat incompatible with the address
+ // sanitizer and thread sanitizer. With asan we're already protected from
+ // stack overflow, so we don't really need stack probes anyway.
+ if cx
+ .sess()
+ .opts
+ .unstable_opts
+ .sanitizer
+ .intersects(SanitizerSet::ADDRESS | SanitizerSet::THREAD)
+ {
+ return None;
+ }
+
+ // probestack doesn't play nicely with `-C profile-generate` either.
+ if cx.sess().opts.cg.profile_generate.enabled() {
+ return None;
+ }
+
+ // probestack doesn't play nicely with gcov profiling either.
+ if cx.sess().opts.unstable_opts.profile {
+ return None;
+ }
+
+ let attr_value = match cx.sess().target.stack_probes {
+ StackProbeType::None => return None,
+ // Request LLVM to generate the probes inline. If the given LLVM version does not support
+ // this, no probe is generated at all (even if the attribute is specified).
+ StackProbeType::Inline => "inline-asm",
+ // Flag our internal `__rust_probestack` function as the stack probe symbol.
+ // This is defined in the `compiler-builtins` crate for each architecture.
+ StackProbeType::Call => "__rust_probestack",
+ // Pick from the two above based on the LLVM version.
+ StackProbeType::InlineOrCall { min_llvm_version_for_inline } => {
+ if llvm_util::get_version() < min_llvm_version_for_inline {
+ "__rust_probestack"
+ } else {
+ "inline-asm"
+ }
+ }
+ };
+ Some(llvm::CreateAttrStringValue(cx.llcx, "probe-stack", attr_value))
+}
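+
+// E.g. (illustrative) a target with `StackProbeType::Call` produces
+// `"probe-stack"="__rust_probestack"`, while `-Zsanitizer=address` or
+// `-Cprofile-generate` suppress the attribute entirely via the early
+// returns above.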
+
+fn stackprotector_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+ let sspattr = match cx.sess().stack_protector() {
+ StackProtector::None => return None,
+ StackProtector::All => AttributeKind::StackProtectReq,
+ StackProtector::Strong => AttributeKind::StackProtectStrong,
+ StackProtector::Basic => AttributeKind::StackProtect,
+ };
+
+ Some(sspattr.create_attr(cx.llcx))
+}
+
+pub fn target_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Attribute {
+ let target_cpu = llvm_util::target_cpu(cx.tcx.sess);
+ llvm::CreateAttrStringValue(cx.llcx, "target-cpu", target_cpu)
+}
+
+pub fn tune_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+ llvm_util::tune_cpu(cx.tcx.sess)
+ .map(|tune_cpu| llvm::CreateAttrStringValue(cx.llcx, "tune-cpu", tune_cpu))
+}
+
+/// Get the `NonLazyBind` LLVM attribute,
+/// if the codegen options allow skipping the PLT.
+pub fn non_lazy_bind_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+ // Don't generate calls through PLT if it's not necessary
+ if !cx.sess().needs_plt() {
+ Some(AttributeKind::NonLazyBind.create_attr(cx.llcx))
+ } else {
+ None
+ }
+}
+
+/// Get the default optimization attributes for a function.
+#[inline]
+pub(crate) fn default_optimisation_attrs<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+) -> SmallVec<[&'ll Attribute; 2]> {
+ let mut attrs = SmallVec::new();
+ match cx.sess().opts.optimize {
+ OptLevel::Size => {
+ attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
+ }
+ OptLevel::SizeMin => {
+ attrs.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
+ attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
+ }
+ _ => {}
+ }
+ attrs
+}
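+
+// For example: `-C opt-level=s` (`OptLevel::Size`) adds only `optsize`,
+// `-C opt-level=z` (`OptLevel::SizeMin`) adds both `minsize` and `optsize`,
+// and any other level adds neither.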
+
+fn create_alloc_family_attr(llcx: &llvm::Context) -> &llvm::Attribute {
+ llvm::CreateAttrStringValue(llcx, "alloc-family", "__rust_alloc")
+}
+
+/// Composite function which sets LLVM attributes for a function depending on its AST
+/// (`#[attribute]`) attributes.
+pub fn from_fn_attrs<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ llfn: &'ll Value,
+ instance: ty::Instance<'tcx>,
+) {
+ let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
+
+ let mut to_add = SmallVec::<[_; 16]>::new();
+
+ match codegen_fn_attrs.optimize {
+ OptimizeAttr::None => {
+ to_add.extend(default_optimisation_attrs(cx));
+ }
+ OptimizeAttr::Size => {
+ to_add.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
+ to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
+ }
+ OptimizeAttr::Speed => {}
+ }
+
+ let inline = if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+ InlineAttr::Never
+ } else if codegen_fn_attrs.inline == InlineAttr::None && instance.def.requires_inline(cx.tcx) {
+ InlineAttr::Hint
+ } else {
+ codegen_fn_attrs.inline
+ };
+ to_add.extend(inline_attr(cx, inline));
+
+ // The `uwtable` attribute according to LLVM is:
+ //
+ // This attribute indicates that the ABI being targeted requires that an
+ // unwind table entry be produced for this function even if we can show
+ // that no exceptions pass by it. This is normally the case for the
+ // ELF x86-64 ABI, but it can be disabled for some compilation units.
+ //
+ // Typically when we're compiling with `-C panic=abort` (which implies this
+ // `no_landing_pads` check) we don't need `uwtable` because we can't
+ // generate any exceptions! On Windows, however, exceptions include other
+ // events such as illegal instructions, segfaults, etc. This means that on
+ // Windows we end up still needing the `uwtable` attribute even if the `-C
+ // panic=abort` flag is passed.
+ //
+ // You can also find more info on why Windows always requires uwtables here:
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=1302078
+ if cx.sess().must_emit_unwind_tables() {
+ to_add.push(uwtable_attr(cx.llcx));
+ }
+
+ if cx.sess().opts.unstable_opts.profile_sample_use.is_some() {
+ to_add.push(llvm::CreateAttrString(cx.llcx, "use-sample-profile"));
+ }
+
+ // FIXME: none of these three functions interact with source level attributes.
+ to_add.extend(frame_pointer_type_attr(cx));
+ to_add.extend(instrument_function_attr(cx));
+ to_add.extend(probestack_attr(cx));
+ to_add.extend(stackprotector_attr(cx));
+
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
+ to_add.push(AttributeKind::Cold.create_attr(cx.llcx));
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_RETURNS_TWICE) {
+ to_add.push(AttributeKind::ReturnsTwice.create_attr(cx.llcx));
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
+ to_add.push(AttributeKind::ReadOnly.create_attr(cx.llcx));
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
+ to_add.push(AttributeKind::ReadNone.create_attr(cx.llcx));
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+ to_add.push(AttributeKind::Naked.create_attr(cx.llcx));
+ // HACK(jubilee): "indirect branch tracking" works by attaching prologues to functions.
+ // And it is a module-level attribute, so the alternative is pulling naked functions into new LLVM modules.
+ // Otherwise LLVM's "naked" functions come with endbr prefixes per https://github.com/rust-lang/rust/issues/98768
+ to_add.push(AttributeKind::NoCfCheck.create_attr(cx.llcx));
+ // Need this for AArch64.
+ to_add.push(llvm::CreateAttrStringValue(cx.llcx, "branch-target-enforcement", "false"));
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR)
+ || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED)
+ {
+ if llvm_util::get_version() >= (15, 0, 0) {
+ to_add.push(create_alloc_family_attr(cx.llcx));
+ // apply to argument place instead of function
+ let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
+ to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
+ let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
+ flags |= AllocKindFlags::Uninitialized;
+ } else {
+ flags |= AllocKindFlags::Zeroed;
+ }
+ to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
+ }
+ // apply to return place instead of function (unlike all other attributes applied in this function)
+ let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::REALLOCATOR) {
+ if llvm_util::get_version() >= (15, 0, 0) {
+ to_add.push(create_alloc_family_attr(cx.llcx));
+ to_add.push(llvm::CreateAllocKindAttr(
+ cx.llcx,
+ AllocKindFlags::Realloc | AllocKindFlags::Aligned,
+ ));
+ // applies to argument place instead of function place
+ let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
+ // apply to argument place instead of function
+ let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
+ to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
+ }
+ let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::DEALLOCATOR) {
+ if llvm_util::get_version() >= (15, 0, 0) {
+ to_add.push(create_alloc_family_attr(cx.llcx));
+ to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
+ // applies to argument place instead of function place
+ let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
+ }
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY) {
+ to_add.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"));
+ }
+ if let Some(align) = codegen_fn_attrs.alignment {
+ llvm::set_alignment(llfn, align as usize);
+ }
+ to_add.extend(sanitize_attrs(cx, codegen_fn_attrs.no_sanitize));
+
+ // Always annotate functions with the target-cpu they are compiled for.
+ // Without this, ThinLTO won't inline Rust functions into Clang generated
+ // functions (because Clang annotates functions this way too).
+ to_add.push(target_cpu_attr(cx));
+ // tune-cpu is only conveyed through the attribute for our purpose.
+ // The target doesn't care; the subtarget reads our attribute.
+ to_add.extend(tune_cpu_attr(cx));
+
+ let function_features =
+ codegen_fn_attrs.target_features.iter().map(|f| f.as_str()).collect::<Vec<&str>>();
+
+ if let Some(f) = llvm_util::check_tied_features(
+ cx.tcx.sess,
+ &function_features.iter().map(|f| (*f, true)).collect(),
+ ) {
+ let span = cx
+ .tcx
+ .get_attr(instance.def_id(), sym::target_feature)
+ .map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span);
+ let msg = format!(
+ "the target features {} must all be either enabled or disabled together",
+ f.join(", ")
+ );
+ let mut err = cx.tcx.sess.struct_span_err(span, &msg);
+ err.help("add the missing features in a `target_feature` attribute");
+ err.emit();
+ return;
+ }
+
+ let mut function_features = function_features
+ .iter()
+ .flat_map(|feat| {
+ llvm_util::to_llvm_features(cx.tcx.sess, feat).into_iter().map(|f| format!("+{}", f))
+ })
+ .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
+ InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
+ InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
+ }))
+ .collect::<Vec<String>>();
+
+ if cx.tcx.sess.target.is_like_wasm {
+ // If this function is an import from the environment but the wasm
+ // import has a specific module/name, apply them here.
+ if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
+ to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-module", &module));
+
+ let name =
+ codegen_fn_attrs.link_name.unwrap_or_else(|| cx.tcx.item_name(instance.def_id()));
+ let name = name.as_str();
+ to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-name", name));
+ }
+
+ // The `"wasm"` abi on wasm targets automatically enables the
+ // `+multivalue` feature because the purpose of the wasm abi is to match
+ // the WebAssembly specification, which has this feature. This won't be
+ // needed when LLVM enables this `multivalue` feature by default.
+ if !cx.tcx.is_closure(instance.def_id()) {
+ let abi = cx.tcx.fn_sig(instance.def_id()).abi();
+ if abi == Abi::Wasm {
+ function_features.push("+multivalue".to_string());
+ }
+ }
+ }
+
+ let global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str());
+ let function_features = function_features.iter().map(|s| s.as_str());
+ let target_features =
+ global_features.chain(function_features).intersperse(",").collect::<SmallStr<1024>>();
+ if !target_features.is_empty() {
+ to_add.push(llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features));
+ }
+
+ attributes::apply_to_llfn(llfn, Function, &to_add);
+}
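+
+// A rough end-to-end sketch (illustrative): for
+//
+//     #[cold]
+//     #[target_feature(enable = "avx2")]
+//     unsafe fn f() {}
+//
+// this attaches roughly `cold`, `uwtable` (where the target requires it),
+// `"target-cpu"="..."` and `"target-features"="...,+avx2"` to the LLVM
+// declaration of `f`.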
+
+fn wasm_import_module(tcx: TyCtxt<'_>, id: DefId) -> Option<&String> {
+ tcx.wasm_import_module_map(id.krate).get(&id)
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
new file mode 100644
index 000000000..27039cda2
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -0,0 +1,383 @@
+//! Helpers for dealing with static archives.
+
+use std::env;
+use std::ffi::{CStr, CString, OsString};
+use std::io;
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::ptr;
+use std::str;
+
+use crate::llvm::archive_ro::{ArchiveRO, Child};
+use crate::llvm::{self, ArchiveKind, LLVMMachineType, LLVMRustCOFFShortExport};
+use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder};
+use rustc_session::cstore::{DllCallingConvention, DllImport};
+use rustc_session::Session;
+
+/// Helper for adding many files to an archive.
+#[must_use = "must call build() to finish building the archive"]
+pub struct LlvmArchiveBuilder<'a> {
+ sess: &'a Session,
+ additions: Vec<Addition>,
+}
+
+enum Addition {
+ File { path: PathBuf, name_in_archive: String },
+ Archive { path: PathBuf, archive: ArchiveRO, skip: Box<dyn FnMut(&str) -> bool> },
+}
+
+impl Addition {
+ fn path(&self) -> &Path {
+ match self {
+ Addition::File { path, .. } | Addition::Archive { path, .. } => path,
+ }
+ }
+}
+
+fn is_relevant_child(c: &Child<'_>) -> bool {
+ match c.name() {
+ Some(name) => !name.contains("SYMDEF"),
+ None => false,
+ }
+}
+
+/// Map machine type strings to values of LLVM's MachineTypes enum.
+fn llvm_machine_type(cpu: &str) -> LLVMMachineType {
+ match cpu {
+ "x86_64" => LLVMMachineType::AMD64,
+ "x86" => LLVMMachineType::I386,
+ "aarch64" => LLVMMachineType::ARM64,
+ "arm" => LLVMMachineType::ARM,
+ _ => panic!("unsupported cpu type {}", cpu),
+ }
+}
+
+impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> {
+ fn add_archive(
+ &mut self,
+ archive: &Path,
+ skip: Box<dyn FnMut(&str) -> bool + 'static>,
+ ) -> io::Result<()> {
+ let archive_ro = match ArchiveRO::open(archive) {
+ Ok(ar) => ar,
+ Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)),
+ };
+ if self.additions.iter().any(|ar| ar.path() == archive) {
+ return Ok(());
+ }
+ self.additions.push(Addition::Archive {
+ path: archive.to_path_buf(),
+ archive: archive_ro,
+ skip: Box::new(skip),
+ });
+ Ok(())
+ }
+
+ /// Adds an arbitrary file to this archive
+ fn add_file(&mut self, file: &Path) {
+ let name = file.file_name().unwrap().to_str().unwrap();
+ self.additions
+ .push(Addition::File { path: file.to_path_buf(), name_in_archive: name.to_owned() });
+ }
+
+ /// Combine the provided files, rlibs, and native libraries into a single
+ /// `Archive`.
+ fn build(mut self: Box<Self>, output: &Path) -> bool {
+ match self.build_with_llvm(output) {
+ Ok(any_members) => any_members,
+ Err(e) => self.sess.fatal(&format!("failed to build archive: {}", e)),
+ }
+ }
+}
+
+pub struct LlvmArchiveBuilderBuilder;
+
+impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
+ fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a> {
+ Box::new(LlvmArchiveBuilder { sess, additions: Vec::new() })
+ }
+
+ fn create_dll_import_lib(
+ &self,
+ sess: &Session,
+ lib_name: &str,
+ dll_imports: &[DllImport],
+ tmpdir: &Path,
+ ) -> PathBuf {
+ let output_path = {
+ let mut output_path: PathBuf = tmpdir.to_path_buf();
+ output_path.push(format!("{}_imports", lib_name));
+ output_path.with_extension("lib")
+ };
+
+ let target = &sess.target;
+ let mingw_gnu_toolchain = target.vendor == "pc"
+ && target.os == "windows"
+ && target.env == "gnu"
+ && target.abi.is_empty();
+
+ let import_name_and_ordinal_vector: Vec<(String, Option<u16>)> = dll_imports
+ .iter()
+ .map(|import: &DllImport| {
+ if sess.target.arch == "x86" {
+ (
+ LlvmArchiveBuilder::i686_decorated_name(import, mingw_gnu_toolchain),
+ import.ordinal,
+ )
+ } else {
+ (import.name.to_string(), import.ordinal)
+ }
+ })
+ .collect();
+
+ if mingw_gnu_toolchain {
+ // The binutils linker used on -windows-gnu targets cannot read the import
+ // libraries generated by LLVM: in our attempts, the linker produced an .EXE
+ // that loaded but crashed with an AV upon calling one of the imported
+ // functions. Therefore, use binutils to create the import library instead,
+ // by writing a .DEF file to the temp dir and calling binutils's dlltool.
+ let def_file_path = tmpdir.join(format!("{}_imports", lib_name)).with_extension("def");
+
+ let def_file_content = format!(
+ "EXPORTS\n{}",
+ import_name_and_ordinal_vector
+ .into_iter()
+ .map(|(name, ordinal)| {
+ match ordinal {
+ Some(n) => format!("{} @{} NONAME", name, n),
+ None => name,
+ }
+ })
+ .collect::<Vec<String>>()
+ .join("\n")
+ );
+
+ match std::fs::write(&def_file_path, def_file_content) {
+ Ok(_) => {}
+ Err(e) => {
+ sess.fatal(&format!("Error writing .DEF file: {}", e));
+ }
+ };
+
+ let dlltool = find_binutils_dlltool(sess);
+ let result = std::process::Command::new(dlltool)
+ .args([
+ "-d",
+ def_file_path.to_str().unwrap(),
+ "-D",
+ lib_name,
+ "-l",
+ output_path.to_str().unwrap(),
+ ])
+ .output();
+
+ match result {
+ Err(e) => {
+ sess.fatal(&format!("Error calling dlltool: {}", e));
+ }
+ Ok(output) if !output.status.success() => sess.fatal(&format!(
+ "Dlltool could not create import library: {}\n{}",
+ String::from_utf8_lossy(&output.stdout),
+ String::from_utf8_lossy(&output.stderr)
+ )),
+ _ => {}
+ }
+ } else {
+ // we've checked for \0 characters in the library name already
+ let dll_name_z = CString::new(lib_name).unwrap();
+
+ let output_path_z = rustc_fs_util::path_to_c_string(&output_path);
+
+ tracing::trace!("invoking LLVMRustWriteImportLibrary");
+ tracing::trace!(" dll_name {:#?}", dll_name_z);
+ tracing::trace!(" output_path {}", output_path.display());
+ tracing::trace!(
+ " import names: {}",
+ dll_imports
+ .iter()
+ .map(|import| import.name.to_string())
+ .collect::<Vec<_>>()
+ .join(", "),
+ );
+
+ // All import names are Rust identifiers and therefore cannot contain \0 characters.
+ // FIXME: when support for #[link_name] is implemented, ensure that the import names
+ // still don't contain any \0 characters. Also need to check that the names don't
+ // contain substrings like " @" or "NONAME" that are keywords or otherwise reserved
+ // in definition files.
+ let cstring_import_name_and_ordinal_vector: Vec<(CString, Option<u16>)> =
+ import_name_and_ordinal_vector
+ .into_iter()
+ .map(|(name, ordinal)| (CString::new(name).unwrap(), ordinal))
+ .collect();
+
+ let ffi_exports: Vec<LLVMRustCOFFShortExport> = cstring_import_name_and_ordinal_vector
+ .iter()
+ .map(|(name_z, ordinal)| LLVMRustCOFFShortExport::new(name_z.as_ptr(), *ordinal))
+ .collect();
+ let result = unsafe {
+ crate::llvm::LLVMRustWriteImportLibrary(
+ dll_name_z.as_ptr(),
+ output_path_z.as_ptr(),
+ ffi_exports.as_ptr(),
+ ffi_exports.len(),
+ llvm_machine_type(&sess.target.arch) as u16,
+ !sess.target.is_like_msvc,
+ )
+ };
+
+ if result == crate::llvm::LLVMRustResult::Failure {
+ sess.fatal(&format!(
+ "Error creating import library for {}: {}",
+ lib_name,
+ llvm::last_error().unwrap_or("unknown LLVM error".to_string())
+ ));
+ }
+ };
+
+ output_path
+ }
+}
+
+impl<'a> LlvmArchiveBuilder<'a> {
+ fn build_with_llvm(&mut self, output: &Path) -> io::Result<bool> {
+ let kind = &*self.sess.target.archive_format;
+ let kind = kind.parse::<ArchiveKind>().map_err(|_| kind).unwrap_or_else(|kind| {
+ self.sess.fatal(&format!("Don't know how to build archive of type: {}", kind))
+ });
+
+ let mut additions = mem::take(&mut self.additions);
+ let mut strings = Vec::new();
+ let mut members = Vec::new();
+
+ let dst = CString::new(output.to_str().unwrap())?;
+
+ unsafe {
+ for addition in &mut additions {
+ match addition {
+ Addition::File { path, name_in_archive } => {
+ let path = CString::new(path.to_str().unwrap())?;
+ let name = CString::new(name_in_archive.clone())?;
+ members.push(llvm::LLVMRustArchiveMemberNew(
+ path.as_ptr(),
+ name.as_ptr(),
+ None,
+ ));
+ strings.push(path);
+ strings.push(name);
+ }
+ Addition::Archive { archive, skip, .. } => {
+ for child in archive.iter() {
+ let child = child.map_err(string_to_io_error)?;
+ if !is_relevant_child(&child) {
+ continue;
+ }
+ let child_name = child.name().unwrap();
+ if skip(child_name) {
+ continue;
+ }
+
+ // It appears that LLVM's archive writer is a little
+ // buggy if the name we pass down isn't just the
+ // filename component, so chop that off here and
+ // pass it in.
+ //
+ // See LLVM bug 25877 for more info.
+ let child_name =
+ Path::new(child_name).file_name().unwrap().to_str().unwrap();
+ let name = CString::new(child_name)?;
+ let m = llvm::LLVMRustArchiveMemberNew(
+ ptr::null(),
+ name.as_ptr(),
+ Some(child.raw),
+ );
+ members.push(m);
+ strings.push(name);
+ }
+ }
+ }
+ }
+
+ let r = llvm::LLVMRustWriteArchive(
+ dst.as_ptr(),
+ members.len() as libc::size_t,
+ members.as_ptr() as *const &_,
+ true,
+ kind,
+ );
+ let ret = if r.into_result().is_err() {
+ let err = llvm::LLVMRustGetLastError();
+ let msg = if err.is_null() {
+ "failed to write archive".into()
+ } else {
+ String::from_utf8_lossy(CStr::from_ptr(err).to_bytes())
+ };
+ Err(io::Error::new(io::ErrorKind::Other, msg))
+ } else {
+ Ok(!members.is_empty())
+ };
+ for member in members {
+ llvm::LLVMRustArchiveMemberFree(member);
+ }
+ ret
+ }
+ }
+
+ fn i686_decorated_name(import: &DllImport, mingw: bool) -> String {
+ let name = import.name;
+ let prefix = if mingw { "" } else { "_" };
+
+ match import.calling_convention {
+ DllCallingConvention::C => format!("{}{}", prefix, name),
+ DllCallingConvention::Stdcall(arg_list_size) => {
+ format!("{}{}@{}", prefix, name, arg_list_size)
+ }
+ DllCallingConvention::Fastcall(arg_list_size) => format!("@{}@{}", name, arg_list_size),
+ DllCallingConvention::Vectorcall(arg_list_size) => {
+ format!("{}@@{}", name, arg_list_size)
+ }
+ }
+ }
+}
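+
+// Decoration examples for `i686_decorated_name` (illustrative, assuming a
+// 12-byte argument list): C => `_func`, stdcall => `_func@12`,
+// fastcall => `@func@12`, vectorcall => `func@@12`; on the mingw toolchain
+// the leading underscore is omitted.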
+
+fn string_to_io_error(s: String) -> io::Error {
+ io::Error::new(io::ErrorKind::Other, format!("bad archive: {}", s))
+}
+
+fn find_binutils_dlltool(sess: &Session) -> OsString {
+ assert!(sess.target.options.is_like_windows && !sess.target.options.is_like_msvc);
+ if let Some(dlltool_path) = &sess.opts.unstable_opts.dlltool {
+ return dlltool_path.clone().into_os_string();
+ }
+
+ let mut tool_name: OsString = if sess.host.arch != sess.target.arch {
+ // We are cross-compiling, so we need the tool with the prefix matching our target
+ if sess.target.arch == "x86" {
+ "i686-w64-mingw32-dlltool"
+ } else {
+ "x86_64-w64-mingw32-dlltool"
+ }
+ } else {
+ // We are not cross-compiling, so we just want `dlltool`
+ "dlltool"
+ }
+ .into();
+
+ if sess.host.options.is_like_windows {
+ // If we're compiling on Windows, add the .exe suffix
+ tool_name.push(".exe");
+ }
+
+ // NOTE: it's not clear how useful it is to explicitly search PATH.
+ for dir in env::split_paths(&env::var_os("PATH").unwrap_or_default()) {
+ let full_path = dir.join(&tool_name);
+ if full_path.is_file() {
+ return full_path.into_os_string();
+ }
+ }
+
+ // The user didn't specify the location of the dlltool binary, and we weren't able
+ // to find the appropriate one on the PATH. Just return the name of the tool
+ // and let the invocation fail with a hopefully useful error message.
+ tool_name
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
new file mode 100644
index 000000000..3731c6bcf
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -0,0 +1,936 @@
+use crate::back::write::{
+ self, save_temp_bitcode, to_llvm_opt_settings, with_llvm_pmb, DiagnosticHandlers,
+};
+use crate::llvm::archive_ro::ArchiveRO;
+use crate::llvm::{self, build_string, False, True};
+use crate::{llvm_util, LlvmCodegenBackend, ModuleLlvm};
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
+use rustc_codegen_ssa::back::symbol_export;
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, TargetMachineFactoryConfig};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{FatalError, Handler};
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::bug;
+use rustc_middle::dep_graph::WorkProduct;
+use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{self, CrateType, Lto};
+use tracing::{debug, info};
+
+use std::ffi::{CStr, CString};
+use std::fs::File;
+use std::io;
+use std::iter;
+use std::path::Path;
+use std::ptr;
+use std::slice;
+use std::sync::Arc;
+
+/// We keep track of the computed LTO cache keys from the previous
+/// session to determine which CGUs we can reuse.
+pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
+
+pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
+ match crate_type {
+ CrateType::Executable | CrateType::Staticlib | CrateType::Cdylib => true,
+ CrateType::Dylib | CrateType::Rlib | CrateType::ProcMacro => false,
+ }
+}
+
+fn prepare_lto(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+) -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError> {
+ let export_threshold = match cgcx.lto {
+ // We're just doing LTO for our one crate
+ Lto::ThinLocal => SymbolExportLevel::Rust,
+
+ // We're doing LTO for the entire crate graph
+ Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&cgcx.crate_types),
+
+ Lto::No => panic!("didn't request LTO but we're doing LTO"),
+ };
+
+ let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| {
+ if info.level.is_below_threshold(export_threshold) || info.used {
+ Some(CString::new(name.as_str()).unwrap())
+ } else {
+ None
+ }
+ };
+ let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+ let mut symbols_below_threshold = {
+ let _timer = cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
+ exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<CString>>()
+ };
+ info!("{} symbols to preserve in this crate", symbols_below_threshold.len());
+
+ // If we're performing LTO for the entire crate graph, then for each of our
+ // upstream dependencies, find the corresponding rlib and load the bitcode
+ // from the archive.
+ //
+ // We save off all the bytecode and LLVM module ids for later processing
+ // with either fat or thin LTO
+ let mut upstream_modules = Vec::new();
+ if cgcx.lto != Lto::ThinLocal {
+ if cgcx.opts.cg.prefer_dynamic {
+ diag_handler
+ .struct_err("cannot prefer dynamic linking when performing LTO")
+ .note(
+ "only 'staticlib', 'bin', and 'cdylib' outputs are \
+ supported with LTO",
+ )
+ .emit();
+ return Err(FatalError);
+ }
+
+ // Make sure we actually can run LTO
+ for crate_type in cgcx.crate_types.iter() {
+ if !crate_type_allows_lto(*crate_type) {
+ let e = diag_handler.fatal(
+ "lto can only be run for executables, cdylibs and \
+ static library outputs",
+ );
+ return Err(e);
+ }
+ }
+
+ for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
+ let exported_symbols =
+ cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+ {
+ let _timer =
+ cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
+ symbols_below_threshold
+ .extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
+ }
+
+ let archive = ArchiveRO::open(path).expect("wanted an rlib");
+ let obj_files = archive
+ .iter()
+ .filter_map(|child| child.ok().and_then(|c| c.name().map(|name| (name, c))))
+ .filter(|&(name, _)| looks_like_rust_object_file(name));
+ for (name, child) in obj_files {
+ info!("adding bitcode from {}", name);
+ match get_bitcode_slice_from_object_data(child.data()) {
+ Ok(data) => {
+ let module = SerializedModule::FromRlib(data.to_vec());
+ upstream_modules.push((module, CString::new(name).unwrap()));
+ }
+ Err(msg) => return Err(diag_handler.fatal(&msg)),
+ }
+ }
+ }
+ }
+
+ Ok((symbols_below_threshold, upstream_modules))
+}
+
+fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], String> {
+ let mut len = 0;
+ let data =
+ unsafe { llvm::LLVMRustGetBitcodeSliceFromObjectData(obj.as_ptr(), obj.len(), &mut len) };
+ if !data.is_null() {
+ assert!(len != 0);
+ let bc = unsafe { slice::from_raw_parts(data, len) };
+
+ // `bc` must be a sub-slice of `obj`.
+ assert!(obj.as_ptr() <= bc.as_ptr());
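+ // Compare the one-past-the-end pointers via empty trailing subslices.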
+ assert!(bc[bc.len()..bc.len()].as_ptr() <= obj[obj.len()..obj.len()].as_ptr());
+
+ Ok(bc)
+ } else {
+ assert!(len == 0);
+ let msg = llvm::last_error().unwrap_or_else(|| "unknown LLVM error".to_string());
+ Err(format!("failed to get bitcode from object file for LTO ({})", msg))
+ }
+}
+
+/// Performs fat LTO by merging all modules into a single one and returning it
+/// for further optimization.
+pub(crate) fn run_fat(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+ let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?;
+ let symbols_below_threshold =
+ symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
+ fat_lto(
+ cgcx,
+ &diag_handler,
+ modules,
+ cached_modules,
+ upstream_modules,
+ &symbols_below_threshold,
+ )
+}
+
+/// Performs thin LTO by performing necessary global analysis and returning two
+/// lists, one of the modules that need optimization and another for modules that
+/// can simply be copied over from the incr. comp. cache.
+pub(crate) fn run_thin(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ modules: Vec<(String, ThinBuffer)>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+ let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?;
+ let symbols_below_threshold =
+ symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
+ if cgcx.opts.cg.linker_plugin_lto.enabled() {
+ unreachable!(
+ "We should never reach this case if the LTO step \
+ is deferred to the linker"
+ );
+ }
+ thin_lto(
+ cgcx,
+ &diag_handler,
+ modules,
+ upstream_modules,
+ cached_modules,
+ &symbols_below_threshold,
+ )
+}
+
+pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBuffer) {
+ let name = module.name.clone();
+ let buffer = ThinBuffer::new(module.module_llvm.llmod(), true);
+ (name, buffer)
+}
+
+fn fat_lto(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+ mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+ symbols_below_threshold: &[*const libc::c_char],
+) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
+ let _timer = cgcx.prof.generic_activity("LLVM_fat_lto_build_monolithic_module");
+ info!("going for a fat lto");
+
+ // Sort out all our lists of incoming modules into two lists.
+ //
+ // * `serialized_modules` (also an argument to this function) contains all
+ // modules that are serialized in-memory.
+ // * `in_memory` contains modules which are already parsed and in-memory,
+ // such as from multi-CGU builds.
+ //
+ // All of `cached_modules` (cached from previous incremental builds) can
+ // immediately go onto the `serialized_modules` modules list and then we can
+ // split the `modules` array into these two lists.
+ let mut in_memory = Vec::new();
+ serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| {
+ info!("pushing cached module {:?}", wp.cgu_name);
+ (buffer, CString::new(wp.cgu_name).unwrap())
+ }));
+ for module in modules {
+ match module {
+ FatLTOInput::InMemory(m) => in_memory.push(m),
+ FatLTOInput::Serialized { name, buffer } => {
+ info!("pushing serialized module {:?}", name);
+ let buffer = SerializedModule::Local(buffer);
+ serialized_modules.push((buffer, CString::new(name).unwrap()));
+ }
+ }
+ }
+
+ // Find the "costliest" module and merge everything into that codegen unit.
+ // All the other modules will be serialized and reparsed into the new
+ // context, so this hopefully avoids serializing and parsing the largest
+ // codegen unit.
+ //
+ // Additionally use a regular module as the base here to ensure that various
+ // file copy operations in the backend work correctly. The only other kind
+ // of module here should be an allocator one, and if your crate is smaller
+ // than the allocator module then the size doesn't really matter anyway.
+ let costliest_module = in_memory
+ .iter()
+ .enumerate()
+ .filter(|&(_, module)| module.kind == ModuleKind::Regular)
+ .map(|(i, module)| {
+ let cost = unsafe { llvm::LLVMRustModuleCost(module.module_llvm.llmod()) };
+ (cost, i)
+ })
+ .max();
+
+ // If we found the costliest module, we're good to go. Otherwise all our
+ // inputs were serialized, which could happen if, for example,
+ // all our inputs were incrementally reread from the cache and we're just
+ // re-executing the LTO passes. If that's the case, deserialize the first
+ // module and create a linker with it.
+ let module: ModuleCodegen<ModuleLlvm> = match costliest_module {
+ Some((_cost, i)) => in_memory.remove(i),
+ None => {
+ assert!(!serialized_modules.is_empty(), "must have at least one serialized module");
+ let (buffer, name) = serialized_modules.remove(0);
+ info!("no in-memory regular modules to choose from, parsing {:?}", name);
+ ModuleCodegen {
+ module_llvm: ModuleLlvm::parse(cgcx, &name, buffer.data(), diag_handler)?,
+ name: name.into_string().unwrap(),
+ kind: ModuleKind::Regular,
+ }
+ }
+ };
+ let mut serialized_bitcode = Vec::new();
+ {
+ let (llcx, llmod) = {
+ let llvm = &module.module_llvm;
+ (&llvm.llcx, llvm.llmod())
+ };
+ info!("using {:?} as a base module", module.name);
+
+ // The linking steps below may produce errors and diagnostics within LLVM
+ // which we'd like to handle and print, so set up our diagnostic handlers
+ // (which get unregistered when they go out of scope below).
+ let _handler = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+ // For all other modules we codegened we'll need to link them into our own
+ // bitcode. All modules were codegened in their own LLVM context, however,
+ // and we want to move everything to the same LLVM context. Currently the
+ // way we know of to do that is to serialize them to a string and then parse
+ // them later. Not great but hey, that's why it's "fat" LTO, right?
+ for module in in_memory {
+ let buffer = ModuleBuffer::new(module.module_llvm.llmod());
+ let llmod_id = CString::new(&module.name[..]).unwrap();
+ serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
+ }
+ // Sort the modules to ensure we produce deterministic results.
+ serialized_modules.sort_by(|module1, module2| module1.1.cmp(&module2.1));
+
+ // For all serialized bitcode files, we parse and link them in as we did
+ // above; this is all mostly handled in C++. Like above, though, we don't
+ // know much about the memory management here, so we err on the side of
+ // being safe and persist everything with the original module.
+ let mut linker = Linker::new(llmod);
+ for (bc_decoded, name) in serialized_modules {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg_recorder("LLVM_fat_lto_link_module", |recorder| {
+ recorder.record_arg(format!("{:?}", name))
+ });
+ info!("linking {:?}", name);
+ let data = bc_decoded.data();
+ linker.add(data).map_err(|()| {
+ let msg = format!("failed to load bitcode of module {:?}", name);
+ write::llvm_err(diag_handler, &msg)
+ })?;
+ serialized_bitcode.push(bc_decoded);
+ }
+ drop(linker);
+ save_temp_bitcode(cgcx, &module, "lto.input");
+
+ // Internalize everything below threshold to help strip out more modules and such.
+ unsafe {
+ let ptr = symbols_below_threshold.as_ptr();
+ llvm::LLVMRustRunRestrictionPass(
+ llmod,
+ ptr as *const *const libc::c_char,
+ symbols_below_threshold.len() as libc::size_t,
+ );
+ save_temp_bitcode(cgcx, &module, "lto.after-restriction");
+ }
+ }
+
+ Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: serialized_bitcode })
+}
+
+pub(crate) struct Linker<'a>(&'a mut llvm::Linker<'a>);
+
+impl<'a> Linker<'a> {
+ pub(crate) fn new(llmod: &'a llvm::Module) -> Self {
+ unsafe { Linker(llvm::LLVMRustLinkerNew(llmod)) }
+ }
+
+ pub(crate) fn add(&mut self, bytecode: &[u8]) -> Result<(), ()> {
+ unsafe {
+ if llvm::LLVMRustLinkerAdd(
+ self.0,
+ bytecode.as_ptr() as *const libc::c_char,
+ bytecode.len(),
+ ) {
+ Ok(())
+ } else {
+ Err(())
+ }
+ }
+ }
+}
+
+impl Drop for Linker<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustLinkerFree(&mut *(self.0 as *mut _));
+ }
+ }
+}
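+
+// Usage sketch (mirroring `fat_lto` above; illustrative only): wrap the base
+// module, feed each serialized module's bitcode into it, then drop the
+// wrapper to free the underlying `LLVMRustLinker`:
+//
+//     let mut linker = Linker::new(llmod);
+//     for (bc_decoded, _name) in serialized_modules {
+//         linker.add(bc_decoded.data()).expect("failed to load bitcode");
+//     }
+//     drop(linker);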
+
+/// Prepare "thin" LTO to get run on these modules.
+///
+/// The general structure of ThinLTO is quite different from the structure of
+/// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into
+/// one giant LLVM module, and then we run more optimization passes over this
+/// big module after internalizing most symbols. Thin LTO, on the other hand,
+/// avoids this large bottleneck through more targeted optimization.
+///
+/// At a high level Thin LTO looks like:
+///
+/// 1. Prepare a "summary" of each LLVM module in question which describes
+/// the values inside, cost of the values, etc.
+/// 2. Merge the summaries of all modules in question into one "index"
+/// 3. Perform some global analysis on this index
+/// 4. For each module, use the index and analysis calculated previously to
+/// perform local transformations on the module, for example inlining
+/// small functions from other modules.
+/// 5. Run thin-specific optimization passes over each module, and then code
+/// generate everything at the end.
+///
+/// The summary for each module is intended to be quite cheap, and the global
+/// index is relatively cheap to create as well. As a result, the goal of
+/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more
+/// situations. For example one cheap optimization is that we can parallelize
+/// all codegen modules, easily making use of all the cores on a machine.
+///
+/// With all that in mind, this function is designed specifically to
+/// calculate the *index* for ThinLTO. This index will then be shared amongst
+/// all of the `LtoModuleCodegen` units returned below and destroyed once
+/// they all go out of scope.
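+///
+/// As a rough sketch in pseudo-code (illustrative only, not the real control
+/// flow):
+///
+/// ```text
+/// summaries = modules.map(summarize)           // step 1
+/// index     = merge(summaries)                 // step 2
+/// analysis  = analyze(index)                   // step 3
+/// for m in modules {
+///     import_and_optimize(m, index, analysis)  // steps 4 and 5, per module
+/// }
+/// ```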
+fn thin_lto(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ modules: Vec<(String, ThinBuffer)>,
+ serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+ symbols_below_threshold: &[*const libc::c_char],
+) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
+ let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis");
+ unsafe {
+ info!("going for that thin, thin LTO");
+
+ let green_modules: FxHashMap<_, _> =
+ cached_modules.iter().map(|&(_, ref wp)| (wp.cgu_name.clone(), wp.clone())).collect();
+
+ let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
+ let mut thin_buffers = Vec::with_capacity(modules.len());
+ let mut module_names = Vec::with_capacity(full_scope_len);
+ let mut thin_modules = Vec::with_capacity(full_scope_len);
+
+ for (i, (name, buffer)) in modules.into_iter().enumerate() {
+ info!("local module: {} - {}", i, name);
+ let cname = CString::new(name.clone()).unwrap();
+ thin_modules.push(llvm::ThinLTOModule {
+ identifier: cname.as_ptr(),
+ data: buffer.data().as_ptr(),
+ len: buffer.data().len(),
+ });
+ thin_buffers.push(buffer);
+ module_names.push(cname);
+ }
+
+ // FIXME: All upstream crates are deserialized internally in the
+ // function below to extract their summary and modules. Note that
+ // unlike the loop above we *must* decode and/or read something
+ // here as these are all just serialized files on disk. An
+ // improvement, however, to make here would be to store the
+ // module summary separately from the actual module itself. Right
+ // now this is store in one large bitcode file, and the entire
+ // file is deflate-compressed. We could try to bypass some of the
+ // decompression by storing the index uncompressed and only
+ // lazily decompressing the bytecode if necessary.
+ //
+ // Note that truly taking advantage of this optimization will
+ // likely be further down the road. We'd have to implement
+ // incremental ThinLTO first, where we could sometimes avoid looking
+ // at the contents of upstream modules entirely (the index, however,
+ // must always be looked at unconditionally).
+ let mut serialized = Vec::with_capacity(serialized_modules.len() + cached_modules.len());
+
+ let cached_modules =
+ cached_modules.into_iter().map(|(sm, wp)| (sm, CString::new(wp.cgu_name).unwrap()));
+
+ for (module, name) in serialized_modules.into_iter().chain(cached_modules) {
+ info!("upstream or cached module {:?}", name);
+ thin_modules.push(llvm::ThinLTOModule {
+ identifier: name.as_ptr(),
+ data: module.data().as_ptr(),
+ len: module.data().len(),
+ });
+ serialized.push(module);
+ module_names.push(name);
+ }
+
+ // Sanity check
+ assert_eq!(thin_modules.len(), module_names.len());
+
+ // Delegate to the C++ bindings to create some data here. Once this is a
+ // tried-and-true interface we may wish to try to upstream some of this
+ // to LLVM itself, right now we reimplement a lot of what they do
+ // upstream...
+ let data = llvm::LLVMRustCreateThinLTOData(
+ thin_modules.as_ptr(),
+ thin_modules.len() as u32,
+ symbols_below_threshold.as_ptr(),
+ symbols_below_threshold.len() as u32,
+ )
+ .ok_or_else(|| write::llvm_err(diag_handler, "failed to prepare thin LTO context"))?;
+
+ let data = ThinData(data);
+
+ info!("thin LTO data created");
+
+ let (key_map_path, prev_key_map, curr_key_map) = if let Some(ref incr_comp_session_dir) =
+ cgcx.incr_comp_session_dir
+ {
+ let path = incr_comp_session_dir.join(THIN_LTO_KEYS_INCR_COMP_FILE_NAME);
+ // If the previous file was deleted, or we get an IO error
+ // reading the file, then we'll just use `None` as the
+ // prev_key_map, which will force the code to be recompiled.
+ let prev =
+ if path.exists() { ThinLTOKeysMap::load_from_file(&path).ok() } else { None };
+ let curr = ThinLTOKeysMap::from_thin_lto_modules(&data, &thin_modules, &module_names);
+ (Some(path), prev, curr)
+ } else {
+ // If we don't compile incrementally, we don't need to load the
+ // import data from LLVM.
+ assert!(green_modules.is_empty());
+ let curr = ThinLTOKeysMap::default();
+ (None, None, curr)
+ };
+ info!("thin LTO cache key map loaded");
+ info!("prev_key_map: {:#?}", prev_key_map);
+ info!("curr_key_map: {:#?}", curr_key_map);
+
+        // Throw our data in an `Arc` as we'll be sharing it across threads. We
+        // also put all memory referenced by the C++ data (buffers, ids, etc.)
+        // into the `Arc`. After this we'll create one thin module codegen
+        // per module in this data.
+ let shared = Arc::new(ThinShared {
+ data,
+ thin_buffers,
+ serialized_modules: serialized,
+ module_names,
+ });
+
+ let mut copy_jobs = vec![];
+ let mut opt_jobs = vec![];
+
+ info!("checking which modules can be-reused and which have to be re-optimized.");
+ for (module_index, module_name) in shared.module_names.iter().enumerate() {
+ let module_name = module_name_to_str(module_name);
+ if let (Some(prev_key_map), true) =
+ (prev_key_map.as_ref(), green_modules.contains_key(module_name))
+ {
+ assert!(cgcx.incr_comp_session_dir.is_some());
+
+ // If a module exists in both the current and the previous session,
+ // and has the same LTO cache key in both sessions, then we can re-use it
+ if prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name) {
+ let work_product = green_modules[module_name].clone();
+ copy_jobs.push(work_product);
+ info!(" - {}: re-used", module_name);
+ assert!(cgcx.incr_comp_session_dir.is_some());
+ cgcx.cgu_reuse_tracker.set_actual_reuse(module_name, CguReuse::PostLto);
+ continue;
+ }
+ }
+
+ info!(" - {}: re-compiled", module_name);
+ opt_jobs.push(LtoModuleCodegen::Thin(ThinModule {
+ shared: shared.clone(),
+ idx: module_index,
+ }));
+ }
+
+ // Save the current ThinLTO import information for the next compilation
+ // session, overwriting the previous serialized data (if any).
+ if let Some(path) = key_map_path {
+ if let Err(err) = curr_key_map.save_to_file(&path) {
+ let msg = format!("Error while writing ThinLTO key data: {}", err);
+ return Err(write::llvm_err(diag_handler, &msg));
+ }
+ }
+
+ Ok((opt_jobs, copy_jobs))
+ }
+}
+
+pub(crate) fn run_pass_manager(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ module: &mut ModuleCodegen<ModuleLlvm>,
+ thin: bool,
+) -> Result<(), FatalError> {
+ let _timer = cgcx.prof.extra_verbose_generic_activity("LLVM_lto_optimize", &*module.name);
+ let config = cgcx.config(module.kind);
+
+ // Now we have one massive module inside of llmod. Time to run the
+ // LTO-specific optimization passes that LLVM provides.
+ //
+    // This code is based on the code found in LLVM's LTO code generator:
+ // llvm/lib/LTO/LTOCodeGenerator.cpp
+ debug!("running the pass manager");
+ unsafe {
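+        // Mark the module as having been through LTO ("LTOPostLink") if the
+        // flag is not already present; the `11` is the byte length of the
+        // "LTOPostLink" key.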
+ if !llvm::LLVMRustHasModuleFlag(
+ module.module_llvm.llmod(),
+ "LTOPostLink".as_ptr().cast(),
+ 11,
+ ) {
+ llvm::LLVMRustAddModuleFlag(
+ module.module_llvm.llmod(),
+ llvm::LLVMModFlagBehavior::Error,
+ "LTOPostLink\0".as_ptr().cast(),
+ 1,
+ );
+ }
+ if llvm_util::should_use_new_llvm_pass_manager(
+ &config.new_llvm_pass_manager,
+ &cgcx.target_arch,
+ ) {
+ let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
+ let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
+ write::optimize_with_new_llvm_pass_manager(
+ cgcx,
+ diag_handler,
+ module,
+ config,
+ opt_level,
+ opt_stage,
+ )?;
+ debug!("lto done");
+ return Ok(());
+ }
+
+ let pm = llvm::LLVMCreatePassManager();
+ llvm::LLVMAddAnalysisPasses(module.module_llvm.tm, pm);
+
+ if config.verify_llvm_ir {
+ let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
+ llvm::LLVMRustAddPass(pm, pass.unwrap());
+ }
+
+ let opt_level = config
+ .opt_level
+ .map(|x| to_llvm_opt_settings(x).0)
+ .unwrap_or(llvm::CodeGenOptLevel::None);
+ with_llvm_pmb(module.module_llvm.llmod(), config, opt_level, false, &mut |b| {
+ if thin {
+ llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm);
+ } else {
+ llvm::LLVMRustPassManagerBuilderPopulateLTOPassManager(
+ b, pm, /* Internalize = */ False, /* RunInliner = */ True,
+ );
+ }
+ });
+
+ // We always generate bitcode through ThinLTOBuffers,
+ // which do not support anonymous globals
+ if config.bitcode_needed() {
+ let pass = llvm::LLVMRustFindAndCreatePass("name-anon-globals\0".as_ptr().cast());
+ llvm::LLVMRustAddPass(pm, pass.unwrap());
+ }
+
+ if config.verify_llvm_ir {
+ let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
+ llvm::LLVMRustAddPass(pm, pass.unwrap());
+ }
+
+ llvm::LLVMRunPassManager(pm, module.module_llvm.llmod());
+
+ llvm::LLVMDisposePassManager(pm);
+ }
+ debug!("lto done");
+ Ok(())
+}
+
+pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer);
+
+unsafe impl Send for ModuleBuffer {}
+unsafe impl Sync for ModuleBuffer {}
+
+impl ModuleBuffer {
+ pub fn new(m: &llvm::Module) -> ModuleBuffer {
+ ModuleBuffer(unsafe { llvm::LLVMRustModuleBufferCreate(m) })
+ }
+}
+
+impl ModuleBufferMethods for ModuleBuffer {
+ fn data(&self) -> &[u8] {
+ unsafe {
+ let ptr = llvm::LLVMRustModuleBufferPtr(self.0);
+ let len = llvm::LLVMRustModuleBufferLen(self.0);
+ slice::from_raw_parts(ptr, len)
+ }
+ }
+}
+
+impl Drop for ModuleBuffer {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustModuleBufferFree(&mut *(self.0 as *mut _));
+ }
+ }
+}
+
+pub struct ThinData(&'static mut llvm::ThinLTOData);
+
+unsafe impl Send for ThinData {}
+unsafe impl Sync for ThinData {}
+
+impl Drop for ThinData {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustFreeThinLTOData(&mut *(self.0 as *mut _));
+ }
+ }
+}
+
+pub struct ThinBuffer(&'static mut llvm::ThinLTOBuffer);
+
+unsafe impl Send for ThinBuffer {}
+unsafe impl Sync for ThinBuffer {}
+
+impl ThinBuffer {
+ pub fn new(m: &llvm::Module, is_thin: bool) -> ThinBuffer {
+ unsafe {
+ let buffer = llvm::LLVMRustThinLTOBufferCreate(m, is_thin);
+ ThinBuffer(buffer)
+ }
+ }
+}
+
+impl ThinBufferMethods for ThinBuffer {
+ fn data(&self) -> &[u8] {
+ unsafe {
+ let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _;
+ let len = llvm::LLVMRustThinLTOBufferLen(self.0);
+ slice::from_raw_parts(ptr, len)
+ }
+ }
+}
+
+impl Drop for ThinBuffer {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustThinLTOBufferFree(&mut *(self.0 as *mut _));
+ }
+ }
+}
+
+pub unsafe fn optimize_thin_module(
+ thin_module: ThinModule<LlvmCodegenBackend>,
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+
+ let module_name = &thin_module.shared.module_names[thin_module.idx];
+ let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
+ let tm =
+ (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&diag_handler, &e))?;
+
+    // Right now the implementation we've got only works over serialized
+    // modules, so we create a fresh new LLVM context and parse the module
+    // into that context. One day we may still need to do this for upstream
+    // crates, but for locally codegened modules we may be able to reuse
+    // their existing LLVM context and module.
+ let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
+ let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &diag_handler)? as *const _;
+ let mut module = ModuleCodegen {
+ module_llvm: ModuleLlvm { llmod_raw, llcx, tm },
+ name: thin_module.name().to_string(),
+ kind: ModuleKind::Regular,
+ };
+ {
+ let target = &*module.module_llvm.tm;
+ let llmod = module.module_llvm.llmod();
+ save_temp_bitcode(cgcx, &module, "thin-lto-input");
+
+ // Before we do much else find the "main" `DICompileUnit` that we'll be
+ // using below. If we find more than one though then rustc has changed
+ // in a way we're not ready for, so generate an ICE by returning
+ // an error.
+ let mut cu1 = ptr::null_mut();
+ let mut cu2 = ptr::null_mut();
+ llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2);
+ if !cu2.is_null() {
+ let msg = "multiple source DICompileUnits found";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+
+ // Up next comes the per-module local analyses that we do for Thin LTO.
+ // Each of these functions is basically copied from the LLVM
+ // implementation and then tailored to suit this implementation. Ideally
+ // each of these would be supported by upstream LLVM but that's perhaps
+ // a patch for another day!
+ //
+ // You can find some more comments about these functions in the LLVM
+ // bindings we've got (currently `PassWrapper.cpp`)
+ {
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name());
+ if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) {
+ let msg = "failed to prepare thin LTO module";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-rename");
+ }
+
+ {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
+ if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) {
+ let msg = "failed to prepare thin LTO module";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
+ }
+
+ {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
+ if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) {
+ let msg = "failed to prepare thin LTO module";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
+ }
+
+ {
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name());
+ if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) {
+ let msg = "failed to prepare thin LTO module";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
+ }
+
+ // Ok now this is a bit unfortunate. This is also something you won't
+ // find upstream in LLVM's ThinLTO passes! This is a hack for now to
+ // work around bugs in LLVM.
+ //
+        // It was first discovered in #45511 that, as part of its ThinLTO
+        // importing passes, LLVM will import `DICompileUnit` metadata
+        // across modules. This means that we'll be working with one
+        // LLVM module that has multiple `DICompileUnit` instances in it (a
+        // bunch of `llvm.dbg.cu` members). Unfortunately there are a number
+        // of bugs in LLVM's backend that generate invalid DWARF in a
+        // situation like this:
+ //
+ // https://bugs.llvm.org/show_bug.cgi?id=35212
+ // https://bugs.llvm.org/show_bug.cgi?id=35562
+ //
+ // While the first bug there is fixed the second ended up causing #46346
+ // which was basically a resurgence of #45511 after LLVM's bug 35212 was
+ // fixed.
+ //
+        // The function below is a huge hack around this problem. It is
+        // defined in `PassWrapper.cpp` and will basically "merge" all
+        // `DICompileUnit` instances in a module: it takes all the objects,
+        // rewrites every `DISubprogram` pointer to point to the first
+        // `DICompileUnit`, and then deletes all the other units.
+        //
+        // This probably mangles the debug info slightly (but hopefully not
+        // too much), but for now it at least gets LLVM to emit valid DWARF
+        // (or so it appears). Hopefully we can remove this once the
+        // upstream bugs are fixed in LLVM.
+ {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_thin_lto_patch_debuginfo", thin_module.name());
+ llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1);
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-patch");
+ }
+
+ // Alright now that we've done everything related to the ThinLTO
+ // analysis it's time to run some optimizations! Here we use the same
+ // `run_pass_manager` as the "fat" LTO above except that we tell it to
+ // populate a thin-specific pass manager, which presumably LLVM treats a
+ // little differently.
+ {
+ info!("running thin lto passes over {}", module.name);
+ run_pass_manager(cgcx, &diag_handler, &mut module, true)?;
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
+ }
+ }
+ Ok(module)
+}
+
+/// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys
+#[derive(Debug, Default)]
+pub struct ThinLTOKeysMap {
+ // key = llvm name of importing module, value = LLVM cache key
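+    // On disk this is serialized as one `<module> <key>` pair per line; for
+    // example (hypothetical values): `foo.7rcbfp3g-cgu.0 c99b6b25b0d3a4ee`.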
+ keys: FxHashMap<String, String>,
+}
+
+impl ThinLTOKeysMap {
+ fn save_to_file(&self, path: &Path) -> io::Result<()> {
+ use std::io::Write;
+ let file = File::create(path)?;
+ let mut writer = io::BufWriter::new(file);
+ for (module, key) in &self.keys {
+ writeln!(writer, "{} {}", module, key)?;
+ }
+ Ok(())
+ }
+
+ fn load_from_file(path: &Path) -> io::Result<Self> {
+ use std::io::BufRead;
+ let mut keys = FxHashMap::default();
+ let file = File::open(path)?;
+ for line in io::BufReader::new(file).lines() {
+ let line = line?;
+ let mut split = line.split(' ');
+ let module = split.next().unwrap();
+ let key = split.next().unwrap();
+ assert_eq!(split.next(), None, "Expected two space-separated values, found {:?}", line);
+ keys.insert(module.to_string(), key.to_string());
+ }
+ Ok(Self { keys })
+ }
+
+ fn from_thin_lto_modules(
+ data: &ThinData,
+ modules: &[llvm::ThinLTOModule],
+ names: &[CString],
+ ) -> Self {
+ let keys = iter::zip(modules, names)
+ .map(|(module, name)| {
+ let key = build_string(|rust_str| unsafe {
+ llvm::LLVMRustComputeLTOCacheKey(rust_str, module.identifier, data.0);
+ })
+ .expect("Invalid ThinLTO module key");
+ (name.clone().into_string().unwrap(), key)
+ })
+ .collect();
+ Self { keys }
+ }
+}
+
+fn module_name_to_str(c_str: &CStr) -> &str {
+ c_str.to_str().unwrap_or_else(|e| {
+ bug!("Encountered non-utf8 LLVM module name `{}`: {}", c_str.to_string_lossy(), e)
+ })
+}
+
+pub fn parse_module<'a>(
+ cx: &'a llvm::Context,
+ name: &CStr,
+ data: &[u8],
+ diag_handler: &Handler,
+) -> Result<&'a llvm::Module, FatalError> {
+ unsafe {
+ llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr()).ok_or_else(
+ || {
+ let msg = "failed to parse bitcode for LTO module";
+ write::llvm_err(diag_handler, msg)
+ },
+ )
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/profiling.rs b/compiler/rustc_codegen_llvm/src/back/profiling.rs
new file mode 100644
index 000000000..2741f7d84
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/profiling.rs
@@ -0,0 +1,58 @@
+use measureme::{event_id::SEPARATOR_BYTE, EventId, StringComponent, StringId};
+use rustc_data_structures::profiling::{SelfProfiler, TimingGuard};
+use std::ffi::{c_void, CStr};
+use std::os::raw::c_char;
+use std::sync::Arc;
+
+fn llvm_args_to_string_id(profiler: &SelfProfiler, pass_name: &str, ir_name: &str) -> EventId {
+ let pass_name = profiler.get_or_alloc_cached_string(pass_name);
+ let mut components = vec![StringComponent::Ref(pass_name)];
+ // handle that LazyCallGraph::SCC is a comma separated list within parentheses
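+    // e.g. an ir_name of "(foo, bar)" is split into "foo" and "bar", each of
+    // which is then demangled separately below.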
+ let parentheses: &[_] = &['(', ')'];
+ let trimmed = ir_name.trim_matches(parentheses);
+ for part in trimmed.split(", ") {
+ let demangled_ir_name = rustc_demangle::demangle(part).to_string();
+ let ir_name = profiler.get_or_alloc_cached_string(demangled_ir_name);
+ components.push(StringComponent::Value(SEPARATOR_BYTE));
+ components.push(StringComponent::Ref(ir_name));
+ }
+ EventId::from_label(profiler.alloc_string(components.as_slice()))
+}
+
+pub struct LlvmSelfProfiler<'a> {
+ profiler: Arc<SelfProfiler>,
+ stack: Vec<TimingGuard<'a>>,
+ llvm_pass_event_kind: StringId,
+}
+
+impl<'a> LlvmSelfProfiler<'a> {
+ pub fn new(profiler: Arc<SelfProfiler>) -> Self {
+ let llvm_pass_event_kind = profiler.alloc_string("LLVM Pass");
+ Self { profiler, stack: Vec::default(), llvm_pass_event_kind }
+ }
+
+ fn before_pass_callback(&'a mut self, pass_name: &str, ir_name: &str) {
+ let event_id = llvm_args_to_string_id(&self.profiler, pass_name, ir_name);
+
+ self.stack.push(TimingGuard::start(&self.profiler, self.llvm_pass_event_kind, event_id));
+ }
+ fn after_pass_callback(&mut self) {
+ self.stack.pop();
+ }
+}
+
+pub unsafe extern "C" fn selfprofile_before_pass_callback(
+ llvm_self_profiler: *mut c_void,
+ pass_name: *const c_char,
+ ir_name: *const c_char,
+) {
+ let llvm_self_profiler = &mut *(llvm_self_profiler as *mut LlvmSelfProfiler<'_>);
+ let pass_name = CStr::from_ptr(pass_name).to_str().expect("valid UTF-8");
+ let ir_name = CStr::from_ptr(ir_name).to_str().expect("valid UTF-8");
+ llvm_self_profiler.before_pass_callback(pass_name, ir_name);
+}
+
+pub unsafe extern "C" fn selfprofile_after_pass_callback(llvm_self_profiler: *mut c_void) {
+ let llvm_self_profiler = &mut *(llvm_self_profiler as *mut LlvmSelfProfiler<'_>);
+ llvm_self_profiler.after_pass_callback();
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
new file mode 100644
index 000000000..534d32e8a
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -0,0 +1,1212 @@
+use crate::back::lto::ThinBuffer;
+use crate::back::profiling::{
+ selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler,
+};
+use crate::base;
+use crate::common;
+use crate::consts;
+use crate::llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic};
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::LlvmCodegenBackend;
+use crate::ModuleLlvm;
+use rustc_codegen_ssa::back::link::ensure_removed;
+use rustc_codegen_ssa::back::write::{
+ BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
+ TargetMachineFactoryFn,
+};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_errors::{FatalError, Handler, Level};
+use rustc_fs_util::{link_or_copy, path_to_c_string};
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{self, Lto, OutputType, Passes, SplitDwarfKind, SwitchWithOptPath};
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::InnerSpan;
+use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo};
+use tracing::debug;
+
+use libc::{c_char, c_int, c_uint, c_void, size_t};
+use std::ffi::CString;
+use std::fs;
+use std::io::{self, Write};
+use std::path::{Path, PathBuf};
+use std::slice;
+use std::str;
+use std::sync::Arc;
+
+pub fn llvm_err(handler: &rustc_errors::Handler, msg: &str) -> FatalError {
+ match llvm::last_error() {
+ Some(err) => handler.fatal(&format!("{}: {}", msg, err)),
+ None => handler.fatal(msg),
+ }
+}
+
+pub fn write_output_file<'ll>(
+ handler: &rustc_errors::Handler,
+ target: &'ll llvm::TargetMachine,
+ pm: &llvm::PassManager<'ll>,
+ m: &'ll llvm::Module,
+ output: &Path,
+ dwo_output: Option<&Path>,
+ file_type: llvm::FileType,
+ self_profiler_ref: &SelfProfilerRef,
+) -> Result<(), FatalError> {
+ debug!("write_output_file output={:?} dwo_output={:?}", output, dwo_output);
+ unsafe {
+ let output_c = path_to_c_string(output);
+ let dwo_output_c;
+ let dwo_output_ptr = if let Some(dwo_output) = dwo_output {
+ dwo_output_c = path_to_c_string(dwo_output);
+ dwo_output_c.as_ptr()
+ } else {
+ std::ptr::null()
+ };
+ let result = llvm::LLVMRustWriteOutputFile(
+ target,
+ pm,
+ m,
+ output_c.as_ptr(),
+ dwo_output_ptr,
+ file_type,
+ );
+
+ // Record artifact sizes for self-profiling
+ if result == llvm::LLVMRustResult::Success {
+ let artifact_kind = match file_type {
+ llvm::FileType::ObjectFile => "object_file",
+ llvm::FileType::AssemblyFile => "assembly_file",
+ };
+ record_artifact_size(self_profiler_ref, artifact_kind, output);
+ if let Some(dwo_file) = dwo_output {
+ record_artifact_size(self_profiler_ref, "dwo_file", dwo_file);
+ }
+ }
+
+ result.into_result().map_err(|()| {
+ let msg = format!("could not write output to {}", output.display());
+ llvm_err(handler, &msg)
+ })
+ }
+}
+
+pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm::TargetMachine {
+ let config = TargetMachineFactoryConfig { split_dwarf_file: None };
+ // Can't use query system here quite yet because this function is invoked before the query
+ // system/tcx is set up.
+ let features = llvm_util::global_llvm_features(sess, false);
+ target_machine_factory(sess, config::OptLevel::No, &features)(config)
+ .unwrap_or_else(|err| llvm_err(sess.diagnostic(), &err).raise())
+}
+
+pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut llvm::TargetMachine {
+ let split_dwarf_file = if tcx.sess.target_can_use_split_dwarf() {
+ tcx.output_filenames(()).split_dwarf_path(
+ tcx.sess.split_debuginfo(),
+ tcx.sess.opts.unstable_opts.split_dwarf_kind,
+ Some(mod_name),
+ )
+ } else {
+ None
+ };
+ let config = TargetMachineFactoryConfig { split_dwarf_file };
+ target_machine_factory(
+ &tcx.sess,
+ tcx.backend_optimization_level(()),
+ tcx.global_backend_features(()),
+ )(config)
+ .unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), &err).raise())
+}
+
+pub fn to_llvm_opt_settings(
+ cfg: config::OptLevel,
+) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) {
+ use self::config::OptLevel::*;
+ match cfg {
+ No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone),
+ Less => (llvm::CodeGenOptLevel::Less, llvm::CodeGenOptSizeNone),
+ Default => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeNone),
+ Aggressive => (llvm::CodeGenOptLevel::Aggressive, llvm::CodeGenOptSizeNone),
+ Size => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeDefault),
+ SizeMin => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeAggressive),
+ }
+}
+
+fn to_pass_builder_opt_level(cfg: config::OptLevel) -> llvm::PassBuilderOptLevel {
+ use config::OptLevel::*;
+ match cfg {
+ No => llvm::PassBuilderOptLevel::O0,
+ Less => llvm::PassBuilderOptLevel::O1,
+ Default => llvm::PassBuilderOptLevel::O2,
+ Aggressive => llvm::PassBuilderOptLevel::O3,
+ Size => llvm::PassBuilderOptLevel::Os,
+ SizeMin => llvm::PassBuilderOptLevel::Oz,
+ }
+}
+
+fn to_llvm_relocation_model(relocation_model: RelocModel) -> llvm::RelocModel {
+ match relocation_model {
+ RelocModel::Static => llvm::RelocModel::Static,
+ // LLVM doesn't have a PIE relocation model, it represents PIE as PIC with an extra attribute.
+ RelocModel::Pic | RelocModel::Pie => llvm::RelocModel::PIC,
+ RelocModel::DynamicNoPic => llvm::RelocModel::DynamicNoPic,
+ RelocModel::Ropi => llvm::RelocModel::ROPI,
+ RelocModel::Rwpi => llvm::RelocModel::RWPI,
+ RelocModel::RopiRwpi => llvm::RelocModel::ROPI_RWPI,
+ }
+}
+
+pub(crate) fn to_llvm_code_model(code_model: Option<CodeModel>) -> llvm::CodeModel {
+ match code_model {
+ Some(CodeModel::Tiny) => llvm::CodeModel::Tiny,
+ Some(CodeModel::Small) => llvm::CodeModel::Small,
+ Some(CodeModel::Kernel) => llvm::CodeModel::Kernel,
+ Some(CodeModel::Medium) => llvm::CodeModel::Medium,
+ Some(CodeModel::Large) => llvm::CodeModel::Large,
+ None => llvm::CodeModel::None,
+ }
+}
+
+pub fn target_machine_factory(
+ sess: &Session,
+ optlvl: config::OptLevel,
+ target_features: &[String],
+) -> TargetMachineFactoryFn<LlvmCodegenBackend> {
+ let reloc_model = to_llvm_relocation_model(sess.relocation_model());
+
+ let (opt_level, _) = to_llvm_opt_settings(optlvl);
+ let use_softfp = sess.opts.cg.soft_float;
+
+ let ffunction_sections =
+ sess.opts.unstable_opts.function_sections.unwrap_or(sess.target.function_sections);
+ let fdata_sections = ffunction_sections;
+ let funique_section_names = !sess.opts.unstable_opts.no_unique_section_names;
+
+ let code_model = to_llvm_code_model(sess.code_model());
+
+ let mut singlethread = sess.target.singlethread;
+
+    // On the wasm target, once the `atomics` feature is enabled we are no
+    // longer single-threaded, and we don't want LLVM to lower atomic
+    // operations to their single-threaded equivalents.
+ if singlethread && sess.target.is_like_wasm && sess.target_features.contains(&sym::atomics) {
+ singlethread = false;
+ }
+
+ let triple = SmallCStr::new(&sess.target.llvm_target);
+ let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
+ let features = CString::new(target_features.join(",")).unwrap();
+ let abi = SmallCStr::new(&sess.target.llvm_abiname);
+ let trap_unreachable =
+ sess.opts.unstable_opts.trap_unreachable.unwrap_or(sess.target.trap_unreachable);
+ let emit_stack_size_section = sess.opts.unstable_opts.emit_stack_sizes;
+
+ let asm_comments = sess.asm_comments();
+ let relax_elf_relocations =
+ sess.opts.unstable_opts.relax_elf_relocations.unwrap_or(sess.target.relax_elf_relocations);
+
+ let use_init_array =
+ !sess.opts.unstable_opts.use_ctors_section.unwrap_or(sess.target.use_ctors_section);
+
+ let path_mapping = sess.source_map().path_mapping().clone();
+
+ Arc::new(move |config: TargetMachineFactoryConfig| {
+ let split_dwarf_file =
+ path_mapping.map_prefix(config.split_dwarf_file.unwrap_or_default()).0;
+ let split_dwarf_file = CString::new(split_dwarf_file.to_str().unwrap()).unwrap();
+
+ let tm = unsafe {
+ llvm::LLVMRustCreateTargetMachine(
+ triple.as_ptr(),
+ cpu.as_ptr(),
+ features.as_ptr(),
+ abi.as_ptr(),
+ code_model,
+ reloc_model,
+ opt_level,
+ use_softfp,
+ ffunction_sections,
+ fdata_sections,
+ funique_section_names,
+ trap_unreachable,
+ singlethread,
+ asm_comments,
+ emit_stack_size_section,
+ relax_elf_relocations,
+ use_init_array,
+ split_dwarf_file.as_ptr(),
+ )
+ };
+
+ tm.ok_or_else(|| {
+ format!("Could not create LLVM TargetMachine for triple: {}", triple.to_str().unwrap())
+ })
+ })
+}
+
+pub(crate) fn save_temp_bitcode(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ module: &ModuleCodegen<ModuleLlvm>,
+ name: &str,
+) {
+ if !cgcx.save_temps {
+ return;
+ }
+ unsafe {
+ let ext = format!("{}.bc", name);
+ let cgu = Some(&module.name[..]);
+ let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
+ let cstr = path_to_c_string(&path);
+ let llmod = module.module_llvm.llmod();
+ llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
+ }
+}
+
+pub struct DiagnosticHandlers<'a> {
+ data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
+ llcx: &'a llvm::Context,
+ old_handler: Option<&'a llvm::DiagnosticHandler>,
+}
+
+impl<'a> DiagnosticHandlers<'a> {
+ pub fn new(
+ cgcx: &'a CodegenContext<LlvmCodegenBackend>,
+ handler: &'a Handler,
+ llcx: &'a llvm::Context,
+ ) -> Self {
+ let remark_passes_all: bool;
+ let remark_passes: Vec<CString>;
+ match &cgcx.remark {
+ Passes::All => {
+ remark_passes_all = true;
+ remark_passes = Vec::new();
+ }
+ Passes::Some(passes) => {
+ remark_passes_all = false;
+ remark_passes =
+ passes.iter().map(|name| CString::new(name.as_str()).unwrap()).collect();
+ }
+ };
+ let remark_passes: Vec<*const c_char> =
+ remark_passes.iter().map(|name: &CString| name.as_ptr()).collect();
+ let data = Box::into_raw(Box::new((cgcx, handler)));
+ unsafe {
+ let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
+ llvm::LLVMRustContextConfigureDiagnosticHandler(
+ llcx,
+ diagnostic_handler,
+ data.cast(),
+ remark_passes_all,
+ remark_passes.as_ptr(),
+ remark_passes.len(),
+ );
+ llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
+ DiagnosticHandlers { data, llcx, old_handler }
+ }
+ }
+}
+
+impl<'a> Drop for DiagnosticHandlers<'a> {
+ fn drop(&mut self) {
+ use std::ptr::null_mut;
+ unsafe {
+ llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
+ llvm::LLVMRustContextSetDiagnosticHandler(self.llcx, self.old_handler);
+ drop(Box::from_raw(self.data));
+ }
+ }
+}
+
+fn report_inline_asm(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ msg: String,
+ level: llvm::DiagnosticLevel,
+ mut cookie: c_uint,
+ source: Option<(String, Vec<InnerSpan>)>,
+) {
+    // In LTO builds we may get srcloc values from other crates, which are
+    // invalid since they use a different source map. To be safe we just
+    // suppress these in LTO builds.
+ if matches!(cgcx.lto, Lto::Fat | Lto::Thin) {
+ cookie = 0;
+ }
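+    // A cookie of 0 carries no source location, so the diagnostic is
+    // reported without a span.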
+ let level = match level {
+ llvm::DiagnosticLevel::Error => Level::Error { lint: false },
+ llvm::DiagnosticLevel::Warning => Level::Warning(None),
+ llvm::DiagnosticLevel::Note | llvm::DiagnosticLevel::Remark => Level::Note,
+ };
+ cgcx.diag_emitter.inline_asm_error(cookie as u32, msg, level, source);
+}
+
+unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, user: *const c_void, cookie: c_uint) {
+ if user.is_null() {
+ return;
+ }
+ let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
+
+ let smdiag = llvm::diagnostic::SrcMgrDiagnostic::unpack(diag);
+ report_inline_asm(cgcx, smdiag.message, smdiag.level, cookie, smdiag.source);
+}
+
+unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
+ if user.is_null() {
+ return;
+ }
+ let (cgcx, diag_handler) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
+
+ match llvm::diagnostic::Diagnostic::unpack(info) {
+ llvm::diagnostic::InlineAsm(inline) => {
+ report_inline_asm(cgcx, inline.message, inline.level, inline.cookie, inline.source);
+ }
+
+ llvm::diagnostic::Optimization(opt) => {
+ let enabled = match cgcx.remark {
+ Passes::All => true,
+ Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
+ };
+
+ if enabled {
+ diag_handler.note_without_error(&format!(
+ "{}:{}:{}: {}: {}",
+ opt.filename, opt.line, opt.column, opt.pass_name, opt.message,
+ ));
+ }
+ }
+ llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
+ let msg = llvm::build_string(|s| {
+ llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
+ })
+ .expect("non-UTF8 diagnostic");
+ diag_handler.warn(&msg);
+ }
+ llvm::diagnostic::Unsupported(diagnostic_ref) => {
+ let msg = llvm::build_string(|s| {
+ llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
+ })
+ .expect("non-UTF8 diagnostic");
+ diag_handler.err(&msg);
+ }
+ llvm::diagnostic::UnknownDiagnostic(..) => {}
+ }
+}
+
+fn get_pgo_gen_path(config: &ModuleConfig) -> Option<CString> {
+ match config.pgo_gen {
+ SwitchWithOptPath::Enabled(ref opt_dir_path) => {
+ let path = if let Some(dir_path) = opt_dir_path {
+ dir_path.join("default_%m.profraw")
+ } else {
+ PathBuf::from("default_%m.profraw")
+ };
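+            // `%m` is expanded by LLVM's profiling runtime to the
+            // instrumented binary's signature, so that concurrent runs can
+            // share (and later merge) a pool of raw profiles in one place.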
+
+ Some(CString::new(format!("{}", path.display())).unwrap())
+ }
+ SwitchWithOptPath::Disabled => None,
+ }
+}
+
+fn get_pgo_use_path(config: &ModuleConfig) -> Option<CString> {
+ config
+ .pgo_use
+ .as_ref()
+ .map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
+}
+
+fn get_pgo_sample_use_path(config: &ModuleConfig) -> Option<CString> {
+ config
+ .pgo_sample_use
+ .as_ref()
+ .map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
+}
+
+pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ module: &ModuleCodegen<ModuleLlvm>,
+ config: &ModuleConfig,
+ opt_level: config::OptLevel,
+ opt_stage: llvm::OptStage,
+) -> Result<(), FatalError> {
+ let unroll_loops =
+ opt_level != config::OptLevel::Size && opt_level != config::OptLevel::SizeMin;
+ let using_thin_buffers = opt_stage == llvm::OptStage::PreLinkThinLTO || config.bitcode_needed();
+ let pgo_gen_path = get_pgo_gen_path(config);
+ let pgo_use_path = get_pgo_use_path(config);
+ let pgo_sample_use_path = get_pgo_sample_use_path(config);
+ let is_lto = opt_stage == llvm::OptStage::ThinLTO || opt_stage == llvm::OptStage::FatLTO;
+ // Sanitizer instrumentation is only inserted during the pre-link optimization stage.
+ let sanitizer_options = if !is_lto {
+ Some(llvm::SanitizerOptions {
+ sanitize_address: config.sanitizer.contains(SanitizerSet::ADDRESS),
+ sanitize_address_recover: config.sanitizer_recover.contains(SanitizerSet::ADDRESS),
+ sanitize_memory: config.sanitizer.contains(SanitizerSet::MEMORY),
+ sanitize_memory_recover: config.sanitizer_recover.contains(SanitizerSet::MEMORY),
+ sanitize_memory_track_origins: config.sanitizer_memory_track_origins as c_int,
+ sanitize_thread: config.sanitizer.contains(SanitizerSet::THREAD),
+ sanitize_hwaddress: config.sanitizer.contains(SanitizerSet::HWADDRESS),
+ sanitize_hwaddress_recover: config.sanitizer_recover.contains(SanitizerSet::HWADDRESS),
+ })
+ } else {
+ None
+ };
+
+ let mut llvm_profiler = if cgcx.prof.llvm_recording_enabled() {
+ Some(LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap()))
+ } else {
+ None
+ };
+
+ let llvm_selfprofiler =
+ llvm_profiler.as_mut().map(|s| s as *mut _ as *mut c_void).unwrap_or(std::ptr::null_mut());
+
+ let extra_passes = if !is_lto { config.passes.join(",") } else { "".to_string() };
+
+ let llvm_plugins = config.llvm_plugins.join(",");
+
+ // FIXME: NewPM doesn't provide a facility to pass custom InlineParams.
+ // We would have to add upstream support for this first, before we can support
+ // config.inline_threshold and our more aggressive default thresholds.
+ let result = llvm::LLVMRustOptimizeWithNewPassManager(
+ module.module_llvm.llmod(),
+ &*module.module_llvm.tm,
+ to_pass_builder_opt_level(opt_level),
+ opt_stage,
+ config.no_prepopulate_passes,
+ config.verify_llvm_ir,
+ using_thin_buffers,
+ config.merge_functions,
+ unroll_loops,
+ config.vectorize_slp,
+ config.vectorize_loop,
+ config.no_builtins,
+ config.emit_lifetime_markers,
+ sanitizer_options.as_ref(),
+ pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+ pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+ config.instrument_coverage,
+ config.instrument_gcov,
+ pgo_sample_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+ config.debug_info_for_profiling,
+ llvm_selfprofiler,
+ selfprofile_before_pass_callback,
+ selfprofile_after_pass_callback,
+ extra_passes.as_ptr().cast(),
+ extra_passes.len(),
+ llvm_plugins.as_ptr().cast(),
+ llvm_plugins.len(),
+ );
+ result.into_result().map_err(|()| llvm_err(diag_handler, "failed to run LLVM passes"))
+}
+
+// Unsafe due to LLVM calls.
+pub(crate) unsafe fn optimize(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ module: &ModuleCodegen<ModuleLlvm>,
+ config: &ModuleConfig,
+) -> Result<(), FatalError> {
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name);
+
+ let llmod = module.module_llvm.llmod();
+ let llcx = &*module.module_llvm.llcx;
+ let tm = &*module.module_llvm.tm;
+ let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+ let module_name = module.name.clone();
+ let module_name = Some(&module_name[..]);
+
+ if let Some(false) = config.new_llvm_pass_manager && llvm_util::get_version() >= (15, 0, 0) {
+ diag_handler.warn(
+ "ignoring `-Z new-llvm-pass-manager=no`, which is no longer supported with LLVM 15",
+ );
+ }
+
+ if config.emit_no_opt_bc {
+ let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
+ let out = path_to_c_string(&out);
+ llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
+ }
+
+ if let Some(opt_level) = config.opt_level {
+ if llvm_util::should_use_new_llvm_pass_manager(
+ &config.new_llvm_pass_manager,
+ &cgcx.target_arch,
+ ) {
+ let opt_stage = match cgcx.lto {
+ Lto::Fat => llvm::OptStage::PreLinkFatLTO,
+ Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
+ _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
+ _ => llvm::OptStage::PreLinkNoLTO,
+ };
+ return optimize_with_new_llvm_pass_manager(
+ cgcx,
+ diag_handler,
+ module,
+ config,
+ opt_level,
+ opt_stage,
+ );
+ }
+
+ if cgcx.prof.llvm_recording_enabled() {
+ diag_handler
+ .warn("`-Z self-profile-events = llvm` requires `-Z new-llvm-pass-manager`");
+ }
+
+ // Create the two optimizing pass managers. These mirror what clang
+        // does, and are populated by LLVM's default PassManagerBuilder.
+ // Each manager has a different set of passes, but they also share
+ // some common passes.
+ let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
+ let mpm = llvm::LLVMCreatePassManager();
+
+ {
+ let find_pass = |pass_name: &str| {
+ let pass_name = SmallCStr::new(pass_name);
+ llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr())
+ };
+
+ if config.verify_llvm_ir {
+ // Verification should run as the very first pass.
+ llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap());
+ }
+
+ let mut extra_passes = Vec::new();
+ let mut have_name_anon_globals_pass = false;
+
+ for pass_name in &config.passes {
+ if pass_name == "lint" {
+ // Linting should also be performed early, directly on the generated IR.
+ llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
+ continue;
+ }
+
+ if let Some(pass) = find_pass(pass_name) {
+ extra_passes.push(pass);
+ } else {
+ diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name));
+ }
+
+ if pass_name == "name-anon-globals" {
+ have_name_anon_globals_pass = true;
+ }
+ }
+
+ // Instrumentation must be inserted before optimization,
+ // otherwise LLVM may optimize some functions away which
+ // breaks llvm-cov.
+ //
+ // This mirrors what Clang does in lib/CodeGen/BackendUtil.cpp.
+ if config.instrument_gcov {
+ llvm::LLVMRustAddPass(mpm, find_pass("insert-gcov-profiling").unwrap());
+ }
+ if config.instrument_coverage {
+ llvm::LLVMRustAddPass(mpm, find_pass("instrprof").unwrap());
+ }
+ if config.debug_info_for_profiling {
+ llvm::LLVMRustAddPass(mpm, find_pass("add-discriminators").unwrap());
+ }
+
+ add_sanitizer_passes(config, &mut extra_passes);
+
+ // Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
+ // to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
+ // we'll get errors in LLVM.
+ let using_thin_buffers = config.bitcode_needed();
+ if !config.no_prepopulate_passes {
+ llvm::LLVMAddAnalysisPasses(tm, fpm);
+ llvm::LLVMAddAnalysisPasses(tm, mpm);
+ let opt_level = to_llvm_opt_settings(opt_level).0;
+ let prepare_for_thin_lto = cgcx.lto == Lto::Thin
+ || cgcx.lto == Lto::ThinLocal
+ || (cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
+ with_llvm_pmb(llmod, config, opt_level, prepare_for_thin_lto, &mut |b| {
+ llvm::LLVMRustAddLastExtensionPasses(
+ b,
+ extra_passes.as_ptr(),
+ extra_passes.len() as size_t,
+ );
+ llvm::LLVMRustPassManagerBuilderPopulateFunctionPassManager(b, fpm);
+ llvm::LLVMRustPassManagerBuilderPopulateModulePassManager(b, mpm);
+ });
+
+ have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
+ if using_thin_buffers && !prepare_for_thin_lto {
+ llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
+ have_name_anon_globals_pass = true;
+ }
+ } else {
+ // If we don't use the standard pipeline, directly populate the MPM
+ // with the extra passes.
+ for pass in extra_passes {
+ llvm::LLVMRustAddPass(mpm, pass);
+ }
+ }
+
+ if using_thin_buffers && !have_name_anon_globals_pass {
+ // As described above, this will probably cause an error in LLVM
+ if config.no_prepopulate_passes {
+ diag_handler.err(
+ "The current compilation is going to use thin LTO buffers \
+ without running LLVM's NameAnonGlobals pass. \
+ This will likely cause errors in LLVM. Consider adding \
+ -C passes=name-anon-globals to the compiler command line.",
+ );
+ } else {
+ bug!(
+ "We are using thin LTO buffers without running the NameAnonGlobals pass. \
+ This will likely cause errors in LLVM and should never happen."
+ );
+ }
+ }
+ }
+
+ diag_handler.abort_if_errors();
+
+ // Finally, run the actual optimization passes
+ {
+ let _timer = cgcx.prof.extra_verbose_generic_activity(
+ "LLVM_module_optimize_function_passes",
+ &*module.name,
+ );
+ llvm::LLVMRustRunFunctionPassManager(fpm, llmod);
+ }
+ {
+ let _timer = cgcx.prof.extra_verbose_generic_activity(
+ "LLVM_module_optimize_module_passes",
+ &*module.name,
+ );
+ llvm::LLVMRunPassManager(mpm, llmod);
+ }
+
+ // Deallocate managers that we're now done with
+ llvm::LLVMDisposePassManager(fpm);
+ llvm::LLVMDisposePassManager(mpm);
+ }
+ Ok(())
+}
+
+unsafe fn add_sanitizer_passes(config: &ModuleConfig, passes: &mut Vec<&'static mut llvm::Pass>) {
+ if config.sanitizer.contains(SanitizerSet::ADDRESS) {
+ let recover = config.sanitizer_recover.contains(SanitizerSet::ADDRESS);
+ passes.push(llvm::LLVMRustCreateAddressSanitizerFunctionPass(recover));
+ passes.push(llvm::LLVMRustCreateModuleAddressSanitizerPass(recover));
+ }
+ if config.sanitizer.contains(SanitizerSet::MEMORY) {
+ let track_origins = config.sanitizer_memory_track_origins as c_int;
+ let recover = config.sanitizer_recover.contains(SanitizerSet::MEMORY);
+ passes.push(llvm::LLVMRustCreateMemorySanitizerPass(track_origins, recover));
+ }
+ if config.sanitizer.contains(SanitizerSet::THREAD) {
+ passes.push(llvm::LLVMRustCreateThreadSanitizerPass());
+ }
+ if config.sanitizer.contains(SanitizerSet::HWADDRESS) {
+ let recover = config.sanitizer_recover.contains(SanitizerSet::HWADDRESS);
+ passes.push(llvm::LLVMRustCreateHWAddressSanitizerPass(recover));
+ }
+}
+
+pub(crate) fn link(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ mut modules: Vec<ModuleCodegen<ModuleLlvm>>,
+) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
+ use super::lto::{Linker, ModuleBuffer};
+    // Sort the modules by name to ensure deterministic behavior.
+ modules.sort_by(|a, b| a.name.cmp(&b.name));
+ let (first, elements) =
+ modules.split_first().expect("Bug! modules must contain at least one module.");
+
+ let mut linker = Linker::new(first.module_llvm.llmod());
+ for module in elements {
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_link_module", &*module.name);
+ let buffer = ModuleBuffer::new(module.module_llvm.llmod());
+ linker.add(buffer.data()).map_err(|()| {
+ let msg = format!("failed to serialize module {:?}", module.name);
+ llvm_err(diag_handler, &msg)
+ })?;
+ }
+ drop(linker);
+ Ok(modules.remove(0))
+}
+
+pub(crate) unsafe fn codegen(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ module: ModuleCodegen<ModuleLlvm>,
+ config: &ModuleConfig,
+) -> Result<CompiledModule, FatalError> {
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
+ {
+ let llmod = module.module_llvm.llmod();
+ let llcx = &*module.module_llvm.llcx;
+ let tm = &*module.module_llvm.tm;
+ let module_name = module.name.clone();
+ let module_name = Some(&module_name[..]);
+ let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+ if cgcx.msvc_imps_needed {
+ create_msvc_imps(cgcx, llcx, llmod);
+ }
+
+ // A codegen-specific pass manager is used to generate object
+ // files for an LLVM module.
+ //
+        // Apparently each of these pass managers is a one-shot kind of
+        // thing, so we create a new one for each type of output. The
+        // pass manager passed to the closure must not escape the
+        // closure itself, and each manager should only be used once.
+ unsafe fn with_codegen<'ll, F, R>(
+ tm: &'ll llvm::TargetMachine,
+ llmod: &'ll llvm::Module,
+ no_builtins: bool,
+ f: F,
+ ) -> R
+ where
+ F: FnOnce(&'ll mut PassManager<'ll>) -> R,
+ {
+ let cpm = llvm::LLVMCreatePassManager();
+ llvm::LLVMAddAnalysisPasses(tm, cpm);
+ llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
+ f(cpm)
+ }
+
+ // Two things to note:
+ // - If object files are just LLVM bitcode we write bitcode, copy it to
+ // the .o file, and delete the bitcode if it wasn't otherwise
+ // requested.
+ // - If we don't have the integrated assembler then we need to emit
+ // asm from LLVM and use `gcc` to create the object file.
+
+ let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
+ let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
+
+ if config.bitcode_needed() {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &*module.name);
+ let thin = ThinBuffer::new(llmod, config.emit_thin_lto);
+ let data = thin.data();
+
+ if let Some(bitcode_filename) = bc_out.file_name() {
+ cgcx.prof.artifact_size(
+ "llvm_bitcode",
+ bitcode_filename.to_string_lossy(),
+ data.len() as u64,
+ );
+ }
+
+ if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_emit_bitcode", &*module.name);
+ if let Err(e) = fs::write(&bc_out, data) {
+ let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
+ diag_handler.err(&msg);
+ }
+ }
+
+ if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_embed_bitcode", &*module.name);
+ embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data);
+ }
+ }
+
+ if config.emit_ir {
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_ir", &*module.name);
+ let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
+ let out_c = path_to_c_string(&out);
+
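+            // Invoked by LLVM while printing the module: given a mangled
+            // symbol name, write its demangled form into the output buffer
+            // and return the number of bytes written, or return 0 to leave
+            // the symbol unchanged (e.g. for non-Rust symbols, or when the
+            // buffer is too small).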
+ extern "C" fn demangle_callback(
+ input_ptr: *const c_char,
+ input_len: size_t,
+ output_ptr: *mut c_char,
+ output_len: size_t,
+ ) -> size_t {
+ let input =
+ unsafe { slice::from_raw_parts(input_ptr as *const u8, input_len as usize) };
+
+ let Ok(input) = str::from_utf8(input) else { return 0 };
+
+ let output = unsafe {
+ slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
+ };
+ let mut cursor = io::Cursor::new(output);
+
+ let Ok(demangled) = rustc_demangle::try_demangle(input) else { return 0 };
+
+ if write!(cursor, "{:#}", demangled).is_err() {
+ // Possible only if provided buffer is not big enough
+ return 0;
+ }
+
+ cursor.position() as size_t
+ }
+
+ let result = llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback);
+
+ if result == llvm::LLVMRustResult::Success {
+ record_artifact_size(&cgcx.prof, "llvm_ir", &out);
+ }
+
+ result.into_result().map_err(|()| {
+ let msg = format!("failed to write LLVM IR to {}", out.display());
+ llvm_err(diag_handler, &msg)
+ })?;
+ }
+
+ if config.emit_asm {
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module.name);
+ let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
+
+ // We can't use the same module for asm and object code output,
+ // because that triggers various errors like invalid IR or broken
+ // binaries. So we must clone the module to produce the asm output
+ // if we are also producing object code.
+ let llmod = if let EmitObj::ObjectCode(_) = config.emit_obj {
+ llvm::LLVMCloneModule(llmod)
+ } else {
+ llmod
+ };
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ write_output_file(
+ diag_handler,
+ tm,
+ cpm,
+ llmod,
+ &path,
+ None,
+ llvm::FileType::AssemblyFile,
+ &cgcx.prof,
+ )
+ })?;
+ }
+
+ match config.emit_obj {
+ EmitObj::ObjectCode(_) => {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &*module.name);
+
+ let dwo_out = cgcx.output_filenames.temp_path_dwo(module_name);
+ let dwo_out = match (cgcx.split_debuginfo, cgcx.split_dwarf_kind) {
+ // Don't change how DWARF is emitted when disabled.
+ (SplitDebuginfo::Off, _) => None,
+ // Don't provide a DWARF object path if split debuginfo is enabled but this is
+ // a platform that doesn't support Split DWARF.
+ _ if !cgcx.target_can_use_split_dwarf => None,
+ // Don't provide a DWARF object path in single mode, sections will be written
+ // into the object as normal but ignored by linker.
+ (_, SplitDwarfKind::Single) => None,
+ // Emit (a subset of the) DWARF into a separate dwarf object file in split
+ // mode.
+ (_, SplitDwarfKind::Split) => Some(dwo_out.as_path()),
+ };
+
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ write_output_file(
+ diag_handler,
+ tm,
+ cpm,
+ llmod,
+ &obj_out,
+ dwo_out,
+ llvm::FileType::ObjectFile,
+ &cgcx.prof,
+ )
+ })?;
+ }
+
+ EmitObj::Bitcode => {
+ debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
+ if let Err(e) = link_or_copy(&bc_out, &obj_out) {
+ diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
+ }
+
+ if !config.emit_bc {
+ debug!("removing_bitcode {:?}", bc_out);
+ ensure_removed(diag_handler, &bc_out);
+ }
+ }
+
+ EmitObj::None => {}
+ }
+
+ drop(handlers);
+ }
+
+ Ok(module.into_compiled_module(
+ config.emit_obj != EmitObj::None,
+ cgcx.target_can_use_split_dwarf
+ && cgcx.split_debuginfo != SplitDebuginfo::Off
+ && cgcx.split_dwarf_kind == SplitDwarfKind::Split,
+ config.emit_bc,
+ &cgcx.output_filenames,
+ ))
+}
+
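+/// Builds module-level inline assembly that defines `section_name` with the
+/// given flags and fills it with `data` via an `.ascii` directive. For
+/// illustration, `create_section_with_flags_asm(".llvmbc", "e", b"BC")`
+/// yields roughly:
+///
+/// ```text
+/// .section .llvmbc,"e"
+/// .ascii "BC"
+/// ```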
+fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: &[u8]) -> Vec<u8> {
+ let mut asm = format!(".section {},\"{}\"\n", section_name, section_flags).into_bytes();
+ asm.extend_from_slice(b".ascii \"");
+ asm.reserve(data.len());
+ for &byte in data {
+ if byte == b'\\' || byte == b'"' {
+ asm.push(b'\\');
+ asm.push(byte);
+ } else if byte < 0x20 || byte >= 0x80 {
+            // Avoid non-UTF-8 inline assembly. Use octal escape sequences,
+            // because they are fixed-width, while hex escapes would consume
+            // following characters.
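+            // For example, byte 0x1f becomes the escape sequence "\037".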
+ asm.push(b'\\');
+ asm.push(b'0' + ((byte >> 6) & 0x7));
+ asm.push(b'0' + ((byte >> 3) & 0x7));
+ asm.push(b'0' + ((byte >> 0) & 0x7));
+ } else {
+ asm.push(byte);
+ }
+ }
+ asm.extend_from_slice(b"\"\n");
+ asm
+}
+
+/// Embed the bitcode of an LLVM module in the LLVM module itself.
+///
+/// This is done primarily for iOS where it appears to be standard to compile C
+/// code at least with `-fembed-bitcode` which creates two sections in the
+/// executable:
+///
+/// * __LLVM,__bitcode
+/// * __LLVM,__cmdline
+///
+/// It appears *both* of these sections are necessary to get the linker to
+/// recognize what's going on. A suitable cmdline value is taken from the
+/// target spec.
+///
+/// Furthermore, debug/O1 builds don't actually embed bitcode but rather just
+/// embed an empty section.
+///
+/// Basically all of this is us attempting to follow in the footsteps of clang
+/// on iOS. See #35968 for lots more info.
+unsafe fn embed_bitcode(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ llcx: &llvm::Context,
+ llmod: &llvm::Module,
+ cmdline: &str,
+ bitcode: &[u8],
+) {
+ // We're adding custom sections to the output object file, but we definitely
+ // do not want these custom sections to make their way into the final linked
+ // executable. The purpose of these custom sections is for tooling
+ // surrounding object files to work with the LLVM IR, if necessary. For
+ // example rustc's own LTO will look for LLVM IR inside of the object file
+ // in these sections by default.
+ //
+    // Handling this differs a bit depending on the object file format
+    // used by the backend, broken down into a few different categories:
+ //
+ // * Mach-O - this is for macOS. Inspecting the source code for the native
+ // linker here shows that the `.llvmbc` and `.llvmcmd` sections are
+ // automatically skipped by the linker. In that case there's nothing extra
+ // that we need to do here.
+ //
+ // * Wasm - the native LLD linker is hard-coded to skip `.llvmbc` and
+ // `.llvmcmd` sections, so there's nothing extra we need to do.
+ //
+ // * COFF - if we don't do anything the linker will by default copy all
+ // these sections to the output artifact, not what we want! To subvert
+ // this we want to flag the sections we inserted here as
+ // `IMAGE_SCN_LNK_REMOVE`.
+ //
+ // * ELF - this is very similar to COFF above. One difference is that these
+ // sections are removed from the output linked artifact when
+ // `--gc-sections` is passed, which we pass by default. If that flag isn't
+ // passed though then these sections will show up in the final output.
+ // Additionally the flag that we need to set here is `SHF_EXCLUDE`.
+ //
+ // Unfortunately, LLVM provides no way to set custom section flags. For ELF
+ // and COFF we emit the sections using module level inline assembly for that
+ // reason (see issue #90326 for historical background).
+ let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
+ || cgcx.opts.target_triple.triple().contains("-darwin")
+ || cgcx.opts.target_triple.triple().contains("-tvos")
+ || cgcx.opts.target_triple.triple().contains("-watchos");
+ if is_apple
+ || cgcx.opts.target_triple.triple().starts_with("wasm")
+ || cgcx.opts.target_triple.triple().starts_with("asmjs")
+ {
+ // We don't need custom section flags, create LLVM globals.
+ let llconst = common::bytes_in_context(llcx, bitcode);
+ let llglobal = llvm::LLVMAddGlobal(
+ llmod,
+ common::val_ty(llconst),
+ "rustc.embedded.module\0".as_ptr().cast(),
+ );
+ llvm::LLVMSetInitializer(llglobal, llconst);
+
+ let section = if is_apple { "__LLVM,__bitcode\0" } else { ".llvmbc\0" };
+ llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
+ llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+ llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
+
+ let llconst = common::bytes_in_context(llcx, cmdline.as_bytes());
+ let llglobal = llvm::LLVMAddGlobal(
+ llmod,
+ common::val_ty(llconst),
+ "rustc.embedded.cmdline\0".as_ptr().cast(),
+ );
+ llvm::LLVMSetInitializer(llglobal, llconst);
+ let section = if is_apple { "__LLVM,__cmdline\0" } else { ".llvmcmd\0" };
+ llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
+ llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+ } else {
+ // We need custom section flags, so emit module-level inline assembly.
+ let section_flags = if cgcx.is_pe_coff { "n" } else { "e" };
+ let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode);
+ llvm::LLVMRustAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+ let asm = create_section_with_flags_asm(".llvmcmd", section_flags, cmdline.as_bytes());
+ llvm::LLVMRustAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+ }
+}
+
+pub unsafe fn with_llvm_pmb(
+ llmod: &llvm::Module,
+ config: &ModuleConfig,
+ opt_level: llvm::CodeGenOptLevel,
+ prepare_for_thin_lto: bool,
+ f: &mut dyn FnMut(&llvm::PassManagerBuilder),
+) {
+ use std::ptr;
+
+ // Create the PassManagerBuilder for LLVM. We configure it with
+ // reasonable defaults and prepare it to actually populate the pass
+ // manager.
+ let builder = llvm::LLVMRustPassManagerBuilderCreate();
+ let opt_size = config.opt_size.map_or(llvm::CodeGenOptSizeNone, |x| to_llvm_opt_settings(x).1);
+ let inline_threshold = config.inline_threshold;
+ let pgo_gen_path = get_pgo_gen_path(config);
+ let pgo_use_path = get_pgo_use_path(config);
+ let pgo_sample_use_path = get_pgo_sample_use_path(config);
+
+ llvm::LLVMRustConfigurePassManagerBuilder(
+ builder,
+ opt_level,
+ config.merge_functions,
+ config.vectorize_slp,
+ config.vectorize_loop,
+ prepare_for_thin_lto,
+ pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+ pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+ pgo_sample_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+ opt_size as c_int,
+ );
+
+ llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins);
+
+    // Here we match what clang does (kinda). For O0 we only inline
+    // always-inline functions (but don't add lifetime intrinsics), at O1 we
+    // inline with lifetime intrinsics, and at O2+ we add an inliner with
+    // thresholds copied from clang.
+ match (opt_level, opt_size, inline_threshold) {
+ (.., Some(t)) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, t);
+ }
+ (llvm::CodeGenOptLevel::Aggressive, ..) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 275);
+ }
+ (_, llvm::CodeGenOptSizeDefault, _) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 75);
+ }
+ (_, llvm::CodeGenOptSizeAggressive, _) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 25);
+ }
+ (llvm::CodeGenOptLevel::None, ..) => {
+ llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
+ }
+ (llvm::CodeGenOptLevel::Less, ..) => {
+ llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
+ }
+ (llvm::CodeGenOptLevel::Default, ..) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 225);
+ }
+ }
+
+ f(builder);
+ llvm::LLVMRustPassManagerBuilderDispose(builder);
+}
+
+// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
+// This is required to satisfy `dllimport` references to static data in .rlibs
+// when using the MSVC linker. We do this only for data, as the linker can fix
+// up code references on its own.
+// See #26591, #27438
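+//
+// For illustration, a public static `FOO` (hypothetical) gains a companion
+// global `__imp_FOO` (`__imp__FOO` on x86) initialized with `&FOO`.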
+fn create_msvc_imps(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ llcx: &llvm::Context,
+ llmod: &llvm::Module,
+) {
+ if !cgcx.msvc_imps_needed {
+ return;
+ }
+ // The x86 ABI seems to require that leading underscores are added to symbol
+ // names, so we need an extra underscore on x86. There's also a leading
+ // '\x01' here which disables LLVM's symbol mangling (e.g., no extra
+ // underscores added in front).
+ let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };
+
+ unsafe {
+ let i8p_ty = Type::i8p_llcx(llcx);
+ let globals = base::iter_globals(llmod)
+ .filter(|&val| {
+ llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
+ && llvm::LLVMIsDeclaration(val) == 0
+ })
+ .filter_map(|val| {
+ // Exclude some symbols that we know are not Rust symbols.
+ let name = llvm::get_value_name(val);
+ if ignored(name) { None } else { Some((val, name)) }
+ })
+ .map(move |(val, name)| {
+ let mut imp_name = prefix.as_bytes().to_vec();
+ imp_name.extend(name);
+ let imp_name = CString::new(imp_name).unwrap();
+ (imp_name, val)
+ })
+ .collect::<Vec<_>>();
+
+ for (imp_name, val) in globals {
+ let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast());
+ llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
+ llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
+ }
+ }
+
+ // Use this function to exclude certain symbols from `__imp` generation.
+ fn ignored(symbol_name: &[u8]) -> bool {
+ // These are symbols generated by LLVM's profiling instrumentation
+ symbol_name.starts_with(b"__llvm_profile_")
+ }
+}
+
+fn record_artifact_size(
+ self_profiler_ref: &SelfProfilerRef,
+ artifact_kind: &'static str,
+ path: &Path,
+) {
+ // Don't stat the file if we are not going to record its size.
+ if !self_profiler_ref.enabled() {
+ return;
+ }
+
+ if let Some(artifact_name) = path.file_name() {
+ let file_size = std::fs::metadata(path).map(|m| m.len()).unwrap_or(0);
+ self_profiler_ref.artifact_size(artifact_kind, artifact_name.to_string_lossy(), file_size);
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
new file mode 100644
index 000000000..86f92dc02
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -0,0 +1,173 @@
+//! Codegen the MIR to the LLVM IR.
+//!
+//! Hopefully useful general knowledge about codegen:
+//!
+//! * There's no way to find out the [`Ty`] type of a [`Value`]. Doing so
+//! would be "trying to get the eggs out of an omelette" (credit:
+//! pcwalton). You can, instead, find out its [`llvm::Type`] by calling [`val_ty`],
+//! but one [`llvm::Type`] corresponds to many [`Ty`]s; for instance, `tup(int, int,
+//! int)` and `rec(x=int, y=int, z=int)` will have the same [`llvm::Type`].
+//!
+//! [`Ty`]: rustc_middle::ty::Ty
+//! [`val_ty`]: crate::common::val_ty
+
+use super::ModuleLlvm;
+
+use crate::attributes;
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::value::Value;
+
+use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
+use rustc_codegen_ssa::mono_item::MonoItemExt;
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_middle::dep_graph;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::DebugInfo;
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::SanitizerSet;
+
+use std::time::Instant;
+
+pub struct ValueIter<'ll> {
+ cur: Option<&'ll Value>,
+ step: unsafe extern "C" fn(&'ll Value) -> Option<&'ll Value>,
+}
+
+impl<'ll> Iterator for ValueIter<'ll> {
+ type Item = &'ll Value;
+
+ fn next(&mut self) -> Option<&'ll Value> {
+ let old = self.cur;
+ if let Some(old) = old {
+ self.cur = unsafe { (self.step)(old) };
+ }
+ old
+ }
+}
+
+pub fn iter_globals(llmod: &llvm::Module) -> ValueIter<'_> {
+ unsafe { ValueIter { cur: llvm::LLVMGetFirstGlobal(llmod), step: llvm::LLVMGetNextGlobal } }
+}
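+
+// Typical use: `for g in iter_globals(llmod) { ... }`, e.g. when scanning
+// module globals in `create_msvc_imps` in `back/write.rs`.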
+
+pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen<ModuleLlvm>, u64) {
+ let start_time = Instant::now();
+
+ let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
+ let (module, _) = tcx.dep_graph.with_task(
+ dep_node,
+ tcx,
+ cgu_name,
+ module_codegen,
+ Some(dep_graph::hash_result),
+ );
+ let time_to_codegen = start_time.elapsed();
+
+ // We assume that the cost of running LLVM on a CGU is proportional to
+ // the time we needed to codegen it.
+ let cost = time_to_codegen.as_nanos() as u64;
+
+ fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<ModuleLlvm> {
+ let cgu = tcx.codegen_unit(cgu_name);
+ let _prof_timer =
+ tcx.prof.generic_activity_with_arg_recorder("codegen_module", |recorder| {
+ recorder.record_arg(cgu_name.to_string());
+ recorder.record_arg(cgu.size_estimate().to_string());
+ });
+ // Instantiate monomorphizations without filling out definitions yet...
+ let llvm_module = ModuleLlvm::new(tcx, cgu_name.as_str());
+ {
+ let cx = CodegenCx::new(tcx, cgu, &llvm_module);
+ let mono_items = cx.codegen_unit.items_in_deterministic_order(cx.tcx);
+ for &(mono_item, (linkage, visibility)) in &mono_items {
+ mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
+ }
+
+ // ... and now that we have everything pre-defined, fill out those definitions.
+ for &(mono_item, _) in &mono_items {
+ mono_item.define::<Builder<'_, '_, '_>>(&cx);
+ }
+
+ // If this codegen unit contains the main function, also create the
+ // wrapper here
+ if let Some(entry) = maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx) {
+ let attrs = attributes::sanitize_attrs(&cx, SanitizerSet::empty());
+ attributes::apply_to_llfn(entry, llvm::AttributePlace::Function, &attrs);
+ }
+
+ // Finalize code coverage by injecting the coverage map. Note that the coverage
+ // map will also be added to the `llvm.compiler.used` variable, created next.
+ if cx.sess().instrument_coverage() {
+ cx.coverageinfo_finalize();
+ }
+
+ // Create the llvm.used and llvm.compiler.used variables.
+ if !cx.used_statics().borrow().is_empty() {
+ cx.create_used_variable()
+ }
+ if !cx.compiler_used_statics().borrow().is_empty() {
+ cx.create_compiler_used_variable()
+ }
+
+ // Run replace-all-uses-with for statics that need it. This must
+ // happen after the llvm.used variables are created.
+ for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
+ unsafe {
+ let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
+ llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
+ llvm::LLVMDeleteGlobal(old_g);
+ }
+ }
+
+ // Finalize debuginfo
+ if cx.sess().opts.debuginfo != DebugInfo::None {
+ cx.debuginfo_finalize();
+ }
+ }
+
+ ModuleCodegen {
+ name: cgu_name.to_string(),
+ module_llvm: llvm_module,
+ kind: ModuleKind::Regular,
+ }
+ }
+
+ (module, cost)
+}
+
+pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
+ let Some(sect) = attrs.link_section else { return };
+ unsafe {
+ let buf = SmallCStr::new(sect.as_str());
+ llvm::LLVMSetSection(llval, buf.as_ptr());
+ }
+}
+
+pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
+ match linkage {
+ Linkage::External => llvm::Linkage::ExternalLinkage,
+ Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage,
+ Linkage::LinkOnceAny => llvm::Linkage::LinkOnceAnyLinkage,
+ Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage,
+ Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage,
+ Linkage::WeakODR => llvm::Linkage::WeakODRLinkage,
+ Linkage::Appending => llvm::Linkage::AppendingLinkage,
+ Linkage::Internal => llvm::Linkage::InternalLinkage,
+ Linkage::Private => llvm::Linkage::PrivateLinkage,
+ Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage,
+ Linkage::Common => llvm::Linkage::CommonLinkage,
+ }
+}
+
+pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility {
+ match linkage {
+ Visibility::Default => llvm::Visibility::Default,
+ Visibility::Hidden => llvm::Visibility::Hidden,
+ Visibility::Protected => llvm::Visibility::Protected,
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
new file mode 100644
index 000000000..d3096c73a
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -0,0 +1,1508 @@
+use crate::attributes;
+use crate::common::Funclet;
+use crate::context::CodegenCx;
+use crate::llvm::{self, BasicBlock, False};
+use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope};
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use cstr::cstr;
+use libc::{c_char, c_uint};
+use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::MemFlags;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::layout::{
+ FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout,
+};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::Span;
+use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
+use rustc_target::spec::{HasTargetSpec, Target};
+use std::borrow::Cow;
+use std::ffi::CStr;
+use std::iter;
+use std::ops::Deref;
+use std::ptr;
+use tracing::{debug, instrument};
+
+// All Builders must have an llfn associated with them
+#[must_use]
+pub struct Builder<'a, 'll, 'tcx> {
+ pub llbuilder: &'ll mut llvm::Builder<'ll>,
+ pub cx: &'a CodegenCx<'ll, 'tcx>,
+}
+
+impl Drop for Builder<'_, '_, '_> {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
+ }
+ }
+}
+
+// FIXME(eddyb) use a checked constructor when they become `const fn`.
+const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") };
+
+/// Empty string, to be used where LLVM expects an instruction name, indicating
+/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
+// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
+const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();
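+// Passing `UNNAMED` to, e.g., `LLVMBuildAdd` yields a numbered value like
+// `%2 = add i32 %0, %1` in textual IR rather than a named `%sum = ...`.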
+
+impl<'ll, 'tcx> BackendTypes for Builder<'_, 'll, 'tcx> {
+ type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
+ type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
+ type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
+ type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
+ type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;
+
+ type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
+ type DILocation = <CodegenCx<'ll, 'tcx> as BackendTypes>::DILocation;
+ type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
+}
+
+impl abi::HasDataLayout for Builder<'_, '_, '_> {
+ fn data_layout(&self) -> &abi::TargetDataLayout {
+ self.cx.data_layout()
+ }
+}
+
+impl<'tcx> ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.cx.tcx
+ }
+}
+
+impl<'tcx> ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.cx.param_env()
+ }
+}
+
+impl HasTargetSpec for Builder<'_, '_, '_> {
+ #[inline]
+ fn target_spec(&self) -> &Target {
+ self.cx.target_spec()
+ }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ self.cx.handle_layout_err(err, span, ty)
+ }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ self.cx.handle_fn_abi_err(err, span, fn_abi_request)
+ }
+}
+
+impl<'ll, 'tcx> Deref for Builder<'_, 'll, 'tcx> {
+ type Target = CodegenCx<'ll, 'tcx>;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.cx
+ }
+}
+
+impl<'ll, 'tcx> HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
+ type CodegenCx = CodegenCx<'ll, 'tcx>;
+}
+
+macro_rules! builder_methods_for_value_instructions {
+ ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
+ $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
+ unsafe {
+ llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
+ }
+ })+
+ }
+}
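+
+// For instance, `add(a, b) => LLVMBuildAdd` in the invocation below expands
+// to (roughly):
+//
+//     fn add(&mut self, a: &'ll Value, b: &'ll Value) -> &'ll Value {
+//         unsafe { llvm::LLVMBuildAdd(self.llbuilder, a, b, UNNAMED) }
+//     }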
+
+impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
+ fn build(cx: &'a CodegenCx<'ll, 'tcx>, llbb: &'ll BasicBlock) -> Self {
+ let bx = Builder::with_cx(cx);
+ unsafe {
+ llvm::LLVMPositionBuilderAtEnd(bx.llbuilder, llbb);
+ }
+ bx
+ }
+
+ fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
+ self.cx
+ }
+
+ fn llbb(&self) -> &'ll BasicBlock {
+ unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
+ }
+
+ fn set_span(&mut self, _span: Span) {}
+
+ fn append_block(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &str) -> &'ll BasicBlock {
+ unsafe {
+ let name = SmallCStr::new(name);
+ llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr())
+ }
+ }
+
+ fn append_sibling_block(&mut self, name: &str) -> &'ll BasicBlock {
+ Self::append_block(self.cx, self.llfn(), name)
+ }
+
+ fn switch_to_block(&mut self, llbb: Self::BasicBlock) {
+ *self = Self::build(self.cx, llbb)
+ }
+
+ fn ret_void(&mut self) {
+ unsafe {
+ llvm::LLVMBuildRetVoid(self.llbuilder);
+ }
+ }
+
+ fn ret(&mut self, v: &'ll Value) {
+ unsafe {
+ llvm::LLVMBuildRet(self.llbuilder, v);
+ }
+ }
+
+ fn br(&mut self, dest: &'ll BasicBlock) {
+ unsafe {
+ llvm::LLVMBuildBr(self.llbuilder, dest);
+ }
+ }
+
+ fn cond_br(
+ &mut self,
+ cond: &'ll Value,
+ then_llbb: &'ll BasicBlock,
+ else_llbb: &'ll BasicBlock,
+ ) {
+ unsafe {
+ llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
+ }
+ }
+
+ fn switch(
+ &mut self,
+ v: &'ll Value,
+ else_llbb: &'ll BasicBlock,
+ cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)>,
+ ) {
+ let switch =
+ unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
+ for (on_val, dest) in cases {
+ let on_val = self.const_uint_big(self.val_ty(v), on_val);
+ unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
+ }
+ }
+
+ fn invoke(
+ &mut self,
+ llty: &'ll Type,
+ llfn: &'ll Value,
+ args: &[&'ll Value],
+ then: &'ll BasicBlock,
+ catch: &'ll BasicBlock,
+ funclet: Option<&Funclet<'ll>>,
+ ) -> &'ll Value {
+ debug!("invoke {:?} with args ({:?})", llfn, args);
+
+ let args = self.check_call("invoke", llty, llfn, args);
+ let bundle = funclet.map(|funclet| funclet.bundle());
+ let bundle = bundle.as_ref().map(|b| &*b.raw);
+
+ unsafe {
+ llvm::LLVMRustBuildInvoke(
+ self.llbuilder,
+ llty,
+ llfn,
+ args.as_ptr(),
+ args.len() as c_uint,
+ then,
+ catch,
+ bundle,
+ UNNAMED,
+ )
+ }
+ }
+
+ fn unreachable(&mut self) {
+ unsafe {
+ llvm::LLVMBuildUnreachable(self.llbuilder);
+ }
+ }
+
+ builder_methods_for_value_instructions! {
+ add(a, b) => LLVMBuildAdd,
+ fadd(a, b) => LLVMBuildFAdd,
+ sub(a, b) => LLVMBuildSub,
+ fsub(a, b) => LLVMBuildFSub,
+ mul(a, b) => LLVMBuildMul,
+ fmul(a, b) => LLVMBuildFMul,
+ udiv(a, b) => LLVMBuildUDiv,
+ exactudiv(a, b) => LLVMBuildExactUDiv,
+ sdiv(a, b) => LLVMBuildSDiv,
+ exactsdiv(a, b) => LLVMBuildExactSDiv,
+ fdiv(a, b) => LLVMBuildFDiv,
+ urem(a, b) => LLVMBuildURem,
+ srem(a, b) => LLVMBuildSRem,
+ frem(a, b) => LLVMBuildFRem,
+ shl(a, b) => LLVMBuildShl,
+ lshr(a, b) => LLVMBuildLShr,
+ ashr(a, b) => LLVMBuildAShr,
+ and(a, b) => LLVMBuildAnd,
+ or(a, b) => LLVMBuildOr,
+ xor(a, b) => LLVMBuildXor,
+ neg(x) => LLVMBuildNeg,
+ fneg(x) => LLVMBuildFNeg,
+ not(x) => LLVMBuildNot,
+ unchecked_sadd(x, y) => LLVMBuildNSWAdd,
+ unchecked_uadd(x, y) => LLVMBuildNUWAdd,
+ unchecked_ssub(x, y) => LLVMBuildNSWSub,
+ unchecked_usub(x, y) => LLVMBuildNUWSub,
+ unchecked_smul(x, y) => LLVMBuildNSWMul,
+ unchecked_umul(x, y) => LLVMBuildNUWMul,
+ }
+
+ fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+
+ fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+
+ fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+
+ fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+
+ fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+
+ fn checked_binop(
+ &mut self,
+ oop: OverflowOp,
+ ty: Ty<'_>,
+ lhs: Self::Value,
+ rhs: Self::Value,
+ ) -> (Self::Value, Self::Value) {
+ use rustc_middle::ty::{Int, Uint};
+ use rustc_middle::ty::{IntTy::*, UintTy::*};
+
+ let new_kind = match ty.kind() {
+ Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
+ Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
+ t @ (Uint(_) | Int(_)) => t.clone(),
+ _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
+ };
+
+ let name = match oop {
+ OverflowOp::Add => match new_kind {
+ Int(I8) => "llvm.sadd.with.overflow.i8",
+ Int(I16) => "llvm.sadd.with.overflow.i16",
+ Int(I32) => "llvm.sadd.with.overflow.i32",
+ Int(I64) => "llvm.sadd.with.overflow.i64",
+ Int(I128) => "llvm.sadd.with.overflow.i128",
+
+ Uint(U8) => "llvm.uadd.with.overflow.i8",
+ Uint(U16) => "llvm.uadd.with.overflow.i16",
+ Uint(U32) => "llvm.uadd.with.overflow.i32",
+ Uint(U64) => "llvm.uadd.with.overflow.i64",
+ Uint(U128) => "llvm.uadd.with.overflow.i128",
+
+ _ => unreachable!(),
+ },
+ OverflowOp::Sub => match new_kind {
+ Int(I8) => "llvm.ssub.with.overflow.i8",
+ Int(I16) => "llvm.ssub.with.overflow.i16",
+ Int(I32) => "llvm.ssub.with.overflow.i32",
+ Int(I64) => "llvm.ssub.with.overflow.i64",
+ Int(I128) => "llvm.ssub.with.overflow.i128",
+
+ Uint(U8) => "llvm.usub.with.overflow.i8",
+ Uint(U16) => "llvm.usub.with.overflow.i16",
+ Uint(U32) => "llvm.usub.with.overflow.i32",
+ Uint(U64) => "llvm.usub.with.overflow.i64",
+ Uint(U128) => "llvm.usub.with.overflow.i128",
+
+ _ => unreachable!(),
+ },
+ OverflowOp::Mul => match new_kind {
+ Int(I8) => "llvm.smul.with.overflow.i8",
+ Int(I16) => "llvm.smul.with.overflow.i16",
+ Int(I32) => "llvm.smul.with.overflow.i32",
+ Int(I64) => "llvm.smul.with.overflow.i64",
+ Int(I128) => "llvm.smul.with.overflow.i128",
+
+ Uint(U8) => "llvm.umul.with.overflow.i8",
+ Uint(U16) => "llvm.umul.with.overflow.i16",
+ Uint(U32) => "llvm.umul.with.overflow.i32",
+ Uint(U64) => "llvm.umul.with.overflow.i64",
+ Uint(U128) => "llvm.umul.with.overflow.i128",
+
+ _ => unreachable!(),
+ },
+ };
+
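+ // Each `llvm.*.with.overflow.*` intrinsic returns an `{ iN, i1 }` pair:
+ // the wrapped result and an overflow flag, which we split apart here.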
+ let res = self.call_intrinsic(name, &[lhs, rhs]);
+ (self.extract_value(res, 0), self.extract_value(res, 1))
+ }
+
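+ // Rust `bool`s are stored in memory as `i8` (0 or 1) but handled as `i1`
+ // immediates; these two helpers convert between the representations.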
+ fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
+ if self.cx().val_ty(val) == self.cx().type_i1() {
+ self.zext(val, self.cx().type_i8())
+ } else {
+ val
+ }
+ }
+ fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
+ if scalar.is_bool() {
+ return self.trunc(val, self.cx().type_i1());
+ }
+ val
+ }
+
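+ // Static allocas are emitted in the function's entry block so that LLVM
+ // treats them as ordinary stack slots rather than dynamic allocations.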
+ fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
+ let mut bx = Builder::with_cx(self.cx);
+ bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
+ bx.dynamic_alloca(ty, align)
+ }
+
+ fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
+ unsafe {
+ let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
+ llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
+ alloca
+ }
+ }
+
+ fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value {
+ unsafe {
+ let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
+ llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
+ alloca
+ }
+ }
+
+ fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
+ unsafe {
+ let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
+ llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
+ load
+ }
+ }
+
+ fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
+ unsafe {
+ let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
+ llvm::LLVMSetVolatile(load, llvm::True);
+ load
+ }
+ }
+
+ fn atomic_load(
+ &mut self,
+ ty: &'ll Type,
+ ptr: &'ll Value,
+ order: rustc_codegen_ssa::common::AtomicOrdering,
+ size: Size,
+ ) -> &'ll Value {
+ unsafe {
+ let load = llvm::LLVMRustBuildAtomicLoad(
+ self.llbuilder,
+ ty,
+ ptr,
+ UNNAMED,
+ AtomicOrdering::from_generic(order),
+ );
+ // LLVM requires the alignment of atomic loads to be at least the size of the type.
+ llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
+ load
+ }
+ }
+
+ #[instrument(level = "trace", skip(self))]
+ fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> {
+ assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+
+ if place.layout.is_zst() {
+ return OperandRef::new_zst(self, place.layout);
+ }
+
+ #[instrument(level = "trace", skip(bx))]
+ fn scalar_load_metadata<'a, 'll, 'tcx>(
+ bx: &mut Builder<'a, 'll, 'tcx>,
+ load: &'ll Value,
+ scalar: abi::Scalar,
+ layout: TyAndLayout<'tcx>,
+ offset: Size,
+ ) {
+ if !scalar.is_always_valid(bx) {
+ bx.noundef_metadata(load);
+ }
+
+ match scalar.primitive() {
+ abi::Int(..) => {
+ if !scalar.is_always_valid(bx) {
+ bx.range_metadata(load, scalar.valid_range(bx));
+ }
+ }
+ abi::Pointer => {
+ if !scalar.valid_range(bx).contains(0) {
+ bx.nonnull_metadata(load);
+ }
+
+ if let Some(pointee) = layout.pointee_info_at(bx, offset) {
+ if let Some(_) = pointee.safe {
+ bx.align_metadata(load, pointee.align);
+ }
+ }
+ }
+ abi::F32 | abi::F64 => {}
+ }
+ }
+
+ let val = if let Some(llextra) = place.llextra {
+ OperandValue::Ref(place.llval, Some(llextra), place.align)
+ } else if place.layout.is_llvm_immediate() {
+ let mut const_llval = None;
+ let llty = place.layout.llvm_type(self);
+ unsafe {
+ if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
+ if llvm::LLVMIsGlobalConstant(global) == llvm::True {
+ if let Some(init) = llvm::LLVMGetInitializer(global) {
+ if self.val_ty(init) == llty {
+ const_llval = Some(init);
+ }
+ }
+ }
+ }
+ }
+ let llval = const_llval.unwrap_or_else(|| {
+ let load = self.load(llty, place.llval, place.align);
+ if let abi::Abi::Scalar(scalar) = place.layout.abi {
+ scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
+ }
+ load
+ });
+ OperandValue::Immediate(self.to_immediate(llval, place.layout))
+ } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
+ let b_offset = a.size(self).align_to(b.align(self).abi);
+ let pair_ty = place.layout.llvm_type(self);
+
+ let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
+ let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
+ let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
+ let load = self.load(llty, llptr, align);
+ scalar_load_metadata(self, load, scalar, layout, offset);
+ self.to_immediate_scalar(load, scalar)
+ };
+
+ OperandValue::Pair(
+ load(0, a, place.layout, place.align, Size::ZERO),
+ load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset),
+ )
+ } else {
+ OperandValue::Ref(place.llval, None, place.align)
+ };
+
+ OperandRef { val, layout: place.layout }
+ }
+
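+ // Initializes `count` consecutive elements of `dest` with `cg_elem` by
+ // emitting a small loop; its basic-block structure is roughly:
+ //
+ //     repeat_loop_header: current = phi [start, pred], [next, body];
+ //                         br (current != end), body, next_bb
+ //     repeat_loop_body:   store cg_elem -> current; next = gep current, 1
+ //     repeat_loop_next:   execution continues here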
+ fn write_operand_repeatedly(
+ mut self,
+ cg_elem: OperandRef<'tcx, &'ll Value>,
+ count: u64,
+ dest: PlaceRef<'tcx, &'ll Value>,
+ ) -> Self {
+ let zero = self.const_usize(0);
+ let count = self.const_usize(count);
+ let start = dest.project_index(&mut self, zero).llval;
+ let end = dest.project_index(&mut self, count).llval;
+
+ let header_bb = self.append_sibling_block("repeat_loop_header");
+ let body_bb = self.append_sibling_block("repeat_loop_body");
+ let next_bb = self.append_sibling_block("repeat_loop_next");
+
+ self.br(header_bb);
+
+ let mut header_bx = Self::build(self.cx, header_bb);
+ let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);
+
+ let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
+ header_bx.cond_br(keep_going, body_bb, next_bb);
+
+ let mut body_bx = Self::build(self.cx, body_bb);
+ let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+ cg_elem
+ .val
+ .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));
+
+ let next = body_bx.inbounds_gep(
+ self.backend_type(cg_elem.layout),
+ current,
+ &[self.const_usize(1)],
+ );
+ body_bx.br(header_bb);
+ header_bx.add_incoming_to_phi(current, next, body_bb);
+
+ Self::build(self.cx, next_bb)
+ }
+
+ fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
+ if self.sess().target.arch == "amdgpu" {
+ // amdgpu/LLVM does something weird and thinks an i64 value is
+ // split into a v2i32, halving the bitwidth LLVM expects,
+ // tripping an assertion. So, for now, just disable this
+ // optimization.
+ return;
+ }
+
+ unsafe {
+ let llty = self.cx.val_ty(load);
+ let v = [
+ self.cx.const_uint_big(llty, range.start),
+ self.cx.const_uint_big(llty, range.end.wrapping_add(1)),
+ ];
+
+ llvm::LLVMSetMetadata(
+ load,
+ llvm::MD_range as c_uint,
+ llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
+ );
+ }
+ }
+
+ fn nonnull_metadata(&mut self, load: &'ll Value) {
+ unsafe {
+ llvm::LLVMSetMetadata(
+ load,
+ llvm::MD_nonnull as c_uint,
+ llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
+ );
+ }
+ }
+
+ fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
+ self.store_with_flags(val, ptr, align, MemFlags::empty())
+ }
+
+ fn store_with_flags(
+ &mut self,
+ val: &'ll Value,
+ ptr: &'ll Value,
+ align: Align,
+ flags: MemFlags,
+ ) -> &'ll Value {
+ debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
+ let ptr = self.check_store(val, ptr);
+ unsafe {
+ let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
+ let align =
+ if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
+ llvm::LLVMSetAlignment(store, align);
+ if flags.contains(MemFlags::VOLATILE) {
+ llvm::LLVMSetVolatile(store, llvm::True);
+ }
+ if flags.contains(MemFlags::NONTEMPORAL) {
+ // According to LLVM [1], the metadata attached to a nontemporal
+ // store must *always* point to a metadata node holding the
+ // integer 1.
+ //
+ // [1]: https://llvm.org/docs/LangRef.html#store-instruction
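+ //
+ // The resulting IR is thus `store ..., !nontemporal !N` where
+ // `!N = !{i32 1}`.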
+ let one = self.cx.const_i32(1);
+ let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
+ llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
+ }
+ store
+ }
+ }
+
+ fn atomic_store(
+ &mut self,
+ val: &'ll Value,
+ ptr: &'ll Value,
+ order: rustc_codegen_ssa::common::AtomicOrdering,
+ size: Size,
+ ) {
+ debug!("Store {:?} -> {:?}", val, ptr);
+ let ptr = self.check_store(val, ptr);
+ unsafe {
+ let store = llvm::LLVMRustBuildAtomicStore(
+ self.llbuilder,
+ val,
+ ptr,
+ AtomicOrdering::from_generic(order),
+ );
+ // LLVM requires the alignment of atomic stores to be at least the size of the type.
+ llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
+ }
+ }
+
+ fn gep(&mut self, ty: &'ll Type, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
+ unsafe {
+ llvm::LLVMBuildGEP2(
+ self.llbuilder,
+ ty,
+ ptr,
+ indices.as_ptr(),
+ indices.len() as c_uint,
+ UNNAMED,
+ )
+ }
+ }
+
+ fn inbounds_gep(
+ &mut self,
+ ty: &'ll Type,
+ ptr: &'ll Value,
+ indices: &[&'ll Value],
+ ) -> &'ll Value {
+ unsafe {
+ llvm::LLVMBuildInBoundsGEP2(
+ self.llbuilder,
+ ty,
+ ptr,
+ indices.as_ptr(),
+ indices.len() as c_uint,
+ UNNAMED,
+ )
+ }
+ }
+
+ fn struct_gep(&mut self, ty: &'ll Type, ptr: &'ll Value, idx: u64) -> &'ll Value {
+ assert_eq!(idx as c_uint as u64, idx);
+ unsafe { llvm::LLVMBuildStructGEP2(self.llbuilder, ty, ptr, idx as c_uint, UNNAMED) }
+ }
+
+ /* Casts */
+ fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
+ self.fptoint_sat(false, val, dest_ty)
+ }
+
+ fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
+ self.fptoint_sat(true, val, dest_ty)
+ }
+
+ fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ // On WebAssembly the `fptoui` and `fptosi` instructions currently have
+ // poor codegen. The reason for this is that the corresponding wasm
+ // instructions, `i32.trunc_f32_s` for example, will trap when the float
+ // is out of bounds, infinity, or NaN. This means that LLVM
+ // automatically inserts control flow around `fptoui` and `fptosi`
+ // because the LLVM instruction `fptoui` is defined as producing a
+ // poison value, not having UB on out-of-bounds values.
+ //
+ // This method, however, is only used with non-saturating casts that
+ // have UB on out-of-bounds values. This means that it's ok if we use
+ // the raw wasm instruction since out-of-bounds values can do whatever
+ // we like. To ensure that LLVM picks the right instruction we choose
+ // the raw wasm intrinsic functions which avoid LLVM inserting all the
+ // other control flow automatically.
+ if self.sess().target.is_like_wasm {
+ let src_ty = self.cx.val_ty(val);
+ if self.cx.type_kind(src_ty) != TypeKind::Vector {
+ let float_width = self.cx.float_width(src_ty);
+ let int_width = self.cx.int_width(dest_ty);
+ let name = match (int_width, float_width) {
+ (32, 32) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
+ (32, 64) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
+ (64, 32) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
+ (64, 64) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
+ _ => None,
+ };
+ if let Some(name) = name {
+ return self.call_intrinsic(name, &[val]);
+ }
+ }
+ }
+ unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ // see `fptoui` above for why wasm is different here
+ if self.sess().target.is_like_wasm {
+ let src_ty = self.cx.val_ty(val);
+ if self.cx.type_kind(src_ty) != TypeKind::Vector {
+ let float_width = self.cx.float_width(src_ty);
+ let int_width = self.cx.int_width(dest_ty);
+ let name = match (int_width, float_width) {
+ (32, 32) => Some("llvm.wasm.trunc.signed.i32.f32"),
+ (32, 64) => Some("llvm.wasm.trunc.signed.i32.f64"),
+ (64, 32) => Some("llvm.wasm.trunc.signed.i64.f32"),
+ (64, 64) => Some("llvm.wasm.trunc.signed.i64.f64"),
+ _ => None,
+ };
+ if let Some(name) = name {
+ return self.call_intrinsic(name, &[val]);
+ }
+ }
+ }
+ unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) }
+ }
+
+ fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ /* Comparisons */
+ fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ let op = llvm::IntPredicate::from_generic(op);
+ unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
+ }
+
+ fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ let op = llvm::RealPredicate::from_generic(op);
+ unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
+ }
+
+ /* Miscellaneous instructions */
+ fn memcpy(
+ &mut self,
+ dst: &'ll Value,
+ dst_align: Align,
+ src: &'ll Value,
+ src_align: Align,
+ size: &'ll Value,
+ flags: MemFlags,
+ ) {
+ assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
+ let size = self.intcast(size, self.type_isize(), false);
+ let is_volatile = flags.contains(MemFlags::VOLATILE);
+ let dst = self.pointercast(dst, self.type_i8p());
+ let src = self.pointercast(src, self.type_i8p());
+ unsafe {
+ llvm::LLVMRustBuildMemCpy(
+ self.llbuilder,
+ dst,
+ dst_align.bytes() as c_uint,
+ src,
+ src_align.bytes() as c_uint,
+ size,
+ is_volatile,
+ );
+ }
+ }
+
+ fn memmove(
+ &mut self,
+ dst: &'ll Value,
+ dst_align: Align,
+ src: &'ll Value,
+ src_align: Align,
+ size: &'ll Value,
+ flags: MemFlags,
+ ) {
+ assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
+ let size = self.intcast(size, self.type_isize(), false);
+ let is_volatile = flags.contains(MemFlags::VOLATILE);
+ let dst = self.pointercast(dst, self.type_i8p());
+ let src = self.pointercast(src, self.type_i8p());
+ unsafe {
+ llvm::LLVMRustBuildMemMove(
+ self.llbuilder,
+ dst,
+ dst_align.bytes() as c_uint,
+ src,
+ src_align.bytes() as c_uint,
+ size,
+ is_volatile,
+ );
+ }
+ }
+
+ fn memset(
+ &mut self,
+ ptr: &'ll Value,
+ fill_byte: &'ll Value,
+ size: &'ll Value,
+ align: Align,
+ flags: MemFlags,
+ ) {
+ let is_volatile = flags.contains(MemFlags::VOLATILE);
+ let ptr = self.pointercast(ptr, self.type_i8p());
+ unsafe {
+ llvm::LLVMRustBuildMemSet(
+ self.llbuilder,
+ ptr,
+ align.bytes() as c_uint,
+ fill_byte,
+ size,
+ is_volatile,
+ );
+ }
+ }
+
+ fn select(
+ &mut self,
+ cond: &'ll Value,
+ then_val: &'ll Value,
+ else_val: &'ll Value,
+ ) -> &'ll Value {
+ unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
+ }
+
+ fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
+ }
+
+ fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
+ }
+
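+ // Splat `elt` across a vector: insert it into lane 0 of an undef vector,
+ // then shuffle with an all-zeros mask so every output lane reads lane 0.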
+ fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
+ unsafe {
+ let elt_ty = self.cx.val_ty(elt);
+ let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
+ let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
+ let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
+ self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
+ }
+ }
+
+ fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
+ assert_eq!(idx as c_uint as u64, idx);
+ unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }
+ }
+
+ fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value {
+ assert_eq!(idx as c_uint as u64, idx);
+ unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) }
+ }
+
+ fn set_personality_fn(&mut self, personality: &'ll Value) {
+ unsafe {
+ llvm::LLVMSetPersonalityFn(self.llfn(), personality);
+ }
+ }
+
+ fn cleanup_landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value) -> &'ll Value {
+ let landing_pad = self.landing_pad(ty, pers_fn, 1 /* FIXME should this be 0? */);
+ unsafe {
+ llvm::LLVMSetCleanup(landing_pad, llvm::True);
+ }
+ landing_pad
+ }
+
+ fn resume(&mut self, exn: &'ll Value) {
+ unsafe {
+ llvm::LLVMBuildResume(self.llbuilder, exn);
+ }
+ }
+
+ fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
+ let name = cstr!("cleanuppad");
+ let ret = unsafe {
+ llvm::LLVMRustBuildCleanupPad(
+ self.llbuilder,
+ parent,
+ args.len() as c_uint,
+ args.as_ptr(),
+ name.as_ptr(),
+ )
+ };
+ Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
+ }
+
+ fn cleanup_ret(&mut self, funclet: &Funclet<'ll>, unwind: Option<&'ll BasicBlock>) {
+ unsafe {
+ llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind)
+ .expect("LLVM does not have support for cleanupret");
+ }
+ }
+
+ fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
+ let name = cstr!("catchpad");
+ let ret = unsafe {
+ llvm::LLVMRustBuildCatchPad(
+ self.llbuilder,
+ parent,
+ args.len() as c_uint,
+ args.as_ptr(),
+ name.as_ptr(),
+ )
+ };
+ Funclet::new(ret.expect("LLVM does not have support for catchpad"))
+ }
+
+ fn catch_switch(
+ &mut self,
+ parent: Option<&'ll Value>,
+ unwind: Option<&'ll BasicBlock>,
+ handlers: &[&'ll BasicBlock],
+ ) -> &'ll Value {
+ let name = cstr!("catchswitch");
+ let ret = unsafe {
+ llvm::LLVMRustBuildCatchSwitch(
+ self.llbuilder,
+ parent,
+ unwind,
+ handlers.len() as c_uint,
+ name.as_ptr(),
+ )
+ };
+ let ret = ret.expect("LLVM does not have support for catchswitch");
+ for handler in handlers {
+ unsafe {
+ llvm::LLVMRustAddHandler(ret, handler);
+ }
+ }
+ ret
+ }
+
+ // Atomic Operations
+ fn atomic_cmpxchg(
+ &mut self,
+ dst: &'ll Value,
+ cmp: &'ll Value,
+ src: &'ll Value,
+ mut order: rustc_codegen_ssa::common::AtomicOrdering,
+ failure_order: rustc_codegen_ssa::common::AtomicOrdering,
+ weak: bool,
+ ) -> &'ll Value {
+ let weak = if weak { llvm::True } else { llvm::False };
+ if llvm_util::get_version() < (13, 0, 0) {
+ use rustc_codegen_ssa::common::AtomicOrdering::*;
+ // Older LLVM has the pre-C++17 restriction on the success and failure
+ // memory ordering of a cmpxchg, requiring the former to be at least as
+ // strong as the latter. So, for LLVM 12, we upgrade the success
+ // ordering to a stronger one if necessary.
+ match (order, failure_order) {
+ (Relaxed, Acquire) => order = Acquire,
+ (Release, Acquire) => order = AcquireRelease,
+ (_, SequentiallyConsistent) => order = SequentiallyConsistent,
+ _ => {}
+ }
+ }
+ unsafe {
+ llvm::LLVMRustBuildAtomicCmpXchg(
+ self.llbuilder,
+ dst,
+ cmp,
+ src,
+ AtomicOrdering::from_generic(order),
+ AtomicOrdering::from_generic(failure_order),
+ weak,
+ )
+ }
+ }
+ fn atomic_rmw(
+ &mut self,
+ op: rustc_codegen_ssa::common::AtomicRmwBinOp,
+ dst: &'ll Value,
+ src: &'ll Value,
+ order: rustc_codegen_ssa::common::AtomicOrdering,
+ ) -> &'ll Value {
+ unsafe {
+ llvm::LLVMBuildAtomicRMW(
+ self.llbuilder,
+ AtomicRmwBinOp::from_generic(op),
+ dst,
+ src,
+ AtomicOrdering::from_generic(order),
+ False,
+ )
+ }
+ }
+
+ fn atomic_fence(
+ &mut self,
+ order: rustc_codegen_ssa::common::AtomicOrdering,
+ scope: rustc_codegen_ssa::common::SynchronizationScope,
+ ) {
+ unsafe {
+ llvm::LLVMRustBuildAtomicFence(
+ self.llbuilder,
+ AtomicOrdering::from_generic(order),
+ SynchronizationScope::from_generic(scope),
+ );
+ }
+ }
+
+ fn set_invariant_load(&mut self, load: &'ll Value) {
+ unsafe {
+ llvm::LLVMSetMetadata(
+ load,
+ llvm::MD_invariant_load as c_uint,
+ llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
+ );
+ }
+ }
+
+ fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
+ self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
+ }
+
+ fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
+ self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
+ }
+
+ fn instrprof_increment(
+ &mut self,
+ fn_name: &'ll Value,
+ hash: &'ll Value,
+ num_counters: &'ll Value,
+ index: &'ll Value,
+ ) {
+ debug!(
+ "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
+ fn_name, hash, num_counters, index
+ );
+
+ let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
+ let llty = self.cx.type_func(
+ &[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
+ self.cx.type_void(),
+ );
+ let args = &[fn_name, hash, num_counters, index];
+ let args = self.check_call("call", llty, llfn, args);
+
+ unsafe {
+ let _ = llvm::LLVMRustBuildCall(
+ self.llbuilder,
+ llty,
+ llfn,
+ args.as_ptr() as *const &llvm::Value,
+ args.len() as c_uint,
+ None,
+ );
+ }
+ }
+
+ fn call(
+ &mut self,
+ llty: &'ll Type,
+ llfn: &'ll Value,
+ args: &[&'ll Value],
+ funclet: Option<&Funclet<'ll>>,
+ ) -> &'ll Value {
+ debug!("call {:?} with args ({:?})", llfn, args);
+
+ let args = self.check_call("call", llty, llfn, args);
+ let bundle = funclet.map(|funclet| funclet.bundle());
+ let bundle = bundle.as_ref().map(|b| &*b.raw);
+
+ unsafe {
+ llvm::LLVMRustBuildCall(
+ self.llbuilder,
+ llty,
+ llfn,
+ args.as_ptr() as *const &llvm::Value,
+ args.len() as c_uint,
+ bundle,
+ )
+ }
+ }
+
+ fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn do_not_inline(&mut self, llret: &'ll Value) {
+ let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
+ attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
+ }
+}
+
+impl<'ll> StaticBuilderMethods for Builder<'_, 'll, '_> {
+ fn get_static(&mut self, def_id: DefId) -> &'ll Value {
+ // Forward to the `get_static` method of `CodegenCx`
+ self.cx().get_static(def_id)
+ }
+}
+
+impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
+ fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
+ // Create a fresh builder from the crate context.
+ let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) };
+ Builder { llbuilder, cx }
+ }
+
+ pub fn llfn(&self) -> &'ll Value {
+ unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) }
+ }
+
+ fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
+ unsafe {
+ llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
+ }
+ }
+
+ fn align_metadata(&mut self, load: &'ll Value, align: Align) {
+ unsafe {
+ let v = [self.cx.const_u64(align.bytes())];
+
+ llvm::LLVMSetMetadata(
+ load,
+ llvm::MD_align as c_uint,
+ llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
+ );
+ }
+ }
+
+ fn noundef_metadata(&mut self, load: &'ll Value) {
+ unsafe {
+ llvm::LLVMSetMetadata(
+ load,
+ llvm::MD_noundef as c_uint,
+ llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
+ );
+ }
+ }
+
+ pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
+ }
+
+ pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
+ }
+
+ pub fn insert_element(
+ &mut self,
+ vec: &'ll Value,
+ elt: &'ll Value,
+ idx: &'ll Value,
+ ) -> &'ll Value {
+ unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) }
+ }
+
+ pub fn shuffle_vector(
+ &mut self,
+ v1: &'ll Value,
+ v2: &'ll Value,
+ mask: &'ll Value,
+ ) -> &'ll Value {
+ unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) }
+ }
+
+ pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
+ }
+ pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
+ }
+ pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+ pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+ pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
+ }
+ pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
+ }
+ pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
+ }
+ pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
+ }
+ pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
+ }
+ pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe {
+ llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false)
+ }
+ }
+ pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe {
+ llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false)
+ }
+ }
+ pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr =
+ llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+ pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr =
+ llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+ pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
+ }
+ pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
+ }
+
+ pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
+ unsafe {
+ llvm::LLVMAddClause(landing_pad, clause);
+ }
+ }
+
+ pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
+ let ret =
+ unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
+ ret.expect("LLVM does not have support for catchret")
+ }
+
+ fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
+ let dest_ptr_ty = self.cx.val_ty(ptr);
+ let stored_ty = self.cx.val_ty(val);
+ let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
+
+ assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
+
+ if dest_ptr_ty == stored_ptr_ty {
+ ptr
+ } else {
+ debug!(
+ "type mismatch in store. \
+ Expected {:?}, got {:?}; inserting bitcast",
+ dest_ptr_ty, stored_ptr_ty
+ );
+ self.bitcast(ptr, stored_ptr_ty)
+ }
+ }
+
+ fn check_call<'b>(
+ &mut self,
+ typ: &str,
+ fn_ty: &'ll Type,
+ llfn: &'ll Value,
+ args: &'b [&'ll Value],
+ ) -> Cow<'b, [&'ll Value]> {
+ assert!(
+ self.cx.type_kind(fn_ty) == TypeKind::Function,
+ "builder::{} not passed a function, but {:?}",
+ typ,
+ fn_ty
+ );
+
+ let param_tys = self.cx.func_params_types(fn_ty);
+
+ let all_args_match = iter::zip(&param_tys, args.iter().map(|&v| self.val_ty(v)))
+ .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
+
+ if all_args_match {
+ return Cow::Borrowed(args);
+ }
+
+ let casted_args: Vec<_> = iter::zip(param_tys, args)
+ .enumerate()
+ .map(|(i, (expected_ty, &actual_val))| {
+ let actual_ty = self.val_ty(actual_val);
+ if expected_ty != actual_ty {
+ debug!(
+ "type mismatch in function call of {:?}. \
+ Expected {:?} for param {}, got {:?}; injecting bitcast",
+ llfn, expected_ty, i, actual_ty
+ );
+ self.bitcast(actual_val, expected_ty)
+ } else {
+ actual_val
+ }
+ })
+ .collect();
+
+ Cow::Owned(casted_args)
+ }
+
+ pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
+ }
+
+ pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
+ let (ty, f) = self.cx.get_intrinsic(intrinsic);
+ self.call(ty, f, args, None)
+ }
+
+ fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
+ let size = size.bytes();
+ if size == 0 {
+ return;
+ }
+
+ if !self.cx().sess().emit_lifetime_markers() {
+ return;
+ }
+
+ let ptr = self.pointercast(ptr, self.cx.type_i8p());
+ self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
+ }
+
+ pub(crate) fn phi(
+ &mut self,
+ ty: &'ll Type,
+ vals: &[&'ll Value],
+ bbs: &[&'ll BasicBlock],
+ ) -> &'ll Value {
+ assert_eq!(vals.len(), bbs.len());
+ let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) };
+ unsafe {
+ llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint);
+ phi
+ }
+ }
+
+ fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
+ unsafe {
+ llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
+ }
+ }
+
+ fn fptoint_sat_broken_in_llvm(&self) -> bool {
+ match self.tcx.sess.target.arch.as_ref() {
+ // FIXME - https://bugs.llvm.org/show_bug.cgi?id=50083
+ "riscv64" => llvm_util::get_version() < (13, 0, 0),
+ _ => false,
+ }
+ }
+
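+ // Lowers to the `llvm.fpto{s,u}i.sat.*` intrinsics, e.g.
+ // `llvm.fptosi.sat.i32.f64` for a saturating `f64 -> i32` cast, or
+ // `llvm.fptosi.sat.v4i32.v4f32` for the corresponding vector form.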
+ fn fptoint_sat(
+ &mut self,
+ signed: bool,
+ val: &'ll Value,
+ dest_ty: &'ll Type,
+ ) -> Option<&'ll Value> {
+ if !self.fptoint_sat_broken_in_llvm() {
+ let src_ty = self.cx.val_ty(val);
+ let (float_ty, int_ty, vector_length) = if self.cx.type_kind(src_ty) == TypeKind::Vector
+ {
+ assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
+ (
+ self.cx.element_type(src_ty),
+ self.cx.element_type(dest_ty),
+ Some(self.cx.vector_length(src_ty)),
+ )
+ } else {
+ (src_ty, dest_ty, None)
+ };
+ let float_width = self.cx.float_width(float_ty);
+ let int_width = self.cx.int_width(int_ty);
+
+ let instr = if signed { "fptosi" } else { "fptoui" };
+ let name = if let Some(vector_length) = vector_length {
+ format!(
+ "llvm.{}.sat.v{}i{}.v{}f{}",
+ instr, vector_length, int_width, vector_length, float_width
+ )
+ } else {
+ format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
+ };
+ let f =
+ self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
+ Some(self.call(self.type_func(&[src_ty], dest_ty), f, &[val], None))
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn landing_pad(
+ &mut self,
+ ty: &'ll Type,
+ pers_fn: &'ll Value,
+ num_clauses: usize,
+ ) -> &'ll Value {
+ // Use LLVMSetPersonalityFn to set the personality. It supports arbitrary Consts,
+ // while LLVMBuildLandingPad requires the argument to be a Function (as of LLVM 12).
+ // The personality lives on the parent function anyway.
+ self.set_personality_fn(pers_fn);
+ unsafe {
+ llvm::LLVMBuildLandingPad(self.llbuilder, ty, None, num_clauses as c_uint, UNNAMED)
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
new file mode 100644
index 000000000..72155d874
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -0,0 +1,194 @@
+//! Handles codegen of callees as well as other call-related
+//! things. Callees are a superset of normal Rust values and sometimes
+//! have different representations. In particular, top-level fn items
+//! and methods are represented as just a fn ptr and not a full
+//! closure.
+
+use crate::abi::FnAbiLlvmExt;
+use crate::attributes;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::value::Value;
+use rustc_codegen_ssa::traits::*;
+use tracing::debug;
+
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
+use rustc_middle::ty::{self, Instance, TypeVisitable};
+
+/// Codegens a reference to a fn/method item, monomorphizing and
+/// inlining as it goes.
+///
+/// # Parameters
+///
+/// - `cx`: the crate context
+/// - `instance`: the instance to be instantiated
+pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> &'ll Value {
+ let tcx = cx.tcx();
+
+ debug!("get_fn(instance={:?})", instance);
+
+ assert!(!instance.substs.needs_infer());
+ assert!(!instance.substs.has_escaping_bound_vars());
+
+ if let Some(&llfn) = cx.instances.borrow().get(&instance) {
+ return llfn;
+ }
+
+ let sym = tcx.symbol_name(instance).name;
+ debug!(
+ "get_fn({:?}: {:?}) => {}",
+ instance,
+ instance.ty(cx.tcx(), ty::ParamEnv::reveal_all()),
+ sym
+ );
+
+ let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
+
+ let llfn = if let Some(llfn) = cx.get_declared_value(sym) {
+ // Create a fn pointer with the new signature.
+ let llptrty = fn_abi.ptr_to_llvm_type(cx);
+
+ // This is subtle and surprising, but sometimes we have to bitcast
+ // the resulting fn pointer. The reason has to do with external
+ // functions. If you have two crates that both bind the same C
+ // library, they may not use precisely the same types: for
+ // example, they will probably each declare their own structs,
+ // which are distinct types from LLVM's point of view (nominal
+ // types).
+ //
+ // Now, if those two crates are linked into an application, and
+ // they contain inlined code, you can wind up with a situation
+ // where both of those functions wind up being loaded into this
+ // application simultaneously. In that case, the same function
+ // (from LLVM's point of view) requires two types. But of course
+ // LLVM won't allow one function to have two types.
+ //
+ // What we currently do, therefore, is declare the function with
+ // one of the two types (whichever happens to come first) and then
+ // bitcast as needed when the function is referenced to make sure
+ // it has the type we expect.
+ //
+ // This can occur on either a crate-local or crate-external
+ // reference. It also occurs when testing libcore and in some
+ // other weird situations. Annoying.
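+ //
+ // For example (illustrative): crates `a` and `b` each write
+ // `extern "C" { fn qux(p: *mut S); }` against their own local
+ // `struct S`, so the two declarations of `qux` lower to distinct
+ // LLVM function types even though they name the same symbol.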
+ if cx.val_ty(llfn) != llptrty {
+ debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
+ cx.const_ptrcast(llfn, llptrty)
+ } else {
+ debug!("get_fn: not casting pointer!");
+ llfn
+ }
+ } else {
+ let llfn = cx.declare_fn(sym, fn_abi);
+ debug!("get_fn: not casting pointer!");
+
+ attributes::from_fn_attrs(cx, llfn, instance);
+
+ let instance_def_id = instance.def_id();
+
+ // Apply an appropriate linkage/visibility value to our item that we
+ // just declared.
+ //
+ // This is sort of subtle. Inside our codegen unit we started off
+ // compilation by predefining all our own `MonoItem` instances. That
+ // is, everything we're codegenning ourselves is already defined. That
+ // means that anything we're actually codegenning in this codegen unit
+ // will have hit the above branch in `get_declared_value`. As a result,
+ // we're guaranteed here that we're declaring a symbol that won't get
+ // defined, or in other words we're referencing a value from another
+ // codegen unit or even another crate.
+ //
+ // So because this is a foreign value we blanket apply an external
+ // linkage directive because it's coming from a different object file.
+ // The visibility here is where it gets tricky. This symbol could be
+ // referencing some foreign crate or foreign library (an `extern`
+ // block) in which case we want to leave the default visibility. We may
+ // also, though, have multiple codegen units. It could be a
+ // monomorphization, in which case its expected visibility depends on
+ // whether we are sharing generics or not. The important thing here is
+ // that the visibility we apply to the declaration is the same one that
+ // has been applied to the definition (wherever that definition may be).
+ unsafe {
+ llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
+
+ let is_generic = instance.substs.non_erasable_generics().next().is_some();
+
+ if is_generic {
+ // This is a monomorphization. Its expected visibility depends
+ // on whether we are in share-generics mode.
+
+ if cx.tcx.sess.opts.share_generics() {
+ // We are in share_generics mode.
+
+ if let Some(instance_def_id) = instance_def_id.as_local() {
+ // This is a definition from the current crate. If the
+ // definition is unreachable for downstream crates or
+ // the current crate does not re-export generics, the
+ // definition of the instance will have been declared
+ // as `hidden`.
+ if cx.tcx.is_unreachable_local_definition(instance_def_id)
+ || !cx.tcx.local_crate_exports_generics()
+ {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ } else {
+ // This is a monomorphization of a generic function
+ // defined in an upstream crate.
+ if instance.upstream_monomorphization(tcx).is_some() {
+ // This is instantiated in another crate. It cannot
+ // be `hidden`.
+ } else {
+ // This is a local instantiation of an upstream definition.
+ // If the current crate does not re-export it
+ // (because it is a C library or an executable), it
+ // will have been declared `hidden`.
+ if !cx.tcx.local_crate_exports_generics() {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ }
+ }
+ } else {
+ // When not sharing generics, all instances are in the same
+ // crate and have hidden visibility.
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ } else {
+ // This is a non-generic function
+ if cx.tcx.is_codegened_item(instance_def_id) {
+ // This is a function that is instantiated in the local crate
+
+ if instance_def_id.is_local() {
+ // This is a function that is defined in the local crate.
+ // If it is not reachable, it is hidden.
+ if !cx.tcx.is_reachable_non_generic(instance_def_id) {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ } else {
+ // This is a function from an upstream crate that has
+ // been instantiated here. These are always hidden.
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ }
+ }
+
+ // MinGW: For backward compatibility we rely on the linker to decide whether it
+ // should use dllimport for functions.
+ if cx.use_dll_storage_attrs
+ && tcx.is_dllimport_foreign_item(instance_def_id)
+ && !matches!(tcx.sess.target.env.as_ref(), "gnu" | "uclibc")
+ {
+ llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
+ }
+
+ if cx.should_assume_dso_local(llfn, true) {
+ llvm::LLVMRustSetDSOLocal(llfn, true);
+ }
+ }
+
+ llfn
+ };
+
+ cx.instances.borrow_mut().insert(instance, llfn);
+
+ llfn
+}
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
new file mode 100644
index 000000000..fb4da9a5f
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -0,0 +1,359 @@
+//! Code that is useful in various codegen modules.
+
+use crate::consts::{self, const_alloc_to_llvm};
+pub use crate::context::CodegenCx;
+use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+
+use rustc_ast::Mutability;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_middle::bug;
+use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer, Size};
+
+use libc::{c_char, c_uint};
+use tracing::debug;
+
+/*
+* A note on the nomenclature of linking: "extern", "foreign", and "upcall".
+*
+* An "extern" is an LLVM symbol we wind up emitting an undefined external
+* reference to. This means "we don't have the thing in this compilation unit,
+* please make sure you link it in at runtime". This could be a reference to
+* C code found in a C library, or Rust code found in a Rust crate.
+*
+* Most "externs" are implicitly declared as a result of a user declaring an
+* extern _module_ dependency; this causes the Rust driver to locate an
+* extern crate, scan its compilation metadata, and emit extern declarations
+* for any symbols used by the declaring crate.
+*
+* A "foreign" is an extern that references C (or other non-Rust ABI) code.
+* There is no metadata to scan for extern references, so in these cases
+* either a header-digester like bindgen or manual function prototypes have
+* to serve as declarators. These are usually given explicitly as prototype
+* declarations in Rust code, with ABI attributes on them noting which ABI
+* to link via.
+*
+* An "upcall" is a foreign call generated by the compiler (not corresponding
+* to any user-written call in the code) into the runtime library, to perform
+* some helper task such as bringing a task to life, allocating memory, etc.
+*
+*/
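+
+// For illustration (a sketch, not code from this module): a "foreign"
+// item is declared in user code roughly like
+//
+//     extern "C" {
+//         fn strlen(s: *const std::os::raw::c_char) -> usize;
+//     }
+//
+// whereas "externs" to other Rust crates need no prototype at all; they
+// are emitted from the dependency's compilation metadata.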
+
+/// A structure representing an active landing pad for the duration of a basic
+/// block.
+///
+/// Each `Block` may contain an instance of this, indicating whether the block
+/// is part of a landing pad or not. This is used to make decisions about whether
+/// to emit `invoke` instructions (e.g., in a landing pad we don't continue to
+/// use `invoke`) and also about various function call metadata.
+///
+/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is
+/// just a bunch of `None` instances (not too interesting), but for MSVC
+/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data.
+/// When inside of a landing pad, each function call in LLVM IR needs to be
+/// annotated with which landing pad it's a part of. This is accomplished via
+/// the `OperandBundleDef` value created for MSVC landing pads.
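+///
+/// For illustration, a call inside an MSVC cleanup funclet is annotated in
+/// LLVM IR roughly as follows (sketch; `@do_cleanup` is a placeholder):
+///
+/// ```text
+/// %pad = cleanuppad within none []
+/// call void @do_cleanup() [ "funclet"(token %pad) ]
+/// cleanupret from %pad unwind to caller
+/// ```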
+pub struct Funclet<'ll> {
+ cleanuppad: &'ll Value,
+ operand: OperandBundleDef<'ll>,
+}
+
+impl<'ll> Funclet<'ll> {
+ pub fn new(cleanuppad: &'ll Value) -> Self {
+ Funclet { cleanuppad, operand: OperandBundleDef::new("funclet", &[cleanuppad]) }
+ }
+
+ pub fn cleanuppad(&self) -> &'ll Value {
+ self.cleanuppad
+ }
+
+ pub fn bundle(&self) -> &OperandBundleDef<'ll> {
+ &self.operand
+ }
+}
+
+impl<'ll> BackendTypes for CodegenCx<'ll, '_> {
+ type Value = &'ll Value;
+ // FIXME(eddyb) replace this with a `Function` "subclass" of `Value`.
+ type Function = &'ll Value;
+
+ type BasicBlock = &'ll BasicBlock;
+ type Type = &'ll Type;
+ type Funclet = Funclet<'ll>;
+
+ type DIScope = &'ll llvm::debuginfo::DIScope;
+ type DILocation = &'ll llvm::debuginfo::DILocation;
+ type DIVariable = &'ll llvm::debuginfo::DIVariable;
+}
+
+impl<'ll> CodegenCx<'ll, '_> {
+ pub fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
+ unsafe { llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint) }
+ }
+
+ pub fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value {
+ unsafe { llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint) }
+ }
+
+ pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value {
+ bytes_in_context(self.llcx, bytes)
+ }
+
+ pub fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value {
+ unsafe {
+ assert_eq!(idx as c_uint as u64, idx);
+ let r = llvm::LLVMGetAggregateElement(v, idx as c_uint).unwrap();
+
+ debug!("const_get_elt(v={:?}, idx={}, r={:?})", v, idx, r);
+
+ r
+ }
+ }
+}
+
+impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn const_null(&self, t: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMConstNull(t) }
+ }
+
+ fn const_undef(&self, t: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMGetUndef(t) }
+ }
+
+ fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
+ unsafe { llvm::LLVMConstInt(t, i as u64, True) }
+ }
+
+ fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
+ unsafe { llvm::LLVMConstInt(t, i, False) }
+ }
+
+ fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
+ unsafe {
+ let words = [u as u64, (u >> 64) as u64];
+ llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
+ }
+ }
+
+ fn const_bool(&self, val: bool) -> &'ll Value {
+ self.const_uint(self.type_i1(), val as u64)
+ }
+
+ fn const_i16(&self, i: i16) -> &'ll Value {
+ self.const_int(self.type_i16(), i as i64)
+ }
+
+ fn const_i32(&self, i: i32) -> &'ll Value {
+ self.const_int(self.type_i32(), i as i64)
+ }
+
+ fn const_u32(&self, i: u32) -> &'ll Value {
+ self.const_uint(self.type_i32(), i as u64)
+ }
+
+ fn const_u64(&self, i: u64) -> &'ll Value {
+ self.const_uint(self.type_i64(), i)
+ }
+
+ fn const_usize(&self, i: u64) -> &'ll Value {
+ let bit_size = self.data_layout().pointer_size.bits();
+ if bit_size < 64 {
+ // make sure it doesn't overflow
+ assert!(i < (1 << bit_size));
+ }
+
+ self.const_uint(self.isize_ty, i)
+ }
+
+ fn const_u8(&self, i: u8) -> &'ll Value {
+ self.const_uint(self.type_i8(), i as u64)
+ }
+
+ fn const_real(&self, t: &'ll Type, val: f64) -> &'ll Value {
+ unsafe { llvm::LLVMConstReal(t, val) }
+ }
+
+ fn const_str(&self, s: &str) -> (&'ll Value, &'ll Value) {
+ let str_global = *self
+ .const_str_cache
+ .borrow_mut()
+ .raw_entry_mut()
+ .from_key(s)
+ .or_insert_with(|| {
+ let sc = self.const_bytes(s.as_bytes());
+ let sym = self.generate_local_symbol_name("str");
+ let g = self.define_global(&sym, self.val_ty(sc)).unwrap_or_else(|| {
+ bug!("symbol `{}` is already defined", sym);
+ });
+ unsafe {
+ llvm::LLVMSetInitializer(g, sc);
+ llvm::LLVMSetGlobalConstant(g, True);
+ llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage);
+ }
+ (s.to_owned(), g)
+ })
+ .1;
+ let len = s.len();
+ let cs = consts::ptrcast(
+ str_global,
+ self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self)),
+ );
+ (cs, self.const_usize(len as u64))
+ }
+
+ fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value {
+ struct_in_context(self.llcx, elts, packed)
+ }
+
+ fn const_to_opt_uint(&self, v: &'ll Value) -> Option<u64> {
+ try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) })
+ }
+
+ fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> {
+ try_as_const_integral(v).and_then(|v| unsafe {
+ let (mut lo, mut hi) = (0u64, 0u64);
+ let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo);
+ success.then_some(hi_lo_to_u128(lo, hi))
+ })
+ }
+
+ fn zst_to_backend(&self, _llty: &'ll Type) -> &'ll Value {
+ self.const_undef(self.type_ix(0))
+ }
+
+ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
+ let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
+ match cv {
+ Scalar::Int(int) => {
+ let data = int.assert_bits(layout.size(self));
+ let llval = self.const_uint_big(self.type_ix(bitsize), data);
+ if layout.primitive() == Pointer {
+ unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
+ } else {
+ self.const_bitcast(llval, llty)
+ }
+ }
+ Scalar::Ptr(ptr, _size) => {
+ let (alloc_id, offset) = ptr.into_parts();
+ let (base_addr, base_addr_space) = match self.tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Memory(alloc) => {
+ let init = const_alloc_to_llvm(self, alloc);
+ let alloc = alloc.inner();
+ let value = match alloc.mutability {
+ Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
+ _ => self.static_addr_of(init, alloc.align, None),
+ };
+ if !self.sess().fewer_names() {
+ llvm::set_value_name(value, format!("{:?}", alloc_id).as_bytes());
+ }
+ (value, AddressSpace::DATA)
+ }
+ GlobalAlloc::Function(fn_instance) => (
+ self.get_fn_addr(fn_instance.polymorphize(self.tcx)),
+ self.data_layout().instruction_address_space,
+ ),
+ GlobalAlloc::VTable(ty, trait_ref) => {
+ let alloc = self
+ .tcx
+ .global_alloc(self.tcx.vtable_allocation((ty, trait_ref)))
+ .unwrap_memory();
+ let init = const_alloc_to_llvm(self, alloc);
+ let value = self.static_addr_of(init, alloc.inner().align, None);
+ (value, AddressSpace::DATA)
+ }
+ GlobalAlloc::Static(def_id) => {
+ assert!(self.tcx.is_static(def_id));
+ assert!(!self.tcx.is_thread_local_static(def_id));
+ (self.get_static(def_id), AddressSpace::DATA)
+ }
+ };
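+ // The final value is computed as a byte-offset GEP from the base
+ // object, i.e. roughly (sketch):
+ //     getelementptr inbounds (i8, i8* <base>, i64 <offset>)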
+ let llval = unsafe {
+ llvm::LLVMRustConstInBoundsGEP2(
+ self.type_i8(),
+ self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)),
+ &self.const_usize(offset.bytes()),
+ 1,
+ )
+ };
+ if layout.primitive() != Pointer {
+ unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
+ } else {
+ self.const_bitcast(llval, llty)
+ }
+ }
+ }
+ }
+
+ fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value {
+ const_alloc_to_llvm(self, alloc)
+ }
+
+ fn from_const_alloc(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ alloc: ConstAllocation<'tcx>,
+ offset: Size,
+ ) -> PlaceRef<'tcx, &'ll Value> {
+ let alloc_align = alloc.inner().align;
+ assert_eq!(alloc_align, layout.align.abi);
+ let llty = self.type_ptr_to(layout.llvm_type(self));
+ let llval = if layout.size == Size::ZERO {
+ let llval = self.const_usize(alloc_align.bytes());
+ unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
+ } else {
+ let init = const_alloc_to_llvm(self, alloc);
+ let base_addr = self.static_addr_of(init, alloc_align, None);
+
+ let llval = unsafe {
+ llvm::LLVMRustConstInBoundsGEP2(
+ self.type_i8(),
+ self.const_bitcast(base_addr, self.type_i8p()),
+ &self.const_usize(offset.bytes()),
+ 1,
+ )
+ };
+ self.const_bitcast(llval, llty)
+ };
+ PlaceRef::new_sized(llval, layout)
+ }
+
+ fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+ consts::ptrcast(val, ty)
+ }
+}
+
+/// Get the [LLVM type][Type] of a [`Value`].
+pub fn val_ty(v: &Value) -> &Type {
+ unsafe { llvm::LLVMTypeOf(v) }
+}
+
+pub fn bytes_in_context<'ll>(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
+ unsafe {
+ let ptr = bytes.as_ptr() as *const c_char;
+ llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True)
+ }
+}
+
+pub fn struct_in_context<'ll>(
+ llcx: &'ll llvm::Context,
+ elts: &[&'ll Value],
+ packed: bool,
+) -> &'ll Value {
+ unsafe {
+ llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), elts.len() as c_uint, packed as Bool)
+ }
+}
+
+#[inline]
+fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
+ ((hi as u128) << 64) | (lo as u128)
+}
+
+fn try_as_const_integral(v: &Value) -> Option<&ConstantInt> {
+ unsafe { llvm::LLVMIsAConstantInt(v) }
+}
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
new file mode 100644
index 000000000..18467e370
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -0,0 +1,577 @@
+use crate::base;
+use crate::common::CodegenCx;
+use crate::debuginfo;
+use crate::llvm::{self, True};
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use cstr::cstr;
+use libc::c_uint;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir::def_id::DefId;
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::interpret::{
+ read_target_uint, Allocation, ConstAllocation, ErrorHandled, GlobalAlloc, InitChunk, Pointer,
+ Scalar as InterpScalar,
+};
+use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_middle::{bug, span_bug};
+use rustc_target::abi::{
+ AddressSpace, Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange,
+};
+use std::ops::Range;
+use tracing::debug;
+
+pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value {
+ let alloc = alloc.inner();
+ let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
+ let dl = cx.data_layout();
+ let pointer_size = dl.pointer_size.bytes() as usize;
+
+ // Note: this function may call `inspect_with_uninit_and_ptr_outside_interpreter`,
+ // so `range` must be within the bounds of `alloc` and not contain or overlap a relocation.
+ fn append_chunks_of_init_and_uninit_bytes<'ll, 'a, 'b>(
+ llvals: &mut Vec<&'ll Value>,
+ cx: &'a CodegenCx<'ll, 'b>,
+ alloc: &'a Allocation,
+ range: Range<usize>,
+ ) {
+ let chunks = alloc
+ .init_mask()
+ .range_as_init_chunks(Size::from_bytes(range.start), Size::from_bytes(range.end));
+
+ let chunk_to_llval = move |chunk| match chunk {
+ InitChunk::Init(range) => {
+ let range = (range.start.bytes() as usize)..(range.end.bytes() as usize);
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+ cx.const_bytes(bytes)
+ }
+ InitChunk::Uninit(range) => {
+ let len = range.end.bytes() - range.start.bytes();
+ cx.const_undef(cx.type_array(cx.type_i8(), len))
+ }
+ };
+
+ // Generating partially-uninit consts is limited to small numbers of chunks,
+ // to avoid the cost of generating large complex const expressions.
+ // For example, `[(u32, u8); 1024 * 1024]` contains uninit padding in each element,
+ // and would result in `{ [5 x i8] zeroinitializer, [3 x i8] undef, ...repeat 1M times... }`.
+ let max = if llvm_util::get_version() < (14, 0, 0) {
+ // Generating partially-uninit consts inhibits optimizations in LLVM < 14.
+ // See https://github.com/rust-lang/rust/issues/84565.
+ 1
+ } else {
+ cx.sess().opts.unstable_opts.uninit_const_chunk_threshold
+ };
+ let allow_uninit_chunks = chunks.clone().take(max.saturating_add(1)).count() <= max;
+
+ if allow_uninit_chunks {
+ llvals.extend(chunks.map(chunk_to_llval));
+ } else {
+ // If this allocation contains any uninit bytes, codegen as if it was initialized
+ // (using some arbitrary value for uninit bytes).
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+ llvals.push(cx.const_bytes(bytes));
+ }
+ }
+
+ let mut next_offset = 0;
+ for &(offset, alloc_id) in alloc.relocations().iter() {
+ let offset = offset.bytes();
+ assert_eq!(offset as usize as u64, offset);
+ let offset = offset as usize;
+ if offset > next_offset {
+ // This `inspect` is okay since we have checked that it is not within a relocation, it
+ // is within the bounds of the allocation, and it doesn't affect interpreter execution
+ // (we inspect the result after interpreter execution).
+ append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, next_offset..offset);
+ }
+ let ptr_offset = read_target_uint(
+ dl.endian,
+ // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
+ // affect interpreter execution (we inspect the result after interpreter execution),
+ // and we properly interpret the relocation as a relocation pointer offset.
+ alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+ )
+ .expect("const_alloc_to_llvm: could not read relocation pointer")
+ as u64;
+
+ let address_space = match cx.tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Function(..) => cx.data_layout().instruction_address_space,
+ GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) | GlobalAlloc::VTable(..) => {
+ AddressSpace::DATA
+ }
+ };
+
+ llvals.push(cx.scalar_to_backend(
+ InterpScalar::from_pointer(
+ Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
+ &cx.tcx,
+ ),
+ Scalar::Initialized {
+ value: Primitive::Pointer,
+ valid_range: WrappingRange::full(dl.pointer_size),
+ },
+ cx.type_i8p_ext(address_space),
+ ));
+ next_offset = offset + pointer_size;
+ }
+ if alloc.len() >= next_offset {
+ let range = next_offset..alloc.len();
+ // This `inspect` is okay since we have checked that it is after all relocations, it is
+ // within the bounds of the allocation, and it doesn't affect interpreter execution (we
+ // inspect the result after interpreter execution).
+ append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, range);
+ }
+
+ cx.const_struct(&llvals, true)
+}
+
+pub fn codegen_static_initializer<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ def_id: DefId,
+) -> Result<(&'ll Value, ConstAllocation<'tcx>), ErrorHandled> {
+ let alloc = cx.tcx.eval_static_initializer(def_id)?;
+ Ok((const_alloc_to_llvm(cx, alloc), alloc))
+}
+
+fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {
+ // The target may require greater alignment for globals than the type does.
+ // Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
+ // which can force it to be smaller. Rust doesn't support this yet.
+ if let Some(min) = cx.sess().target.min_global_align {
+ match Align::from_bits(min) {
+ Ok(min) => align = align.max(min),
+ Err(err) => {
+ cx.sess().err(&format!("invalid minimum global alignment: {}", err));
+ }
+ }
+ }
+ unsafe {
+ llvm::LLVMSetAlignment(gv, align.bytes() as u32);
+ }
+}
+
+fn check_and_apply_linkage<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ attrs: &CodegenFnAttrs,
+ ty: Ty<'tcx>,
+ sym: &str,
+ span_def_id: DefId,
+) -> &'ll Value {
+ let llty = cx.layout_of(ty).llvm_type(cx);
+ if let Some(linkage) = attrs.linkage {
+ debug!("get_static: sym={} linkage={:?}", sym, linkage);
+
+ // If this is a static with a linkage specified, then we need to handle
+ // it a little specially. The typesystem prevents things like &T and
+ // extern "C" fn() from being non-null, so we can't just declare a
+ // static and call it a day. Some linkages (like weak) will make it such
+ // that the static actually has a null value.
+ let llty2 = if let ty::RawPtr(ref mt) = ty.kind() {
+ cx.layout_of(mt.ty).llvm_type(cx)
+ } else {
+ cx.sess().span_fatal(
+ cx.tcx.def_span(span_def_id),
+ "must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
+ )
+ };
+ unsafe {
+ // Declare a symbol `foo` with the desired linkage.
+ let g1 = cx.declare_global(sym, llty2);
+ llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));
+
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
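+ //
+ // Sketch of the resulting IR (illustrative; `foo` stands in for `sym`):
+ //
+ //     @foo = extern_weak global i32
+ //     @_rust_extern_with_linkage_foo = internal global i32* @foo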
+ let mut real_name = "_rust_extern_with_linkage_".to_string();
+ real_name.push_str(sym);
+ let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
+ cx.sess().span_fatal(
+ cx.tcx.def_span(span_def_id),
+ &format!("symbol `{}` is already defined", &sym),
+ )
+ });
+ llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
+ llvm::LLVMSetInitializer(g2, g1);
+ g2
+ }
+ } else {
+ // Generate an external declaration.
+ // FIXME(nagisa): investigate whether it can be changed into define_global
+ cx.declare_global(sym, llty)
+ }
+}
+
+pub fn ptrcast<'ll>(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMConstPointerCast(val, ty) }
+}
+
+impl<'ll> CodegenCx<'ll, '_> {
+ pub(crate) fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMConstBitCast(val, ty) }
+ }
+
+ pub(crate) fn static_addr_of_mut(
+ &self,
+ cv: &'ll Value,
+ align: Align,
+ kind: Option<&str>,
+ ) -> &'ll Value {
+ unsafe {
+ let gv = match kind {
+ Some(kind) if !self.tcx.sess.fewer_names() => {
+ let name = self.generate_local_symbol_name(kind);
+ let gv = self.define_global(&name, self.val_ty(cv)).unwrap_or_else(|| {
+ bug!("symbol `{}` is already defined", name);
+ });
+ llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
+ gv
+ }
+ _ => self.define_private_global(self.val_ty(cv)),
+ };
+ llvm::LLVMSetInitializer(gv, cv);
+ set_global_alignment(self, gv, align);
+ llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global);
+ gv
+ }
+ }
+
+ pub(crate) fn get_static(&self, def_id: DefId) -> &'ll Value {
+ let instance = Instance::mono(self.tcx, def_id);
+ if let Some(&g) = self.instances.borrow().get(&instance) {
+ return g;
+ }
+
+ let defined_in_current_codegen_unit =
+ self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
+ assert!(
+ !defined_in_current_codegen_unit,
+ "consts::get_static() should always hit the cache for \
+ statics defined in the same CGU, but did not for `{:?}`",
+ def_id
+ );
+
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let sym = self.tcx.symbol_name(instance).name;
+ let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
+
+ debug!("get_static: sym={} instance={:?} fn_attrs={:?}", sym, instance, fn_attrs);
+
+ let g = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
+ let llty = self.layout_of(ty).llvm_type(self);
+ if let Some(g) = self.get_declared_value(sym) {
+ if self.val_ty(g) != self.type_ptr_to(llty) {
+ span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
+ }
+ }
+
+ let g = self.declare_global(sym, llty);
+
+ if !self.tcx.is_reachable_non_generic(def_id) {
+ unsafe {
+ llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden);
+ }
+ }
+
+ g
+ } else {
+ check_and_apply_linkage(self, fn_attrs, ty, sym, def_id)
+ };
+
+ // Thread-local statics in some other crate need to *always* be linked
+ // against in a thread-local fashion, so we need to be sure to apply the
+ // thread-local attribute locally if it was present remotely. If we
+ // don't do this then linker errors can be generated where the linker
+ // complains that one object file has a thread local version of the
+ // symbol and another one doesn't.
+ if fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+ llvm::set_thread_local_mode(g, self.tls_model);
+ }
+
+ if !def_id.is_local() {
+ let needs_dll_storage_attr = self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
+ // ThinLTO can't handle this workaround in all cases, so we don't
+ // emit the attrs. Instead we make them unnecessary by disallowing
+ // dynamic linking when linker plugin based LTO is enabled.
+ !self.tcx.sess.opts.cg.linker_plugin_lto.enabled();
+
+ // If this assertion triggers, there's something wrong with command-line
+ // argument validation.
+ debug_assert!(
+ !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
+ && self.tcx.sess.target.is_like_windows
+ && self.tcx.sess.opts.cg.prefer_dynamic)
+ );
+
+ if needs_dll_storage_attr {
+ // This item is external but not foreign, i.e., it originates from an external Rust
+ // crate. Since we don't know whether this crate will be linked dynamically or
+ // statically in the final application, we always mark such symbols as 'dllimport'.
+ // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
+ // to make things work.
+ //
+ // However, in some scenarios we defer emission of statics to downstream
+ // crates, so there are cases where a static with an upstream DefId
+ // is actually present in the current crate. We can find out via the
+ // is_codegened_item query.
+ if !self.tcx.is_codegened_item(def_id) {
+ unsafe {
+ llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
+ }
+ }
+ }
+ }
+
+ if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) {
+ // For foreign (native) libs we know the exact storage type to use.
+ unsafe {
+ llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
+ }
+ }
+
+ unsafe {
+ if self.should_assume_dso_local(g, true) {
+ llvm::LLVMRustSetDSOLocal(g, true);
+ }
+ }
+
+ self.instances.borrow_mut().insert(instance, g);
+ g
+ }
+}
+
+impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
+ fn static_addr_of(&self, cv: &'ll Value, align: Align, kind: Option<&str>) -> &'ll Value {
+ if let Some(&gv) = self.const_globals.borrow().get(&cv) {
+ unsafe {
+ // Upgrade the alignment in cases where the same constant is used with different
+ // alignment requirements
+ let llalign = align.bytes() as u32;
+ if llalign > llvm::LLVMGetAlignment(gv) {
+ llvm::LLVMSetAlignment(gv, llalign);
+ }
+ }
+ return gv;
+ }
+ let gv = self.static_addr_of_mut(cv, align, kind);
+ unsafe {
+ llvm::LLVMSetGlobalConstant(gv, True);
+ }
+ self.const_globals.borrow_mut().insert(cv, gv);
+ gv
+ }
+
+ fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
+ unsafe {
+ let attrs = self.tcx.codegen_fn_attrs(def_id);
+
+ let Ok((v, alloc)) = codegen_static_initializer(self, def_id) else {
+ // Error has already been reported
+ return;
+ };
+ let alloc = alloc.inner();
+
+ let g = self.get_static(def_id);
+
+ // boolean SSA values are i1, but they have to be stored in i8 slots,
+ // otherwise some LLVM optimization passes don't work as expected
+ let mut val_llty = self.val_ty(v);
+ let v = if val_llty == self.type_i1() {
+ val_llty = self.type_i8();
+ llvm::LLVMConstZExt(v, val_llty)
+ } else {
+ v
+ };
+
+ let instance = Instance::mono(self.tcx, def_id);
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let llty = self.layout_of(ty).llvm_type(self);
+ let g = if val_llty == llty {
+ g
+ } else {
+ // If we created the global with the wrong type,
+ // correct the type.
+ let name = llvm::get_value_name(g).to_vec();
+ llvm::set_value_name(g, b"");
+
+ let linkage = llvm::LLVMRustGetLinkage(g);
+ let visibility = llvm::LLVMRustGetVisibility(g);
+
+ let new_g = llvm::LLVMRustGetOrInsertGlobal(
+ self.llmod,
+ name.as_ptr().cast(),
+ name.len(),
+ val_llty,
+ );
+
+ llvm::LLVMRustSetLinkage(new_g, linkage);
+ llvm::LLVMRustSetVisibility(new_g, visibility);
+
+ // The old global has had its name removed but is returned by
+ // get_static since it is in the instance cache. Provide an
+ // alternative lookup that points to the new global so that
+ // global_asm! can compute the correct mangled symbol name
+ // for the global.
+ self.renamed_statics.borrow_mut().insert(def_id, new_g);
+
+ // To avoid breaking any invariants, we leave around the old
+ // global for the moment; we'll replace all references to it
+ // with the new global later. (See base::codegen_backend.)
+ self.statics_to_rauw.borrow_mut().push((g, new_g));
+ new_g
+ };
+ set_global_alignment(self, g, self.align_of(ty));
+ llvm::LLVMSetInitializer(g, v);
+
+ if self.should_assume_dso_local(g, true) {
+ llvm::LLVMRustSetDSOLocal(g, true);
+ }
+
+ // As an optimization, all shared statics which do not have interior
+ // mutability are placed into read-only memory.
+ if !is_mutable && self.type_is_freeze(ty) {
+ llvm::LLVMSetGlobalConstant(g, llvm::True);
+ }
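+ // For example (sketch), `static X: u32 = 5;` ends up as
+ // `@X = ... constant i32 5`, while `static mut` and interior-mutable
+ // statics remain writable `global`s.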
+
+ debuginfo::build_global_var_di_node(self, def_id, g);
+
+ if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+ llvm::set_thread_local_mode(g, self.tls_model);
+
+ // Do not allow LLVM to change the alignment of a TLS on macOS.
+ //
+ // By default a global's alignment can be freely increased.
+ // This allows LLVM to generate more performant instructions
+ // (e.g., aligned loads into a SIMD register).
+ //
+ // However, on macOS 10.10 or below, the dynamic linker does not
+ // respect any alignment given on the TLS (radar 24221680).
+ // This violates the alignment assumption and causes segfaults at runtime.
+ //
+ // This bug is very easy to trigger. In `println!` and `panic!`,
+ // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
+ // and their values are `mem::replace`d on initialization. The
+ // implementation of `mem::replace` will use SIMD whenever the size
+ // is 32 bytes or higher. LLVM notices SIMD is used and tries to
+ // align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary, which
+ // macOS's dyld disregards, causing crashes
+ // (see issues #51794, #51758, #50867, #48866 and #44056).
+ //
+ // To work around the bug, we trick LLVM into not increasing
+ // the global's alignment by explicitly assigning a section to it
+ // (equivalent to automatically generating a `#[link_section]` attribute).
+ // See the comment in the `GlobalValue::canIncreaseAlignment()` function
+ // of `lib/IR/Globals.cpp` for why this works.
+ //
+ // When the alignment is not increased, the optimized `mem::replace`
+ // will use unaligned load instructions instead, thus avoiding the crash.
+ //
+ // We could remove this hack whenever we decide to drop macOS 10.10 support.
+ if self.tcx.sess.target.is_like_osx {
+ // The `inspect` method is okay here because we checked relocations, and
+ // because we are doing this access to inspect the final interpreter state
+ // (not as part of the interpreter execution).
+ //
+ // FIXME: This check requires that the (arbitrary) value of undefined bytes
+ // happens to be zero. Instead, we should only check the value of defined bytes
+ // and set all undefined bytes to zero if this allocation is headed for the
+ // BSS.
+ let all_bytes_are_zero = alloc.relocations().is_empty()
+ && alloc
+ .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
+ .iter()
+ .all(|&byte| byte == 0);
+
+ let sect_name = if all_bytes_are_zero {
+ cstr!("__DATA,__thread_bss")
+ } else {
+ cstr!("__DATA,__thread_data")
+ };
+ llvm::LLVMSetSection(g, sect_name.as_ptr());
+ }
+ }
+
+ // Wasm statics with custom link sections get special treatment as they
+ // go into custom sections of the wasm executable.
+ if self.tcx.sess.target.is_like_wasm {
+ if let Some(section) = attrs.link_section {
+ let section = llvm::LLVMMDStringInContext(
+ self.llcx,
+ section.as_str().as_ptr().cast(),
+ section.as_str().len() as c_uint,
+ );
+ assert!(alloc.relocations().is_empty());
+
+ // The `inspect` method is okay here because we checked relocations, and
+ // because we are doing this access to inspect the final interpreter state (not
+ // as part of the interpreter execution).
+ let bytes =
+ alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len());
+ let alloc = llvm::LLVMMDStringInContext(
+ self.llcx,
+ bytes.as_ptr().cast(),
+ bytes.len() as c_uint,
+ );
+ let data = [section, alloc];
+ let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2);
+ llvm::LLVMAddNamedMetadataOperand(
+ self.llmod,
+ "wasm.custom_sections\0".as_ptr().cast(),
+ meta,
+ );
+ }
+ } else {
+ base::set_link_section(g, attrs);
+ }
+
+ if attrs.flags.contains(CodegenFnAttrFlags::USED) {
+ // `USED` and `USED_LINKER` can't be used together.
+ assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER));
+
+ // The semantics of #[used] in Rust only require the symbol to make it into the
+ // object file. It is explicitly allowed for the linker to strip the symbol if it
+ // is dead, which means we are allowed to use `llvm.compiler.used` instead of
+ // `llvm.used` here.
+ //
+ // Additionally, https://reviews.llvm.org/D97448 in LLVM 13 started emitting unique
+ // sections with SHF_GNU_RETAIN flag for llvm.used symbols, which may trigger bugs
+ // in the handling of `.init_array` (the static constructor list) in versions of
+ // the gold linker (prior to the one released with binutils 2.36).
+ //
+ // That said, we only ever emit these when compiling for ELF targets, unless
+ // `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage
+ // on other targets, in particular MachO targets have *their* static constructor
+ // lists broken if `llvm.compiler.used` is emitted rather than `llvm.used`. However,
+ // that check happens when assigning the `CodegenFnAttrFlags` in `rustc_typeck`,
+ // so we don't need to take care of it here.
+ self.add_compiler_used_global(g);
+ }
+ if attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) {
+ // `USED` and `USED_LINKER` can't be used together.
+ assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED));
+
+ self.add_used_global(g);
+ }
+ }
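+
+ // Illustrative mapping from source attributes (sketch; the
+ // `used(...)` forms are unstable):
+ //
+ //     #[used] // llvm.compiler.used on ELF, llvm.used elsewhere
+ //     static KEEP_A: u8 = 0;
+ //
+ //     #[used(linker)] // always llvm.used
+ //     static KEEP_B: u8 = 0;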
+ }
+
+ /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
+ fn add_used_global(&self, global: &'ll Value) {
+ let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
+ self.used_statics.borrow_mut().push(cast);
+ }
+
+ /// Add a global value to a list to be stored in the `llvm.compiler.used` variable,
+ /// an array of i8*.
+ fn add_compiler_used_global(&self, global: &'ll Value) {
+ let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
+ self.compiler_used_statics.borrow_mut().push(cast);
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
new file mode 100644
index 000000000..5857b83f6
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -0,0 +1,1014 @@
+use crate::attributes;
+use crate::back::write::to_llvm_code_model;
+use crate::callee::get_fn;
+use crate::coverageinfo;
+use crate::debuginfo;
+use crate::llvm;
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::value::Value;
+
+use cstr::cstr;
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::base_n;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::layout::{
+ FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, LayoutError, LayoutOfHelpers,
+ TyAndLayout,
+};
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_middle::{bug, span_bug};
+use rustc_session::config::{BranchProtection, CFGuard, CFProtection};
+use rustc_session::config::{CrateType, DebugInfo, PAuthKey, PacRet};
+use rustc_session::Session;
+use rustc_span::source_map::Span;
+use rustc_target::abi::{
+ call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx,
+};
+use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
+use smallvec::SmallVec;
+
+use std::cell::{Cell, RefCell};
+use std::ffi::CStr;
+use std::str;
+
+/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
+/// `llvm::Context` so that several compilation units may be optimized in parallel.
+/// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`.
+pub struct CodegenCx<'ll, 'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+ pub check_overflow: bool,
+ pub use_dll_storage_attrs: bool,
+ pub tls_model: llvm::ThreadLocalMode,
+
+ pub llmod: &'ll llvm::Module,
+ pub llcx: &'ll llvm::Context,
+ pub codegen_unit: &'tcx CodegenUnit<'tcx>,
+
+ /// Cache instances of monomorphic and polymorphic items
+ pub instances: RefCell<FxHashMap<Instance<'tcx>, &'ll Value>>,
+ /// Cache generated vtables
+ pub vtables:
+ RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>,
+ /// Cache of constant strings.
+ pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>,
+
+ /// Reverse-direction for const ptrs cast from globals.
+ ///
+ /// Key is a Value holding a `*T`,
+ /// Val is a Value holding a `*[T]`.
+ ///
+ /// Needed because LLVM loses pointer->pointee association
+ /// when we ptrcast, and we have to ptrcast during codegen
+ /// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not
+ /// a pointer to an LLVM array type. Similar for trait objects.
+ pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
+
+ /// Cache of emitted const globals (value -> global)
+ pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
+
+ /// List of globals for static variables which need to be passed to the
+ /// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete.
+ /// (We have to make sure we don't invalidate any Values referring
+ /// to constants.)
+ pub statics_to_rauw: RefCell<Vec<(&'ll Value, &'ll Value)>>,
+
+ /// Statics that will be placed in the llvm.used variable
+ /// See <https://llvm.org/docs/LangRef.html#the-llvm-used-global-variable> for details
+ pub used_statics: RefCell<Vec<&'ll Value>>,
+
+ /// Statics that will be placed in the llvm.compiler.used variable
+ /// See <https://llvm.org/docs/LangRef.html#the-llvm-compiler-used-global-variable> for details
+ pub compiler_used_statics: RefCell<Vec<&'ll Value>>,
+
+ /// Mapping of non-scalar types to llvm types and field remapping if needed.
+ pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), TypeLowering<'ll>>>,
+
+ /// Mapping of scalar types to llvm types.
+ pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,
+
+ pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
+ pub isize_ty: &'ll Type,
+
+ pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'ll, 'tcx>>,
+ pub dbg_cx: Option<debuginfo::CodegenUnitDebugContext<'ll, 'tcx>>,
+
+ eh_personality: Cell<Option<&'ll Value>>,
+ eh_catch_typeinfo: Cell<Option<&'ll Value>>,
+ pub rust_try_fn: Cell<Option<(&'ll Type, &'ll Value)>>,
+
+ intrinsics: RefCell<FxHashMap<&'static str, (&'ll Type, &'ll Value)>>,
+
+ /// A counter that is used for generating local symbol names
+ local_gen_sym_counter: Cell<usize>,
+
+ /// `codegen_static` will sometimes create a second global variable with a
+ /// different type and clear the symbol name of the original global.
+ /// `global_asm!` needs to be able to find this new global so that it can
+ /// compute the correct mangled symbol name to insert into the asm.
+ pub renamed_statics: RefCell<FxHashMap<DefId, &'ll Value>>,
+}
+
+pub struct TypeLowering<'ll> {
+ /// Associated LLVM type
+ pub lltype: &'ll Type,
+
+ /// If padding is used the slice maps fields from source order
+ /// to llvm order.
+ pub field_remapping: Option<SmallVec<[u32; 4]>>,
+}
+
+fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
+ match tls_model {
+ TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic,
+ TlsModel::LocalDynamic => llvm::ThreadLocalMode::LocalDynamic,
+ TlsModel::InitialExec => llvm::ThreadLocalMode::InitialExec,
+ TlsModel::LocalExec => llvm::ThreadLocalMode::LocalExec,
+ }
+}
+
+pub unsafe fn create_module<'ll>(
+ tcx: TyCtxt<'_>,
+ llcx: &'ll llvm::Context,
+ mod_name: &str,
+) -> &'ll llvm::Module {
+ let sess = tcx.sess;
+ let mod_name = SmallCStr::new(mod_name);
+ let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
+
+ let mut target_data_layout = sess.target.data_layout.to_string();
+ let llvm_version = llvm_util::get_version();
+ if llvm_version < (13, 0, 0) {
+ if sess.target.arch == "powerpc64" {
+ target_data_layout = target_data_layout.replace("-S128", "");
+ }
+ if sess.target.arch == "wasm32" {
+ target_data_layout = "e-m:e-p:32:32-i64:64-n32:64-S128".to_string();
+ }
+ if sess.target.arch == "wasm64" {
+ target_data_layout = "e-m:e-p:64:64-i64:64-n32:64-S128".to_string();
+ }
+ }
+ if llvm_version < (14, 0, 0) {
+ if sess.target.llvm_target == "i686-pc-windows-msvc"
+ || sess.target.llvm_target == "i586-pc-windows-msvc"
+ {
+ target_data_layout =
+ "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:32-n8:16:32-a:0:32-S32"
+ .to_string();
+ }
+ if sess.target.arch == "wasm32" {
+ target_data_layout = target_data_layout.replace("-p10:8:8-p20:8:8", "");
+ }
+ }
+
+ // Ensure the hardcoded data-layout values remain the defaults.
+ if sess.target.is_builtin {
+ let tm = crate::back::write::create_informational_target_machine(tcx.sess);
+ llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
+ llvm::LLVMRustDisposeTargetMachine(tm);
+
+ let llvm_data_layout = llvm::LLVMGetDataLayoutStr(llmod);
+ let llvm_data_layout = str::from_utf8(CStr::from_ptr(llvm_data_layout).to_bytes())
+ .expect("got a non-UTF8 data-layout from LLVM");
+
+ // Unfortunately LLVM target specs change over time, and right now we
+ // don't have proper support to work with any `data_layout` other than
+ // the one that is in the rust-lang/rust repo. If this compiler is
+ // configured against a custom LLVM, we may have a differing data
+ // layout, even though we should update our own to use that one.
+ //
+ // As an interim hack, if CFG_LLVM_ROOT is not an empty string then we
+ // disable this check entirely as we may be configured with something
+ // that has a different target layout.
+ //
+ // Unsure if this will actually cause breakage when rustc is configured
+ // as such.
+ //
+ // FIXME(#34960)
+ let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or("");
+ let custom_llvm_used = cfg_llvm_root.trim() != "";
+
+ if !custom_llvm_used && target_data_layout != llvm_data_layout {
+ bug!(
+ "data-layout for target `{rustc_target}`, `{rustc_layout}`, \
+ differs from LLVM target's `{llvm_target}` default layout, `{llvm_layout}`",
+ rustc_target = sess.opts.target_triple,
+ rustc_layout = target_data_layout,
+ llvm_target = sess.target.llvm_target,
+ llvm_layout = llvm_data_layout
+ );
+ }
+ }
+
+ let data_layout = SmallCStr::new(&target_data_layout);
+ llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
+
+ let llvm_target = SmallCStr::new(&sess.target.llvm_target);
+ llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
+
+ let reloc_model = sess.relocation_model();
+ if matches!(reloc_model, RelocModel::Pic | RelocModel::Pie) {
+ llvm::LLVMRustSetModulePICLevel(llmod);
+ // PIE is potentially more effective than PIC, but can only be used in executables.
+ // If all our outputs are executables, then we can relax PIC to PIE.
+ if reloc_model == RelocModel::Pie
+ || sess.crate_types().iter().all(|ty| *ty == CrateType::Executable)
+ {
+ llvm::LLVMRustSetModulePIELevel(llmod);
+ }
+ }
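+ // (Illustrative: the user-facing knob for the above is
+ // `-C relocation-model=pic` or `-C relocation-model=pie`.)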
+
+ // Linking object files with different code models is undefined behavior
+ // because the compiler would have to generate additional code (to span
+ // longer jumps) if a larger code model is used with a smaller one.
+ //
+ // See https://reviews.llvm.org/D52322 and https://reviews.llvm.org/D52323.
+ llvm::LLVMRustSetModuleCodeModel(llmod, to_llvm_code_model(sess.code_model()));
+
+ // If skipping the PLT is enabled, we need to add some module metadata
+ // to ensure intrinsic calls don't use it.
+ if !sess.needs_plt() {
+ let avoid_plt = "RtLibUseGOT\0".as_ptr().cast();
+ llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1);
+ }
+
+ if sess.is_sanitizer_cfi_enabled() {
+ // FIXME(rcvalle): Add support for non canonical jump tables.
+ let canonical_jump_tables = "CFI Canonical Jump Tables\0".as_ptr().cast();
+ // FIXME(rcvalle): Add it with Override behavior flag.
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Warning,
+ canonical_jump_tables,
+ 1,
+ );
+ }
+
+ // Control Flow Guard is currently only supported by the MSVC linker on Windows.
+ if sess.target.is_like_msvc {
+ match sess.opts.cg.control_flow_guard {
+ CFGuard::Disabled => {}
+ CFGuard::NoChecks => {
+ // Set `cfguard=1` module flag to emit metadata only.
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Warning,
+ "cfguard\0".as_ptr() as *const _,
+ 1,
+ )
+ }
+ CFGuard::Checks => {
+ // Set `cfguard=2` module flag to emit metadata and checks.
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Warning,
+ "cfguard\0".as_ptr() as *const _,
+ 2,
+ )
+ }
+ }
+ }
+
+ if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection {
+ if sess.target.arch != "aarch64" {
+ sess.err("-Zbranch-protection is only supported on aarch64");
+ } else {
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Error,
+ "branch-target-enforcement\0".as_ptr().cast(),
+ bti.into(),
+ );
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Error,
+ "sign-return-address\0".as_ptr().cast(),
+ pac_ret.is_some().into(),
+ );
+ let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A });
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Error,
+ "sign-return-address-all\0".as_ptr().cast(),
+ pac_opts.leaf.into(),
+ );
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Error,
+ "sign-return-address-with-bkey\0".as_ptr().cast(),
+ u32::from(pac_opts.key == PAuthKey::B),
+ );
+ }
+ }
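+ // (Illustrative usage: `-Zbranch-protection=bti,pac-ret+leaf` enables
+ // both mitigations above; the exact value syntax is unstable.)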
+
+ // Pass on the control-flow protection flags to LLVM (equivalent to `-fcf-protection` in Clang).
+ if let CFProtection::Branch | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Override,
+ "cf-protection-branch\0".as_ptr().cast(),
+ 1,
+ )
+ }
+ if let CFProtection::Return | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Override,
+ "cf-protection-return\0".as_ptr().cast(),
+ 1,
+ )
+ }
+
+ if sess.opts.unstable_opts.virtual_function_elimination {
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Error,
+ "Virtual Function Elim\0".as_ptr().cast(),
+ 1,
+ );
+ }
+
+ llmod
+}
+
+impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
+ pub(crate) fn new(
+ tcx: TyCtxt<'tcx>,
+ codegen_unit: &'tcx CodegenUnit<'tcx>,
+ llvm_module: &'ll crate::ModuleLlvm,
+ ) -> Self {
+ // An interesting part of Windows which MSVC forces our hand on (and
+ // apparently MinGW doesn't) is the usage of `dllimport` and `dllexport`
+ // attributes in LLVM IR as well as native dependencies (in C these
+ // correspond to `__declspec(dllimport)`).
+ //
+ // LD (BFD) in MinGW mode can often correctly guess `dllexport` but
+ // relying on that can result in issues like #50176.
+ // LLD won't support that and expects symbols with proper attributes.
+ // Because of that we make MinGW target emit dllexport just like MSVC.
+ // When it comes to dllimport we use it for constants but for functions
+ // rely on the linker to do the right thing. Opposed to dllexport this
+ // task is easy for them (both LD and LLD) and allows us to easily use
+ // symbols from static libraries in shared libraries.
+ //
+ // Whenever a dynamic library is built on Windows it must have its public
+ // interface specified by functions tagged with `dllexport` or otherwise
+ // they're not available to be linked against. This poses a few problems
+ // for the compiler, some of which are somewhat fundamental, but we use
+ // the `use_dll_storage_attrs` variable below to attach the `dllexport`
+ // attribute to all LLVM functions that are exported (e.g., they're
+ // already tagged with external linkage). This is suboptimal for a few
+ // reasons:
+ //
+ // * If an object file will never be included in a dynamic library,
+ // there's no need to attach the dllexport attribute. Most object
+ // files in Rust are not destined to become part of a dll as binaries
+ // are statically linked by default.
+ // * If the compiler is emitting both an rlib and a dylib, the same
+ // source object file is currently used, but with MSVC this may be
+ // less feasible. The compiler may be able to get around this, but it
+ // may involve some invasive changes.
+ //
+ // The flip side of this situation is that whenever you link to a dll and
+ // you import a function from it, the import should be tagged with
+ // `dllimport`. At this time, however, the compiler does not emit
+ // `dllimport` for any declarations other than constants (where it is
+ // required), which is again suboptimal for even more reasons!
+ //
+ // * Calling a function imported from another dll without using
+ // `dllimport` causes the linker/compiler to have extra overhead (one
+ // `jmp` instruction on x86) when calling the function.
+ // * The same object file may be used in different circumstances, so a
+ // function may be imported from a dll if the object is linked into a
+ // dll, but it may be just linked against if linked into an rlib.
+ // * The compiler has no knowledge about whether native functions should
+ // be tagged dllimport or not.
+ //
+ // For now the compiler takes the perf hit (I do not have any numbers to
+ // this effect) by marking very little as `dllimport` and praying the
+ // linker will take care of everything. Fixing this problem will likely
+ // require adding a few attributes to Rust itself (feature gated at the
+ // start) and then strongly recommending static linkage on Windows!
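+ //
+ // (Illustrative C analogy: calling an imported function without
+ // `__declspec(dllimport)` goes through a linker-synthesized thunk via
+ // the `__imp_` pointer, which is the extra `jmp` mentioned above.)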
+ let use_dll_storage_attrs = tcx.sess.target.is_like_windows;
+
+ let check_overflow = tcx.sess.overflow_checks();
+
+ let tls_model = to_llvm_tls_model(tcx.sess.tls_model());
+
+ let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());
+
+ let coverage_cx = if tcx.sess.instrument_coverage() {
+ let covctx = coverageinfo::CrateCoverageContext::new();
+ Some(covctx)
+ } else {
+ None
+ };
+
+ let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
+ let dctx = debuginfo::CodegenUnitDebugContext::new(llmod);
+ debuginfo::metadata::build_compile_unit_di_node(
+ tcx,
+ codegen_unit.name().as_str(),
+ &dctx,
+ );
+ Some(dctx)
+ } else {
+ None
+ };
+
+ let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits());
+
+ CodegenCx {
+ tcx,
+ check_overflow,
+ use_dll_storage_attrs,
+ tls_model,
+ llmod,
+ llcx,
+ codegen_unit,
+ instances: Default::default(),
+ vtables: Default::default(),
+ const_str_cache: Default::default(),
+ const_unsized: Default::default(),
+ const_globals: Default::default(),
+ statics_to_rauw: RefCell::new(Vec::new()),
+ used_statics: RefCell::new(Vec::new()),
+ compiler_used_statics: RefCell::new(Vec::new()),
+ type_lowering: Default::default(),
+ scalar_lltypes: Default::default(),
+ pointee_infos: Default::default(),
+ isize_ty,
+ coverage_cx,
+ dbg_cx,
+ eh_personality: Cell::new(None),
+ eh_catch_typeinfo: Cell::new(None),
+ rust_try_fn: Cell::new(None),
+ intrinsics: Default::default(),
+ local_gen_sym_counter: Cell::new(0),
+ renamed_statics: Default::default(),
+ }
+ }
+
+ pub(crate) fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
+ &self.statics_to_rauw
+ }
+
+ #[inline]
+ pub fn coverage_context(&self) -> Option<&coverageinfo::CrateCoverageContext<'ll, 'tcx>> {
+ self.coverage_cx.as_ref()
+ }
+
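+ /// Emits an appending global in the `llvm.metadata` section, roughly
+ /// like this sketch (`@some_static` is a placeholder):
+ ///
+ /// ```text
+ /// @llvm.used = appending global [1 x i8*] [i8* @some_static],
+ ///              section "llvm.metadata"
+ /// ```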
+ fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
+ let section = cstr!("llvm.metadata");
+ let array = self.const_array(self.type_ptr_to(self.type_i8()), values);
+
+ unsafe {
+ let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
+ llvm::LLVMSetInitializer(g, array);
+ llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
+ llvm::LLVMSetSection(g, section.as_ptr());
+ }
+ }
+}
+
+impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn vtables(
+ &self,
+ ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>
+ {
+ &self.vtables
+ }
+
+ fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
+ get_fn(self, instance)
+ }
+
+ fn get_fn_addr(&self, instance: Instance<'tcx>) -> &'ll Value {
+ get_fn(self, instance)
+ }
+
+ fn eh_personality(&self) -> &'ll Value {
+ // The exception handling personality function.
+ //
+ // If our compilation unit has the `eh_personality` lang item somewhere
+ // within it, then we just need to codegen that. Otherwise, we're
+ // building an rlib which will depend on some upstream implementation of
+ // this function, so we just codegen a generic reference to it. We don't
+ // specify any of the types for the function, we just make it a symbol
+ // that LLVM can later use.
+ //
+ // Note that MSVC is a little special here in that we don't use the
+ // `eh_personality` lang item at all. Currently LLVM has support for
+ // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
+ // *name of the personality function* to decide what kind of unwind side
+ // tables/landing pads to emit. It looks like Dwarf is used by default,
+ // injecting a dependency on the `_Unwind_Resume` symbol for resuming
+ // an "exception", but for MSVC we want to force SEH. This means that we
+ // can't actually have the personality function be our standard
+ // `rust_eh_personality` function, but rather we wire it up to the
+ // CRT's custom personality function, which forces LLVM to consider
+ // landing pads as "landing pads for SEH".
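+ //
+ // For illustration, the chosen symbol shows up on each function in
+ // the IR roughly as (sketch):
+ //
+ //     define void @f() personality i8* bitcast
+ //         (i32 (...)* @__CxxFrameHandler3 to i8*) { ... }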
+ if let Some(llpersonality) = self.eh_personality.get() {
+ return llpersonality;
+ }
+ let tcx = self.tcx;
+ let llfn = match tcx.lang_items().eh_personality() {
+ Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
+ ty::Instance::resolve(
+ tcx,
+ ty::ParamEnv::reveal_all(),
+ def_id,
+ tcx.intern_substs(&[]),
+ )
+ .unwrap()
+ .unwrap(),
+ ),
+ _ => {
+ let name = if wants_msvc_seh(self.sess()) {
+ "__CxxFrameHandler3"
+ } else {
+ "rust_eh_personality"
+ };
+ if let Some(llfn) = self.get_declared_value(name) {
+ llfn
+ } else {
+ let fty = self.type_variadic_func(&[], self.type_i32());
+ let llfn = self.declare_cfn(name, llvm::UnnamedAddr::Global, fty);
+ let target_cpu = attributes::target_cpu_attr(self);
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[target_cpu]);
+ llfn
+ }
+ }
+ };
+ self.eh_personality.set(Some(llfn));
+ llfn
+ }
+
+ fn sess(&self) -> &Session {
+ self.tcx.sess
+ }
+
+ fn check_overflow(&self) -> bool {
+ self.check_overflow
+ }
+
+ fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
+ self.codegen_unit
+ }
+
+ fn used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
+ &self.used_statics
+ }
+
+ fn compiler_used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
+ &self.compiler_used_statics
+ }
+
+ fn set_frame_pointer_type(&self, llfn: &'ll Value) {
+ if let Some(attr) = attributes::frame_pointer_type_attr(self) {
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]);
+ }
+ }
+
+ fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
+ let mut attrs = SmallVec::<[_; 2]>::new();
+ attrs.push(attributes::target_cpu_attr(self));
+ attrs.extend(attributes::tune_cpu_attr(self));
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs);
+ }
+
+ fn create_used_variable(&self) {
+ self.create_used_variable_impl(cstr!("llvm.used"), &*self.used_statics.borrow());
+ }
+
+ fn create_compiler_used_variable(&self) {
+ self.create_used_variable_impl(
+ cstr!("llvm.compiler.used"),
+ &*self.compiler_used_statics.borrow(),
+ );
+ }
+
+ fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
+ if self.get_declared_value("main").is_none() {
+ Some(self.declare_cfn("main", llvm::UnnamedAddr::Global, fn_type))
+ } else {
+ // If the symbol already exists, it is an error: for example, the user wrote
+ // #[no_mangle] extern "C" fn main(..) {..}
+ // instead of #[start]
+ None
+ }
+ }
+}
+
+impl<'ll> CodegenCx<'ll, '_> {
+ pub(crate) fn get_intrinsic(&self, key: &str) -> (&'ll Type, &'ll Value) {
+ if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
+ return v;
+ }
+
+ self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key))
+ }
+
+ fn insert_intrinsic(
+ &self,
+ name: &'static str,
+ args: Option<&[&'ll llvm::Type]>,
+ ret: &'ll llvm::Type,
+ ) -> (&'ll llvm::Type, &'ll llvm::Value) {
+ let fn_ty = if let Some(args) = args {
+ self.type_func(args, ret)
+ } else {
+ self.type_variadic_func(&[], ret)
+ };
+ let f = self.declare_cfn(name, llvm::UnnamedAddr::No, fn_ty);
+ self.intrinsics.borrow_mut().insert(name, (fn_ty, f));
+ (fn_ty, f)
+ }
+
+ fn declare_intrinsic(&self, key: &str) -> Option<(&'ll Type, &'ll Value)> {
+ macro_rules! ifn {
+ ($name:expr, fn() -> $ret:expr) => (
+ if key == $name {
+ return Some(self.insert_intrinsic($name, Some(&[]), $ret));
+ }
+ );
+ ($name:expr, fn(...) -> $ret:expr) => (
+ if key == $name {
+ return Some(self.insert_intrinsic($name, None, $ret));
+ }
+ );
+ ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
+ if key == $name {
+ return Some(self.insert_intrinsic($name, Some(&[$($arg),*]), $ret));
+ }
+ );
+ }
+ macro_rules! mk_struct {
+ ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
+ }
+
+ let i8p = self.type_i8p();
+ let void = self.type_void();
+ let i1 = self.type_i1();
+ let t_i8 = self.type_i8();
+ let t_i16 = self.type_i16();
+ let t_i32 = self.type_i32();
+ let t_i64 = self.type_i64();
+ let t_i128 = self.type_i128();
+ let t_isize = self.type_isize();
+ let t_f32 = self.type_f32();
+ let t_f64 = self.type_f64();
+ let t_metadata = self.type_metadata();
+
+ ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
+ ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
+ ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
+ ifn!("llvm.wasm.trunc.unsigned.i64.f64", fn(t_f64) -> t_i64);
+ ifn!("llvm.wasm.trunc.signed.i32.f32", fn(t_f32) -> t_i32);
+ ifn!("llvm.wasm.trunc.signed.i32.f64", fn(t_f64) -> t_i32);
+ ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
+ ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);
+
+ ifn!("llvm.fptosi.sat.i8.f32", fn(t_f32) -> t_i8);
+ ifn!("llvm.fptosi.sat.i16.f32", fn(t_f32) -> t_i16);
+ ifn!("llvm.fptosi.sat.i32.f32", fn(t_f32) -> t_i32);
+ ifn!("llvm.fptosi.sat.i64.f32", fn(t_f32) -> t_i64);
+ ifn!("llvm.fptosi.sat.i128.f32", fn(t_f32) -> t_i128);
+ ifn!("llvm.fptosi.sat.i8.f64", fn(t_f64) -> t_i8);
+ ifn!("llvm.fptosi.sat.i16.f64", fn(t_f64) -> t_i16);
+ ifn!("llvm.fptosi.sat.i32.f64", fn(t_f64) -> t_i32);
+ ifn!("llvm.fptosi.sat.i64.f64", fn(t_f64) -> t_i64);
+ ifn!("llvm.fptosi.sat.i128.f64", fn(t_f64) -> t_i128);
+
+ ifn!("llvm.fptoui.sat.i8.f32", fn(t_f32) -> t_i8);
+ ifn!("llvm.fptoui.sat.i16.f32", fn(t_f32) -> t_i16);
+ ifn!("llvm.fptoui.sat.i32.f32", fn(t_f32) -> t_i32);
+ ifn!("llvm.fptoui.sat.i64.f32", fn(t_f32) -> t_i64);
+ ifn!("llvm.fptoui.sat.i128.f32", fn(t_f32) -> t_i128);
+ ifn!("llvm.fptoui.sat.i8.f64", fn(t_f64) -> t_i8);
+ ifn!("llvm.fptoui.sat.i16.f64", fn(t_f64) -> t_i16);
+ ifn!("llvm.fptoui.sat.i32.f64", fn(t_f64) -> t_i32);
+ ifn!("llvm.fptoui.sat.i64.f64", fn(t_f64) -> t_i64);
+ ifn!("llvm.fptoui.sat.i128.f64", fn(t_f64) -> t_i128);
+
+ ifn!("llvm.trap", fn() -> void);
+ ifn!("llvm.debugtrap", fn() -> void);
+ ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
+
+ ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
+ ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
+
+ ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
+ ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
+
+ ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
+ ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
+
+ ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.minnum.f32", fn(t_f32, t_f32) -> t_f32);
+ ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
+ ifn!("llvm.maxnum.f32", fn(t_f32, t_f32) -> t_f32);
+ ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);
+
+ ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
+ ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
+ ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
+ ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
+ ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
+ ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
+ ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
+ ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128);
+
+ ifn!("llvm.ctlz.i8", fn(t_i8, i1) -> t_i8);
+ ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
+ ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
+ ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
+ ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128);
+
+ ifn!("llvm.cttz.i8", fn(t_i8, i1) -> t_i8);
+ ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
+ ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
+ ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
+ ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128);
+
+ ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
+ ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
+ ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
+ ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128);
+
+ ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8);
+ ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16);
+ ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32);
+ ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64);
+ ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128);
+
+ ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
+ ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
+ ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
+ ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
+ ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
+
+ ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
+ ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
+ ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
+ ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
+ ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
+
+ ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+ ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+ ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+ ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+ ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+ ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+ ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+ ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+ ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+ ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+ ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+ ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+ ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+ ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+ ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+ ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+ ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+ ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+ ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+ ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+ ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+ ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+ ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+ ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+ ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+ ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+ ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+ ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+ ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+ ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+ ifn!("llvm.sadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
+ ifn!("llvm.sadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
+ ifn!("llvm.sadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
+ ifn!("llvm.sadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
+ ifn!("llvm.sadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+ ifn!("llvm.uadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
+ ifn!("llvm.uadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
+ ifn!("llvm.uadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
+ ifn!("llvm.uadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
+ ifn!("llvm.uadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+ ifn!("llvm.ssub.sat.i8", fn(t_i8, t_i8) -> t_i8);
+ ifn!("llvm.ssub.sat.i16", fn(t_i16, t_i16) -> t_i16);
+ ifn!("llvm.ssub.sat.i32", fn(t_i32, t_i32) -> t_i32);
+ ifn!("llvm.ssub.sat.i64", fn(t_i64, t_i64) -> t_i64);
+ ifn!("llvm.ssub.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+ ifn!("llvm.usub.sat.i8", fn(t_i8, t_i8) -> t_i8);
+ ifn!("llvm.usub.sat.i16", fn(t_i16, t_i16) -> t_i16);
+ ifn!("llvm.usub.sat.i32", fn(t_i32, t_i32) -> t_i32);
+ ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
+ ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+ ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void);
+ ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void);
+
+ ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
+ ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
+ ifn!("llvm.localescape", fn(...) -> void);
+ ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
+ ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
+
+ ifn!("llvm.assume", fn(i1) -> void);
+ ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
+
+ // This isn't an "LLVM intrinsic", but LLVM's optimization passes
+        // recognize it like one, and we assume it exists in `core::slice::cmp`.
+ match self.sess().target.arch.as_ref() {
+ "avr" | "msp430" => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i16),
+ _ => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i32),
+ }
+
+ // variadic intrinsics
+ ifn!("llvm.va_start", fn(i8p) -> void);
+ ifn!("llvm.va_end", fn(i8p) -> void);
+ ifn!("llvm.va_copy", fn(i8p, i8p) -> void);
+
+ if self.sess().instrument_coverage() {
+ ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
+ }
+
+ ifn!("llvm.type.test", fn(i8p, t_metadata) -> i1);
+ ifn!("llvm.type.checked.load", fn(i8p, t_i32, t_metadata) -> mk_struct! {i8p, i1});
+
+ if self.sess().opts.debuginfo != DebugInfo::None {
+ ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void);
+ ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void);
+ }
+ None
+ }
+
+ pub(crate) fn eh_catch_typeinfo(&self) -> &'ll Value {
+ if let Some(eh_catch_typeinfo) = self.eh_catch_typeinfo.get() {
+ return eh_catch_typeinfo;
+ }
+ let tcx = self.tcx;
+ assert!(self.sess().target.os == "emscripten");
+ let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
+ Some(def_id) => self.get_static(def_id),
+ _ => {
+ let ty = self
+ .type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false);
+ self.declare_global("rust_eh_catch_typeinfo", ty)
+ }
+ };
+ let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p());
+ self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
+ eh_catch_typeinfo
+ }
+}
+
+impl CodegenCx<'_, '_> {
+ /// Generates a new symbol name with the given prefix. This symbol name must
+ /// only be used for definitions with `internal` or `private` linkage.
+ pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
+ let idx = self.local_gen_sym_counter.get();
+ self.local_gen_sym_counter.set(idx + 1);
+        // Include a '.' character, so there can be no accidental conflicts with
+        // user-defined names.
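+        //
+        // Editorial illustration (not in the original source): with this scheme,
+        // repeated calls like `generate_local_symbol_name("str")` yield "str.0",
+        // "str.1", and so on, since the counter is rendered via
+        // `base_n::ALPHANUMERIC_ONLY` (base 62) after the '.' separator.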
+ let mut name = String::with_capacity(prefix.len() + 6);
+ name.push_str(prefix);
+ name.push('.');
+ base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
+ name
+ }
+}
+
+impl HasDataLayout for CodegenCx<'_, '_> {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl HasTargetSpec for CodegenCx<'_, '_> {
+ #[inline]
+ fn target_spec(&self) -> &Target {
+ &self.tcx.sess.target
+ }
+}
+
+impl<'tcx> ty::layout::HasTyCtxt<'tcx> for CodegenCx<'_, 'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
+impl<'tcx, 'll> HasParamEnv<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ ty::ParamEnv::reveal_all()
+ }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ if let LayoutError::SizeOverflow(_) = err {
+ self.sess().span_fatal(span, &err.to_string())
+ } else {
+ span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+ }
+ }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
+ self.sess().span_fatal(span, &err.to_string())
+ } else {
+ match fn_abi_request {
+ FnAbiRequest::OfFnPtr { sig, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
+ sig,
+ extra_args,
+ err
+ );
+ }
+ FnAbiRequest::OfInstance { instance, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_instance({}, {:?})` failed: {}",
+ instance,
+ extra_args,
+ err
+ );
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
new file mode 100644
index 000000000..58f391692
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -0,0 +1,334 @@
+use crate::common::CodegenCx;
+use crate::coverageinfo;
+use crate::llvm;
+
+use llvm::coverageinfo::CounterMappingRegion;
+use rustc_codegen_ssa::coverageinfo::map::{Counter, CounterExpression};
+use rustc_codegen_ssa::traits::{ConstMethods, CoverageInfoMethods};
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefIdSet;
+use rustc_llvm::RustString;
+use rustc_middle::bug;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::coverage::CodeRegion;
+use rustc_middle::ty::TyCtxt;
+
+use std::ffi::CString;
+
+use tracing::debug;
+
+/// Generates and exports the Coverage Map.
+///
+/// Rust Coverage Map generation supports LLVM Coverage Mapping Format versions
+/// 5 (LLVM 12 only) and 6 (zero-based encoded as 4 and 5, respectively), as defined at
+/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
+/// These versions are supported by the LLVM coverage tools (`llvm-profdata` and `llvm-cov`)
+/// bundled with Rust's fork of LLVM.
+///
+/// Consequently, Rust's bundled version of Clang also generates Coverage Maps compliant with
+/// the same version. Clang's implementation of Coverage Map generation was referenced when
+/// implementing this Rust version, and though the format documentation is very explicit and
+/// detailed, some undocumented details in Clang's implementation (that may or may not be important)
+/// were also replicated for Rust's Coverage Map.
+pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
+ let tcx = cx.tcx;
+
+ // Ensure the installed version of LLVM supports at least Coverage Map
+ // Version 5 (encoded as a zero-based value: 4), which was introduced with
+ // LLVM 12.
+ let version = coverageinfo::mapping_version();
+ if version < 4 {
+ tcx.sess.fatal("rustc option `-C instrument-coverage` requires LLVM 12 or higher.");
+ }
+
+ debug!("Generating coverage map for CodegenUnit: `{}`", cx.codegen_unit.name());
+
+    // In order to show that unused functions have coverage counts of zero (0), LLVM requires
+    // that the functions exist. Generate synthetic functions with a (required) single counter,
+    // and add the MIR `Coverage` code regions to the `function_coverage_map`, before calling
+ // `ctx.take_function_coverage_map()`.
+ if cx.codegen_unit.is_code_coverage_dead_code_cgu() {
+ add_unused_functions(cx);
+ }
+
+ let function_coverage_map = match cx.coverage_context() {
+ Some(ctx) => ctx.take_function_coverage_map(),
+ None => return,
+ };
+
+ if function_coverage_map.is_empty() {
+ // This module has no functions with coverage instrumentation
+ return;
+ }
+
+ let mut mapgen = CoverageMapGenerator::new(tcx, version);
+
+ // Encode coverage mappings and generate function records
+ let mut function_data = Vec::new();
+ for (instance, function_coverage) in function_coverage_map {
+ debug!("Generate function coverage for {}, {:?}", cx.codegen_unit.name(), instance);
+ let mangled_function_name = tcx.symbol_name(instance).to_string();
+ let source_hash = function_coverage.source_hash();
+ let is_used = function_coverage.is_used();
+ let (expressions, counter_regions) =
+ function_coverage.get_expressions_and_counter_regions();
+
+ let coverage_mapping_buffer = llvm::build_byte_buffer(|coverage_mapping_buffer| {
+ mapgen.write_coverage_mapping(expressions, counter_regions, coverage_mapping_buffer);
+ });
+
+ if coverage_mapping_buffer.is_empty() {
+ if function_coverage.is_used() {
+ bug!(
+ "A used function should have had coverage mapping data but did not: {}",
+ mangled_function_name
+ );
+ } else {
+ debug!("unused function had no coverage mapping data: {}", mangled_function_name);
+ continue;
+ }
+ }
+
+ function_data.push((mangled_function_name, source_hash, is_used, coverage_mapping_buffer));
+ }
+
+ // Encode all filenames referenced by counters/expressions in this module
+ let filenames_buffer = llvm::build_byte_buffer(|filenames_buffer| {
+ coverageinfo::write_filenames_section_to_buffer(&mapgen.filenames, filenames_buffer);
+ });
+
+ let filenames_size = filenames_buffer.len();
+ let filenames_val = cx.const_bytes(&filenames_buffer);
+ let filenames_ref = coverageinfo::hash_bytes(filenames_buffer);
+
+ // Generate the LLVM IR representation of the coverage map and store it in a well-known global
+ let cov_data_val = mapgen.generate_coverage_map(cx, version, filenames_size, filenames_val);
+
+ for (mangled_function_name, source_hash, is_used, coverage_mapping_buffer) in function_data {
+ save_function_record(
+ cx,
+ mangled_function_name,
+ source_hash,
+ filenames_ref,
+ coverage_mapping_buffer,
+ is_used,
+ );
+ }
+
+ // Save the coverage data value to LLVM IR
+ coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
+}
+
+struct CoverageMapGenerator {
+ filenames: FxIndexSet<CString>,
+}
+
+impl CoverageMapGenerator {
+ fn new(tcx: TyCtxt<'_>, version: u32) -> Self {
+ let mut filenames = FxIndexSet::default();
+ if version >= 5 {
+ // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
+ // requires setting the first filename to the compilation directory.
+ // Since rustc generates coverage maps with relative paths, the
+            // compilation directory can be combined with the relative paths
+ // to get absolute paths, if needed.
+ let working_dir = tcx
+ .sess
+ .opts
+ .working_dir
+ .remapped_path_if_available()
+ .to_string_lossy()
+ .to_string();
+ let c_filename =
+ CString::new(working_dir).expect("null error converting filename to C string");
+ filenames.insert(c_filename);
+ }
+ Self { filenames }
+ }
+
+ /// Using the `expressions` and `counter_regions` collected for the current function, generate
+ /// the `mapping_regions` and `virtual_file_mapping`, and capture any new filenames. Then use
+ /// LLVM APIs to encode the `virtual_file_mapping`, `expressions`, and `mapping_regions` into
+ /// the given `coverage_mapping` byte buffer, compliant with the LLVM Coverage Mapping format.
+ fn write_coverage_mapping<'a>(
+ &mut self,
+ expressions: Vec<CounterExpression>,
+ counter_regions: impl Iterator<Item = (Counter, &'a CodeRegion)>,
+ coverage_mapping_buffer: &RustString,
+ ) {
+ let mut counter_regions = counter_regions.collect::<Vec<_>>();
+ if counter_regions.is_empty() {
+ return;
+ }
+
+ let mut virtual_file_mapping = Vec::new();
+ let mut mapping_regions = Vec::new();
+ let mut current_file_name = None;
+ let mut current_file_id = 0;
+
+ // Convert the list of (Counter, CodeRegion) pairs to an array of `CounterMappingRegion`, sorted
+        // by filename and position. Capture any new files to compute each `CounterMappingRegion`'s
+ // `file_id` (indexing files referenced by the current function), and construct the
+ // function-specific `virtual_file_mapping` from `file_id` to its index in the module's
+ // `filenames` array.
+ counter_regions.sort_unstable_by_key(|(_counter, region)| *region);
+ for (counter, region) in counter_regions {
+ let CodeRegion { file_name, start_line, start_col, end_line, end_col } = *region;
+ let same_file = current_file_name.as_ref().map_or(false, |p| *p == file_name);
+ if !same_file {
+ if current_file_name.is_some() {
+ current_file_id += 1;
+ }
+ current_file_name = Some(file_name);
+ let c_filename = CString::new(file_name.to_string())
+ .expect("null error converting filename to C string");
+ debug!(" file_id: {} = '{:?}'", current_file_id, c_filename);
+ let (filenames_index, _) = self.filenames.insert_full(c_filename);
+ virtual_file_mapping.push(filenames_index as u32);
+ }
+ debug!("Adding counter {:?} to map for {:?}", counter, region);
+ mapping_regions.push(CounterMappingRegion::code_region(
+ counter,
+ current_file_id,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ ));
+ }
+
+ // Encode and append the current function's coverage mapping data
+ coverageinfo::write_mapping_to_buffer(
+ virtual_file_mapping,
+ expressions,
+ mapping_regions,
+ coverage_mapping_buffer,
+ );
+ }
+
+ /// Construct coverage map header and the array of function records, and combine them into the
+ /// coverage map. Save the coverage map data into the LLVM IR as a static global using a
+ /// specific, well-known section and name.
+ fn generate_coverage_map<'ll>(
+ self,
+ cx: &CodegenCx<'ll, '_>,
+ version: u32,
+ filenames_size: usize,
+ filenames_val: &'ll llvm::Value,
+ ) -> &'ll llvm::Value {
+ debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
+
+ // Create the coverage data header (Note, fields 0 and 2 are now always zero,
+ // as of `llvm::coverage::CovMapVersion::Version4`.)
+ let zero_was_n_records_val = cx.const_u32(0);
+ let filenames_size_val = cx.const_u32(filenames_size as u32);
+ let zero_was_coverage_size_val = cx.const_u32(0);
+ let version_val = cx.const_u32(version);
+ let cov_data_header_val = cx.const_struct(
+ &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
+ /*packed=*/ false,
+ );
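+
+        // Illustrative note (editorial): for mapping format version 6 (passed in
+        // zero-based as `5`) with a 1000-byte filenames blob, this header lowers
+        // to `{ i32 0, i32 1000, i32 0, i32 5 }` in the emitted IR.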
+
+ // Create the complete LLVM coverage data value to add to the LLVM IR
+ cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
+ }
+}
+
+/// Construct a function record and combine it with the function's coverage mapping data.
+/// Save the function record into the LLVM IR as a static global using a
+/// specific, well-known section and name.
+fn save_function_record(
+ cx: &CodegenCx<'_, '_>,
+ mangled_function_name: String,
+ source_hash: u64,
+ filenames_ref: u64,
+ coverage_mapping_buffer: Vec<u8>,
+ is_used: bool,
+) {
+ // Concatenate the encoded coverage mappings
+ let coverage_mapping_size = coverage_mapping_buffer.len();
+ let coverage_mapping_val = cx.const_bytes(&coverage_mapping_buffer);
+
+ let func_name_hash = coverageinfo::hash_str(&mangled_function_name);
+ let func_name_hash_val = cx.const_u64(func_name_hash);
+ let coverage_mapping_size_val = cx.const_u32(coverage_mapping_size as u32);
+ let source_hash_val = cx.const_u64(source_hash);
+ let filenames_ref_val = cx.const_u64(filenames_ref);
+ let func_record_val = cx.const_struct(
+ &[
+ func_name_hash_val,
+ coverage_mapping_size_val,
+ source_hash_val,
+ filenames_ref_val,
+ coverage_mapping_val,
+ ],
+ /*packed=*/ true,
+ );
+
+ coverageinfo::save_func_record_to_mod(cx, func_name_hash, func_record_val, is_used);
+}
+
+/// When finalizing the coverage map, `FunctionCoverage` only has the `CodeRegion`s and counters for
+/// the functions that went through codegen, such as public functions and "used" functions
+/// (functions referenced by other "used" or public items). Any other functions considered unused,
+/// or "Unreachable", were still parsed and processed through the MIR stage, but were not
+/// codegenned. (Note that `-Clink-dead-code` can force some unused code to be codegenned, but
+/// that flag is known to cause other errors, when combined with `-C instrument-coverage`; and
+/// `-Clink-dead-code` will not generate code for unused generic functions.)
+///
+/// We can find the unused functions (including generic functions) by the set difference of all MIR
+/// `DefId`s (`tcx` query `mir_keys`) minus the codegenned `DefId`s (`tcx` query
+/// `codegened_and_inlined_items`).
+///
+/// These unused functions are then codegen'd in one of the CGUs which is marked as the
+/// "code coverage dead code cgu" during the partitioning process. This prevents us from generating
+/// code regions for the same function more than once which can lead to linker errors regarding
+/// duplicate symbols.
+fn add_unused_functions<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
+ assert!(cx.codegen_unit.is_code_coverage_dead_code_cgu());
+
+ let tcx = cx.tcx;
+
+ let ignore_unused_generics = tcx.sess.instrument_coverage_except_unused_generics();
+
+ let eligible_def_ids: DefIdSet = tcx
+ .mir_keys(())
+ .iter()
+ .filter_map(|local_def_id| {
+ let def_id = local_def_id.to_def_id();
+ let kind = tcx.def_kind(def_id);
+ // `mir_keys` will give us `DefId`s for all kinds of things, not
+ // just "functions", like consts, statics, etc. Filter those out.
+ // If `ignore_unused_generics` was specified, filter out any
+ // generic functions from consideration as well.
+ if !matches!(
+ kind,
+ DefKind::Fn | DefKind::AssocFn | DefKind::Closure | DefKind::Generator
+ ) {
+ return None;
+ } else if ignore_unused_generics
+ && tcx.generics_of(def_id).requires_monomorphization(tcx)
+ {
+ return None;
+ }
+ Some(local_def_id.to_def_id())
+ })
+ .collect();
+
+ let codegenned_def_ids = tcx.codegened_and_inlined_items(());
+
+ for &non_codegenned_def_id in eligible_def_ids.difference(codegenned_def_ids) {
+ let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
+
+ // If a function is marked `#[no_coverage]`, then skip generating a
+ // dead code stub for it.
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
+ debug!("skipping unused fn marked #[no_coverage]: {:?}", non_codegenned_def_id);
+ continue;
+ }
+
+ debug!("generating unused fn: {:?}", non_codegenned_def_id);
+ cx.define_unused_fn(non_codegenned_def_id);
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
new file mode 100644
index 000000000..98ba38356
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -0,0 +1,385 @@
+use crate::llvm;
+
+use crate::abi::Abi;
+use crate::builder::Builder;
+use crate::common::CodegenCx;
+
+use libc::c_uint;
+use llvm::coverageinfo::CounterMappingRegion;
+use rustc_codegen_ssa::coverageinfo::map::{CounterExpression, FunctionCoverage};
+use rustc_codegen_ssa::traits::{
+ BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, CoverageInfoMethods,
+ MiscMethods, StaticMethods,
+};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_llvm::RustString;
+use rustc_middle::bug;
+use rustc_middle::mir::coverage::{
+ CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId, Op,
+};
+use rustc_middle::ty;
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::Instance;
+
+use std::cell::RefCell;
+use std::ffi::CString;
+
+use std::iter;
+use tracing::debug;
+
+pub mod mapgen;
+
+const UNUSED_FUNCTION_COUNTER_ID: CounterValueReference = CounterValueReference::START;
+
+const VAR_ALIGN_BYTES: usize = 8;
+
+/// A context object for maintaining all state needed by the coverageinfo module.
+pub struct CrateCoverageContext<'ll, 'tcx> {
+ // Coverage data for each instrumented function identified by DefId.
+ pub(crate) function_coverage_map: RefCell<FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>>>,
+ pub(crate) pgo_func_name_var_map: RefCell<FxHashMap<Instance<'tcx>, &'ll llvm::Value>>,
+}
+
+impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
+ pub fn new() -> Self {
+ Self {
+ function_coverage_map: Default::default(),
+ pgo_func_name_var_map: Default::default(),
+ }
+ }
+
+ pub fn take_function_coverage_map(&self) -> FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>> {
+ self.function_coverage_map.replace(FxHashMap::default())
+ }
+}
+
+impl<'ll, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn coverageinfo_finalize(&self) {
+ mapgen::finalize(self)
+ }
+
+ fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> &'ll llvm::Value {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!("getting pgo_func_name_var for instance={:?}", instance);
+ let mut pgo_func_name_var_map = coverage_context.pgo_func_name_var_map.borrow_mut();
+ pgo_func_name_var_map
+ .entry(instance)
+ .or_insert_with(|| create_pgo_func_name_var(self, instance))
+ } else {
+ bug!("Could not get the `coverage_context`");
+ }
+ }
+
+ /// Functions with MIR-based coverage are normally codegenned _only_ if
+ /// called. LLVM coverage tools typically expect every function to be
+    /// defined (even if unused), with at least one call to the LLVM intrinsic
+ /// `instrprof.increment`.
+ ///
+ /// Codegen a small function that will never be called, with one counter
+ /// that will never be incremented.
+ ///
+ /// For used/called functions, the coverageinfo was already added to the
+ /// `function_coverage_map` (keyed by function `Instance`) during codegen.
+ /// But in this case, since the unused function was _not_ previously
+ /// codegenned, collect the coverage `CodeRegion`s from the MIR and add
+ /// them. The first `CodeRegion` is used to add a single counter, with the
+ /// same counter ID used in the injected `instrprof.increment` intrinsic
+ /// call. Since the function is never called, all other `CodeRegion`s can be
+ /// added as `unreachable_region`s.
+ fn define_unused_fn(&self, def_id: DefId) {
+ let instance = declare_unused_fn(self, def_id);
+ codegen_unused_fn_and_counter(self, instance);
+ add_unused_function_coverage(self, instance, def_id);
+ }
+}
+
+impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
+ fn set_function_source_hash(
+ &mut self,
+ instance: Instance<'tcx>,
+ function_source_hash: u64,
+ ) -> bool {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!(
+ "ensuring function source hash is set for instance={:?}; function_source_hash={}",
+ instance, function_source_hash,
+ );
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+ .set_function_source_hash(function_source_hash);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn add_coverage_counter(
+ &mut self,
+ instance: Instance<'tcx>,
+ id: CounterValueReference,
+ region: CodeRegion,
+ ) -> bool {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!(
+ "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
+ instance, id, region,
+ );
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+ .add_counter(id, region);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn add_coverage_counter_expression(
+ &mut self,
+ instance: Instance<'tcx>,
+ id: InjectedExpressionId,
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ region: Option<CodeRegion>,
+ ) -> bool {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!(
+ "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; \
+ region: {:?}",
+ instance, id, lhs, op, rhs, region,
+ );
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+ .add_counter_expression(id, lhs, op, rhs, region);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!(
+ "adding unreachable code to coverage_map: instance={:?}, at {:?}",
+ instance, region,
+ );
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+ .add_unreachable_region(region);
+ true
+ } else {
+ false
+ }
+ }
+}
+
+fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<'tcx> {
+ let tcx = cx.tcx;
+
+ let instance = Instance::new(
+ def_id,
+ InternalSubsts::for_item(tcx, def_id, |param, _| {
+ if let ty::GenericParamDefKind::Lifetime = param.kind {
+ tcx.lifetimes.re_erased.into()
+ } else {
+ tcx.mk_param_from_def(param)
+ }
+ }),
+ );
+
+ let llfn = cx.declare_fn(
+ tcx.symbol_name(instance).name,
+ cx.fn_abi_of_fn_ptr(
+ ty::Binder::dummy(tcx.mk_fn_sig(
+ iter::once(tcx.mk_unit()),
+ tcx.mk_unit(),
+ false,
+ hir::Unsafety::Unsafe,
+ Abi::Rust,
+ )),
+ ty::List::empty(),
+ ),
+ );
+
+ llvm::set_linkage(llfn, llvm::Linkage::PrivateLinkage);
+ llvm::set_visibility(llfn, llvm::Visibility::Default);
+
+ assert!(cx.instances.borrow_mut().insert(instance, llfn).is_none());
+
+ instance
+}
+
+fn codegen_unused_fn_and_counter<'tcx>(cx: &CodegenCx<'_, 'tcx>, instance: Instance<'tcx>) {
+ let llfn = cx.get_fn(instance);
+ let llbb = Builder::append_block(cx, llfn, "unused_function");
+ let mut bx = Builder::build(cx, llbb);
+ let fn_name = bx.get_pgo_func_name_var(instance);
+ let hash = bx.const_u64(0);
+ let num_counters = bx.const_u32(1);
+ let index = bx.const_u32(u32::from(UNUSED_FUNCTION_COUNTER_ID));
+ debug!(
+ "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?},
+ index={:?}) for unused function: {:?}",
+ fn_name, hash, num_counters, index, instance
+ );
+ bx.instrprof_increment(fn_name, hash, num_counters, index);
+ bx.ret_void();
+}
+
+fn add_unused_function_coverage<'tcx>(
+ cx: &CodegenCx<'_, 'tcx>,
+ instance: Instance<'tcx>,
+ def_id: DefId,
+) {
+ let tcx = cx.tcx;
+
+ let mut function_coverage = FunctionCoverage::unused(tcx, instance);
+ for (index, &code_region) in tcx.covered_code_regions(def_id).iter().enumerate() {
+ if index == 0 {
+ // Insert at least one real counter so the LLVM CoverageMappingReader will find expected
+ // definitions.
+ function_coverage.add_counter(UNUSED_FUNCTION_COUNTER_ID, code_region.clone());
+ } else {
+ function_coverage.add_unreachable_region(code_region.clone());
+ }
+ }
+
+ if let Some(coverage_context) = cx.coverage_context() {
+ coverage_context.function_coverage_map.borrow_mut().insert(instance, function_coverage);
+ } else {
+ bug!("Could not get the `coverage_context`");
+ }
+}
+
+/// Calls `llvm::createPGOFuncNameVar()` with the given function instance's
+/// mangled function name. The LLVM API returns an `llvm::GlobalVariable`
+/// containing the function name, with the specific variable name and linkage
+/// required by LLVM InstrProf source-based coverage instrumentation. Use
+/// `bx.get_pgo_func_name_var()` to ensure the variable is only created once per
+/// `Instance`.
+fn create_pgo_func_name_var<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ instance: Instance<'tcx>,
+) -> &'ll llvm::Value {
+ let mangled_fn_name = CString::new(cx.tcx.symbol_name(instance).name)
+ .expect("error converting function name to C string");
+ let llfn = cx.get_fn(instance);
+ unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar(llfn, mangled_fn_name.as_ptr()) }
+}
+
+pub(crate) fn write_filenames_section_to_buffer<'a>(
+ filenames: impl IntoIterator<Item = &'a CString>,
+ buffer: &RustString,
+) {
+ let c_str_vec = filenames.into_iter().map(|cstring| cstring.as_ptr()).collect::<Vec<_>>();
+ unsafe {
+ llvm::LLVMRustCoverageWriteFilenamesSectionToBuffer(
+ c_str_vec.as_ptr(),
+ c_str_vec.len(),
+ buffer,
+ );
+ }
+}
+
+pub(crate) fn write_mapping_to_buffer(
+ virtual_file_mapping: Vec<u32>,
+ expressions: Vec<CounterExpression>,
+ mapping_regions: Vec<CounterMappingRegion>,
+ buffer: &RustString,
+) {
+ unsafe {
+ llvm::LLVMRustCoverageWriteMappingToBuffer(
+ virtual_file_mapping.as_ptr(),
+ virtual_file_mapping.len() as c_uint,
+ expressions.as_ptr(),
+ expressions.len() as c_uint,
+ mapping_regions.as_ptr(),
+ mapping_regions.len() as c_uint,
+ buffer,
+ );
+ }
+}
+
+pub(crate) fn hash_str(strval: &str) -> u64 {
+ let strval = CString::new(strval).expect("null error converting hashable str to C string");
+ unsafe { llvm::LLVMRustCoverageHashCString(strval.as_ptr()) }
+}
+
+pub(crate) fn hash_bytes(bytes: Vec<u8>) -> u64 {
+ unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_ptr().cast(), bytes.len()) }
+}
+
+pub(crate) fn mapping_version() -> u32 {
+ unsafe { llvm::LLVMRustCoverageMappingVersion() }
+}
+
+pub(crate) fn save_cov_data_to_mod<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ cov_data_val: &'ll llvm::Value,
+) {
+ let covmap_var_name = llvm::build_string(|s| unsafe {
+ llvm::LLVMRustCoverageWriteMappingVarNameToString(s);
+ })
+ .expect("Rust Coverage Mapping var name failed UTF-8 conversion");
+ debug!("covmap var name: {:?}", covmap_var_name);
+
+ let covmap_section_name = llvm::build_string(|s| unsafe {
+ llvm::LLVMRustCoverageWriteMapSectionNameToString(cx.llmod, s);
+ })
+ .expect("Rust Coverage section name failed UTF-8 conversion");
+ debug!("covmap section name: {:?}", covmap_section_name);
+
+ let llglobal = llvm::add_global(cx.llmod, cx.val_ty(cov_data_val), &covmap_var_name);
+ llvm::set_initializer(llglobal, cov_data_val);
+ llvm::set_global_constant(llglobal, true);
+ llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
+ llvm::set_section(llglobal, &covmap_section_name);
+ llvm::set_alignment(llglobal, VAR_ALIGN_BYTES);
+ cx.add_used_global(llglobal);
+}
+
+pub(crate) fn save_func_record_to_mod<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ func_name_hash: u64,
+ func_record_val: &'ll llvm::Value,
+ is_used: bool,
+) {
+ // Assign a name to the function record. This is used to merge duplicates.
+ //
+ // In LLVM, a "translation unit" (effectively, a `Crate` in Rust) can describe functions that
+ // are included-but-not-used. If (or when) Rust generates functions that are
+ // included-but-not-used, note that a dummy description for a function included-but-not-used
+    // in a Crate can be replaced by a full description provided by a different Crate. The two kinds
+    // of descriptions play distinct roles in LLVM IR; therefore, assign them different names (by
+    // appending "u" to the end of the function record var name) to prevent `linkonce_odr` merging.
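+    //
+    // Illustrative example (editorial): a used function whose name hash is
+    // 0x1234 gets the record name "__covrec_1234u", while an unused function
+    // with the same hash would be named "__covrec_1234".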
+ let func_record_var_name =
+ format!("__covrec_{:X}{}", func_name_hash, if is_used { "u" } else { "" });
+ debug!("function record var name: {:?}", func_record_var_name);
+
+ let func_record_section_name = llvm::build_string(|s| unsafe {
+ llvm::LLVMRustCoverageWriteFuncSectionNameToString(cx.llmod, s);
+ })
+ .expect("Rust Coverage function record section name failed UTF-8 conversion");
+ debug!("function record section name: {:?}", func_record_section_name);
+
+ let llglobal = llvm::add_global(cx.llmod, cx.val_ty(func_record_val), &func_record_var_name);
+ llvm::set_initializer(llglobal, func_record_val);
+ llvm::set_global_constant(llglobal, true);
+ llvm::set_linkage(llglobal, llvm::Linkage::LinkOnceODRLinkage);
+ llvm::set_visibility(llglobal, llvm::Visibility::Hidden);
+ llvm::set_section(llglobal, &func_record_section_name);
+ llvm::set_alignment(llglobal, VAR_ALIGN_BYTES);
+ llvm::set_comdat(cx.llmod, llglobal, &func_record_var_name);
+ cx.add_used_global(llglobal);
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
new file mode 100644
index 000000000..99e4ded62
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -0,0 +1,126 @@
+use super::metadata::file_metadata;
+use super::utils::DIB;
+use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext};
+use rustc_codegen_ssa::traits::*;
+
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::{DILocation, DIScope};
+use rustc_middle::mir::{Body, SourceScope};
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_middle::ty::{self, Instance};
+use rustc_session::config::DebugInfo;
+
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::Idx;
+
+/// Produces DIScope DIEs for each MIR Scope which has variables defined in it.
+// FIXME(eddyb) almost all of this should be in `rustc_codegen_ssa::mir::debuginfo`.
+pub fn compute_mir_scopes<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ instance: Instance<'tcx>,
+ mir: &Body<'tcx>,
+ debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+) {
+ // Find all scopes with variables defined in them.
+ let variables = if cx.sess().opts.debuginfo == DebugInfo::Full {
+ let mut vars = BitSet::new_empty(mir.source_scopes.len());
+ // FIXME(eddyb) take into account that arguments always have debuginfo,
+ // irrespective of their name (assuming full debuginfo is enabled).
+ // NOTE(eddyb) actually, on second thought, those are always in the
+ // function scope, which always exists.
+ for var_debug_info in &mir.var_debug_info {
+ vars.insert(var_debug_info.source_info.scope);
+ }
+ Some(vars)
+ } else {
+ // Nothing to emit, of course.
+ None
+ };
+ let mut instantiated = BitSet::new_empty(mir.source_scopes.len());
+ // Instantiate all scopes.
+ for idx in 0..mir.source_scopes.len() {
+ let scope = SourceScope::new(idx);
+ make_mir_scope(cx, instance, mir, &variables, debug_context, &mut instantiated, scope);
+ }
+ assert!(instantiated.count() == mir.source_scopes.len());
+}
+
+fn make_mir_scope<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ instance: Instance<'tcx>,
+ mir: &Body<'tcx>,
+ variables: &Option<BitSet<SourceScope>>,
+ debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+ instantiated: &mut BitSet<SourceScope>,
+ scope: SourceScope,
+) {
+ if instantiated.contains(scope) {
+ return;
+ }
+
+ let scope_data = &mir.source_scopes[scope];
+ let parent_scope = if let Some(parent) = scope_data.parent_scope {
+ make_mir_scope(cx, instance, mir, variables, debug_context, instantiated, parent);
+ debug_context.scopes[parent]
+ } else {
+ // The root is the function itself.
+ let loc = cx.lookup_debug_loc(mir.span.lo());
+ debug_context.scopes[scope] = DebugScope {
+ file_start_pos: loc.file.start_pos,
+ file_end_pos: loc.file.end_pos,
+ ..debug_context.scopes[scope]
+ };
+ instantiated.insert(scope);
+ return;
+ };
+
+ if let Some(vars) = variables && !vars.contains(scope) && scope_data.inlined.is_none() {
+ // Do not create a DIScope if there are no variables defined in this
+ // MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
+ debug_context.scopes[scope] = parent_scope;
+ instantiated.insert(scope);
+ return;
+ }
+
+ let loc = cx.lookup_debug_loc(scope_data.span.lo());
+ let file_metadata = file_metadata(cx, &loc.file);
+
+ let dbg_scope = match scope_data.inlined {
+ Some((callee, _)) => {
+ // FIXME(eddyb) this would be `self.monomorphize(&callee)`
+ // if this is moved to `rustc_codegen_ssa::mir::debuginfo`.
+ let callee = cx.tcx.subst_and_normalize_erasing_regions(
+ instance.substs,
+ ty::ParamEnv::reveal_all(),
+ callee,
+ );
+ let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
+ cx.dbg_scope_fn(callee, callee_fn_abi, None)
+ }
+ None => unsafe {
+ llvm::LLVMRustDIBuilderCreateLexicalBlock(
+ DIB(cx),
+ parent_scope.dbg_scope,
+ file_metadata,
+ loc.line,
+ loc.col,
+ )
+ },
+ };
+
+ let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
+ // FIXME(eddyb) this doesn't account for the macro-related
+ // `Span` fixups that `rustc_codegen_ssa::mir::debuginfo` does.
+ let callsite_scope = parent_scope.adjust_dbg_scope_for_span(cx, callsite_span);
+ cx.dbg_loc(callsite_scope, parent_scope.inlined_at, callsite_span)
+ });
+
+ debug_context.scopes[scope] = DebugScope {
+ dbg_scope,
+ inlined_at: inlined_at.or(parent_scope.inlined_at),
+ file_start_pos: loc.file.start_pos,
+ file_end_pos: loc.file.end_pos,
+ };
+ instantiated.insert(scope);
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/doc.md b/compiler/rustc_codegen_llvm/src/debuginfo/doc.md
new file mode 100644
index 000000000..aaec4e68c
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/doc.md
@@ -0,0 +1,131 @@
+# Debug Info Module
+
+This module serves the purpose of generating debug symbols. We use LLVM's
+[source level debugging](https://llvm.org/docs/SourceLevelDebugging.html)
+features for generating the debug information. The general principle is
+this:
+
+Given the right metadata in the LLVM IR, the LLVM code generator is able to
+create DWARF debug symbols for the given code. The
+[metadata](https://llvm.org/docs/LangRef.html#metadata-type) is structured
+much like DWARF *debugging information entries* (DIE), representing type
+information such as datatype layout, function signatures, block layout,
+variable location and scope information, etc. It is the purpose of this
+module to generate correct metadata and insert it into the LLVM IR.
+
+As the exact format of metadata trees may change between different LLVM
+versions, we now use LLVM
+[DIBuilder](https://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html)
+to create metadata where possible. This will hopefully ease the adaptation of
+this module to future LLVM versions.
+
+The public API of the module is a set of functions that will insert the
+correct metadata into the LLVM IR when called with the right parameters.
+The module is thus driven from an outside client with functions like
+`debuginfo::create_local_var_metadata(bx: block, local: &ast::local)`.
+
+Internally the module will try to reuse already created metadata by
+utilizing a cache. The way to get a shared metadata node when needed is
+thus to just call the corresponding function in this module:
+```ignore (illustrative)
+let file_metadata = file_metadata(cx, file);
+```
+The function will take care of probing the cache for an existing node for
+that exact file path.
+
+All private state used by the module is stored within either the
+`CodegenUnitDebugContext` struct (owned by the `CodegenCx`) or the
+`FunctionDebugContext` (owned by the `FunctionCx`).
+
+This file consists of three conceptual sections:
+1. The public interface of the module
+2. Module-internal metadata creation functions
+3. Minor utility functions
+
+
+## Recursive Types
+
+Some kinds of types, such as structs and enums, can be recursive. That means
+that the type definition of some type X refers to some other type which in
+turn (transitively) refers to X. This introduces cycles into the type
+referral graph. A naive algorithm doing an on-demand, depth-first traversal
+of this graph when describing types can get trapped in an endless loop
+when it reaches such a cycle.
+
+For example, the following simple type for a singly-linked list...
+
+```
+struct List {
+ value: i32,
+ tail: Option<Box<List>>,
+}
+```
+
+will generate the following callstack with a naive DFS algorithm:
+
+```ignore (illustrative)
+describe(t = List)
+ describe(t = i32)
+ describe(t = Option<Box<List>>)
+ describe(t = Box<List>)
+ describe(t = List) // at the beginning again...
+ ...
+```
+
+To break cycles like these, we use "stubs". That is, when
+the algorithm encounters a possibly recursive type (any struct or enum), it
+immediately creates a type description node and inserts it into the cache
+*before* describing the members of the type. This type description is just
+a stub (as type members are not described and added to it yet) but it
+allows the algorithm to already refer to the type. After the stub is
+inserted into the cache, the algorithm continues as before. If it now
+encounters a recursive reference, it will hit the cache and does not try to
+describe the type anew. This behavior is encapsulated in the
+`type_map::build_type_with_children()` function.
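+
+As a rough sketch (illustrative only; the helper names below are invented,
+not the actual cache API), the stub-then-fill pattern looks like this:
+
+```ignore (illustrative)
+// Insert a stub for `ty` into the cache *before* recursing.
+let stub = create_stub_description(ty);
+cache.insert(ty, stub);
+// Recursive references to `ty` now hit the cache instead of looping forever.
+for member in members_of(ty) {
+    describe(member);
+}
+// Finally, fill the stub in with the member descriptions.
+finalize_description(stub);
+```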
+
+
+## Source Locations and Line Information
+
+In addition to data type descriptions, the debugging information must also
+allow mapping machine code locations back to source code locations in order
+to be useful. This functionality is also handled in this module. The
+following functions control source mappings:
+
++ `set_source_location()`
++ `clear_source_location()`
++ `start_emitting_source_locations()`
+
+`set_source_location()` sets the current source location. All IR
+instructions created after a call to this function will be linked to the
+given source location, until another location is specified with
+`set_source_location()` or the source location is cleared with
+`clear_source_location()`. In the latter case, subsequent IR instructions
+will not be linked to any source location. As you can see, this is a
+stateful API (mimicking the one in LLVM), so be careful with source
+locations set by previous calls. It's probably best to not rely on any
+specific state being present at a given point in code.
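+
+A hypothetical usage sketch (illustrative; `bx` and the builder calls are
+stand-ins, not the exact API):
+
+```ignore (illustrative)
+set_source_location(scope, span_a);
+let x = bx.load(ptr);    // linked to span_a
+let y = bx.add(x, one);  // still linked to span_a
+clear_source_location();
+let z = bx.mul(y, two);  // not linked to any source location
+```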
+
+One topic that deserves some extra attention is *function prologues*. At
+the beginning of a function's machine code there are typically a few
+instructions for loading argument values into allocas and checking if
+there's enough stack space for the function to execute. This *prologue* is
+not visible in the source code and LLVM puts a special PROLOGUE END marker
+into the line table at the first non-prologue instruction of the function.
+In order to find out where the prologue ends, LLVM looks for the first
+instruction in the function body that is linked to a source location. So,
+when generating prologue instructions we have to make sure that we don't
+emit source location information until the 'real' function body begins. For
+this reason, source location emission is disabled by default for any new
+function being codegened and is only activated after a call to the third
+function from the list above, `start_emitting_source_locations()`. This
+function should be called right before regularly starting to codegen the
+top-level block of the given function.
+
+There is one exception to the above rule: `llvm.dbg.declare` instructions
+must be linked to the source location of the variable being declared. For
+function parameters these `llvm.dbg.declare` instructions typically occur
+in the middle of the prologue, however, they are ignored by LLVM's prologue
+detection. The `create_argument_metadata()` and related functions take care
+of linking the `llvm.dbg.declare` instructions to the correct source
+locations even while source location emission is still disabled, so there
+is no need to do anything special with source location handling here.
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
new file mode 100644
index 000000000..80fd9726f
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -0,0 +1,120 @@
+// .debug_gdb_scripts binary section.
+
+use crate::llvm;
+
+use crate::builder::Builder;
+use crate::common::CodegenCx;
+use crate::value::Value;
+use rustc_codegen_ssa::base::collect_debugger_visualizers_transitive;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::bug;
+use rustc_session::config::{CrateType, DebugInfo};
+
+use rustc_span::symbol::sym;
+use rustc_span::DebuggerVisualizerType;
+
+/// Inserts a side-effect-free instruction sequence that makes sure that the
+/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
+pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) {
+ if needs_gdb_debug_scripts_section(bx) {
+ let gdb_debug_scripts_section =
+ bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_i8p());
+ // Load just the first byte as that's all that's necessary to force
+ // LLVM to keep around the reference to the global.
+        let volatile_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section);
+        unsafe {
+            llvm::LLVMSetAlignment(volatile_load_instruction, 1);
+ }
+ }
+}
+
+/// Allocates the global variable responsible for the .debug_gdb_scripts binary
+/// section.
+pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Value {
+ let c_section_var_name = "__rustc_debug_gdb_scripts_section__\0";
+ let section_var_name = &c_section_var_name[..c_section_var_name.len() - 1];
+
+ let section_var =
+ unsafe { llvm::LLVMGetNamedGlobal(cx.llmod, c_section_var_name.as_ptr().cast()) };
+
+ section_var.unwrap_or_else(|| {
+ let section_name = b".debug_gdb_scripts\0";
+ let mut section_contents = Vec::new();
+
+ // Add the pretty printers for the standard library first.
+ section_contents.extend_from_slice(b"\x01gdb_load_rust_pretty_printers.py\0");
+
+ // Next, add the pretty printers that were specified via the `#[debugger_visualizer]` attribute.
+ let visualizers = collect_debugger_visualizers_transitive(
+ cx.tcx,
+ DebuggerVisualizerType::GdbPrettyPrinter,
+ );
+ let crate_name = cx.tcx.crate_name(LOCAL_CRATE);
+ for (index, visualizer) in visualizers.iter().enumerate() {
+ // The initial byte `4` instructs GDB that the following pretty printer
+ // is defined inline as opposed to in a standalone file.
+ section_contents.extend_from_slice(b"\x04");
+ let vis_name = format!("pretty-printer-{}-{}\n", crate_name, index);
+ section_contents.extend_from_slice(vis_name.as_bytes());
+ section_contents.extend_from_slice(&visualizer.src);
+
+ // The final byte `0` tells GDB that the pretty printer has been
+ // fully defined and can continue searching for additional
+ // pretty printers.
+ section_contents.extend_from_slice(b"\0");
+ }
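+
+        // Illustrative layout of the finished section (editorial note): with one
+        // inline visualizer in crate `foo`, the bytes are roughly
+        //   \x01gdb_load_rust_pretty_printers.py\0
+        //   \x04pretty-printer-foo-0\n<visualizer source bytes>\0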
+
+ unsafe {
+ let section_contents = section_contents.as_slice();
+ let llvm_type = cx.type_array(cx.type_i8(), section_contents.len() as u64);
+
+ let section_var = cx
+ .define_global(section_var_name, llvm_type)
+ .unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name));
+ llvm::LLVMSetSection(section_var, section_name.as_ptr().cast());
+ llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents));
+ llvm::LLVMSetGlobalConstant(section_var, llvm::True);
+ llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global);
+ llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
+ // This should make sure that the whole section is not larger than
+ // the string it contains. Otherwise we get a warning from GDB.
+ llvm::LLVMSetAlignment(section_var, 1);
+ section_var
+ }
+ })
+}
+
+pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool {
+ let omit_gdb_pretty_printer_section =
+ cx.tcx.sess.contains_name(cx.tcx.hir().krate_attrs(), sym::omit_gdb_pretty_printer_section);
+
+ // To ensure the section `__rustc_debug_gdb_scripts_section__` will not create
+ // ODR violations at link time, this section will not be emitted for rlibs since
+ // each rlib could produce a different set of visualizers that would be embedded
+ // in the `.debug_gdb_scripts` section. For that reason, we make sure that the
+ // section is only emitted for leaf crates.
+ let embed_visualizers = cx.sess().crate_types().iter().any(|&crate_type| match crate_type {
+ CrateType::Executable | CrateType::Dylib | CrateType::Cdylib | CrateType::Staticlib => {
+ // These are crate types for which we will embed pretty printers since they
+ // are treated as leaf crates.
+ true
+ }
+ CrateType::ProcMacro => {
+ // We could embed pretty printers for proc macro crates too but it does not
+ // seem like a good default, since this is a rare use case and we don't
+ // want to slow down the common case.
+ false
+ }
+ CrateType::Rlib => {
+ // As per the above description, embedding pretty printers for rlibs could
+ // lead to ODR violations so we skip this crate type as well.
+ false
+ }
+ });
+
+ !omit_gdb_pretty_printer_section
+ && cx.sess().opts.debuginfo != DebugInfo::None
+ && cx.sess().target.emit_debug_gdb_scripts
+ && embed_visualizers
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
new file mode 100644
index 000000000..bd84100e0
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -0,0 +1,1618 @@
+use self::type_map::DINodeCreationResult;
+use self::type_map::Stub;
+use self::type_map::UniqueTypeId;
+
+use super::namespace::mangled_name_of_instance;
+use super::type_names::{compute_debuginfo_type_name, compute_debuginfo_vtable_name};
+use super::utils::{
+ create_DIArray, debug_context, get_namespace_for_item, is_node_local_to_unit, DIB,
+};
+use super::CodegenUnitDebugContext;
+
+use crate::abi;
+use crate::common::CodegenCx;
+use crate::debuginfo::metadata::type_map::build_type_with_children;
+use crate::debuginfo::utils::fat_pointer_kind;
+use crate::debuginfo::utils::FatPtrKind;
+use crate::llvm;
+use crate::llvm::debuginfo::{
+ DIDescriptor, DIFile, DIFlags, DILexicalBlock, DIScope, DIType, DebugEmissionKind,
+};
+use crate::value::Value;
+
+use cstr::cstr;
+use rustc_codegen_ssa::debuginfo::type_names::cpp_like_debuginfo;
+use rustc_codegen_ssa::debuginfo::type_names::VTableNameKind;
+use rustc_codegen_ssa::traits::*;
+use rustc_fs_util::path_to_c_string;
+use rustc_hir::def::CtorKind;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::bug;
+use rustc_middle::mir::{self, GeneratorLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::{
+ self, AdtKind, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt, Visibility,
+};
+use rustc_session::config::{self, DebugInfo, Lto};
+use rustc_span::symbol::Symbol;
+use rustc_span::FileName;
+use rustc_span::{self, FileNameDisplayPreference, SourceFile};
+use rustc_symbol_mangling::typeid_for_trait_ref;
+use rustc_target::abi::{Align, Size};
+use smallvec::smallvec;
+use tracing::debug;
+
+use libc::{c_char, c_longlong, c_uint};
+use std::borrow::Cow;
+use std::fmt::{self, Write};
+use std::hash::{Hash, Hasher};
+use std::iter;
+use std::path::{Path, PathBuf};
+use std::ptr;
+use tracing::instrument;
+
+impl PartialEq for llvm::Metadata {
+ fn eq(&self, other: &Self) -> bool {
+ ptr::eq(self, other)
+ }
+}
+
+impl Eq for llvm::Metadata {}
+
+impl Hash for llvm::Metadata {
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ (self as *const Self).hash(hasher);
+ }
+}
+
+impl fmt::Debug for llvm::Metadata {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (self as *const Self).fmt(f)
+ }
+}
+
+// From DWARF 5.
+// See http://www.dwarfstd.org/ShowIssue.php?issue=140129.1.
+const DW_LANG_RUST: c_uint = 0x1c;
+#[allow(non_upper_case_globals)]
+const DW_ATE_boolean: c_uint = 0x02;
+#[allow(non_upper_case_globals)]
+const DW_ATE_float: c_uint = 0x04;
+#[allow(non_upper_case_globals)]
+const DW_ATE_signed: c_uint = 0x05;
+#[allow(non_upper_case_globals)]
+const DW_ATE_unsigned: c_uint = 0x07;
+#[allow(non_upper_case_globals)]
+const DW_ATE_UTF: c_uint = 0x10;
+
+pub(super) const UNKNOWN_LINE_NUMBER: c_uint = 0;
+pub(super) const UNKNOWN_COLUMN_NUMBER: c_uint = 0;
+
+const NO_SCOPE_METADATA: Option<&DIScope> = None;
+/// A function that returns an empty list of generic parameter debuginfo nodes.
+const NO_GENERICS: for<'ll> fn(&CodegenCx<'ll, '_>) -> SmallVec<&'ll DIType> = |_| SmallVec::new();
+
+// SmallVec is used quite a bit in this module, so create a shorthand.
+// The actual number of elements is not so important.
+pub type SmallVec<T> = smallvec::SmallVec<[T; 16]>;
+
+mod enums;
+mod type_map;
+
+pub(crate) use type_map::TypeMap;
+
+/// Returns from the enclosing function if the type debuginfo node with the given
+/// unique ID can be found in the type map.
+macro_rules! return_if_di_node_created_in_meantime {
+ ($cx: expr, $unique_type_id: expr) => {
+ if let Some(di_node) = debug_context($cx).type_map.di_node_for_unique_id($unique_type_id) {
+ return DINodeCreationResult::new(di_node, true);
+ }
+ };
+}
+
+/// Extract size and alignment from a TyAndLayout.
+fn size_and_align_of<'tcx>(ty_and_layout: TyAndLayout<'tcx>) -> (Size, Align) {
+ (ty_and_layout.size, ty_and_layout.align.abi)
+}
+
+/// Creates debuginfo for a fixed size array (e.g. `[u64; 123]`).
+/// For slices (that is, "arrays" of unknown size) use [build_slice_type_di_node].
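+///
+/// For example, `[u64; 123]` is described as a DW_TAG_array_type wrapping the
+/// element type `u64`, with a single subrange (lower bound 0, element count
+/// equal to the array length).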
+fn build_fixed_size_array_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+ array_type: Ty<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let ty::Array(element_type, len) = array_type.kind() else {
+ bug!("build_fixed_size_array_di_node() called with non-ty::Array type `{:?}`", array_type)
+ };
+
+ let element_type_di_node = type_di_node(cx, *element_type);
+
+ return_if_di_node_created_in_meantime!(cx, unique_type_id);
+
+ let (size, align) = cx.size_and_align_of(array_type);
+
+ let upper_bound = len.eval_usize(cx.tcx, ty::ParamEnv::reveal_all()) as c_longlong;
+
+ let subrange =
+ unsafe { Some(llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound)) };
+
+ let subscripts = create_DIArray(DIB(cx), &[subrange]);
+ let di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreateArrayType(
+ DIB(cx),
+ size.bits(),
+ align.bits() as u32,
+ element_type_di_node,
+ subscripts,
+ )
+ };
+
+ DINodeCreationResult::new(di_node, false)
+}
+
+/// Creates debuginfo for built-in pointer-like things:
+///
+/// - ty::Ref
+/// - ty::RawPtr
+/// - ty::Adt in the case it's Box
+///
+/// At some point we might want to remove the special handling of Box
+/// and treat it the same as other smart pointers (like Rc, Arc, ...).
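+///
+/// For example, the fat-pointer case below describes `&[T]` roughly as the
+/// following struct (a sketch; `&dyn Trait` uses the field names `pointer`
+/// and `vtable` instead):
+///
+/// ```c
+/// struct &[T] {
+///     T* data_ptr;
+///     size_t length;
+/// };
+/// ```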
+fn build_pointer_or_reference_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ ptr_type: Ty<'tcx>,
+ pointee_type: Ty<'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ // The debuginfo generated by this function is only valid if `ptr_type` is really just
+ // a (fat) pointer. Make sure it is not called for e.g. `Box<T, NonZSTAllocator>`.
+ debug_assert_eq!(
+ cx.size_and_align_of(ptr_type),
+ cx.size_and_align_of(cx.tcx.mk_mut_ptr(pointee_type))
+ );
+
+ let pointee_type_di_node = type_di_node(cx, pointee_type);
+
+ return_if_di_node_created_in_meantime!(cx, unique_type_id);
+
+ let (thin_pointer_size, thin_pointer_align) =
+ cx.size_and_align_of(cx.tcx.mk_imm_ptr(cx.tcx.types.unit));
+ let ptr_type_debuginfo_name = compute_debuginfo_type_name(cx.tcx, ptr_type, true);
+
+ match fat_pointer_kind(cx, pointee_type) {
+ None => {
+ // This is a thin pointer. Create a regular pointer type and give it the correct name.
+ debug_assert_eq!(
+ (thin_pointer_size, thin_pointer_align),
+ cx.size_and_align_of(ptr_type),
+ "ptr_type={}, pointee_type={}",
+ ptr_type,
+ pointee_type,
+ );
+
+ let di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreatePointerType(
+ DIB(cx),
+ pointee_type_di_node,
+ thin_pointer_size.bits(),
+ thin_pointer_align.bits() as u32,
+ 0, // Ignore DWARF address space.
+ ptr_type_debuginfo_name.as_ptr().cast(),
+ ptr_type_debuginfo_name.len(),
+ )
+ };
+
+ DINodeCreationResult { di_node, already_stored_in_typemap: false }
+ }
+ Some(fat_pointer_kind) => {
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &ptr_type_debuginfo_name,
+ cx.size_and_align_of(ptr_type),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ |cx, owner| {
+ // FIXME: If this fat pointer is a `Box` then we don't want to use its
+ // type layout and instead use the layout of the raw pointer inside
+ // of it.
+ // The proper way to handle this is to not treat Box as a pointer
+ // at all and instead emit regular struct debuginfo for it. We just
+ // need to make sure that we don't break existing debuginfo consumers
+ // by doing that (at least not without a warning period).
+ let layout_type =
+ if ptr_type.is_box() { cx.tcx.mk_mut_ptr(pointee_type) } else { ptr_type };
+
+ let layout = cx.layout_of(layout_type);
+ let addr_field = layout.field(cx, abi::FAT_PTR_ADDR);
+ let extra_field = layout.field(cx, abi::FAT_PTR_EXTRA);
+
+ let (addr_field_name, extra_field_name) = match fat_pointer_kind {
+ FatPtrKind::Dyn => ("pointer", "vtable"),
+ FatPtrKind::Slice => ("data_ptr", "length"),
+ };
+
+ debug_assert_eq!(abi::FAT_PTR_ADDR, 0);
+ debug_assert_eq!(abi::FAT_PTR_EXTRA, 1);
+
+ // The data pointer type is a regular, thin pointer, regardless of whether this
+ // is a slice or a trait object.
+ let data_ptr_type_di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreatePointerType(
+ DIB(cx),
+ pointee_type_di_node,
+ addr_field.size.bits(),
+ addr_field.align.abi.bits() as u32,
+ 0, // Ignore DWARF address space.
+ std::ptr::null(),
+ 0,
+ )
+ };
+
+ smallvec![
+ build_field_di_node(
+ cx,
+ owner,
+ addr_field_name,
+ (addr_field.size, addr_field.align.abi),
+ layout.fields.offset(abi::FAT_PTR_ADDR),
+ DIFlags::FlagZero,
+ data_ptr_type_di_node,
+ ),
+ build_field_di_node(
+ cx,
+ owner,
+ extra_field_name,
+ (extra_field.size, extra_field.align.abi),
+ layout.fields.offset(abi::FAT_PTR_EXTRA),
+ DIFlags::FlagZero,
+ type_di_node(cx, extra_field.ty),
+ ),
+ ]
+ },
+ NO_GENERICS,
+ )
+ }
+ }
+}
+
+fn build_subroutine_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ // It's possible to create a self-referential
+ // type in Rust by using 'impl trait':
+ //
+ // fn foo() -> impl Copy { foo }
+ //
+ // Unfortunately LLVM's API does not allow us to create recursive subroutine types.
+ // In order to work around that restriction we place a marker type in the type map,
+ // before creating the actual type. If the actual type is recursive, it will hit the
+ // marker type. So we end up with a type that looks like
+ //
+ // fn foo() -> <recursive_type>
+ //
+ // Once that is created, we replace the marker in the typemap with the actual type.
+ debug_context(cx)
+ .type_map
+ .unique_id_to_di_node
+ .borrow_mut()
+ .insert(unique_type_id, recursion_marker_type_di_node(cx));
+
+ let fn_ty = unique_type_id.expect_ty();
+ let signature = cx
+ .tcx
+ .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), fn_ty.fn_sig(cx.tcx));
+
+ let signature_di_nodes: SmallVec<_> = iter::once(
+ // return type
+ match signature.output().kind() {
+ ty::Tuple(tys) if tys.is_empty() => {
+ // this is a "void" function
+ None
+ }
+ _ => Some(type_di_node(cx, signature.output())),
+ },
+ )
+ .chain(
+ // regular arguments
+ signature.inputs().iter().map(|&argument_type| Some(type_di_node(cx, argument_type))),
+ )
+ .collect();
+
+ debug_context(cx).type_map.unique_id_to_di_node.borrow_mut().remove(&unique_type_id);
+
+ let fn_di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreateSubroutineType(
+ DIB(cx),
+ create_DIArray(DIB(cx), &signature_di_nodes[..]),
+ )
+ };
+
+ // This is actually a function pointer, so wrap it in pointer DI.
+ let name = compute_debuginfo_type_name(cx.tcx, fn_ty, false);
+ let di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreatePointerType(
+ DIB(cx),
+ fn_di_node,
+ cx.tcx.data_layout.pointer_size.bits(),
+ cx.tcx.data_layout.pointer_align.abi.bits() as u32,
+ 0, // Ignore DWARF address space.
+ name.as_ptr().cast(),
+ name.len(),
+ )
+ };
+
+ DINodeCreationResult::new(di_node, false)
+}
+
+/// Create debuginfo for `dyn SomeTrait` types. Currently these are empty structs
+/// with the correct type name (e.g. "dyn SomeTrait<Foo, Item=u32> + Sync").
+fn build_dyn_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ dyn_type: Ty<'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ if let ty::Dynamic(..) = dyn_type.kind() {
+ let type_name = compute_debuginfo_type_name(cx.tcx, dyn_type, true);
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &type_name,
+ cx.size_and_align_of(dyn_type),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ |_, _| smallvec![],
+ NO_GENERICS,
+ )
+ } else {
+ bug!(
+ "Only ty::Dynamic is valid for build_dyn_type_di_node(). Found {:?} instead.",
+ dyn_type
+ )
+ }
+}
+
+/// Create debuginfo for `[T]` and `str`. These are unsized.
+///
+/// NOTE: We currently just emit the debuginfo for the element type here
+/// (i.e. `T` for slices and `u8` for `str`), so that we end up with
+/// `*const T` for the `data_ptr` field of the corresponding fat-pointer
+/// debuginfo of `&[T]`.
+///
+/// It would be preferable and more accurate if we emitted a DIArray of T
+/// without an upper bound instead. That is, LLVM already supports emitting
+/// debuginfo of arrays of unknown size. But GDB currently seems to end up
+/// in an infinite loop when confronted with such a type.
+///
+/// As a side effect of the current encoding every instance of a type like
+/// `struct Foo { unsized_field: [u8] }` will look like
+/// `struct Foo { unsized_field: u8 }` in debuginfo. If the length of the
+/// slice is zero, then accessing `unsized_field` in the debugger would
+/// result in an out-of-bounds access.
+fn build_slice_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ slice_type: Ty<'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let element_type = match slice_type.kind() {
+ ty::Slice(element_type) => *element_type,
+ ty::Str => cx.tcx.types.u8,
+ _ => {
+ bug!(
+ "Only ty::Slice is valid for build_slice_type_di_node(). Found {:?} instead.",
+ slice_type
+ )
+ }
+ };
+
+ let element_type_di_node = type_di_node(cx, element_type);
+ return_if_di_node_created_in_meantime!(cx, unique_type_id);
+ DINodeCreationResult { di_node: element_type_di_node, already_stored_in_typemap: false }
+}
+
+/// Get the debuginfo node for the given type.
+///
+/// This function will look up the debuginfo node in the TypeMap. If it can't find it, it
+/// will create the node by dispatching to the corresponding `build_*_di_node()` function.
+pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
+ let unique_type_id = UniqueTypeId::for_ty(cx.tcx, t);
+
+ if let Some(existing_di_node) = debug_context(cx).type_map.di_node_for_unique_id(unique_type_id)
+ {
+ return existing_di_node;
+ }
+
+ debug!("type_di_node: {:?}", t);
+
+ let DINodeCreationResult { di_node, already_stored_in_typemap } = match *t.kind() {
+ ty::Never | ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) => {
+ build_basic_type_di_node(cx, t)
+ }
+ ty::Tuple(elements) if elements.is_empty() => build_basic_type_di_node(cx, t),
+ ty::Array(..) => build_fixed_size_array_di_node(cx, unique_type_id, t),
+ ty::Slice(_) | ty::Str => build_slice_type_di_node(cx, t, unique_type_id),
+ ty::Dynamic(..) => build_dyn_type_di_node(cx, t, unique_type_id),
+ ty::Foreign(..) => build_foreign_type_di_node(cx, t, unique_type_id),
+ ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => {
+ build_pointer_or_reference_di_node(cx, t, pointee_type, unique_type_id)
+ }
+ // Box<T, A> may have a non-ZST allocator A. In that case, we
+ // cannot treat Box<T, A> as just an owned alias of `*mut T`.
+ ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
+ build_pointer_or_reference_di_node(cx, t, t.boxed_ty(), unique_type_id)
+ }
+ ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id),
+ ty::Closure(..) => build_closure_env_di_node(cx, unique_type_id),
+ ty::Generator(..) => enums::build_generator_di_node(cx, unique_type_id),
+ ty::Adt(def, ..) => match def.adt_kind() {
+ AdtKind::Struct => build_struct_type_di_node(cx, unique_type_id),
+ AdtKind::Union => build_union_type_di_node(cx, unique_type_id),
+ AdtKind::Enum => enums::build_enum_type_di_node(cx, unique_type_id),
+ },
+ ty::Tuple(_) => build_tuple_type_di_node(cx, unique_type_id),
+ // Type parameters from polymorphized functions.
+ ty::Param(_) => build_param_type_di_node(cx, t),
+ _ => bug!("debuginfo: unexpected type in type_di_node(): {:?}", t),
+ };
+
+ {
+ if already_stored_in_typemap {
+ // Make sure that we really do have a `TypeMap` entry for the unique type ID.
+ let di_node_for_uid =
+ match debug_context(cx).type_map.di_node_for_unique_id(unique_type_id) {
+ Some(di_node) => di_node,
+ None => {
+ bug!(
+ "expected type debuginfo node for unique \
+ type ID '{:?}' to already be in \
+ the `debuginfo::TypeMap` but it \
+ was not.",
+ unique_type_id,
+ );
+ }
+ };
+
+ debug_assert_eq!(di_node_for_uid as *const _, di_node as *const _);
+ } else {
+ debug_context(cx).type_map.insert(unique_type_id, di_node);
+ }
+ }
+
+ di_node
+}
+
+// FIXME(mw): Cache this via a regular UniqueTypeId instead of an extra field in the debug context.
+fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll DIType {
+ *debug_context(cx).recursion_marker_type.get_or_init(move || {
+ unsafe {
+ // The choice of type here is pretty arbitrary -
+ // anything reading the debuginfo for a recursive
+ // type is going to see *something* weird - the only
+ // question is what exactly it will see.
+ //
+ // FIXME: the name `<recur_type>` does not fit the naming scheme
+ // of other types.
+ //
+ // FIXME: it might make sense to use an actual pointer type here
+ // so that debuggers can show the address.
+ let name = "<recur_type>";
+ llvm::LLVMRustDIBuilderCreateBasicType(
+ DIB(cx),
+ name.as_ptr().cast(),
+ name.len(),
+ cx.tcx.data_layout.pointer_size.bits(),
+ DW_ATE_unsigned,
+ )
+ }
+ })
+}
+
+fn hex_encode(data: &[u8]) -> String {
+ let mut hex_string = String::with_capacity(data.len() * 2);
+ for byte in data.iter() {
+ write!(&mut hex_string, "{:02x}", byte).unwrap();
+ }
+ hex_string
+}
+
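+/// Creates (or looks up in the cache) the `DIFile` debuginfo node for the
+/// given source file.
+///
+/// For example, with a working directory of `/home/alice/proj` (a hypothetical
+/// path), the file `/home/alice/proj/src/lib.rs` is emitted with
+/// `directory = "/home/alice/proj"` and `file_name = "src/lib.rs"`, while a
+/// file outside the working directory is emitted with an empty directory and
+/// its full absolute path as the file name.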
+pub fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> &'ll DIFile {
+ let cache_key = Some((source_file.name_hash, source_file.src_hash));
+ return debug_context(cx)
+ .created_files
+ .borrow_mut()
+ .entry(cache_key)
+ .or_insert_with(|| alloc_new_file_metadata(cx, source_file));
+
+ #[instrument(skip(cx, source_file), level = "debug")]
+ fn alloc_new_file_metadata<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ source_file: &SourceFile,
+ ) -> &'ll DIFile {
+ debug!(?source_file.name);
+
+ let (directory, file_name) = match &source_file.name {
+ FileName::Real(filename) => {
+ let working_directory = &cx.sess().opts.working_dir;
+ debug!(?working_directory);
+
+ let filename = cx
+ .sess()
+ .source_map()
+ .path_mapping()
+ .to_embeddable_absolute_path(filename.clone(), working_directory);
+
+ // Construct the absolute path of the file
+ let abs_path = filename.remapped_path_if_available();
+ debug!(?abs_path);
+
+ if let Ok(rel_path) =
+ abs_path.strip_prefix(working_directory.remapped_path_if_available())
+ {
+ // If the compiler's working directory (which also is the DW_AT_comp_dir of
+ // the compilation unit) is a prefix of the path we are about to emit, then
+ // only emit the part relative to the working directory.
+ // Because of path remapping we sometimes see strange things here: `abs_path`
+ // might actually look like a relative path
+ // (e.g. `<crate-name-and-version>/src/lib.rs`), so if we emit it without
+ // taking the working directory into account, downstream tooling will
+ // interpret it as `<working-directory>/<crate-name-and-version>/src/lib.rs`,
+ // which makes no sense. Usually in such cases the working directory will also
+ // be remapped to `<crate-name-and-version>` or some other prefix of the path
+ // we are remapping, so we end up with
+ // `<crate-name-and-version>/<crate-name-and-version>/src/lib.rs`.
+ // By moving the working directory portion into the `directory` part of the
+ // DIFile, we allow LLVM to emit just the relative path for DWARF, while
+ // still emitting the correct absolute path for CodeView.
+ (
+ working_directory.to_string_lossy(FileNameDisplayPreference::Remapped),
+ rel_path.to_string_lossy().into_owned(),
+ )
+ } else {
+ ("".into(), abs_path.to_string_lossy().into_owned())
+ }
+ }
+ other => ("".into(), other.prefer_remapped().to_string_lossy().into_owned()),
+ };
+
+ let hash_kind = match source_file.src_hash.kind {
+ rustc_span::SourceFileHashAlgorithm::Md5 => llvm::ChecksumKind::MD5,
+ rustc_span::SourceFileHashAlgorithm::Sha1 => llvm::ChecksumKind::SHA1,
+ rustc_span::SourceFileHashAlgorithm::Sha256 => llvm::ChecksumKind::SHA256,
+ };
+ let hash_value = hex_encode(source_file.src_hash.hash_bytes());
+
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateFile(
+ DIB(cx),
+ file_name.as_ptr().cast(),
+ file_name.len(),
+ directory.as_ptr().cast(),
+ directory.len(),
+ hash_kind,
+ hash_value.as_ptr().cast(),
+ hash_value.len(),
+ )
+ }
+ }
+}
+
+pub fn unknown_file_metadata<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile {
+ debug_context(cx).created_files.borrow_mut().entry(None).or_insert_with(|| unsafe {
+ let file_name = "<unknown>";
+ let directory = "";
+ let hash_value = "";
+
+ llvm::LLVMRustDIBuilderCreateFile(
+ DIB(cx),
+ file_name.as_ptr().cast(),
+ file_name.len(),
+ directory.as_ptr().cast(),
+ directory.len(),
+ llvm::ChecksumKind::None,
+ hash_value.as_ptr().cast(),
+ hash_value.len(),
+ )
+ })
+}
+
+trait MsvcBasicName {
+ fn msvc_basic_name(self) -> &'static str;
+}
+
+impl MsvcBasicName for ty::IntTy {
+ fn msvc_basic_name(self) -> &'static str {
+ match self {
+ ty::IntTy::Isize => "ptrdiff_t",
+ ty::IntTy::I8 => "__int8",
+ ty::IntTy::I16 => "__int16",
+ ty::IntTy::I32 => "__int32",
+ ty::IntTy::I64 => "__int64",
+ ty::IntTy::I128 => "__int128",
+ }
+ }
+}
+
+impl MsvcBasicName for ty::UintTy {
+ fn msvc_basic_name(self) -> &'static str {
+ match self {
+ ty::UintTy::Usize => "size_t",
+ ty::UintTy::U8 => "unsigned __int8",
+ ty::UintTy::U16 => "unsigned __int16",
+ ty::UintTy::U32 => "unsigned __int32",
+ ty::UintTy::U64 => "unsigned __int64",
+ ty::UintTy::U128 => "unsigned __int128",
+ }
+ }
+}
+
+impl MsvcBasicName for ty::FloatTy {
+ fn msvc_basic_name(self) -> &'static str {
+ match self {
+ ty::FloatTy::F32 => "float",
+ ty::FloatTy::F64 => "double",
+ }
+ }
+}
+
+fn build_basic_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ t: Ty<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ debug!("build_basic_type_di_node: {:?}", t);
+
+ // When targeting MSVC, emit MSVC style type names for compatibility with
+ // .natvis visualizers (and perhaps other existing native debuggers?)
+ let cpp_like_debuginfo = cpp_like_debuginfo(cx.tcx);
+
+ let (name, encoding) = match t.kind() {
+ ty::Never => ("!", DW_ATE_unsigned),
+ ty::Tuple(elements) if elements.is_empty() => {
+ if cpp_like_debuginfo {
+ return build_tuple_type_di_node(cx, UniqueTypeId::for_ty(cx.tcx, t));
+ } else {
+ ("()", DW_ATE_unsigned)
+ }
+ }
+ ty::Bool => ("bool", DW_ATE_boolean),
+ ty::Char => ("char", DW_ATE_UTF),
+ ty::Int(int_ty) if cpp_like_debuginfo => (int_ty.msvc_basic_name(), DW_ATE_signed),
+ ty::Uint(uint_ty) if cpp_like_debuginfo => (uint_ty.msvc_basic_name(), DW_ATE_unsigned),
+ ty::Float(float_ty) if cpp_like_debuginfo => (float_ty.msvc_basic_name(), DW_ATE_float),
+ ty::Int(int_ty) => (int_ty.name_str(), DW_ATE_signed),
+ ty::Uint(uint_ty) => (uint_ty.name_str(), DW_ATE_unsigned),
+ ty::Float(float_ty) => (float_ty.name_str(), DW_ATE_float),
+ _ => bug!("debuginfo::build_basic_type_di_node - `t` is invalid type"),
+ };
+
+ let ty_di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreateBasicType(
+ DIB(cx),
+ name.as_ptr().cast(),
+ name.len(),
+ cx.size_of(t).bits(),
+ encoding,
+ )
+ };
+
+ if !cpp_like_debuginfo {
+ return DINodeCreationResult::new(ty_di_node, false);
+ }
+
+ let typedef_name = match t.kind() {
+ ty::Int(int_ty) => int_ty.name_str(),
+ ty::Uint(uint_ty) => uint_ty.name_str(),
+ ty::Float(float_ty) => float_ty.name_str(),
+ _ => return DINodeCreationResult::new(ty_di_node, false),
+ };
+
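+ // For example, with cpp-like debuginfo a Rust `u32` is emitted as the base
+ // type `unsigned __int32` above, with a typedef named `u32` created below,
+ // so that both the MSVC-style and the Rust-style name resolve in the debugger.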
+ let typedef_di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreateTypedef(
+ DIB(cx),
+ ty_di_node,
+ typedef_name.as_ptr().cast(),
+ typedef_name.len(),
+ unknown_file_metadata(cx),
+ 0,
+ None,
+ )
+ };
+
+ DINodeCreationResult::new(typedef_di_node, false)
+}
+
+fn build_foreign_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ t: Ty<'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ debug!("build_foreign_type_di_node: {:?}", t);
+
+ let &ty::Foreign(def_id) = unique_type_id.expect_ty().kind() else {
+ bug!("build_foreign_type_di_node() called with unexpected type: {:?}", unique_type_id.expect_ty());
+ };
+
+ build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &compute_debuginfo_type_name(cx.tcx, t, false),
+ cx.size_and_align_of(t),
+ Some(get_namespace_for_item(cx, def_id)),
+ DIFlags::FlagZero,
+ ),
+ |_, _| smallvec![],
+ NO_GENERICS,
+ )
+}
+
+fn build_param_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ t: Ty<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ debug!("build_param_type_di_node: {:?}", t);
+ let name = format!("{:?}", t);
+ DINodeCreationResult {
+ di_node: unsafe {
+ llvm::LLVMRustDIBuilderCreateBasicType(
+ DIB(cx),
+ name.as_ptr().cast(),
+ name.len(),
+ Size::ZERO.bits(),
+ DW_ATE_unsigned,
+ )
+ },
+ already_stored_in_typemap: false,
+ }
+}
+
+pub fn build_compile_unit_di_node<'ll, 'tcx>(
+ tcx: TyCtxt<'tcx>,
+ codegen_unit_name: &str,
+ debug_context: &CodegenUnitDebugContext<'ll, 'tcx>,
+) -> &'ll DIDescriptor {
+ let mut name_in_debuginfo = match tcx.sess.local_crate_source_file {
+ Some(ref path) => path.clone(),
+ None => PathBuf::from(tcx.crate_name(LOCAL_CRATE).as_str()),
+ };
+
+ // To avoid breaking split DWARF, we need to ensure that each codegen unit
+ // has a unique `DW_AT_name`. This is because there's a remote chance that
+ // different codegen units for the same module will have entirely
+ // identical DWARF entries for the purpose of the DWO ID, which would
+ // violate Appendix F ("Split Dwarf Object Files") of the DWARF 5
+ // specification. LLVM uses the algorithm specified in section 7.32 "Type
+ // Signature Computation" to compute the DWO ID, which does not include
+ // any fields that would distinguish compilation units. So we must embed
+ // the codegen unit name into the `DW_AT_name`. (Issue #88521.)
+ //
+ // Additionally, the OSX linker has an idiosyncrasy where it will ignore
+ // some debuginfo if multiple object files with the same `DW_AT_name` are
+ // linked together.
+ //
+ // As a workaround for these two issues, we generate unique names for each
+ // object file. Those do not correspond to an actual source file but that
+ // is harmless.
+ name_in_debuginfo.push("@");
+ name_in_debuginfo.push(codegen_unit_name);
+
+ debug!("build_compile_unit_di_node: {:?}", name_in_debuginfo);
+ let rustc_producer =
+ format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION"),);
+ // FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice.
+ let producer = format!("clang LLVM ({})", rustc_producer);
+
+ let name_in_debuginfo = name_in_debuginfo.to_string_lossy();
+ let work_dir = tcx.sess.opts.working_dir.to_string_lossy(FileNameDisplayPreference::Remapped);
+ let flags = "\0";
+ let output_filenames = tcx.output_filenames(());
+ let split_name = if tcx.sess.target_can_use_split_dwarf() {
+ output_filenames
+ .split_dwarf_path(
+ tcx.sess.split_debuginfo(),
+ tcx.sess.opts.unstable_opts.split_dwarf_kind,
+ Some(codegen_unit_name),
+ )
+ // We get a path relative to the working directory from split_dwarf_path
+ .map(|f| tcx.sess.source_map().path_mapping().map_prefix(f).0)
+ } else {
+ None
+ }
+ .unwrap_or_default();
+ let split_name = split_name.to_str().unwrap();
+
+ // FIXME(#60020):
+ //
+ // This should actually be
+ //
+ // let kind = DebugEmissionKind::from_generic(tcx.sess.opts.debuginfo);
+ //
+ // That is, we should set LLVM's emission kind to `LineTablesOnly` if
+ // we are compiling with "limited" debuginfo. However, some of the
+ // existing tools relied on slightly more debuginfo being generated than
+ // would be the case with `LineTablesOnly`, and we did not want to break
+ // these tools in a "drive-by fix", without a good idea or plan about
+ // what limited debuginfo should exactly look like. So for now we keep
+ // the emission kind as `FullDebug`.
+ //
+ // See https://github.com/rust-lang/rust/issues/60020 for details.
+ let kind = DebugEmissionKind::FullDebug;
+ assert!(tcx.sess.opts.debuginfo != DebugInfo::None);
+
+ unsafe {
+ let compile_unit_file = llvm::LLVMRustDIBuilderCreateFile(
+ debug_context.builder,
+ name_in_debuginfo.as_ptr().cast(),
+ name_in_debuginfo.len(),
+ work_dir.as_ptr().cast(),
+ work_dir.len(),
+ llvm::ChecksumKind::None,
+ ptr::null(),
+ 0,
+ );
+
+ let unit_metadata = llvm::LLVMRustDIBuilderCreateCompileUnit(
+ debug_context.builder,
+ DW_LANG_RUST,
+ compile_unit_file,
+ producer.as_ptr().cast(),
+ producer.len(),
+ tcx.sess.opts.optimize != config::OptLevel::No,
+ flags.as_ptr().cast(),
+ 0,
+ // NB: this doesn't actually have any perceptible effect, it seems. LLVM will instead
+ // put the path supplied to `MCSplitDwarfFile` into the debug info of the final
+ // output(s).
+ split_name.as_ptr().cast(),
+ split_name.len(),
+ kind,
+ 0,
+ tcx.sess.opts.unstable_opts.split_dwarf_inlining,
+ );
+
+ if tcx.sess.opts.unstable_opts.profile {
+ let cu_desc_metadata =
+ llvm::LLVMRustMetadataAsValue(debug_context.llcontext, unit_metadata);
+ let default_gcda_path = &output_filenames.with_extension("gcda");
+ let gcda_path =
+ tcx.sess.opts.unstable_opts.profile_emit.as_ref().unwrap_or(default_gcda_path);
+
+ let gcov_cu_info = [
+ path_to_mdstring(debug_context.llcontext, &output_filenames.with_extension("gcno")),
+ path_to_mdstring(debug_context.llcontext, gcda_path),
+ cu_desc_metadata,
+ ];
+ let gcov_metadata = llvm::LLVMMDNodeInContext(
+ debug_context.llcontext,
+ gcov_cu_info.as_ptr(),
+ gcov_cu_info.len() as c_uint,
+ );
+
+ let llvm_gcov_ident = cstr!("llvm.gcov");
+ llvm::LLVMAddNamedMetadataOperand(
+ debug_context.llmod,
+ llvm_gcov_ident.as_ptr(),
+ gcov_metadata,
+ );
+ }
+
+ // Insert `llvm.ident` metadata on the wasm targets since that will
+ // get hooked up to the "producer" sections `processed-by` information.
+ if tcx.sess.target.is_like_wasm {
+ let name_metadata = llvm::LLVMMDStringInContext(
+ debug_context.llcontext,
+ rustc_producer.as_ptr().cast(),
+ rustc_producer.as_bytes().len() as c_uint,
+ );
+ llvm::LLVMAddNamedMetadataOperand(
+ debug_context.llmod,
+ cstr!("llvm.ident").as_ptr(),
+ llvm::LLVMMDNodeInContext(debug_context.llcontext, &name_metadata, 1),
+ );
+ }
+
+ return unit_metadata;
+ };
+
+ fn path_to_mdstring<'ll>(llcx: &'ll llvm::Context, path: &Path) -> &'ll Value {
+ let path_str = path_to_c_string(path);
+ unsafe {
+ llvm::LLVMMDStringInContext(
+ llcx,
+ path_str.as_ptr(),
+ path_str.as_bytes().len() as c_uint,
+ )
+ }
+ }
+}
+
+/// Creates a `DW_TAG_member` entry inside the DIE represented by the given `type_di_node`.
+fn build_field_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ owner: &'ll DIScope,
+ name: &str,
+ size_and_align: (Size, Align),
+ offset: Size,
+ flags: DIFlags,
+ type_di_node: &'ll DIType,
+) -> &'ll DIType {
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateMemberType(
+ DIB(cx),
+ owner,
+ name.as_ptr().cast(),
+ name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size_and_align.0.bits(),
+ size_and_align.1.bits() as u32,
+ offset.bits(),
+ flags,
+ type_di_node,
+ )
+ }
+}
+
+/// Creates the debuginfo node for a Rust struct type. May be a regular struct or a tuple-struct.
+fn build_struct_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let struct_type = unique_type_id.expect_ty();
+ let ty::Adt(adt_def, _) = struct_type.kind() else {
+ bug!("build_struct_type_di_node() called with non-struct-type: {:?}", struct_type);
+ };
+ debug_assert!(adt_def.is_struct());
+ let containing_scope = get_namespace_for_item(cx, adt_def.did());
+ let struct_type_and_layout = cx.layout_of(struct_type);
+ let variant_def = adt_def.non_enum_variant();
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &compute_debuginfo_type_name(cx.tcx, struct_type, false),
+ size_and_align_of(struct_type_and_layout),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ // Fields:
+ |cx, owner| {
+ variant_def
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(i, f)| {
+ let field_name = if variant_def.ctor_kind == CtorKind::Fn {
+ // This is a tuple struct
+ tuple_field_name(i)
+ } else {
+ // This is struct with named fields
+ Cow::Borrowed(f.name.as_str())
+ };
+ let field_layout = struct_type_and_layout.field(cx, i);
+ build_field_di_node(
+ cx,
+ owner,
+ &field_name[..],
+ (field_layout.size, field_layout.align.abi),
+ struct_type_and_layout.fields.offset(i),
+ DIFlags::FlagZero,
+ type_di_node(cx, field_layout.ty),
+ )
+ })
+ .collect()
+ },
+ |cx| build_generic_type_param_di_nodes(cx, struct_type),
+ )
+}
+
+//=-----------------------------------------------------------------------------
+// Tuples
+//=-----------------------------------------------------------------------------
+
+/// Returns names of captured upvars for closures and generators.
+///
+/// Here are some examples:
+/// - `name__field1__field2` when the upvar is captured by value.
+/// - `_ref__name__field` when the upvar is captured by reference.
+///
+/// For generators this only contains upvars that are shared by all states.
+fn closure_saved_names_of_captured_variables(tcx: TyCtxt<'_>, def_id: DefId) -> SmallVec<String> {
+ let body = tcx.optimized_mir(def_id);
+
+ body.var_debug_info
+ .iter()
+ .filter_map(|var| {
+ let is_ref = match var.value {
+ mir::VarDebugInfoContents::Place(place) if place.local == mir::Local::new(1) => {
+ // The projection is either `[.., Field, Deref]` or `[.., Field]`. It
+ // implies whether the variable is captured by value or by reference.
+ matches!(place.projection.last().unwrap(), mir::ProjectionElem::Deref)
+ }
+ _ => return None,
+ };
+ let prefix = if is_ref { "_ref__" } else { "" };
+ Some(prefix.to_owned() + var.name.as_str())
+ })
+ .collect()
+}
+
+/// Builds the DW_TAG_member debuginfo nodes for the upvars of a closure or generator.
+/// For a generator, this will handle upvars shared by all states.
+fn build_upvar_field_di_nodes<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ closure_or_generator_ty: Ty<'tcx>,
+ closure_or_generator_di_node: &'ll DIType,
+) -> SmallVec<&'ll DIType> {
+ let (&def_id, up_var_tys) = match closure_or_generator_ty.kind() {
+ ty::Generator(def_id, substs, _) => {
+ let upvar_tys: SmallVec<_> = substs.as_generator().prefix_tys().collect();
+ (def_id, upvar_tys)
+ }
+ ty::Closure(def_id, substs) => {
+ let upvar_tys: SmallVec<_> = substs.as_closure().upvar_tys().collect();
+ (def_id, upvar_tys)
+ }
+ _ => {
+ bug!(
+ "build_upvar_field_di_nodes() called with non-closure-or-generator-type: {:?}",
+ closure_or_generator_ty
+ )
+ }
+ };
+
+ debug_assert!(
+ up_var_tys
+ .iter()
+ .all(|&t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
+ );
+
+ let capture_names = closure_saved_names_of_captured_variables(cx.tcx, def_id);
+ let layout = cx.layout_of(closure_or_generator_ty);
+
+ up_var_tys
+ .into_iter()
+ .zip(capture_names.iter())
+ .enumerate()
+ .map(|(index, (up_var_ty, capture_name))| {
+ build_field_di_node(
+ cx,
+ closure_or_generator_di_node,
+ capture_name,
+ cx.size_and_align_of(up_var_ty),
+ layout.fields.offset(index),
+ DIFlags::FlagZero,
+ type_di_node(cx, up_var_ty),
+ )
+ })
+ .collect()
+}
+
+/// Builds the DW_TAG_structure_type debuginfo node for a Rust tuple type.
+fn build_tuple_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let tuple_type = unique_type_id.expect_ty();
+ let &ty::Tuple(component_types) = tuple_type.kind() else {
+ bug!("build_tuple_type_di_node() called with non-tuple-type: {:?}", tuple_type)
+ };
+
+ let tuple_type_and_layout = cx.layout_of(tuple_type);
+ let type_name = compute_debuginfo_type_name(cx.tcx, tuple_type, false);
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &type_name,
+ size_and_align_of(tuple_type_and_layout),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ // Fields:
+ |cx, tuple_di_node| {
+ component_types
+ .into_iter()
+ .enumerate()
+ .map(|(index, component_type)| {
+ build_field_di_node(
+ cx,
+ tuple_di_node,
+ &tuple_field_name(index),
+ cx.size_and_align_of(component_type),
+ tuple_type_and_layout.fields.offset(index),
+ DIFlags::FlagZero,
+ type_di_node(cx, component_type),
+ )
+ })
+ .collect()
+ },
+ NO_GENERICS,
+ )
+}
+
+/// Builds the debuginfo node for a closure environment.
+fn build_closure_env_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let closure_env_type = unique_type_id.expect_ty();
+ let &ty::Closure(def_id, _substs) = closure_env_type.kind() else {
+ bug!("build_closure_env_di_node() called with non-closure-type: {:?}", closure_env_type)
+ };
+ let containing_scope = get_namespace_for_item(cx, def_id);
+ let type_name = compute_debuginfo_type_name(cx.tcx, closure_env_type, false);
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &type_name,
+ cx.size_and_align_of(closure_env_type),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ // Fields:
+ |cx, owner| build_upvar_field_di_nodes(cx, closure_env_type, owner),
+ NO_GENERICS,
+ )
+}
+
+/// Build the debuginfo node for a Rust `union` type.
+fn build_union_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let union_type = unique_type_id.expect_ty();
+ let (union_def_id, variant_def) = match union_type.kind() {
+ ty::Adt(def, _) => (def.did(), def.non_enum_variant()),
+ _ => bug!("build_union_type_di_node on a non-ADT"),
+ };
+ let containing_scope = get_namespace_for_item(cx, union_def_id);
+ let union_ty_and_layout = cx.layout_of(union_type);
+ let type_name = compute_debuginfo_type_name(cx.tcx, union_type, false);
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Union,
+ unique_type_id,
+ &type_name,
+ size_and_align_of(union_ty_and_layout),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ // Fields:
+ |cx, owner| {
+ variant_def
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(i, f)| {
+ let field_layout = union_ty_and_layout.field(cx, i);
+ build_field_di_node(
+ cx,
+ owner,
+ f.name.as_str(),
+ size_and_align_of(field_layout),
+ Size::ZERO,
+ DIFlags::FlagZero,
+ type_di_node(cx, field_layout.ty),
+ )
+ })
+ .collect()
+ },
+ // Generics:
+ |cx| build_generic_type_param_di_nodes(cx, union_type),
+ )
+}
+
+// FIXME(eddyb) maybe precompute this? Right now it's computed once
+// per generator monomorphization, but it doesn't depend on substs.
+fn generator_layout_and_saved_local_names<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+) -> (&'tcx GeneratorLayout<'tcx>, IndexVec<mir::GeneratorSavedLocal, Option<Symbol>>) {
+ let body = tcx.optimized_mir(def_id);
+ let generator_layout = body.generator_layout().unwrap();
+ let mut generator_saved_local_names = IndexVec::from_elem(None, &generator_layout.field_tys);
+
+ let state_arg = mir::Local::new(1);
+ for var in &body.var_debug_info {
+ let mir::VarDebugInfoContents::Place(place) = &var.value else { continue };
+ if place.local != state_arg {
+ continue;
+ }
+ match place.projection[..] {
+ [
+ // Deref of the `Pin<&mut Self>` state argument.
+ mir::ProjectionElem::Field(..),
+ mir::ProjectionElem::Deref,
+ // Field of a variant of the state.
+ mir::ProjectionElem::Downcast(_, variant),
+ mir::ProjectionElem::Field(field, _),
+ ] => {
+ let name = &mut generator_saved_local_names
+ [generator_layout.variant_fields[variant][field]];
+ if name.is_none() {
+ name.replace(var.name);
+ }
+ }
+ _ => {}
+ }
+ }
+ (generator_layout, generator_saved_local_names)
+}
+
+/// Computes the type parameters for a type, if any, for the given metadata.
+fn build_generic_type_param_di_nodes<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ ty: Ty<'tcx>,
+) -> SmallVec<&'ll DIType> {
+ if let ty::Adt(def, substs) = *ty.kind() {
+ if substs.types().next().is_some() {
+ let generics = cx.tcx.generics_of(def.did());
+ let names = get_parameter_names(cx, generics);
+ let template_params: SmallVec<_> = iter::zip(substs, names)
+ .filter_map(|(kind, name)| {
+ if let GenericArgKind::Type(ty) = kind.unpack() {
+ let actual_type =
+ cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
+ let actual_type_di_node = type_di_node(cx, actual_type);
+ let name = name.as_str();
+ Some(unsafe {
+ llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
+ DIB(cx),
+ None,
+ name.as_ptr().cast(),
+ name.len(),
+ actual_type_di_node,
+ )
+ })
+ } else {
+ None
+ }
+ })
+ .collect();
+
+ return template_params;
+ }
+ }
+
+ return smallvec![];
+
+ fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
+ let mut names = generics
+ .parent
+ .map_or_else(Vec::new, |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
+ names.extend(generics.params.iter().map(|param| param.name));
+ names
+ }
+}
+
+/// Creates debug information for the given global variable.
+///
+/// Adds the created debuginfo nodes directly to the crate's IR.
+pub fn build_global_var_di_node<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId, global: &'ll Value) {
+ if cx.dbg_cx.is_none() {
+ return;
+ }
+
+ // Only create type information if full debuginfo is enabled
+ if cx.sess().opts.debuginfo != DebugInfo::Full {
+ return;
+ }
+
+ let tcx = cx.tcx;
+
+ // We may want to remove the namespace scope if we're in an extern block (see
+ // https://github.com/rust-lang/rust/pull/46457#issuecomment-351750952).
+ let var_scope = get_namespace_for_item(cx, def_id);
+ let span = tcx.def_span(def_id);
+
+ let (file_metadata, line_number) = if !span.is_dummy() {
+ let loc = cx.lookup_debug_loc(span.lo());
+ (file_metadata(cx, &loc.file), loc.line)
+ } else {
+ (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER)
+ };
+
+ let is_local_to_unit = is_node_local_to_unit(cx, def_id);
+ let variable_type = Instance::mono(cx.tcx, def_id).ty(cx.tcx, ty::ParamEnv::reveal_all());
+ let type_di_node = type_di_node(cx, variable_type);
+ let var_name = tcx.item_name(def_id);
+ let var_name = var_name.as_str();
+ let linkage_name = mangled_name_of_instance(cx, Instance::mono(tcx, def_id)).name;
+ // When empty, the linkage_name field is omitted,
+ // which is what we want for `#[no_mangle]` statics.
+ let linkage_name = if var_name == linkage_name { "" } else { linkage_name };
+
+ let global_align = cx.align_of(variable_type);
+
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateStaticVariable(
+ DIB(cx),
+ Some(var_scope),
+ var_name.as_ptr().cast(),
+ var_name.len(),
+ linkage_name.as_ptr().cast(),
+ linkage_name.len(),
+ file_metadata,
+ line_number,
+ type_di_node,
+ is_local_to_unit,
+ global,
+ None,
+ global_align.bits() as u32,
+ );
+ }
+}
+
+/// Generates LLVM debuginfo for a vtable.
+///
+/// The vtable type looks like a struct with a field for each function pointer and super-trait
+/// pointer it contains (plus the `size` and `align` fields).
+///
+/// Except for `size`, `align`, and `drop_in_place`, the field names don't try to mirror
+/// the name of the method they implement. This can be implemented in the future once there
+/// is a proper disambiguation scheme for dealing with methods from different traits that have
+/// the same name.
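+///
+/// For example, the type emitted below for the vtable of some `dyn Trait`
+/// with two methods looks roughly like this (a sketch; the exact field set
+/// depends on the trait):
+///
+/// ```c
+/// struct <vtable-type-name> {
+///     void* drop_in_place;
+///     size_t size;
+///     size_t align;
+///     void* __method3;
+///     void* __method4;
+/// };
+/// ```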
+fn build_vtable_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ ty: Ty<'tcx>,
+ poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> &'ll DIType {
+ let tcx = cx.tcx;
+
+ let vtable_entries = if let Some(poly_trait_ref) = poly_trait_ref {
+ let trait_ref = poly_trait_ref.with_self_ty(tcx, ty);
+ let trait_ref = tcx.erase_regions(trait_ref);
+
+ tcx.vtable_entries(trait_ref)
+ } else {
+ TyCtxt::COMMON_VTABLE_ENTRIES
+ };
+
+ // All function pointers are described as opaque pointers. This could be improved in the future
+ // by describing them as actual function pointers.
+ let void_pointer_ty = tcx.mk_imm_ptr(tcx.types.unit);
+ let void_pointer_type_di_node = type_di_node(cx, void_pointer_ty);
+ let usize_di_node = type_di_node(cx, tcx.types.usize);
+ let (pointer_size, pointer_align) = cx.size_and_align_of(void_pointer_ty);
+ // If `usize` is not pointer-sized and -aligned then the size and alignment computations
+ // for the vtable as a whole would be wrong. Let's make sure this holds even on weird
+ // platforms.
+ assert_eq!(cx.size_and_align_of(tcx.types.usize), (pointer_size, pointer_align));
+
+ let vtable_type_name =
+ compute_debuginfo_vtable_name(cx.tcx, ty, poly_trait_ref, VTableNameKind::Type);
+ let unique_type_id = UniqueTypeId::for_vtable_ty(tcx, ty, poly_trait_ref);
+ let size = pointer_size * vtable_entries.len() as u64;
+
+ // This gets mapped to a DW_AT_containing_type attribute which allows GDB to correlate
+ // the vtable to the type it is for.
+ let vtable_holder = type_di_node(cx, ty);
+
+ build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::VTableTy { vtable_holder },
+ unique_type_id,
+ &vtable_type_name,
+ (size, pointer_align),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagArtificial,
+ ),
+ |cx, vtable_type_di_node| {
+ vtable_entries
+ .iter()
+ .enumerate()
+ .filter_map(|(index, vtable_entry)| {
+ let (field_name, field_type_di_node) = match vtable_entry {
+ ty::VtblEntry::MetadataDropInPlace => {
+ ("drop_in_place".to_string(), void_pointer_type_di_node)
+ }
+ ty::VtblEntry::Method(_) => {
+ // Note: This code does not try to give a proper name to each method
+ // because there might be multiple methods with the same name
+ // (coming from different traits).
+ (format!("__method{}", index), void_pointer_type_di_node)
+ }
+ ty::VtblEntry::TraitVPtr(_) => {
+ (format!("__super_trait_ptr{}", index), void_pointer_type_di_node)
+ }
+ ty::VtblEntry::MetadataAlign => ("align".to_string(), usize_di_node),
+ ty::VtblEntry::MetadataSize => ("size".to_string(), usize_di_node),
+ ty::VtblEntry::Vacant => return None,
+ };
+
+ let field_offset = pointer_size * index as u64;
+
+ Some(build_field_di_node(
+ cx,
+ vtable_type_di_node,
+ &field_name,
+ (pointer_size, pointer_align),
+ field_offset,
+ DIFlags::FlagZero,
+ field_type_di_node,
+ ))
+ })
+ .collect()
+ },
+ NO_GENERICS,
+ )
+ .di_node
+}
+
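+/// Attaches `!type` and `!vcall_visibility` metadata to the given vtable
+/// global, which enables LLVM's virtual-function-elimination optimization.
+///
+/// The resulting annotations look roughly like the following LLVM IR
+/// (a sketch; the typeid string is a mangled trait-ref in practice):
+///
+/// ```txt
+/// @vtable = ... !type !0, !vcall_visibility !1
+/// !0 = !{i64 0, !"<trait-ref typeid>"}
+/// !1 = !{i64 1}  ; 0 = public, 1 = linkage unit, 2 = translation unit
+/// ```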
+fn vcall_visibility_metadata<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ ty: Ty<'tcx>,
+ trait_ref: Option<PolyExistentialTraitRef<'tcx>>,
+ vtable: &'ll Value,
+) {
+ enum VCallVisibility {
+ Public = 0,
+ LinkageUnit = 1,
+ TranslationUnit = 2,
+ }
+
+ let Some(trait_ref) = trait_ref else { return };
+
+ let trait_ref_self = trait_ref.with_self_ty(cx.tcx, ty);
+ let trait_ref_self = cx.tcx.erase_regions(trait_ref_self);
+ let trait_def_id = trait_ref_self.def_id();
+ let trait_vis = cx.tcx.visibility(trait_def_id);
+
+ let cgus = cx.sess().codegen_units();
+ let single_cgu = cgus == 1;
+
+ let lto = cx.sess().lto();
+
+ // Since LLVM requires full LTO for the virtual function elimination optimization to apply,
+ // only the `Lto::Fat` cases are relevant currently.
+ let vcall_visibility = match (lto, trait_vis, single_cgu) {
+ // If there is no LTO and the visibility is public, we have to assume that the
+ // vtable can be seen from anywhere. With multiple CGUs, the vtable is quasi-public.
+ (Lto::No | Lto::ThinLocal, Visibility::Public, _)
+ | (Lto::No, Visibility::Restricted(_) | Visibility::Invisible, false) => {
+ VCallVisibility::Public
+ }
+ // With LTO and a quasi-public visibility, the usages of the functions of the vtable are
+ // all known by the `LinkageUnit`.
+ // FIXME: LLVM only supports this optimization for `Lto::Fat` currently. Once it also
+ // supports `Lto::Thin` the `VCallVisibility` may have to be adjusted for those.
+ (Lto::Fat | Lto::Thin, Visibility::Public, _)
+ | (
+ Lto::ThinLocal | Lto::Thin | Lto::Fat,
+ Visibility::Restricted(_) | Visibility::Invisible,
+ false,
+ ) => VCallVisibility::LinkageUnit,
+ // If there is only one CGU, private vtables can only be seen by that CGU/translation unit
+ // and therefore we know of all usages of functions in the vtable.
+ (_, Visibility::Restricted(_) | Visibility::Invisible, true) => {
+ VCallVisibility::TranslationUnit
+ }
+ };
+
+ let trait_ref_typeid = typeid_for_trait_ref(cx.tcx, trait_ref);
+
+ unsafe {
+ let typeid = llvm::LLVMMDStringInContext(
+ cx.llcx,
+ trait_ref_typeid.as_ptr() as *const c_char,
+ trait_ref_typeid.as_bytes().len() as c_uint,
+ );
+ let v = [cx.const_usize(0), typeid];
+ llvm::LLVMRustGlobalAddMetadata(
+ vtable,
+ llvm::MD_type as c_uint,
+ llvm::LLVMValueAsMetadata(llvm::LLVMMDNodeInContext(
+ cx.llcx,
+ v.as_ptr(),
+ v.len() as c_uint,
+ )),
+ );
+ let vcall_visibility = llvm::LLVMValueAsMetadata(cx.const_u64(vcall_visibility as u64));
+ let vcall_visibility_metadata = llvm::LLVMMDNodeInContext2(cx.llcx, &vcall_visibility, 1);
+ llvm::LLVMGlobalSetMetadata(
+ vtable,
+ llvm::MetadataType::MD_vcall_visibility as c_uint,
+ vcall_visibility_metadata,
+ );
+ }
+}
+
+/// Creates debug information for the given vtable, which is for the
+/// given type.
+///
+/// Adds the created metadata nodes directly to the crate's IR.
+pub fn create_vtable_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ ty: Ty<'tcx>,
+ poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+ vtable: &'ll Value,
+) {
+ // FIXME(flip1995): The virtual function elimination optimization only works with full LTO in
+ // LLVM at the moment.
+ if cx.sess().opts.unstable_opts.virtual_function_elimination && cx.sess().lto() == Lto::Fat {
+ vcall_visibility_metadata(cx, ty, poly_trait_ref, vtable);
+ }
+
+ if cx.dbg_cx.is_none() {
+ return;
+ }
+
+ // Only create type information if full debuginfo is enabled
+ if cx.sess().opts.debuginfo != DebugInfo::Full {
+ return;
+ }
+
+ let vtable_name =
+ compute_debuginfo_vtable_name(cx.tcx, ty, poly_trait_ref, VTableNameKind::GlobalVariable);
+ let vtable_type_di_node = build_vtable_type_di_node(cx, ty, poly_trait_ref);
+ let linkage_name = "";
+
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateStaticVariable(
+ DIB(cx),
+ NO_SCOPE_METADATA,
+ vtable_name.as_ptr().cast(),
+ vtable_name.len(),
+ linkage_name.as_ptr().cast(),
+ linkage_name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ vtable_type_di_node,
+ true,
+ vtable,
+ None,
+ 0,
+ );
+ }
+}
+
+/// Creates an "extension" of an existing `DIScope` into another file.
+pub fn extend_scope_to_file<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ scope_metadata: &'ll DIScope,
+ file: &SourceFile,
+) -> &'ll DILexicalBlock {
+ let file_metadata = file_metadata(cx, file);
+ unsafe { llvm::LLVMRustDIBuilderCreateLexicalBlockFile(DIB(cx), scope_metadata, file_metadata) }
+}
+
+pub fn tuple_field_name(field_index: usize) -> Cow<'static, str> {
+ const TUPLE_FIELD_NAMES: [&'static str; 16] = [
+ "__0", "__1", "__2", "__3", "__4", "__5", "__6", "__7", "__8", "__9", "__10", "__11",
+ "__12", "__13", "__14", "__15",
+ ];
+ TUPLE_FIELD_NAMES
+ .get(field_index)
+ .map(|s| Cow::from(*s))
+ .unwrap_or_else(|| Cow::from(format!("__{}", field_index)))
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
new file mode 100644
index 000000000..d6e2c8ccd
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -0,0 +1,514 @@
+use std::borrow::Cow;
+
+use libc::c_uint;
+use rustc_codegen_ssa::debuginfo::{
+ type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo,
+};
+use rustc_middle::{
+ bug,
+ ty::{
+ self,
+ layout::{LayoutOf, TyAndLayout},
+ util::Discr,
+ AdtDef, GeneratorSubsts,
+ },
+};
+use rustc_target::abi::{Size, TagEncoding, VariantIdx, Variants};
+use smallvec::smallvec;
+
+use crate::{
+ common::CodegenCx,
+ debuginfo::{
+ metadata::{
+ build_field_di_node, closure_saved_names_of_captured_variables,
+ enums::tag_base_type,
+ file_metadata, generator_layout_and_saved_local_names, size_and_align_of,
+ type_map::{self, UniqueTypeId},
+ unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS, NO_SCOPE_METADATA,
+ UNKNOWN_LINE_NUMBER,
+ },
+ utils::DIB,
+ },
+ llvm::{
+ self,
+ debuginfo::{DIFile, DIFlags, DIType},
+ },
+};
+
+/// In CPP-like mode, we generate a union of structs for each variant and an
+/// explicit discriminant field roughly equivalent to the following C/C++ code:
+///
+/// ```c
+/// union enum$<{fully-qualified-name}> {
+/// struct {variant 0 name} {
+/// <variant 0 fields>
+/// } variant0;
+/// <other variant structs>
+/// {name} discriminant;
+/// }
+/// ```
+///
+/// As you can see, the type name is wrapped in `enum$`. This way we can have a
+/// single NatVis rule for handling all enums.
+///
+/// At the LLVM IR level this looks like
+///
+/// ```txt
+/// DW_TAG_union_type (top-level type for enum)
+/// DW_TAG_member (member for variant 1)
+/// DW_TAG_member (member for variant 2)
+/// DW_TAG_member (member for variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// DW_TAG_enumeration_type (type of tag)
+/// ```
+///
+/// The above encoding applies for enums with a direct tag. For niche-tag we have to do things
+/// differently in order to allow a NatVis visualizer to extract all the information needed:
+/// We generate a union of two fields, one for the dataful variant
+/// and one that just points to the discriminant (which is some field within the dataful variant).
+/// We also create a DW_TAG_enumeration_type DIE that contains tag values for the non-dataful
+/// variants and make the discriminant field that type. We then use NatVis to render the enum type
+/// correctly in Windbg/VS. This will generate debuginfo roughly equivalent to the following C:
+///
+/// ```c
+/// union enum$<{name}, {min niche}, {max niche}, {dataful variant name}> {
+/// struct <dataful variant name> {
+/// <fields in dataful variant>
+/// } dataful_variant;
+/// enum Discriminant$ {
+/// <non-dataful variants>
+/// } discriminant;
+/// }
+/// ```
+///
+/// The NatVis in `intrinsic.natvis` matches on the type name `enum$<*, *, *, *>`
+/// and evaluates `this.discriminant`. If the value is between the min niche and max
+/// niche, then the enum is in the dataful variant and `this.dataful_variant` is
+/// rendered. Otherwise, the enum is in one of the non-dataful variants. In that
+/// case, we just need to render the name of the `this.discriminant` enum.
+pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let enum_type = unique_type_id.expect_ty();
+ let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
+ bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
+ };
+
+ let enum_type_and_layout = cx.layout_of(enum_type);
+ let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false);
+
+ debug_assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ type_map::Stub::Union,
+ unique_type_id,
+ &enum_type_name,
+ cx.size_and_align_of(enum_type),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ |cx, enum_type_di_node| {
+ match enum_type_and_layout.variants {
+ Variants::Single { index: variant_index } => {
+ if enum_adt_def.variants().is_empty() {
+ // Uninhabited enums have Variants::Single. We don't generate
+ // any members for them.
+ return smallvec![];
+ }
+
+ build_single_variant_union_fields(
+ cx,
+ enum_adt_def,
+ enum_type_and_layout,
+ enum_type_di_node,
+ variant_index,
+ )
+ }
+ Variants::Multiple {
+ tag_encoding: TagEncoding::Direct,
+ ref variants,
+ tag_field,
+ ..
+ } => build_union_fields_for_direct_tag_enum(
+ cx,
+ enum_adt_def,
+ enum_type_and_layout,
+ enum_type_di_node,
+ &mut variants.indices(),
+ tag_field,
+ ),
+ Variants::Multiple {
+ tag_encoding: TagEncoding::Niche { dataful_variant, .. },
+ ref variants,
+ tag_field,
+ ..
+ } => build_union_fields_for_niche_tag_enum(
+ cx,
+ enum_adt_def,
+ enum_type_and_layout,
+ enum_type_di_node,
+ dataful_variant,
+ &mut variants.indices(),
+ tag_field,
+ ),
+ }
+ },
+ NO_GENERICS,
+ )
+}
+
+/// A generator debuginfo node looks the same as that of an enum type.
+///
+/// See [build_enum_type_di_node] for more information.
+pub(super) fn build_generator_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let generator_type = unique_type_id.expect_ty();
+ let generator_type_and_layout = cx.layout_of(generator_type);
+ let generator_type_name = compute_debuginfo_type_name(cx.tcx, generator_type, false);
+
+ debug_assert!(!wants_c_like_enum_debuginfo(generator_type_and_layout));
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ type_map::Stub::Union,
+ unique_type_id,
+ &generator_type_name,
+ size_and_align_of(generator_type_and_layout),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ |cx, generator_type_di_node| match generator_type_and_layout.variants {
+ Variants::Multiple { tag_encoding: TagEncoding::Direct, .. } => {
+ build_union_fields_for_direct_tag_generator(
+ cx,
+ generator_type_and_layout,
+ generator_type_di_node,
+ )
+ }
+ Variants::Single { .. }
+ | Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, .. } => {
+ bug!(
+ "Encountered generator with non-direct-tag layout: {:?}",
+ generator_type_and_layout
+ )
+ }
+ },
+ NO_GENERICS,
+ )
+}
+
+fn build_single_variant_union_fields<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_adt_def: AdtDef<'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_index: VariantIdx,
+) -> SmallVec<&'ll DIType> {
+ let variant_layout = enum_type_and_layout.for_variant(cx, variant_index);
+ let variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node(
+ cx,
+ enum_type_and_layout.ty,
+ enum_type_di_node,
+ variant_index,
+ enum_adt_def.variant(variant_index),
+ variant_layout,
+ );
+
+ // NOTE: The field name of the union is the same as the variant name, not "variant0".
+ let variant_name = enum_adt_def.variant(variant_index).name.as_str();
+
+ smallvec![build_field_di_node(
+ cx,
+ enum_type_di_node,
+ variant_name,
+ // NOTE: We use the size and align of the entire type, not from variant_layout
+ // since the latter is sometimes smaller (if it has fewer fields).
+ size_and_align_of(enum_type_and_layout),
+ Size::ZERO,
+ DIFlags::FlagZero,
+ variant_struct_type_di_node,
+ )]
+}
+
+fn build_union_fields_for_direct_tag_enum<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_adt_def: AdtDef<'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_indices: &mut dyn Iterator<Item = VariantIdx>,
+ tag_field: usize,
+) -> SmallVec<&'ll DIType> {
+ let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_indices
+ .map(|variant_index| {
+ let variant_layout = enum_type_and_layout.for_variant(cx, variant_index);
+
+ VariantFieldInfo {
+ variant_index,
+ variant_struct_type_di_node: super::build_enum_variant_struct_type_di_node(
+ cx,
+ enum_type_and_layout.ty,
+ enum_type_di_node,
+ variant_index,
+ enum_adt_def.variant(variant_index),
+ variant_layout,
+ ),
+ source_info: None,
+ }
+ })
+ .collect();
+
+ let discr_type_name = cx.tcx.item_name(enum_adt_def.did());
+ let tag_base_type = super::tag_base_type(cx, enum_type_and_layout);
+ let discr_type_di_node = super::build_enumeration_type_di_node(
+ cx,
+ discr_type_name.as_str(),
+ tag_base_type,
+ &mut enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| {
+ (discr, Cow::from(enum_adt_def.variant(variant_index).name.as_str()))
+ }),
+ enum_type_di_node,
+ );
+
+ build_union_fields_for_direct_tag_enum_or_generator(
+ cx,
+ enum_type_and_layout,
+ enum_type_di_node,
+ &variant_field_infos,
+ discr_type_di_node,
+ tag_field,
+ )
+}
+
+fn build_union_fields_for_niche_tag_enum<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_adt_def: AdtDef<'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ dataful_variant_index: VariantIdx,
+ variant_indices: &mut dyn Iterator<Item = VariantIdx>,
+ tag_field: usize,
+) -> SmallVec<&'ll DIType> {
+ let dataful_variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node(
+ cx,
+ enum_type_and_layout.ty,
+ enum_type_di_node,
+ dataful_variant_index,
+ &enum_adt_def.variant(dataful_variant_index),
+ enum_type_and_layout.for_variant(cx, dataful_variant_index),
+ );
+
+ let tag_base_type = super::tag_base_type(cx, enum_type_and_layout);
+ // Create a DW_TAG_enumerator for each variant except the dataful one.
+ let discr_type_di_node = super::build_enumeration_type_di_node(
+ cx,
+ "Discriminant$",
+ tag_base_type,
+ &mut variant_indices.filter_map(|variant_index| {
+ if let Some(discr_val) =
+ super::compute_discriminant_value(cx, enum_type_and_layout, variant_index)
+ {
+ let discr = Discr { val: discr_val as u128, ty: tag_base_type };
+ let variant_name = Cow::from(enum_adt_def.variant(variant_index).name.as_str());
+ Some((discr, variant_name))
+ } else {
+ debug_assert_eq!(variant_index, dataful_variant_index);
+ None
+ }
+ }),
+ enum_type_di_node,
+ );
+
+ smallvec![
+ build_field_di_node(
+ cx,
+ enum_type_di_node,
+ "dataful_variant",
+ size_and_align_of(enum_type_and_layout),
+ Size::ZERO,
+ DIFlags::FlagZero,
+ dataful_variant_struct_type_di_node,
+ ),
+ build_field_di_node(
+ cx,
+ enum_type_di_node,
+ "discriminant",
+ cx.size_and_align_of(tag_base_type),
+ enum_type_and_layout.fields.offset(tag_field),
+ DIFlags::FlagZero,
+ discr_type_di_node,
+ ),
+ ]
+}
+
+fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ generator_type_and_layout: TyAndLayout<'tcx>,
+ generator_type_di_node: &'ll DIType,
+) -> SmallVec<&'ll DIType> {
+ let Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } = generator_type_and_layout.variants else {
+ bug!("This function only supports layouts with directly encoded tags.")
+ };
+
+ let (generator_def_id, generator_substs) = match generator_type_and_layout.ty.kind() {
+ &ty::Generator(def_id, substs, _) => (def_id, substs.as_generator()),
+ _ => unreachable!(),
+ };
+
+ let (generator_layout, state_specific_upvar_names) =
+ generator_layout_and_saved_local_names(cx.tcx, generator_def_id);
+
+ let common_upvar_names = closure_saved_names_of_captured_variables(cx.tcx, generator_def_id);
+ let variant_range = generator_substs.variant_range(generator_def_id, cx.tcx);
+
+ // Build the type node for each field.
+ let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_range
+ .map(|variant_index| {
+ let variant_struct_type_di_node = super::build_generator_variant_struct_type_di_node(
+ cx,
+ variant_index,
+ generator_type_and_layout,
+ generator_type_di_node,
+ generator_layout,
+ &state_specific_upvar_names,
+ &common_upvar_names,
+ );
+
+ let span = generator_layout.variant_source_info[variant_index].span;
+ let source_info = if !span.is_dummy() {
+ let loc = cx.lookup_debug_loc(span.lo());
+ Some((file_metadata(cx, &loc.file), loc.line as c_uint))
+ } else {
+ None
+ };
+
+ VariantFieldInfo { variant_index, variant_struct_type_di_node, source_info }
+ })
+ .collect();
+
+ let tag_base_type = tag_base_type(cx, generator_type_and_layout);
+ let discr_type_name = "Discriminant$";
+ let discr_type_di_node = super::build_enumeration_type_di_node(
+ cx,
+ discr_type_name,
+ tag_base_type,
+ &mut generator_substs
+ .discriminants(generator_def_id, cx.tcx)
+ .map(|(variant_index, discr)| (discr, GeneratorSubsts::variant_name(variant_index))),
+ generator_type_di_node,
+ );
+
+ build_union_fields_for_direct_tag_enum_or_generator(
+ cx,
+ generator_type_and_layout,
+ generator_type_di_node,
+ &variant_field_infos[..],
+ discr_type_di_node,
+ tag_field,
+ )
+}
+
+/// This is a helper function shared between enums and generators that makes sure fields have the
+/// expected names.
+fn build_union_fields_for_direct_tag_enum_or_generator<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_field_infos: &[VariantFieldInfo<'ll>],
+ discr_type_di_node: &'ll DIType,
+ tag_field: usize,
+) -> SmallVec<&'ll DIType> {
+ let mut unions_fields = SmallVec::with_capacity(variant_field_infos.len() + 1);
+
+ // We create a field in the union for each variant ...
+ unions_fields.extend(variant_field_infos.into_iter().map(|variant_member_info| {
+ let (file_di_node, line_number) = variant_member_info
+ .source_info
+ .unwrap_or_else(|| (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER));
+
+ let field_name = variant_union_field_name(variant_member_info.variant_index);
+ let (size, align) = size_and_align_of(enum_type_and_layout);
+
+ // We use LLVMRustDIBuilderCreateMemberType() directly here because
+ // the build_field_di_node() helper does not support specifying a source location,
+ // which is something we do not need anywhere else.
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateMemberType(
+ DIB(cx),
+ enum_type_di_node,
+ field_name.as_ptr().cast(),
+ field_name.len(),
+ file_di_node,
+ line_number,
+ // NOTE: We use the size and align of the entire type, not from variant_layout
+ // since the latter is sometimes smaller (if it has fewer fields).
+ size.bits(),
+ align.bits() as u32,
+ // Union fields are always at offset zero
+ Size::ZERO.bits(),
+ DIFlags::FlagZero,
+ variant_member_info.variant_struct_type_di_node,
+ )
+ }
+ }));
+
+ debug_assert_eq!(
+ cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
+ cx.size_and_align_of(super::tag_base_type(cx, enum_type_and_layout))
+ );
+
+ // ... and a field for the discriminant.
+ unions_fields.push(build_field_di_node(
+ cx,
+ enum_type_di_node,
+ "discriminant",
+ cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
+ enum_type_and_layout.fields.offset(tag_field),
+ DIFlags::FlagZero,
+ discr_type_di_node,
+ ));
+
+ unions_fields
+}
+
+/// Information about a single field of the top-level DW_TAG_union_type.
+struct VariantFieldInfo<'ll> {
+ variant_index: VariantIdx,
+ variant_struct_type_di_node: &'ll DIType,
+ source_info: Option<(&'ll DIFile, c_uint)>,
+}
+
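+/// Returns the union field name for the variant with the given index, e.g.
+/// `"variant3"`. The first 16 names are handed out as borrowed `&'static str`s;
+/// higher indices fall back to an owned, `format!`-built string.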
+fn variant_union_field_name(variant_index: VariantIdx) -> Cow<'static, str> {
+ const PRE_ALLOCATED: [&str; 16] = [
+ "variant0",
+ "variant1",
+ "variant2",
+ "variant3",
+ "variant4",
+ "variant5",
+ "variant6",
+ "variant7",
+ "variant8",
+ "variant9",
+ "variant10",
+ "variant11",
+ "variant12",
+ "variant13",
+ "variant14",
+ "variant15",
+ ];
+
+ PRE_ALLOCATED
+ .get(variant_index.as_usize())
+ .map(|&s| Cow::from(s))
+ .unwrap_or_else(|| format!("variant{}", variant_index.as_usize()).into())
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
new file mode 100644
index 000000000..73e01d045
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -0,0 +1,437 @@
+use rustc_codegen_ssa::debuginfo::{
+ type_names::{compute_debuginfo_type_name, cpp_like_debuginfo},
+ wants_c_like_enum_debuginfo,
+};
+use rustc_hir::def::CtorKind;
+use rustc_index::vec::IndexVec;
+use rustc_middle::{
+ bug,
+ mir::{Field, GeneratorLayout, GeneratorSavedLocal},
+ ty::{
+ self,
+ layout::{IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout},
+ util::Discr,
+ AdtDef, GeneratorSubsts, Ty, VariantDef,
+ },
+};
+use rustc_span::Symbol;
+use rustc_target::abi::{HasDataLayout, Integer, Primitive, TagEncoding, VariantIdx, Variants};
+use std::borrow::Cow;
+
+use crate::{
+ common::CodegenCx,
+ debuginfo::{
+ metadata::{
+ build_field_di_node, build_generic_type_param_di_nodes, type_di_node,
+ type_map::{self, Stub},
+ unknown_file_metadata, UNKNOWN_LINE_NUMBER,
+ },
+ utils::{create_DIArray, get_namespace_for_item, DIB},
+ },
+ llvm::{
+ self,
+ debuginfo::{DIFlags, DIType},
+ },
+};
+
+use super::{
+ size_and_align_of,
+ type_map::{DINodeCreationResult, UniqueTypeId},
+ SmallVec,
+};
+
+mod cpp_like;
+mod native;
+
+pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let enum_type = unique_type_id.expect_ty();
+ let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
+ bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
+ };
+
+ let enum_type_and_layout = cx.layout_of(enum_type);
+
+ if wants_c_like_enum_debuginfo(enum_type_and_layout) {
+ return build_c_style_enum_di_node(cx, enum_adt_def, enum_type_and_layout);
+ }
+
+ if cpp_like_debuginfo(cx.tcx) {
+ cpp_like::build_enum_type_di_node(cx, unique_type_id)
+ } else {
+ native::build_enum_type_di_node(cx, unique_type_id)
+ }
+}
+
+pub(super) fn build_generator_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ if cpp_like_debuginfo(cx.tcx) {
+ cpp_like::build_generator_di_node(cx, unique_type_id)
+ } else {
+ native::build_generator_di_node(cx, unique_type_id)
+ }
+}
+
+/// Build the debuginfo node for a C-style enum, i.e. an enum the variants of which have no fields.
+///
+/// The resulting debuginfo will be a DW_TAG_enumeration_type.
+fn build_c_style_enum_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_adt_def: AdtDef<'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let containing_scope = get_namespace_for_item(cx, enum_adt_def.did());
+ DINodeCreationResult {
+ di_node: build_enumeration_type_di_node(
+ cx,
+ &compute_debuginfo_type_name(cx.tcx, enum_type_and_layout.ty, false),
+ tag_base_type(cx, enum_type_and_layout),
+ &mut enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| {
+ (discr, Cow::from(enum_adt_def.variant(variant_index).name.as_str()))
+ }),
+ containing_scope,
+ ),
+ already_stored_in_typemap: false,
+ }
+}
+
+/// Extract the type with which we want to describe the tag of the given enum or generator.
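+///
+/// A sketch of the mapping implemented below (illustrative, not exhaustive):
+///
+/// ```txt
+/// TagEncoding::Direct, tag stored as i16      ->  i16   (signedness preserved)
+/// TagEncoding::Niche, niche in a u8           ->  u8    (unsigned, same size)
+/// TagEncoding::Niche, niche in a pointer      ->  usize (normalized for CodeView)
+/// ```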
+fn tag_base_type<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+) -> Ty<'tcx> {
+ debug_assert!(match enum_type_and_layout.ty.kind() {
+ ty::Generator(..) => true,
+ ty::Adt(adt_def, _) => adt_def.is_enum(),
+ _ => false,
+ });
+
+ match enum_type_and_layout.layout.variants() {
+ // A single-variant enum has no discriminant.
+ Variants::Single { .. } => {
+ bug!("tag_base_type() called for enum without tag: {:?}", enum_type_and_layout)
+ }
+
+ Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. } => {
+ // Niche tags are always normalized to unsigned integers of the correct size.
+ match tag.primitive() {
+ Primitive::Int(t, _) => t,
+ Primitive::F32 => Integer::I32,
+ Primitive::F64 => Integer::I64,
+ Primitive::Pointer => {
+ // If the niche is the NULL value of a reference, then `discr_enum_ty` will be
+ // a RawPtr. CodeView doesn't know what to do with enums whose base type is a
+ // pointer, so we fix this up to just be `usize`.
+ // DWARF might be able to deal with this but with an integer type we are on
+ // the safe side there too.
+ cx.data_layout().ptr_sized_integer()
+ }
+ }
+ .to_ty(cx.tcx, false)
+ }
+
+ Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
+ // Direct tags preserve the sign.
+ tag.primitive().to_ty(cx.tcx)
+ }
+ }
+}
+
+/// Build a DW_TAG_enumeration_type debuginfo node, with the given base type and variants.
+/// This is a helper function and does not register anything in the type map by itself.
+///
+/// `variants` is an iterator of (discr-value, variant-name).
+///
+// NOTE: Handling of discriminant values is somewhat inconsistent. They can appear as u128,
+// u64, and i64. Here everything gets mapped to i64 because that's what LLVM's API expects.
+fn build_enumeration_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ type_name: &str,
+ base_type: Ty<'tcx>,
+ variants: &mut dyn Iterator<Item = (Discr<'tcx>, Cow<'tcx, str>)>,
+ containing_scope: &'ll DIType,
+) -> &'ll DIType {
+ let is_unsigned = match base_type.kind() {
+ ty::Int(_) => false,
+ ty::Uint(_) => true,
+ _ => bug!("build_enumeration_type_di_node() called with non-integer tag type."),
+ };
+
+ let enumerator_di_nodes: SmallVec<Option<&'ll DIType>> = variants
+ .map(|(discr, variant_name)| {
+ unsafe {
+ Some(llvm::LLVMRustDIBuilderCreateEnumerator(
+ DIB(cx),
+ variant_name.as_ptr().cast(),
+ variant_name.len(),
+ // FIXME: what if enumeration has i128 discriminant?
+ discr.val as i64,
+ is_unsigned,
+ ))
+ }
+ })
+ .collect();
+
+ let (size, align) = cx.size_and_align_of(base_type);
+
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateEnumerationType(
+ DIB(cx),
+ containing_scope,
+ type_name.as_ptr().cast(),
+ type_name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.bits() as u32,
+ create_DIArray(DIB(cx), &enumerator_di_nodes[..]),
+ type_di_node(cx, base_type),
+ true,
+ )
+ }
+}
+
+/// Build the debuginfo node for the struct type describing a single variant of an enum.
+///
+/// ```txt
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// ---> DW_TAG_structure_type (type of variant 1)
+/// ---> DW_TAG_structure_type (type of variant 2)
+/// ---> DW_TAG_structure_type (type of variant 3)
+/// ```
+///
+/// In CPP-like mode, we have the exact same descriptions for each variant too:
+///
+/// ```txt
+/// DW_TAG_union_type (top-level type for enum)
+/// DW_TAG_member (member for variant 1)
+/// DW_TAG_member (member for variant 2)
+/// DW_TAG_member (member for variant 3)
+/// ---> DW_TAG_structure_type (type of variant 1)
+/// ---> DW_TAG_structure_type (type of variant 2)
+/// ---> DW_TAG_structure_type (type of variant 3)
+/// DW_TAG_enumeration_type (type of tag)
+/// ```
+///
+/// The node looks like:
+///
+/// ```txt
+/// DW_TAG_structure_type
+/// DW_AT_name <name-of-variant>
+/// DW_AT_byte_size 0x00000010
+/// DW_AT_alignment 0x00000008
+/// DW_TAG_member
+/// DW_AT_name <name-of-field-0>
+/// DW_AT_type <0x0000018e>
+/// DW_AT_alignment 0x00000004
+/// DW_AT_data_member_location 4
+/// DW_TAG_member
+/// DW_AT_name <name-of-field-1>
+/// DW_AT_type <0x00000195>
+/// DW_AT_alignment 0x00000008
+/// DW_AT_data_member_location 8
+/// ...
+/// ```
+///
+/// The type of a variant is always a struct type with the name of the variant
+/// and a DW_TAG_member for each field (but not the discriminant).
+fn build_enum_variant_struct_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type: Ty<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_index: VariantIdx,
+ variant_def: &VariantDef,
+ variant_layout: TyAndLayout<'tcx>,
+) -> &'ll DIType {
+ debug_assert_eq!(variant_layout.ty, enum_type);
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ UniqueTypeId::for_enum_variant_struct_type(cx.tcx, enum_type, variant_index),
+ variant_def.name.as_str(),
+ // NOTE: We use size and align of enum_type, not from variant_layout:
+ cx.size_and_align_of(enum_type),
+ Some(enum_type_di_node),
+ DIFlags::FlagZero,
+ ),
+ |cx, struct_type_di_node| {
+ (0..variant_layout.fields.count())
+ .map(|field_index| {
+ let field_name = if variant_def.ctor_kind != CtorKind::Fn {
+ // Fields have names
+ Cow::from(variant_def.fields[field_index].name.as_str())
+ } else {
+ // Tuple-like
+ super::tuple_field_name(field_index)
+ };
+
+ let field_layout = variant_layout.field(cx, field_index);
+
+ build_field_di_node(
+ cx,
+ struct_type_di_node,
+ &field_name,
+ (field_layout.size, field_layout.align.abi),
+ variant_layout.fields.offset(field_index),
+ DIFlags::FlagZero,
+ type_di_node(cx, field_layout.ty),
+ )
+ })
+ .collect()
+ },
+ |cx| build_generic_type_param_di_nodes(cx, enum_type),
+ )
+ .di_node
+}
+
+/// Build the struct type for describing a single generator state.
+/// See [build_generator_di_node] for more information.
+///
+/// ```txt
+///
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// ---> DW_TAG_structure_type (type of variant 1)
+/// ---> DW_TAG_structure_type (type of variant 2)
+/// ---> DW_TAG_structure_type (type of variant 3)
+///
+/// ```
+pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ variant_index: VariantIdx,
+ generator_type_and_layout: TyAndLayout<'tcx>,
+ generator_type_di_node: &'ll DIType,
+ generator_layout: &GeneratorLayout<'tcx>,
+ state_specific_upvar_names: &IndexVec<GeneratorSavedLocal, Option<Symbol>>,
+ common_upvar_names: &[String],
+) -> &'ll DIType {
+ let variant_name = GeneratorSubsts::variant_name(variant_index);
+ let unique_type_id = UniqueTypeId::for_enum_variant_struct_type(
+ cx.tcx,
+ generator_type_and_layout.ty,
+ variant_index,
+ );
+
+ let variant_layout = generator_type_and_layout.for_variant(cx, variant_index);
+
+ let generator_substs = match generator_type_and_layout.ty.kind() {
+ ty::Generator(_, substs, _) => substs.as_generator(),
+ _ => unreachable!(),
+ };
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &variant_name,
+ size_and_align_of(generator_type_and_layout),
+ Some(generator_type_di_node),
+ DIFlags::FlagZero,
+ ),
+ |cx, variant_struct_type_di_node| {
+ // Fields that just belong to this variant/state
+ let state_specific_fields: SmallVec<_> = (0..variant_layout.fields.count())
+ .map(|field_index| {
+ let generator_saved_local = generator_layout.variant_fields[variant_index]
+ [Field::from_usize(field_index)];
+ let field_name_maybe = state_specific_upvar_names[generator_saved_local];
+ let field_name = field_name_maybe
+ .as_ref()
+ .map(|s| Cow::from(s.as_str()))
+ .unwrap_or_else(|| super::tuple_field_name(field_index));
+
+ let field_type = variant_layout.field(cx, field_index).ty;
+
+ build_field_di_node(
+ cx,
+ variant_struct_type_di_node,
+ &field_name,
+ cx.size_and_align_of(field_type),
+ variant_layout.fields.offset(field_index),
+ DIFlags::FlagZero,
+ type_di_node(cx, field_type),
+ )
+ })
+ .collect();
+
+ // Fields that are common to all states
+ let common_fields: SmallVec<_> = generator_substs
+ .prefix_tys()
+ .enumerate()
+ .map(|(index, upvar_ty)| {
+ build_field_di_node(
+ cx,
+ variant_struct_type_di_node,
+ &common_upvar_names[index],
+ cx.size_and_align_of(upvar_ty),
+ generator_type_and_layout.fields.offset(index),
+ DIFlags::FlagZero,
+ type_di_node(cx, upvar_ty),
+ )
+ })
+ .collect();
+
+ state_specific_fields.into_iter().chain(common_fields.into_iter()).collect()
+ },
+ |cx| build_generic_type_param_di_nodes(cx, generator_type_and_layout.ty),
+ )
+ .di_node
+}
+
+/// Returns the discriminant value corresponding to the variant index.
+///
+/// Will return `None` if there are fewer than two variants (because then the enum won't have
+/// a tag), or if this is the dataful variant of a niche-layout enum (because then there is no
+/// single discriminant value).
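+///
+/// A sketch of the niche computation below (with illustrative numbers): if
+/// `niche_variants` starts at variant index 1 and `niche_start` is 254, then
+/// variant index 2 maps to `(2 - 1) + 254 = 255`, truncated to the tag's size.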
+fn compute_discriminant_value<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ variant_index: VariantIdx,
+) -> Option<u64> {
+ match enum_type_and_layout.layout.variants() {
+ &Variants::Single { .. } => None,
+ &Variants::Multiple { tag_encoding: TagEncoding::Direct, .. } => Some(
+ enum_type_and_layout.ty.discriminant_for_variant(cx.tcx, variant_index).unwrap().val
+ as u64,
+ ),
+ &Variants::Multiple {
+ tag_encoding: TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant },
+ tag,
+ ..
+ } => {
+ if variant_index == dataful_variant {
+ None
+ } else {
+ let value = (variant_index.as_u32() as u128)
+ .wrapping_sub(niche_variants.start().as_u32() as u128)
+ .wrapping_add(niche_start);
+ let value = tag.size(cx).truncate(value);
+ // NOTE(eddyb) do *NOT* remove this assert, until
+ // we pass the full 128-bit value to LLVM, otherwise
+ // truncation will be silent and remain undetected.
+ assert_eq!(value as u64 as u128, value);
+ Some(value as u64)
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
new file mode 100644
index 000000000..f1935e0ec
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -0,0 +1,441 @@
+use std::borrow::Cow;
+
+use crate::{
+ common::CodegenCx,
+ debuginfo::{
+ metadata::{
+ closure_saved_names_of_captured_variables,
+ enums::tag_base_type,
+ file_metadata, generator_layout_and_saved_local_names, size_and_align_of, type_di_node,
+ type_map::{self, Stub, StubInfo, UniqueTypeId},
+ unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS,
+ UNKNOWN_LINE_NUMBER,
+ },
+ utils::{create_DIArray, get_namespace_for_item, DIB},
+ },
+ llvm::{
+ self,
+ debuginfo::{DIFile, DIFlags, DIType},
+ },
+};
+use libc::c_uint;
+use rustc_codegen_ssa::{
+ debuginfo::{type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo},
+ traits::ConstMethods,
+};
+use rustc_middle::{
+ bug,
+ ty::{
+ self,
+ layout::{LayoutOf, TyAndLayout},
+ },
+};
+use rustc_target::abi::{Size, TagEncoding, VariantIdx, Variants};
+use smallvec::smallvec;
+
+/// Build the debuginfo node for an enum type. The listing below shows what such a
+/// type looks like at the LLVM IR/DWARF level. It is a `DW_TAG_structure_type`
+/// with a single `DW_TAG_variant_part` that in turn contains a `DW_TAG_variant`
+/// for each variant of the enum. The variant-part also contains a single member
+/// describing the discriminant, and a nested struct type for each of the variants.
+///
+/// ```txt
+/// ---> DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// ```
+pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let enum_type = unique_type_id.expect_ty();
+ let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
+ bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
+ };
+
+ let containing_scope = get_namespace_for_item(cx, enum_adt_def.did());
+ let enum_type_and_layout = cx.layout_of(enum_type);
+ let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false);
+
+ debug_assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &enum_type_name,
+ size_and_align_of(enum_type_and_layout),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ |cx, enum_type_di_node| {
+ // Build the struct type for each variant. These will be referenced by the
+ // DW_TAG_variant DIEs inside of the DW_TAG_variant_part DIE.
+ // We also collect the names for the corresponding DW_TAG_variant DIEs here.
+ let variant_member_infos: SmallVec<_> = enum_adt_def
+ .variant_range()
+ .map(|variant_index| VariantMemberInfo {
+ variant_index,
+ variant_name: Cow::from(enum_adt_def.variant(variant_index).name.as_str()),
+ variant_struct_type_di_node: super::build_enum_variant_struct_type_di_node(
+ cx,
+ enum_type,
+ enum_type_di_node,
+ variant_index,
+ enum_adt_def.variant(variant_index),
+ enum_type_and_layout.for_variant(cx, variant_index),
+ ),
+ source_info: None,
+ })
+ .collect();
+
+ smallvec![build_enum_variant_part_di_node(
+ cx,
+ enum_type_and_layout,
+ enum_type_di_node,
+ &variant_member_infos[..],
+ )]
+ },
+ // We don't seem to be emitting generic args on the enum type. Rather,
+ // they get attached to the struct type of each variant.
+ NO_GENERICS,
+ )
+}
+
+/// Build the debuginfo node for a generator environment. It looks the same as the debuginfo for
+/// an enum. See [build_enum_type_di_node] for more information.
+///
+/// ```txt
+///
+/// ---> DW_TAG_structure_type (top-level type for the generator)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+///
+/// ```
+pub(super) fn build_generator_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let generator_type = unique_type_id.expect_ty();
+ let &ty::Generator(generator_def_id, _, _) = generator_type.kind() else {
+ bug!("build_generator_di_node() called with non-generator type: `{:?}`", generator_type)
+ };
+
+ let containing_scope = get_namespace_for_item(cx, generator_def_id);
+ let generator_type_and_layout = cx.layout_of(generator_type);
+
+ debug_assert!(!wants_c_like_enum_debuginfo(generator_type_and_layout));
+
+ let generator_type_name = compute_debuginfo_type_name(cx.tcx, generator_type, false);
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &generator_type_name,
+ size_and_align_of(generator_type_and_layout),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ |cx, generator_type_di_node| {
+ let (generator_layout, state_specific_upvar_names) =
+ generator_layout_and_saved_local_names(cx.tcx, generator_def_id);
+
+ let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } = generator_type_and_layout.variants else {
+ bug!(
+ "Encountered generator with non-direct-tag layout: {:?}",
+ generator_type_and_layout
+ )
+ };
+
+ let common_upvar_names =
+ closure_saved_names_of_captured_variables(cx.tcx, generator_def_id);
+
+ // Build variant struct types
+ let variant_struct_type_di_nodes: SmallVec<_> = variants
+ .indices()
+ .map(|variant_index| {
+ // FIXME: This is problematic because just a number is not a valid identifier.
+ // Using GeneratorSubsts::variant_name(variant_index) would be more consistent
+ // with enums.
+ let variant_name = format!("{}", variant_index.as_usize()).into();
+
+ let span = generator_layout.variant_source_info[variant_index].span;
+ let source_info = if !span.is_dummy() {
+ let loc = cx.lookup_debug_loc(span.lo());
+ Some((file_metadata(cx, &loc.file), loc.line))
+ } else {
+ None
+ };
+
+ VariantMemberInfo {
+ variant_index,
+ variant_name,
+ variant_struct_type_di_node:
+ super::build_generator_variant_struct_type_di_node(
+ cx,
+ variant_index,
+ generator_type_and_layout,
+ generator_type_di_node,
+ generator_layout,
+ &state_specific_upvar_names,
+ &common_upvar_names,
+ ),
+ source_info,
+ }
+ })
+ .collect();
+
+ smallvec![build_enum_variant_part_di_node(
+ cx,
+ generator_type_and_layout,
+ generator_type_di_node,
+ &variant_struct_type_di_nodes[..],
+ )]
+ },
+ // We don't seem to be emitting generic args on the generator type. Rather,
+ // they get attached to the struct type of each variant.
+ NO_GENERICS,
+ )
+}
+
+/// Builds the DW_TAG_variant_part of an enum or generator debuginfo node:
+///
+/// ```txt
+/// DW_TAG_structure_type (top-level type for enum)
+/// ---> DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// ```
+fn build_enum_variant_part_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_member_infos: &[VariantMemberInfo<'_, 'll>],
+) -> &'ll DIType {
+ let tag_member_di_node =
+ build_discr_member_di_node(cx, enum_type_and_layout, enum_type_di_node);
+
+ let variant_part_unique_type_id =
+ UniqueTypeId::for_enum_variant_part(cx.tcx, enum_type_and_layout.ty);
+
+ let stub = StubInfo::new(
+ cx,
+ variant_part_unique_type_id,
+ |cx, variant_part_unique_type_id_str| unsafe {
+ let variant_part_name = "";
+ llvm::LLVMRustDIBuilderCreateVariantPart(
+ DIB(cx),
+ enum_type_di_node,
+ variant_part_name.as_ptr().cast(),
+ variant_part_name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ enum_type_and_layout.size.bits(),
+ enum_type_and_layout.align.abi.bits() as u32,
+ DIFlags::FlagZero,
+ tag_member_di_node,
+ create_DIArray(DIB(cx), &[]),
+ variant_part_unique_type_id_str.as_ptr().cast(),
+ variant_part_unique_type_id_str.len(),
+ )
+ },
+ );
+
+ type_map::build_type_with_children(
+ cx,
+ stub,
+ |cx, variant_part_di_node| {
+ variant_member_infos
+ .iter()
+ .map(|variant_member_info| {
+ build_enum_variant_member_di_node(
+ cx,
+ enum_type_and_layout,
+ variant_part_di_node,
+ variant_member_info,
+ )
+ })
+ .collect()
+ },
+ NO_GENERICS,
+ )
+ .di_node
+}
+
+/// Builds the DW_TAG_member describing where we can find the tag of an enum.
+/// Returns `None` if the enum does not have a tag.
+///
+/// ```txt
+///
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// ---> DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+///
+/// ```
+fn build_discr_member_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_or_generator_type_and_layout: TyAndLayout<'tcx>,
+ enum_or_generator_type_di_node: &'ll DIType,
+) -> Option<&'ll DIType> {
+ let tag_name = match enum_or_generator_type_and_layout.ty.kind() {
+ ty::Generator(..) => "__state",
+ _ => "",
+ };
+
+ // NOTE: This is actually wrong. This will become a member
+ // of the DW_TAG_variant_part. But, due to LLVM's API, the variant part
+ // can only be constructed with this DW_TAG_member already created.
+ // In LLVM IR the wrong scope will be listed, but when DWARF is
+ // generated from it, the DW_TAG_member will be a child of the
+ // DW_TAG_variant_part.
+ let containing_scope = enum_or_generator_type_di_node;
+
+ match enum_or_generator_type_and_layout.layout.variants() {
+ // A single-variant enum has no discriminant.
+ &Variants::Single { .. } => None,
+
+ &Variants::Multiple { tag_field, .. } => {
+ let tag_base_type = tag_base_type(cx, enum_or_generator_type_and_layout);
+ let (size, align) = cx.size_and_align_of(tag_base_type);
+
+ unsafe {
+ Some(llvm::LLVMRustDIBuilderCreateMemberType(
+ DIB(cx),
+ containing_scope,
+ tag_name.as_ptr().cast(),
+ tag_name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.bits() as u32,
+ enum_or_generator_type_and_layout.fields.offset(tag_field).bits(),
+ DIFlags::FlagArtificial,
+ type_di_node(cx, tag_base_type),
+ ))
+ }
+ }
+ }
+}
+
+/// Build the debuginfo node for `DW_TAG_variant`:
+///
+/// ```txt
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// ---> DW_TAG_variant (variant 1)
+/// ---> DW_TAG_variant (variant 2)
+/// ---> DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// ```
+///
+/// This node looks like:
+///
+/// ```txt
+/// DW_TAG_variant
+/// DW_AT_discr_value 0
+/// DW_TAG_member
+/// DW_AT_name None
+/// DW_AT_type <0x000002a1>
+/// DW_AT_alignment 0x00000002
+/// DW_AT_data_member_location 0
+/// ```
+///
+/// The DW_AT_discr_value is optional, and is omitted if
+/// - This is the only variant of a univariant enum (i.e. there is no discriminant)
+/// - This is the "dataful" variant of a niche-layout enum
+/// (where only the other variants are identified by a single value)
+///
+/// There is only ever a single member, the type of which is a struct that describes the
+/// fields of the variant (excluding the discriminant). The name of the member is the name
+/// of the variant as given in the source code. The DW_AT_data_member_location is always
+/// zero.
+///
+/// Note that the LLVM DIBuilder API is a bit unintuitive here. The DW_TAG_variant subtree
+/// (including the DW_TAG_member) is built by a single call to
+/// `LLVMRustDIBuilderCreateVariantMemberType()`.
+fn build_enum_variant_member_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ variant_part_di_node: &'ll DIType,
+ variant_member_info: &VariantMemberInfo<'_, 'll>,
+) -> &'ll DIType {
+ let variant_index = variant_member_info.variant_index;
+ let discr_value = super::compute_discriminant_value(cx, enum_type_and_layout, variant_index);
+
+ let (file_di_node, line_number) = variant_member_info
+ .source_info
+ .unwrap_or_else(|| (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER));
+
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateVariantMemberType(
+ DIB(cx),
+ variant_part_di_node,
+ variant_member_info.variant_name.as_ptr().cast(),
+ variant_member_info.variant_name.len(),
+ file_di_node,
+ line_number,
+ enum_type_and_layout.size.bits(),
+ enum_type_and_layout.align.abi.bits() as u32,
+ Size::ZERO.bits(),
+ discr_value.map(|v| cx.const_u64(v)),
+ DIFlags::FlagZero,
+ variant_member_info.variant_struct_type_di_node,
+ )
+ }
+}
+
+/// Information needed for building a `DW_TAG_variant`:
+///
+/// ```txt
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// ---> DW_TAG_variant (variant 1)
+/// ---> DW_TAG_variant (variant 2)
+/// ---> DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// ```
+struct VariantMemberInfo<'a, 'll> {
+ variant_index: VariantIdx,
+ variant_name: Cow<'a, str>,
+ variant_struct_type_di_node: &'ll DIType,
+ source_info: Option<(&'ll DIFile, c_uint)>,
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
new file mode 100644
index 000000000..ce2f419c4
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
@@ -0,0 +1,267 @@
+use std::cell::RefCell;
+
+use rustc_data_structures::{
+ fingerprint::Fingerprint,
+ fx::FxHashMap,
+ stable_hasher::{HashStable, StableHasher},
+};
+use rustc_middle::{
+ bug,
+ ty::{ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt},
+};
+use rustc_target::abi::{Align, Size, VariantIdx};
+
+use crate::{
+ common::CodegenCx,
+ debuginfo::utils::{create_DIArray, debug_context, DIB},
+ llvm::{
+ self,
+ debuginfo::{DIFlags, DIScope, DIType},
+ },
+};
+
+use super::{unknown_file_metadata, SmallVec, UNKNOWN_LINE_NUMBER};
+
+mod private {
+ // This type cannot be constructed outside of this module because
+ // it has a private field. We make use of this in order to prevent
+ // `UniqueTypeId` from being constructed directly, without asserting
+ // the preconditions.
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, HashStable)]
+ pub struct HiddenZst;
+}
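+
+// For illustration (a sketch; this would not compile from outside this module):
+//
+//     let id = UniqueTypeId::Ty(ty, private::HiddenZst); // error: module `private` is inaccessible
+//     let id = UniqueTypeId::for_ty(tcx, ty);            // OK: debug-asserts normalization first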
+
+/// A unique identifier for anything that we create a debuginfo node for.
+/// The types it contains are expected to already be normalized (which
+/// is debug_asserted in the constructors).
+///
+/// Note that there are some things that only show up in debuginfo, like
+/// the separate type descriptions for each enum variant. These get an ID
+/// too because they have their own debuginfo node in LLVM IR.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, HashStable)]
+pub(super) enum UniqueTypeId<'tcx> {
+ /// The ID of a regular type as it shows up at the language level.
+ Ty(Ty<'tcx>, private::HiddenZst),
+ /// The ID for the single DW_TAG_variant_part nested inside the top-level
+ /// DW_TAG_structure_type that describes enums and generators.
+ VariantPart(Ty<'tcx>, private::HiddenZst),
+ /// The ID for the artificial struct type describing a single enum variant.
+ VariantStructType(Ty<'tcx>, VariantIdx, private::HiddenZst),
+ /// The ID of the artificial type we create for VTables.
+ VTableTy(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>, private::HiddenZst),
+}
+
+impl<'tcx> UniqueTypeId<'tcx> {
+ pub fn for_ty(tcx: TyCtxt<'tcx>, t: Ty<'tcx>) -> Self {
+ debug_assert_eq!(t, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t));
+ UniqueTypeId::Ty(t, private::HiddenZst)
+ }
+
+ pub fn for_enum_variant_part(tcx: TyCtxt<'tcx>, enum_ty: Ty<'tcx>) -> Self {
+ debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+ UniqueTypeId::VariantPart(enum_ty, private::HiddenZst)
+ }
+
+ pub fn for_enum_variant_struct_type(
+ tcx: TyCtxt<'tcx>,
+ enum_ty: Ty<'tcx>,
+ variant_idx: VariantIdx,
+ ) -> Self {
+ debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+ UniqueTypeId::VariantStructType(enum_ty, variant_idx, private::HiddenZst)
+ }
+
+ pub fn for_vtable_ty(
+ tcx: TyCtxt<'tcx>,
+ self_type: Ty<'tcx>,
+ implemented_trait: Option<PolyExistentialTraitRef<'tcx>>,
+ ) -> Self {
+ debug_assert_eq!(
+ self_type,
+ tcx.normalize_erasing_regions(ParamEnv::reveal_all(), self_type)
+ );
+ debug_assert_eq!(
+ implemented_trait,
+ tcx.normalize_erasing_regions(ParamEnv::reveal_all(), implemented_trait)
+ );
+ UniqueTypeId::VTableTy(self_type, implemented_trait, private::HiddenZst)
+ }
+
+ /// Generates a string version of this [UniqueTypeId], which can be used as the `UniqueId`
+ /// argument of the various `LLVMRustDIBuilderCreate*Type()` methods.
+ ///
+ /// Right now this takes the form of a hex-encoded opaque hash value.
+ pub fn generate_unique_id_string(self, tcx: TyCtxt<'tcx>) -> String {
+ let mut hasher = StableHasher::new();
+ tcx.with_stable_hashing_context(|mut hcx| {
+ hcx.while_hashing_spans(false, |hcx| self.hash_stable(hcx, &mut hasher))
+ });
+ hasher.finish::<Fingerprint>().to_hex()
+ }
+
+ pub fn expect_ty(self) -> Ty<'tcx> {
+ match self {
+ UniqueTypeId::Ty(ty, _) => ty,
+ _ => bug!("Expected `UniqueTypeId::Ty` but found `{:?}`", self),
+ }
+ }
+}
+
+/// The `TypeMap` is where the debug context holds the type metadata nodes
+/// created so far. The debuginfo nodes are identified by `UniqueTypeId`.
+#[derive(Default)]
+pub(crate) struct TypeMap<'ll, 'tcx> {
+ pub(super) unique_id_to_di_node: RefCell<FxHashMap<UniqueTypeId<'tcx>, &'ll DIType>>,
+}
+
+impl<'ll, 'tcx> TypeMap<'ll, 'tcx> {
+ /// Adds a `UniqueTypeId`-to-metadata mapping to the `TypeMap`. The method will
+ /// fail if the mapping already exists.
+ pub(super) fn insert(&self, unique_type_id: UniqueTypeId<'tcx>, metadata: &'ll DIType) {
+ if self.unique_id_to_di_node.borrow_mut().insert(unique_type_id, metadata).is_some() {
+ bug!("type metadata for unique ID '{:?}' is already in the `TypeMap`!", unique_type_id);
+ }
+ }
+
+ pub(super) fn di_node_for_unique_id(
+ &self,
+ unique_type_id: UniqueTypeId<'tcx>,
+ ) -> Option<&'ll DIType> {
+ self.unique_id_to_di_node.borrow().get(&unique_type_id).cloned()
+ }
+}
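+
+// Typical usage pattern (a sketch; the real lookup-or-build logic lives with
+// the callers in the parent module):
+//
+//     if let Some(di_node) = debug_context(cx).type_map.di_node_for_unique_id(id) {
+//         return di_node; // already built
+//     }
+//     // ...otherwise build it, usually via type_map::build_type_with_children()...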
+
+pub struct DINodeCreationResult<'ll> {
+ pub di_node: &'ll DIType,
+ pub already_stored_in_typemap: bool,
+}
+
+impl<'ll> DINodeCreationResult<'ll> {
+ pub fn new(di_node: &'ll DIType, already_stored_in_typemap: bool) -> Self {
+ DINodeCreationResult { di_node, already_stored_in_typemap }
+ }
+}
+
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub enum Stub<'ll> {
+ Struct,
+ Union,
+ VTableTy { vtable_holder: &'ll DIType },
+}
+
+pub struct StubInfo<'ll, 'tcx> {
+ metadata: &'ll DIType,
+ unique_type_id: UniqueTypeId<'tcx>,
+}
+
+impl<'ll, 'tcx> StubInfo<'ll, 'tcx> {
+ pub(super) fn new(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+ build: impl FnOnce(&CodegenCx<'ll, 'tcx>, /* unique_type_id_str: */ &str) -> &'ll DIType,
+ ) -> StubInfo<'ll, 'tcx> {
+ let unique_type_id_str = unique_type_id.generate_unique_id_string(cx.tcx);
+ let di_node = build(cx, &unique_type_id_str);
+ StubInfo { metadata: di_node, unique_type_id }
+ }
+}
+
+/// Create a stub debuginfo node onto which fields and nested types can be attached.
+pub(super) fn stub<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ kind: Stub<'ll>,
+ unique_type_id: UniqueTypeId<'tcx>,
+ name: &str,
+ (size, align): (Size, Align),
+ containing_scope: Option<&'ll DIScope>,
+ flags: DIFlags,
+) -> StubInfo<'ll, 'tcx> {
+ let empty_array = create_DIArray(DIB(cx), &[]);
+ let unique_type_id_str = unique_type_id.generate_unique_id_string(cx.tcx);
+
+ let metadata = match kind {
+ Stub::Struct | Stub::VTableTy { .. } => {
+ let vtable_holder = match kind {
+ Stub::VTableTy { vtable_holder } => Some(vtable_holder),
+ _ => None,
+ };
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateStructType(
+ DIB(cx),
+ containing_scope,
+ name.as_ptr().cast(),
+ name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.bits() as u32,
+ flags,
+ None,
+ empty_array,
+ 0,
+ vtable_holder,
+ unique_type_id_str.as_ptr().cast(),
+ unique_type_id_str.len(),
+ )
+ }
+ }
+ Stub::Union => unsafe {
+ llvm::LLVMRustDIBuilderCreateUnionType(
+ DIB(cx),
+ containing_scope,
+ name.as_ptr().cast(),
+ name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.bits() as u32,
+ flags,
+ Some(empty_array),
+ 0,
+ unique_type_id_str.as_ptr().cast(),
+ unique_type_id_str.len(),
+ )
+ },
+ };
+ StubInfo { metadata, unique_type_id }
+}
+
+/// This function enables creating debuginfo nodes that can recursively refer to themselves.
+/// It will first insert the given stub into the type map and only then execute the `members`
+/// and `generics` closures passed in. These closures have access to the stub so they can
+/// directly attach fields to it. If the type of a field transitively refers back
+/// to the type currently being built, the stub will already be found in the type map,
+/// which effectively breaks the recursion cycle.
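+///
+/// A sketch of the recursion this breaks (illustrative):
+///
+/// ```txt
+/// struct LinkedNode { next: Option<Box<LinkedNode>>, value: u32 }
+///
+/// build_type_with_children(LinkedNode):
+///   1. insert the stub for LinkedNode into the type map
+///   2. members() builds the field types; following `next` eventually asks for
+///      LinkedNode again, which is now found in the type map (as the stub), so
+///      the cycle terminates
+///   3. attach the finished member/generics arrays to the stub
+/// ```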
+pub(super) fn build_type_with_children<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ stub_info: StubInfo<'ll, 'tcx>,
+ members: impl FnOnce(&CodegenCx<'ll, 'tcx>, &'ll DIType) -> SmallVec<&'ll DIType>,
+ generics: impl FnOnce(&CodegenCx<'ll, 'tcx>) -> SmallVec<&'ll DIType>,
+) -> DINodeCreationResult<'ll> {
+ debug_assert_eq!(
+ debug_context(cx).type_map.di_node_for_unique_id(stub_info.unique_type_id),
+ None
+ );
+
+ debug_context(cx).type_map.insert(stub_info.unique_type_id, stub_info.metadata);
+
+ let members: SmallVec<_> =
+ members(cx, stub_info.metadata).into_iter().map(|node| Some(node)).collect();
+ let generics: SmallVec<Option<&'ll DIType>> =
+ generics(cx).into_iter().map(|node| Some(node)).collect();
+
+ if !(members.is_empty() && generics.is_empty()) {
+ unsafe {
+ let members_array = create_DIArray(DIB(cx), &members[..]);
+ let generics_array = create_DIArray(DIB(cx), &generics[..]);
+ llvm::LLVMRustDICompositeTypeReplaceArrays(
+ DIB(cx),
+ stub_info.metadata,
+ Some(members_array),
+ Some(generics_array),
+ );
+ }
+ }
+
+ DINodeCreationResult { di_node: stub_info.metadata, already_stored_in_typemap: true }
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
new file mode 100644
index 000000000..cf591295b
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -0,0 +1,614 @@
+#![doc = include_str!("doc.md")]
+
+use rustc_codegen_ssa::mir::debuginfo::VariableKind::*;
+
+use self::metadata::{file_metadata, type_di_node};
+use self::metadata::{UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
+use self::namespace::mangled_name_of_instance;
+use self::utils::{create_DIArray, is_node_local_to_unit, DIB};
+
+use crate::abi::FnAbi;
+use crate::builder::Builder;
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::{
+ DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope, DIType,
+ DIVariable,
+};
+use crate::value::Value;
+
+use rustc_codegen_ssa::debuginfo::type_names;
+use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind};
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::Lrc;
+use rustc_hir::def_id::{DefId, DefIdMap};
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeVisitable};
+use rustc_session::config::{self, DebugInfo};
+use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+use rustc_span::{self, BytePos, Pos, SourceFile, SourceFileAndLine, SourceFileHash, Span};
+use rustc_target::abi::Size;
+
+use libc::c_uint;
+use smallvec::SmallVec;
+use std::cell::OnceCell;
+use std::cell::RefCell;
+use std::iter;
+use tracing::debug;
+
+mod create_scope_map;
+pub mod gdb;
+pub mod metadata;
+mod namespace;
+mod utils;
+
+pub use self::create_scope_map::compute_mir_scopes;
+pub use self::metadata::build_global_var_di_node;
+pub use self::metadata::extend_scope_to_file;
+
+#[allow(non_upper_case_globals)]
+const DW_TAG_auto_variable: c_uint = 0x100;
+#[allow(non_upper_case_globals)]
+const DW_TAG_arg_variable: c_uint = 0x101;
+
+/// A context object for maintaining all state needed by the debuginfo module.
+pub struct CodegenUnitDebugContext<'ll, 'tcx> {
+ llcontext: &'ll llvm::Context,
+ llmod: &'ll llvm::Module,
+ builder: &'ll mut DIBuilder<'ll>,
+ created_files: RefCell<FxHashMap<Option<(u128, SourceFileHash)>, &'ll DIFile>>,
+
+ type_map: metadata::TypeMap<'ll, 'tcx>,
+ namespace_map: RefCell<DefIdMap<&'ll DIScope>>,
+ recursion_marker_type: OnceCell<&'ll DIType>,
+}
+
+impl Drop for CodegenUnitDebugContext<'_, '_> {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustDIBuilderDispose(&mut *(self.builder as *mut _));
+ }
+ }
+}
+
+impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> {
+ pub fn new(llmod: &'ll llvm::Module) -> Self {
+ debug!("CodegenUnitDebugContext::new");
+ let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) };
+ // DIBuilder inherits context from the module, so we'd better use the same one
+ let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) };
+ CodegenUnitDebugContext {
+ llcontext,
+ llmod,
+ builder,
+ created_files: Default::default(),
+ type_map: Default::default(),
+ namespace_map: RefCell::new(Default::default()),
+ recursion_marker_type: OnceCell::new(),
+ }
+ }
+
+ pub fn finalize(&self, sess: &Session) {
+ unsafe {
+ llvm::LLVMRustDIBuilderFinalize(self.builder);
+
+ if !sess.target.is_like_msvc {
+ // Debuginfo generation in LLVM by default uses a higher
+ // version of dwarf than macOS currently understands. We can,
+ // however, instruct LLVM to emit an older version of dwarf
+ // that macOS understands. For more info see #11352.
+ // This can be overridden using --llvm-opts -dwarf-version,N.
+ // Android has the same issue (#22398).
+ let dwarf_version = sess
+ .opts
+ .unstable_opts
+ .dwarf_version
+ .unwrap_or(sess.target.default_dwarf_version);
+ llvm::LLVMRustAddModuleFlag(
+ self.llmod,
+ llvm::LLVMModFlagBehavior::Warning,
+ "Dwarf Version\0".as_ptr().cast(),
+ dwarf_version,
+ );
+ } else {
+ // Indicate that we want CodeView debug information on MSVC
+ llvm::LLVMRustAddModuleFlag(
+ self.llmod,
+ llvm::LLVMModFlagBehavior::Warning,
+ "CodeView\0".as_ptr().cast(),
+ 1,
+ )
+ }
+
+ // Prevent bitcode readers from deleting the debug info.
+ let ptr = "Debug Info Version\0".as_ptr();
+ llvm::LLVMRustAddModuleFlag(
+ self.llmod,
+ llvm::LLVMModFlagBehavior::Warning,
+ ptr.cast(),
+ llvm::LLVMRustDebugMetadataVersion(),
+ );
+ }
+ }
+}
+
+/// Creates any deferred debug metadata nodes
+pub fn finalize(cx: &CodegenCx<'_, '_>) {
+ if let Some(dbg_cx) = &cx.dbg_cx {
+ debug!("finalize");
+
+ if gdb::needs_gdb_debug_scripts_section(cx) {
+ // Add a .debug_gdb_scripts section to this compile-unit. This will
+ // cause GDB to try and load the gdb_load_rust_pretty_printers.py file,
+ // which activates the Rust pretty printers for the binary this section is
+ // contained in.
+ gdb::get_or_insert_gdb_debug_scripts_section_global(cx);
+ }
+
+ dbg_cx.finalize(cx.sess());
+ }
+}
+
+impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> {
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn dbg_var_addr(
+ &mut self,
+ dbg_var: &'ll DIVariable,
+ dbg_loc: &'ll DILocation,
+ variable_alloca: Self::Value,
+ direct_offset: Size,
+ indirect_offsets: &[Size],
+ ) {
+ // Convert the direct and indirect offsets to address ops.
+ // FIXME(eddyb) use `const`s instead of getting the values via FFI,
+ // the values should match the ones in the DWARF standard anyway.
+ let op_deref = || unsafe { llvm::LLVMRustDIBuilderCreateOpDeref() };
+ let op_plus_uconst = || unsafe { llvm::LLVMRustDIBuilderCreateOpPlusUconst() };
+ let mut addr_ops = SmallVec::<[u64; 8]>::new();
+
+ if direct_offset.bytes() > 0 {
+ addr_ops.push(op_plus_uconst());
+ addr_ops.push(direct_offset.bytes() as u64);
+ }
+ for &offset in indirect_offsets {
+ addr_ops.push(op_deref());
+ if offset.bytes() > 0 {
+ addr_ops.push(op_plus_uconst());
+ addr_ops.push(offset.bytes() as u64);
+ }
+ }
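+
+ // For example (illustrative): a `direct_offset` of 8 bytes and
+ // `indirect_offsets` of [0, 4] produce the op sequence
+ // [plus_uconst, 8, deref, deref, plus_uconst, 4].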
+
+ unsafe {
+ // FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`.
+ llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
+ DIB(self.cx()),
+ variable_alloca,
+ dbg_var,
+ addr_ops.as_ptr(),
+ addr_ops.len() as c_uint,
+ dbg_loc,
+ self.llbb(),
+ );
+ }
+ }
+
+ fn set_dbg_loc(&mut self, dbg_loc: &'ll DILocation) {
+ unsafe {
+ let dbg_loc_as_llval = llvm::LLVMRustMetadataAsValue(self.cx().llcx, dbg_loc);
+ llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc_as_llval);
+ }
+ }
+
+ fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
+ gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
+ }
+
+ fn set_var_name(&mut self, value: &'ll Value, name: &str) {
+ // Avoid wasting time if LLVM value names aren't even enabled.
+ if self.sess().fewer_names() {
+ return;
+ }
+
+ // Only function parameters and instructions are local to a function,
+ // don't change the name of anything else (e.g. globals).
+ let param_or_inst = unsafe {
+ llvm::LLVMIsAArgument(value).is_some() || llvm::LLVMIsAInstruction(value).is_some()
+ };
+ if !param_or_inst {
+ return;
+ }
+
+ // Avoid replacing the name if it already exists.
+ // While we could combine the names somehow, it'd
+ // get noisy quick, and the usefulness is dubious.
+ if llvm::get_value_name(value).is_empty() {
+ llvm::set_value_name(value, name.as_bytes());
+ }
+ }
+}
+
+/// A source code location used to generate debug information.
+// FIXME(eddyb) rename this to better indicate it's a duplicate of
+// `rustc_span::Loc` rather than `DILocation`, perhaps by making
+// `lookup_char_pos` return the right information instead.
+pub struct DebugLoc {
+ /// Information about the original source file.
+ pub file: Lrc<SourceFile>,
+ /// The (1-based) line number.
+ pub line: u32,
+ /// The (1-based) column number.
+ pub col: u32,
+}
+
+impl CodegenCx<'_, '_> {
+ /// Looks up debug source information about a `BytePos`.
+ // FIXME(eddyb) rename this to better indicate it's a duplicate of
+ // `lookup_char_pos` rather than `dbg_loc`, perhaps by making
+ // `lookup_char_pos` return the right information instead.
+ pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
+ let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
+ Ok(SourceFileAndLine { sf: file, line }) => {
+ let line_pos = file.line_begin_pos(pos);
+
+ // Use 1-based indexing.
+ let line = (line + 1) as u32;
+ let col = (pos - line_pos).to_u32() + 1;
+
+ (file, line, col)
+ }
+ Err(file) => (file, UNKNOWN_LINE_NUMBER, UNKNOWN_COLUMN_NUMBER),
+ };
+
+ // For MSVC, omit the column number.
+ // Otherwise, emit it. This mimics clang behaviour.
+ // See discussion in https://github.com/rust-lang/rust/issues/42921
+ if self.sess().target.is_like_msvc {
+ DebugLoc { file, line, col: UNKNOWN_COLUMN_NUMBER }
+ } else {
+ DebugLoc { file, line, col }
+ }
+ }
+}
+
+impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn create_function_debug_context(
+ &self,
+ instance: Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ llfn: &'ll Value,
+ mir: &mir::Body<'tcx>,
+ ) -> Option<FunctionDebugContext<&'ll DIScope, &'ll DILocation>> {
+ if self.sess().opts.debuginfo == DebugInfo::None {
+ return None;
+ }
+
+ // Initialize fn debug context (including scopes).
+ let empty_scope = DebugScope {
+ dbg_scope: self.dbg_scope_fn(instance, fn_abi, Some(llfn)),
+ inlined_at: None,
+ file_start_pos: BytePos(0),
+ file_end_pos: BytePos(0),
+ };
+ let mut fn_debug_context =
+ FunctionDebugContext { scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes) };
+
+ // Fill in all the scopes, with the information from the MIR body.
+ compute_mir_scopes(self, instance, mir, &mut fn_debug_context);
+
+ Some(fn_debug_context)
+ }
+
+ fn dbg_scope_fn(
+ &self,
+ instance: Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ maybe_definition_llfn: Option<&'ll Value>,
+ ) -> &'ll DIScope {
+ let tcx = self.tcx;
+
+ let def_id = instance.def_id();
+ let containing_scope = get_containing_scope(self, instance);
+ let span = tcx.def_span(def_id);
+ let loc = self.lookup_debug_loc(span.lo());
+ let file_metadata = file_metadata(self, &loc.file);
+
+ let function_type_metadata = unsafe {
+ let fn_signature = get_function_signature(self, fn_abi);
+ llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
+ };
+
+ let mut name = String::new();
+ type_names::push_item_name(tcx, def_id, false, &mut name);
+
+ // Find the enclosing function, in case this is a closure.
+ let enclosing_fn_def_id = tcx.typeck_root_def_id(def_id);
+
+        // We look up the generics of the enclosing function and truncate the substs
+        // to their length in order to cut off the extra, synthetic substs that
+        // closures and generators append.
+ let generics = tcx.generics_of(enclosing_fn_def_id);
+ let substs = instance.substs.truncate_to(tcx, generics);
+
+ type_names::push_generic_params(
+ tcx,
+ tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substs),
+ &mut name,
+ );
+
+ let template_parameters = get_template_parameters(self, generics, substs);
+
+ let linkage_name = &mangled_name_of_instance(self, instance).name;
+        // Omit the linkage_name if it is the same as the subprogram name.
+ let linkage_name = if &name == linkage_name { "" } else { linkage_name };
+
+ // FIXME(eddyb) does this need to be separate from `loc.line` for some reason?
+ let scope_line = loc.line;
+
+ let mut flags = DIFlags::FlagPrototyped;
+
+ if fn_abi.ret.layout.abi.is_uninhabited() {
+ flags |= DIFlags::FlagNoReturn;
+ }
+
+ let mut spflags = DISPFlags::SPFlagDefinition;
+ if is_node_local_to_unit(self, def_id) {
+ spflags |= DISPFlags::SPFlagLocalToUnit;
+ }
+ if self.sess().opts.optimize != config::OptLevel::No {
+ spflags |= DISPFlags::SPFlagOptimized;
+ }
+ if let Some((id, _)) = tcx.entry_fn(()) {
+ if id == def_id {
+ spflags |= DISPFlags::SPFlagMainSubprogram;
+ }
+ }
+
+ unsafe {
+ return llvm::LLVMRustDIBuilderCreateFunction(
+ DIB(self),
+ containing_scope,
+ name.as_ptr().cast(),
+ name.len(),
+ linkage_name.as_ptr().cast(),
+ linkage_name.len(),
+ file_metadata,
+ loc.line,
+ function_type_metadata,
+ scope_line,
+ flags,
+ spflags,
+ maybe_definition_llfn,
+ template_parameters,
+ None,
+ );
+ }
+
+ fn get_function_signature<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ ) -> &'ll DIArray {
+ if cx.sess().opts.debuginfo == DebugInfo::Limited {
+ return create_DIArray(DIB(cx), &[]);
+ }
+
+ let mut signature = Vec::with_capacity(fn_abi.args.len() + 1);
+
+ // Return type -- llvm::DIBuilder wants this at index 0
+ signature.push(if fn_abi.ret.is_ignore() {
+ None
+ } else {
+ Some(type_di_node(cx, fn_abi.ret.layout.ty))
+ });
+
+        // Argument types
+ if cx.sess().target.is_like_msvc {
+ // FIXME(#42800):
+ // There is a bug in MSDIA that leads to a crash when it encounters
+ // a fixed-size array of `u8` or something zero-sized in a
+ // function-type (see #40477).
+ // As a workaround, we replace those fixed-size arrays with a
+ // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would
+ // appear as `fn foo(a: u8, b: *const u8)` in debuginfo,
+ // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`.
+ // This transformed type is wrong, but these function types are
+ // already inaccurate due to ABI adjustments (see #42800).
+ signature.extend(fn_abi.args.iter().map(|arg| {
+ let t = arg.layout.ty;
+ let t = match t.kind() {
+ ty::Array(ct, _)
+ if (*ct == cx.tcx.types.u8) || cx.layout_of(*ct).is_zst() =>
+ {
+ cx.tcx.mk_imm_ptr(*ct)
+ }
+ _ => t,
+ };
+ Some(type_di_node(cx, t))
+ }));
+ } else {
+ signature
+ .extend(fn_abi.args.iter().map(|arg| Some(type_di_node(cx, arg.layout.ty))));
+ }
+
+ create_DIArray(DIB(cx), &signature[..])
+ }
+
+ fn get_template_parameters<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ generics: &ty::Generics,
+ substs: SubstsRef<'tcx>,
+ ) -> &'ll DIArray {
+ if substs.types().next().is_none() {
+ return create_DIArray(DIB(cx), &[]);
+ }
+
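+        // For example, an instance of `fn foo<T>()` with `T = u32` yields a
+        // single template type parameter named `T` that points at the
+        // debuginfo node for `u32`.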
+ // Again, only create type information if full debuginfo is enabled
+ let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full {
+ let names = get_parameter_names(cx, generics);
+ iter::zip(substs, names)
+ .filter_map(|(kind, name)| {
+ if let GenericArgKind::Type(ty) = kind.unpack() {
+ let actual_type =
+ cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
+ let actual_type_metadata = type_di_node(cx, actual_type);
+ let name = name.as_str();
+ Some(unsafe {
+ Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
+ DIB(cx),
+ None,
+ name.as_ptr().cast(),
+ name.len(),
+ actual_type_metadata,
+ ))
+ })
+ } else {
+ None
+ }
+ })
+ .collect()
+ } else {
+ vec![]
+ };
+
+ create_DIArray(DIB(cx), &template_params)
+ }
+
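+    // Collects the names of all generic parameters, parent generics first,
+    // so that they line up with the flattened `substs` of the instance.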
+ fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
+ let mut names = generics.parent.map_or_else(Vec::new, |def_id| {
+ get_parameter_names(cx, cx.tcx.generics_of(def_id))
+ });
+ names.extend(generics.params.iter().map(|param| param.name));
+ names
+ }
+
+ fn get_containing_scope<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ instance: Instance<'tcx>,
+ ) -> &'ll DIScope {
+        // First, let's see if this is a method within an inherent impl. If so,
+        // we want to make the resulting subroutine DIE a child of the
+        // subroutine's self-type.
+ let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| {
+ // If the method does *not* belong to a trait, proceed
+ if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
+ let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
+ instance.substs,
+ ty::ParamEnv::reveal_all(),
+ cx.tcx.type_of(impl_def_id),
+ );
+
+ // Only "class" methods are generally understood by LLVM,
+ // so avoid methods on other types (e.g., `<*mut T>::null`).
+ match impl_self_ty.kind() {
+ ty::Adt(def, ..) if !def.is_box() => {
+ // Again, only create type information if full debuginfo is enabled
+ if cx.sess().opts.debuginfo == DebugInfo::Full
+ && !impl_self_ty.needs_subst()
+ {
+ Some(type_di_node(cx, impl_self_ty))
+ } else {
+ Some(namespace::item_namespace(cx, def.did()))
+ }
+ }
+ _ => None,
+ }
+ } else {
+ // For trait method impls we still use the "parallel namespace"
+ // strategy
+ None
+ }
+ });
+
+ self_type.unwrap_or_else(|| {
+ namespace::item_namespace(
+ cx,
+ DefId {
+ krate: instance.def_id().krate,
+ index: cx
+ .tcx
+ .def_key(instance.def_id())
+ .parent
+ .expect("get_containing_scope: missing parent?"),
+ },
+ )
+ })
+ }
+ }
+
+ fn dbg_loc(
+ &self,
+ scope: &'ll DIScope,
+ inlined_at: Option<&'ll DILocation>,
+ span: Span,
+ ) -> &'ll DILocation {
+ let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
+
+ unsafe { llvm::LLVMRustDIBuilderCreateDebugLocation(line, col, scope, inlined_at) }
+ }
+
+ fn create_vtable_debuginfo(
+ &self,
+ ty: Ty<'tcx>,
+ trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+ vtable: Self::Value,
+ ) {
+ metadata::create_vtable_di_node(self, ty, trait_ref, vtable)
+ }
+
+ fn extend_scope_to_file(
+ &self,
+ scope_metadata: &'ll DIScope,
+ file: &rustc_span::SourceFile,
+ ) -> &'ll DILexicalBlock {
+ metadata::extend_scope_to_file(self, scope_metadata, file)
+ }
+
+ fn debuginfo_finalize(&self) {
+ finalize(self)
+ }
+
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn create_dbg_var(
+ &self,
+ variable_name: Symbol,
+ variable_type: Ty<'tcx>,
+ scope_metadata: &'ll DIScope,
+ variable_kind: VariableKind,
+ span: Span,
+ ) -> &'ll DIVariable {
+ let loc = self.lookup_debug_loc(span.lo());
+ let file_metadata = file_metadata(self, &loc.file);
+
+ let type_metadata = type_di_node(self, variable_type);
+
+ let (argument_index, dwarf_tag) = match variable_kind {
+ ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
+ LocalVariable => (0, DW_TAG_auto_variable),
+ };
+ let align = self.align_of(variable_type);
+
+ let name = variable_name.as_str();
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateVariable(
+ DIB(self),
+ dwarf_tag,
+ scope_metadata,
+ name.as_ptr().cast(),
+ name.len(),
+ file_metadata,
+ loc.line,
+ type_metadata,
+ true,
+ DIFlags::FlagZero,
+ argument_index,
+ align.bytes() as u32,
+ )
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
new file mode 100644
index 000000000..d5ea48c31
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
@@ -0,0 +1,48 @@
+// Namespace Handling.
+
+use super::utils::{debug_context, DIB};
+use rustc_codegen_ssa::debuginfo::type_names;
+use rustc_middle::ty::{self, Instance};
+
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::DIScope;
+use rustc_hir::def_id::DefId;
+
+pub fn mangled_name_of_instance<'a, 'tcx>(
+ cx: &CodegenCx<'a, 'tcx>,
+ instance: Instance<'tcx>,
+) -> ty::SymbolName<'tcx> {
+ let tcx = cx.tcx;
+ tcx.symbol_name(instance)
+}
+
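+/// Returns the debuginfo namespace node for `def_id`, recursively creating
+/// (and caching in `namespace_map`) the namespaces of its parents first. For
+/// example, asking for the namespace of `my_crate::foo` produces a `foo`
+/// namespace nested inside a `my_crate` namespace.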
+pub fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {
+ if let Some(&scope) = debug_context(cx).namespace_map.borrow().get(&def_id) {
+ return scope;
+ }
+
+ let def_key = cx.tcx.def_key(def_id);
+ let parent_scope = def_key
+ .parent
+ .map(|parent| item_namespace(cx, DefId { krate: def_id.krate, index: parent }));
+
+ let namespace_name_string = {
+ let mut output = String::new();
+ type_names::push_item_name(cx.tcx, def_id, false, &mut output);
+ output
+ };
+
+ let scope = unsafe {
+ llvm::LLVMRustDIBuilderCreateNameSpace(
+ DIB(cx),
+ parent_scope,
+ namespace_name_string.as_ptr().cast(),
+ namespace_name_string.len(),
+ false, // ExportSymbols (only relevant for C++ anonymous namespaces)
+ )
+ };
+
+ debug_context(cx).namespace_map.borrow_mut().insert(def_id, scope);
+ scope
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
new file mode 100644
index 000000000..8f2436739
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -0,0 +1,99 @@
+// Utility Functions.
+
+use super::namespace::item_namespace;
+use super::CodegenUnitDebugContext;
+
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
+use rustc_middle::ty::{self, DefIdTree, Ty};
+use tracing::trace;
+
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::{DIArray, DIBuilder, DIDescriptor, DIScope};
+
+pub fn is_node_local_to_unit(cx: &CodegenCx<'_, '_>, def_id: DefId) -> bool {
+ // The is_local_to_unit flag indicates whether a function is local to the
+ // current compilation unit (i.e., if it is *static* in the C-sense). The
+ // *reachable* set should provide a good approximation of this, as it
+ // contains everything that might leak out of the current crate (by being
+ // externally visible or by being inlined into something externally
+    // visible). It might be better to use the `exported_items` set from
+ // `driver::CrateAnalysis` in the future, but (atm) this set is not
+ // available in the codegen pass.
+ !cx.tcx.is_reachable_non_generic(def_id)
+}
+
+#[allow(non_snake_case)]
+pub fn create_DIArray<'ll>(
+ builder: &DIBuilder<'ll>,
+ arr: &[Option<&'ll DIDescriptor>],
+) -> &'ll DIArray {
+ unsafe { llvm::LLVMRustDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32) }
+}
+
+#[inline]
+pub fn debug_context<'a, 'll, 'tcx>(
+ cx: &'a CodegenCx<'ll, 'tcx>,
+) -> &'a CodegenUnitDebugContext<'ll, 'tcx> {
+ cx.dbg_cx.as_ref().unwrap()
+}
+
+#[inline]
+#[allow(non_snake_case)]
+pub fn DIB<'a, 'll>(cx: &'a CodegenCx<'ll, '_>) -> &'a DIBuilder<'ll> {
+ cx.dbg_cx.as_ref().unwrap().builder
+}
+
+pub fn get_namespace_for_item<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {
+ item_namespace(cx, cx.tcx.parent(def_id))
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub(crate) enum FatPtrKind {
+ Slice,
+ Dyn,
+}
+
+/// Determines if `pointee_ty` is slice-like or trait-object-like, i.e.
+/// if the second field of the fat pointer is a length or a vtable-pointer.
+/// If `pointee_ty` does not require a fat pointer (because it is Sized) then
+/// the function returns `None`.
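+/// For example, a pointee of `[u8]` or `str` yields `Some(Slice)`, `dyn Debug`
+/// yields `Some(Dyn)`, and a sized pointee such as `u32` yields `None`.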
+pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ pointee_ty: Ty<'tcx>,
+) -> Option<FatPtrKind> {
+ let pointee_tail_ty = cx.tcx.struct_tail_erasing_lifetimes(pointee_ty, cx.param_env());
+ let layout = cx.layout_of(pointee_tail_ty);
+ trace!(
+ "fat_pointer_kind: {:?} has layout {:?} (is_unsized? {})",
+ pointee_tail_ty,
+ layout,
+ layout.is_unsized()
+ );
+
+ if !layout.is_unsized() {
+ return None;
+ }
+
+ match *pointee_tail_ty.kind() {
+ ty::Str | ty::Slice(_) => Some(FatPtrKind::Slice),
+ ty::Dynamic(..) => Some(FatPtrKind::Dyn),
+ ty::Foreign(_) => {
+ // Assert that pointers to foreign types really are thin:
+ debug_assert_eq!(
+ cx.size_of(cx.tcx.mk_imm_ptr(pointee_tail_ty)),
+ cx.size_of(cx.tcx.mk_imm_ptr(cx.tcx.types.u8))
+ );
+ None
+ }
+ _ => {
+ // For all other pointee types we should already have returned None
+ // at the beginning of the function.
+ panic!(
+ "fat_pointer_kind() - Encountered unexpected `pointee_tail_ty`: {:?}",
+ pointee_tail_ty
+ )
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
new file mode 100644
index 000000000..fa0ecd18f
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -0,0 +1,146 @@
+//! Declare various LLVM values.
+//!
+//! Prefer using functions and methods from this module rather than calling LLVM
+//! functions directly. These functions do some additional work to ensure we do
+//! the right thing given the preconceptions of codegen.
+//!
+//! Some useful guidelines:
+//!
+//! * Use the `declare_*` family of methods if you are declaring, but are not
+//!   interested in defining, the `Value` they return.
+//! * Use the `define_*` family of methods when you might be defining the `Value`.
+//! * When in doubt, define.
+
+use crate::abi::{FnAbi, FnAbiLlvmExt};
+use crate::attributes;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::llvm::AttributePlace::Function;
+use crate::type_::Type;
+use crate::value::Value;
+use rustc_codegen_ssa::traits::TypeMembershipMethods;
+use rustc_middle::ty::Ty;
+use rustc_symbol_mangling::typeid::typeid_for_fnabi;
+use smallvec::SmallVec;
+use tracing::debug;
+
+/// Declare a function.
+///
+/// If there’s a value with the same name already declared, the function will
+/// update the declaration and return the existing `Value` instead.
+fn declare_raw_fn<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ name: &str,
+ callconv: llvm::CallConv,
+ unnamed: llvm::UnnamedAddr,
+ ty: &'ll Type,
+) -> &'ll Value {
+ debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
+ let llfn = unsafe {
+ llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty)
+ };
+
+ llvm::SetFunctionCallConv(llfn, callconv);
+ llvm::SetUnnamedAddress(llfn, unnamed);
+
+ let mut attrs = SmallVec::<[_; 4]>::new();
+
+ if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.disable_redzone) {
+ attrs.push(llvm::AttributeKind::NoRedZone.create_attr(cx.llcx));
+ }
+
+ attrs.extend(attributes::non_lazy_bind_attr(cx));
+
+ attributes::apply_to_llfn(llfn, Function, &attrs);
+
+ llfn
+}
+
+impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
+ /// Declare a global value.
+ ///
+ /// If there’s a value with the same name already declared, the function will
+    /// return the existing `Value` instead.
+ pub fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value {
+ debug!("declare_global(name={:?})", name);
+ unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_ptr().cast(), name.len(), ty) }
+ }
+
+ /// Declare a C ABI function.
+ ///
+ /// Only use this for foreign function ABIs and glue. For Rust functions use
+ /// `declare_fn` instead.
+ ///
+ /// If there’s a value with the same name already declared, the function will
+    /// update the declaration and return the existing `Value` instead.
+ pub fn declare_cfn(
+ &self,
+ name: &str,
+ unnamed: llvm::UnnamedAddr,
+ fn_type: &'ll Type,
+ ) -> &'ll Value {
+ declare_raw_fn(self, name, llvm::CCallConv, unnamed, fn_type)
+ }
+
+ /// Declare a Rust function.
+ ///
+ /// If there’s a value with the same name already declared, the function will
+    /// update the declaration and return the existing `Value` instead.
+ pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Value {
+ debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
+
+ // Function addresses in Rust are never significant, allowing functions to
+ // be merged.
+ let llfn = declare_raw_fn(
+ self,
+ name,
+ fn_abi.llvm_cconv(),
+ llvm::UnnamedAddr::Global,
+ fn_abi.llvm_type(self),
+ );
+ fn_abi.apply_attrs_llfn(self, llfn);
+
+ if self.tcx.sess.is_sanitizer_cfi_enabled() {
+ let typeid = typeid_for_fnabi(self.tcx, fn_abi);
+ self.set_type_metadata(llfn, typeid);
+ }
+
+ llfn
+ }
+
+ /// Declare a global with an intention to define it.
+ ///
+ /// Use this function when you intend to define a global. This function will
+ /// return `None` if the name already has a definition associated with it. In that
+    /// case an error should be reported to the user, because this usually happens
+    /// due to the user’s fault (e.g., misuse of `#[no_mangle]` or `#[export_name]` attributes).
+ pub fn define_global(&self, name: &str, ty: &'ll Type) -> Option<&'ll Value> {
+ if self.get_defined_value(name).is_some() {
+ None
+ } else {
+ Some(self.declare_global(name, ty))
+ }
+ }
+
+    /// Declare a private global.
+ ///
+ /// Use this function when you intend to define a global without a name.
+ pub fn define_private_global(&self, ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) }
+ }
+
+ /// Gets declared value by name.
+ pub fn get_declared_value(&self, name: &str) -> Option<&'ll Value> {
+ debug!("get_declared_value(name={:?})", name);
+ unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_ptr().cast(), name.len()) }
+ }
+
+ /// Gets defined or externally defined (AvailableExternally linkage) value by
+ /// name.
+ pub fn get_defined_value(&self, name: &str) -> Option<&'ll Value> {
+ self.get_declared_value(name).and_then(|val| {
+ let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 };
+ if !declaration { Some(val) } else { None }
+ })
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
new file mode 100644
index 000000000..9f3647492
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -0,0 +1,1924 @@
+use crate::abi::{Abi, FnAbi, FnAbiLlvmExt, LlvmType, PassMode};
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::va_arg::emit_va_arg;
+use crate::value::Value;
+
+use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
+use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
+use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir as hir;
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
+use rustc_middle::ty::{self, Ty};
+use rustc_middle::{bug, span_bug};
+use rustc_span::{sym, symbol::kw, Span, Symbol};
+use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
+use rustc_target::spec::{HasTargetSpec, PanicStrategy};
+
+use std::cmp::Ordering;
+use std::iter;
+
+fn get_simple_intrinsic<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ name: Symbol,
+) -> Option<(&'ll Type, &'ll Value)> {
+ let llvm_name = match name {
+ sym::sqrtf32 => "llvm.sqrt.f32",
+ sym::sqrtf64 => "llvm.sqrt.f64",
+ sym::powif32 => "llvm.powi.f32",
+ sym::powif64 => "llvm.powi.f64",
+ sym::sinf32 => "llvm.sin.f32",
+ sym::sinf64 => "llvm.sin.f64",
+ sym::cosf32 => "llvm.cos.f32",
+ sym::cosf64 => "llvm.cos.f64",
+ sym::powf32 => "llvm.pow.f32",
+ sym::powf64 => "llvm.pow.f64",
+ sym::expf32 => "llvm.exp.f32",
+ sym::expf64 => "llvm.exp.f64",
+ sym::exp2f32 => "llvm.exp2.f32",
+ sym::exp2f64 => "llvm.exp2.f64",
+ sym::logf32 => "llvm.log.f32",
+ sym::logf64 => "llvm.log.f64",
+ sym::log10f32 => "llvm.log10.f32",
+ sym::log10f64 => "llvm.log10.f64",
+ sym::log2f32 => "llvm.log2.f32",
+ sym::log2f64 => "llvm.log2.f64",
+ sym::fmaf32 => "llvm.fma.f32",
+ sym::fmaf64 => "llvm.fma.f64",
+ sym::fabsf32 => "llvm.fabs.f32",
+ sym::fabsf64 => "llvm.fabs.f64",
+ sym::minnumf32 => "llvm.minnum.f32",
+ sym::minnumf64 => "llvm.minnum.f64",
+ sym::maxnumf32 => "llvm.maxnum.f32",
+ sym::maxnumf64 => "llvm.maxnum.f64",
+ sym::copysignf32 => "llvm.copysign.f32",
+ sym::copysignf64 => "llvm.copysign.f64",
+ sym::floorf32 => "llvm.floor.f32",
+ sym::floorf64 => "llvm.floor.f64",
+ sym::ceilf32 => "llvm.ceil.f32",
+ sym::ceilf64 => "llvm.ceil.f64",
+ sym::truncf32 => "llvm.trunc.f32",
+ sym::truncf64 => "llvm.trunc.f64",
+ sym::rintf32 => "llvm.rint.f32",
+ sym::rintf64 => "llvm.rint.f64",
+ sym::nearbyintf32 => "llvm.nearbyint.f32",
+ sym::nearbyintf64 => "llvm.nearbyint.f64",
+ sym::roundf32 => "llvm.round.f32",
+ sym::roundf64 => "llvm.round.f64",
+ _ => return None,
+ };
+ Some(cx.get_intrinsic(llvm_name))
+}
+
+impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
+ fn codegen_intrinsic_call(
+ &mut self,
+ instance: ty::Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ args: &[OperandRef<'tcx, &'ll Value>],
+ llresult: &'ll Value,
+ span: Span,
+ ) {
+ let tcx = self.tcx;
+ let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
+
+ let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
+ bug!("expected fn item type, found {}", callee_ty);
+ };
+
+ let sig = callee_ty.fn_sig(tcx);
+ let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
+ let arg_tys = sig.inputs();
+ let ret_ty = sig.output();
+ let name = tcx.item_name(def_id);
+
+ let llret_ty = self.layout_of(ret_ty).llvm_type(self);
+ let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+
+ let simple = get_simple_intrinsic(self, name);
+ let llval = match name {
+ _ if simple.is_some() => {
+ let (simple_ty, simple_fn) = simple.unwrap();
+ self.call(
+ simple_ty,
+ simple_fn,
+ &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
+ None,
+ )
+ }
+ sym::likely => {
+ self.call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(true)])
+ }
+ sym::unlikely => self
+ .call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(false)]),
+ kw::Try => {
+ try_intrinsic(
+ self,
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ llresult,
+ );
+ return;
+ }
+ sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[]),
+ sym::va_copy => {
+ self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
+ }
+ sym::va_arg => {
+ match fn_abi.ret.layout.abi {
+ abi::Abi::Scalar(scalar) => {
+ match scalar.primitive() {
+ Primitive::Int(..) => {
+ if self.cx().size_of(ret_ty).bytes() < 4 {
+ // `va_arg` should not be called on an integer type
+ // less than 4 bytes in length. If it is, promote
+ // the integer to an `i32` and truncate the result
+ // back to the smaller type.
+ let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
+ self.trunc(promoted_result, llret_ty)
+ } else {
+ emit_va_arg(self, args[0], ret_ty)
+ }
+ }
+ Primitive::F64 | Primitive::Pointer => {
+ emit_va_arg(self, args[0], ret_ty)
+ }
+ // `va_arg` should never be used with the return type f32.
+ Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
+ }
+ }
+ _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
+ }
+ }
+
+ sym::volatile_load | sym::unaligned_volatile_load => {
+ let tp_ty = substs.type_at(0);
+ let ptr = args[0].immediate();
+ let load = if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ let llty = ty.llvm_type(self);
+ let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
+ self.volatile_load(llty, ptr)
+ } else {
+ self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
+ };
+ let align = if name == sym::unaligned_volatile_load {
+ 1
+ } else {
+ self.align_of(tp_ty).bytes() as u32
+ };
+ unsafe {
+ llvm::LLVMSetAlignment(load, align);
+ }
+ self.to_immediate(load, self.layout_of(tp_ty))
+ }
+ sym::volatile_store => {
+ let dst = args[0].deref(self.cx());
+ args[1].val.volatile_store(self, dst);
+ return;
+ }
+ sym::unaligned_volatile_store => {
+ let dst = args[0].deref(self.cx());
+ args[1].val.unaligned_volatile_store(self, dst);
+ return;
+ }
+ sym::prefetch_read_data
+ | sym::prefetch_write_data
+ | sym::prefetch_read_instruction
+ | sym::prefetch_write_instruction => {
+ let (rw, cache_type) = match name {
+ sym::prefetch_read_data => (0, 1),
+ sym::prefetch_write_data => (1, 1),
+ sym::prefetch_read_instruction => (0, 0),
+ sym::prefetch_write_instruction => (1, 0),
+ _ => bug!(),
+ };
+ self.call_intrinsic(
+ "llvm.prefetch",
+ &[
+ args[0].immediate(),
+ self.const_i32(rw),
+ args[1].immediate(),
+ self.const_i32(cache_type),
+ ],
+ )
+ }
+ sym::ctlz
+ | sym::ctlz_nonzero
+ | sym::cttz
+ | sym::cttz_nonzero
+ | sym::ctpop
+ | sym::bswap
+ | sym::bitreverse
+ | sym::rotate_left
+ | sym::rotate_right
+ | sym::saturating_add
+ | sym::saturating_sub => {
+ let ty = arg_tys[0];
+ match int_type_width_signed(ty, self) {
+ Some((width, signed)) => match name {
+ sym::ctlz | sym::cttz => {
+ let y = self.const_bool(false);
+ self.call_intrinsic(
+ &format!("llvm.{}.i{}", name, width),
+ &[args[0].immediate(), y],
+ )
+ }
+ sym::ctlz_nonzero => {
+ let y = self.const_bool(true);
+ let llvm_name = &format!("llvm.ctlz.i{}", width);
+ self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
+ }
+ sym::cttz_nonzero => {
+ let y = self.const_bool(true);
+ let llvm_name = &format!("llvm.cttz.i{}", width);
+ self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
+ }
+ sym::ctpop => self.call_intrinsic(
+ &format!("llvm.ctpop.i{}", width),
+ &[args[0].immediate()],
+ ),
+ sym::bswap => {
+ if width == 8 {
+                            args[0].immediate() // byte-swapping a u8/i8 is just a no-op
+ } else {
+ self.call_intrinsic(
+ &format!("llvm.bswap.i{}", width),
+ &[args[0].immediate()],
+ )
+ }
+ }
+ sym::bitreverse => self.call_intrinsic(
+ &format!("llvm.bitreverse.i{}", width),
+ &[args[0].immediate()],
+ ),
+ sym::rotate_left | sym::rotate_right => {
+ let is_left = name == sym::rotate_left;
+ let val = args[0].immediate();
+ let raw_shift = args[1].immediate();
+ // rotate = funnel shift with first two args the same
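+                        // (e.g., `rotate_left(x, n)` becomes `llvm.fshl.iN(x, x, n)`)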
+ let llvm_name =
+ &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
+ self.call_intrinsic(llvm_name, &[val, val, raw_shift])
+ }
+ sym::saturating_add | sym::saturating_sub => {
+ let is_add = name == sym::saturating_add;
+ let lhs = args[0].immediate();
+ let rhs = args[1].immediate();
+ let llvm_name = &format!(
+ "llvm.{}{}.sat.i{}",
+ if signed { 's' } else { 'u' },
+ if is_add { "add" } else { "sub" },
+ width
+ );
+ self.call_intrinsic(llvm_name, &[lhs, rhs])
+ }
+ _ => bug!(),
+ },
+ None => {
+ span_invalid_monomorphization_error(
+ tcx.sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic integer type, found `{}`",
+ name, ty
+ ),
+ );
+ return;
+ }
+ }
+ }
+
+ sym::raw_eq => {
+ use abi::Abi::*;
+ let tp_ty = substs.type_at(0);
+ let layout = self.layout_of(tp_ty).layout;
+ let use_integer_compare = match layout.abi() {
+ Scalar(_) | ScalarPair(_, _) => true,
+ Uninhabited | Vector { .. } => false,
+ Aggregate { .. } => {
+ // For rusty ABIs, small aggregates are actually passed
+ // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
+ // so we re-use that same threshold here.
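+                    // On a 64-bit target this means aggregates of up to
+                    // 16 bytes are compared as a single wide integer load
+                    // instead of through `memcmp`.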
+ layout.size() <= self.data_layout().pointer_size * 2
+ }
+ };
+
+ let a = args[0].immediate();
+ let b = args[1].immediate();
+ if layout.size().bytes() == 0 {
+ self.const_bool(true)
+ } else if use_integer_compare {
+ let integer_ty = self.type_ix(layout.size().bits());
+ let ptr_ty = self.type_ptr_to(integer_ty);
+ let a_ptr = self.bitcast(a, ptr_ty);
+ let a_val = self.load(integer_ty, a_ptr, layout.align().abi);
+ let b_ptr = self.bitcast(b, ptr_ty);
+ let b_val = self.load(integer_ty, b_ptr, layout.align().abi);
+ self.icmp(IntPredicate::IntEQ, a_val, b_val)
+ } else {
+ let i8p_ty = self.type_i8p();
+ let a_ptr = self.bitcast(a, i8p_ty);
+ let b_ptr = self.bitcast(b, i8p_ty);
+ let n = self.const_usize(layout.size().bytes());
+ let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]);
+ match self.cx.sess().target.arch.as_ref() {
+ "avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)),
+ _ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)),
+ }
+ }
+ }
+
+ sym::black_box => {
+ args[0].val.store(self, result);
+
+ // We need to "use" the argument in some way LLVM can't introspect, and on
+ // targets that support it we can typically leverage inline assembly to do
+ // this. LLVM's interpretation of inline assembly is that it's, well, a black
+ // box. This isn't the greatest implementation since it probably deoptimizes
+ // more than we want, but it's so far good enough.
+ crate::asm::inline_asm_call(
+ self,
+ "",
+ "r,~{memory}",
+ &[result.llval],
+ self.type_void(),
+ true,
+ false,
+ llvm::AsmDialect::Att,
+ &[span],
+ false,
+ None,
+ )
+ .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));
+
+ // We have copied the value to `result` already.
+ return;
+ }
+
+ _ if name.as_str().starts_with("simd_") => {
+ match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+ Ok(llval) => llval,
+ Err(()) => return,
+ }
+ }
+
+ _ => bug!("unknown intrinsic '{}'", name),
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
+ let ptr = self.pointercast(result.llval, ptr_llty);
+ self.store(llval, ptr, result.align);
+ } else {
+ OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
+ .val
+ .store(self, result);
+ }
+ }
+ }
+
+ fn abort(&mut self) {
+ self.call_intrinsic("llvm.trap", &[]);
+ }
+
+ fn assume(&mut self, val: Self::Value) {
+ self.call_intrinsic("llvm.assume", &[val]);
+ }
+
+ fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
+ self.call_intrinsic("llvm.expect.i1", &[cond, self.const_bool(expected)])
+ }
+
+ fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value {
+ // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time
+ // optimization pass replaces calls to this intrinsic with code to test type membership.
+ let i8p_ty = self.type_i8p();
+ let bitcast = self.bitcast(pointer, i8p_ty);
+ self.call_intrinsic("llvm.type.test", &[bitcast, typeid])
+ }
+
+ fn type_checked_load(
+ &mut self,
+ llvtable: &'ll Value,
+ vtable_byte_offset: u64,
+ typeid: &'ll Value,
+ ) -> Self::Value {
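+        // `llvm.type.checked.load` loads an entry from the vtable at the
+        // given byte offset and, via type metadata, checks that the vtable
+        // matches `typeid`, which is how CFI guards virtual calls.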
+ let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
+ self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid])
+ }
+
+ fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
+ self.call_intrinsic("llvm.va_start", &[va_list])
+ }
+
+ fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
+ self.call_intrinsic("llvm.va_end", &[va_list])
+ }
+}
+
+fn try_intrinsic<'ll>(
+ bx: &mut Builder<'_, 'll, '_>,
+ try_func: &'ll Value,
+ data: &'ll Value,
+ catch_func: &'ll Value,
+ dest: &'ll Value,
+) {
+ if bx.sess().panic_strategy() == PanicStrategy::Abort {
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ bx.call(try_func_ty, try_func, &[data], None);
+ // Return 0 unconditionally from the intrinsic call;
+ // we can never unwind.
+ let ret_align = bx.tcx().data_layout.i32_align.abi;
+ bx.store(bx.const_i32(0), dest, ret_align);
+ } else if wants_msvc_seh(bx.sess()) {
+ codegen_msvc_try(bx, try_func, data, catch_func, dest);
+ } else if bx.sess().target.os == "emscripten" {
+ codegen_emcc_try(bx, try_func, data, catch_func, dest);
+ } else {
+ codegen_gnu_try(bx, try_func, data, catch_func, dest);
+ }
+}
+
+// MSVC's definition of the `rust_try` function.
+//
+// This implementation uses the new exception handling instructions in LLVM,
+// which support SEH on MSVC targets. Although these instructions are meant to
+// work for all targets, as of the time of this writing LLVM does not
+// recommend using them, as the old-style instructions are still better
+// optimized.
+fn codegen_msvc_try<'ll>(
+ bx: &mut Builder<'_, 'll, '_>,
+ try_func: &'ll Value,
+ data: &'ll Value,
+ catch_func: &'ll Value,
+ dest: &'ll Value,
+) {
+ let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+ bx.set_personality_fn(bx.eh_personality());
+
+ let normal = bx.append_sibling_block("normal");
+ let catchswitch = bx.append_sibling_block("catchswitch");
+ let catchpad_rust = bx.append_sibling_block("catchpad_rust");
+ let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
+ let caught = bx.append_sibling_block("caught");
+
+ let try_func = llvm::get_param(bx.llfn(), 0);
+ let data = llvm::get_param(bx.llfn(), 1);
+ let catch_func = llvm::get_param(bx.llfn(), 2);
+
+ // We're generating an IR snippet that looks like:
+ //
+ // declare i32 @rust_try(%try_func, %data, %catch_func) {
+ // %slot = alloca i8*
+ // invoke %try_func(%data) to label %normal unwind label %catchswitch
+ //
+ // normal:
+ // ret i32 0
+ //
+ // catchswitch:
+ // %cs = catchswitch within none [%catchpad_rust, %catchpad_foreign] unwind to caller
+ //
+ // catchpad_rust:
+ // %tok = catchpad within %cs [%type_descriptor, 8, %slot]
+ // %ptr = load %slot
+ // call %catch_func(%data, %ptr)
+ // catchret from %tok to label %caught
+ //
+ // catchpad_foreign:
+ // %tok = catchpad within %cs [null, 64, null]
+ // call %catch_func(%data, null)
+ // catchret from %tok to label %caught
+ //
+ // caught:
+ // ret i32 1
+ // }
+ //
+ // This structure follows the basic usage of throw/try/catch in LLVM.
+ // For example, compile this C++ snippet to see what LLVM generates:
+ //
+ // struct rust_panic {
+ // rust_panic(const rust_panic&);
+ // ~rust_panic();
+ //
+ // void* x[2];
+ // };
+ //
+ // int __rust_try(
+ // void (*try_func)(void*),
+ // void *data,
+ // void (*catch_func)(void*, void*) noexcept
+ // ) {
+ // try {
+ // try_func(data);
+ // return 0;
+ // } catch(rust_panic& a) {
+ // catch_func(data, &a);
+ // return 1;
+ // } catch(...) {
+ // catch_func(data, NULL);
+ // return 1;
+ // }
+ // }
+ //
+ // More information can be found in libstd's seh.rs implementation.
+ let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+ let slot = bx.alloca(bx.type_i8p(), ptr_align);
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ bx.invoke(try_func_ty, try_func, &[data], normal, catchswitch, None);
+
+ bx.switch_to_block(normal);
+ bx.ret(bx.const_i32(0));
+
+ bx.switch_to_block(catchswitch);
+ let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);
+
+ // We can't use the TypeDescriptor defined in libpanic_unwind because it
+ // might be in another DLL and the SEH encoding only supports specifying
+ // a TypeDescriptor from the current module.
+ //
+ // However this isn't an issue since the MSVC runtime uses string
+ // comparison on the type name to match TypeDescriptors rather than
+ // pointer equality.
+ //
+ // So instead we generate a new TypeDescriptor in each module that uses
+ // `try` and let the linker merge duplicate definitions in the same
+ // module.
+ //
+ // When modifying, make sure that the type_name string exactly matches
+ // the one used in src/libpanic_unwind/seh.rs.
+ let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
+ let type_name = bx.const_bytes(b"rust_panic\0");
+ let type_info =
+ bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
+ let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
+ unsafe {
+ llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
+ llvm::SetUniqueComdat(bx.llmod, tydesc);
+ llvm::LLVMSetInitializer(tydesc, type_info);
+ }
+
+ // The flag value of 8 indicates that we are catching the exception by
+ // reference instead of by value. We can't use catch by value because
+ // that requires copying the exception object, which we don't support
+ // since our exception object effectively contains a Box.
+ //
+ // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
+ bx.switch_to_block(catchpad_rust);
+ let flags = bx.const_i32(8);
+ let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
+ let ptr = bx.load(bx.type_i8p(), slot, ptr_align);
+ let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ bx.call(catch_ty, catch_func, &[data, ptr], Some(&funclet));
+ bx.catch_ret(&funclet, caught);
+
+ // The flag value of 64 indicates a "catch-all".
+ bx.switch_to_block(catchpad_foreign);
+ let flags = bx.const_i32(64);
+ let null = bx.const_null(bx.type_i8p());
+ let funclet = bx.catch_pad(cs, &[null, flags, null]);
+ bx.call(catch_ty, catch_func, &[data, null], Some(&funclet));
+ bx.catch_ret(&funclet, caught);
+
+ bx.switch_to_block(caught);
+ bx.ret(bx.const_i32(1));
+ });
+
+ // Note that no invoke is used here because by definition this function
+ // can't panic (that's what it's catching).
+ let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
+ let i32_align = bx.tcx().data_layout.i32_align.abi;
+ bx.store(ret, dest, i32_align);
+}
+
+// Definition of the standard `try` function for Rust using the GNU-like model
+// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
+// instructions).
+//
+// This codegen is a little surprising because we always call a shim
+// function instead of inlining the call to `invoke` manually here. This is done
+// because in LLVM we're only allowed to have one personality per function
+// definition. The call to the `try` intrinsic is being inlined into the
+// function calling it, and that function may already have other personality
+// functions in play. By calling a shim we're guaranteed that our shim will have
+// the right personality function.
+fn codegen_gnu_try<'ll>(
+ bx: &mut Builder<'_, 'll, '_>,
+ try_func: &'ll Value,
+ data: &'ll Value,
+ catch_func: &'ll Value,
+ dest: &'ll Value,
+) {
+ let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+ // Codegens the shims described above:
+ //
+ // bx:
+ // invoke %try_func(%data) normal %normal unwind %catch
+ //
+ // normal:
+ // ret 0
+ //
+ // catch:
+ // (%ptr, _) = landingpad
+ // call %catch_func(%data, %ptr)
+ // ret 1
+ let then = bx.append_sibling_block("then");
+ let catch = bx.append_sibling_block("catch");
+
+ let try_func = llvm::get_param(bx.llfn(), 0);
+ let data = llvm::get_param(bx.llfn(), 1);
+ let catch_func = llvm::get_param(bx.llfn(), 2);
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ bx.invoke(try_func_ty, try_func, &[data], then, catch, None);
+
+ bx.switch_to_block(then);
+ bx.ret(bx.const_i32(0));
+
+ // Type indicator for the exception being thrown.
+ //
+ // The first value in this tuple is a pointer to the exception object
+ // being thrown. The second value is a "selector" indicating which of
+ // the landing pad clauses the exception's type had been matched to.
+ // rust_try ignores the selector.
+ bx.switch_to_block(catch);
+ let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+ let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
+ let tydesc = bx.const_null(bx.type_i8p());
+ bx.add_clause(vals, tydesc);
+ let ptr = bx.extract_value(vals, 0);
+ let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ bx.call(catch_ty, catch_func, &[data, ptr], None);
+ bx.ret(bx.const_i32(1));
+ });
+
+ // Note that no invoke is used here because by definition this function
+ // can't panic (that's what it's catching).
+ let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
+ let i32_align = bx.tcx().data_layout.i32_align.abi;
+ bx.store(ret, dest, i32_align);
+}
+
+// Variant of codegen_gnu_try used for emscripten where Rust panics are
+// implemented using C++ exceptions. Here we use exceptions of a specific type
+// (`struct rust_panic`) to represent Rust panics.
+fn codegen_emcc_try<'ll>(
+ bx: &mut Builder<'_, 'll, '_>,
+ try_func: &'ll Value,
+ data: &'ll Value,
+ catch_func: &'ll Value,
+ dest: &'ll Value,
+) {
+ let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+ // Codegens the shims described above:
+ //
+ // bx:
+ // invoke %try_func(%data) normal %normal unwind %catch
+ //
+ // normal:
+ // ret 0
+ //
+ // catch:
+ // (%ptr, %selector) = landingpad
+ // %rust_typeid = @llvm.eh.typeid.for(@_ZTI10rust_panic)
+ // %is_rust_panic = %selector == %rust_typeid
+ // %catch_data = alloca { i8*, i8 }
+ // %catch_data[0] = %ptr
+ // %catch_data[1] = %is_rust_panic
+ // call %catch_func(%data, %catch_data)
+ // ret 1
+ let then = bx.append_sibling_block("then");
+ let catch = bx.append_sibling_block("catch");
+
+ let try_func = llvm::get_param(bx.llfn(), 0);
+ let data = llvm::get_param(bx.llfn(), 1);
+ let catch_func = llvm::get_param(bx.llfn(), 2);
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ bx.invoke(try_func_ty, try_func, &[data], then, catch, None);
+
+ bx.switch_to_block(then);
+ bx.ret(bx.const_i32(0));
+
+ // Type indicator for the exception being thrown.
+ //
+ // The first value in this tuple is a pointer to the exception object
+ // being thrown. The second value is a "selector" indicating which of
+ // the landing pad clauses the exception's type had been matched to.
+ bx.switch_to_block(catch);
+ let tydesc = bx.eh_catch_typeinfo();
+ let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+ let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
+ bx.add_clause(vals, tydesc);
+ bx.add_clause(vals, bx.const_null(bx.type_i8p()));
+ let ptr = bx.extract_value(vals, 0);
+ let selector = bx.extract_value(vals, 1);
+
+ // Check if the typeid we got is the one for a Rust panic.
+ let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[tydesc]);
+ let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
+ let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());
+
+ // We need to pass two values to catch_func (ptr and is_rust_panic), so
+ // create an alloca and pass a pointer to that.
+ let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+ let i8_align = bx.tcx().data_layout.i8_align.abi;
+ let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false);
+ let catch_data = bx.alloca(catch_data_type, ptr_align);
+ let catch_data_0 =
+ bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
+ bx.store(ptr, catch_data_0, ptr_align);
+ let catch_data_1 =
+ bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
+ bx.store(is_rust_panic, catch_data_1, i8_align);
+ let catch_data = bx.bitcast(catch_data, bx.type_i8p());
+
+ let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ bx.call(catch_ty, catch_func, &[data, catch_data], None);
+ bx.ret(bx.const_i32(1));
+ });
+
+ // Note that no invoke is used here because by definition this function
+ // can't panic (that's what it's catching).
+ let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
+ let i32_align = bx.tcx().data_layout.i32_align.abi;
+ bx.store(ret, dest, i32_align);
+}
+
+// Helper function that hands a `Builder` positioned at a fresh entry block to
+// a closure so it can codegen a shim function. This is currently primarily
+// used for the `try` intrinsic functions above.
+fn gen_fn<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ name: &str,
+ rust_fn_sig: ty::PolyFnSig<'tcx>,
+ codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
+) -> (&'ll Type, &'ll Value) {
+ let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
+ let llty = fn_abi.llvm_type(cx);
+ let llfn = cx.declare_fn(name, fn_abi);
+ cx.set_frame_pointer_type(llfn);
+ cx.apply_target_cpu_attr(llfn);
+ // FIXME(eddyb) find a nicer way to do this.
+ unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
+ let llbb = Builder::append_block(cx, llfn, "entry-block");
+ let bx = Builder::build(cx, llbb);
+ codegen(bx);
+ (llty, llfn)
+}
+
+// Helper function used to get a handle to the `__rust_try` function used to
+// catch exceptions.
+//
+// This function is only generated once and is then cached.
+fn get_rust_try_fn<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
+) -> (&'ll Type, &'ll Value) {
+ if let Some(llfn) = cx.rust_try_fn.get() {
+ return llfn;
+ }
+
+ // Define the type up front for the signature of the rust_try function.
+ let tcx = cx.tcx;
+ let i8p = tcx.mk_mut_ptr(tcx.types.i8);
+ // `unsafe fn(*mut i8) -> ()`
+ let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
+ iter::once(i8p),
+ tcx.mk_unit(),
+ false,
+ hir::Unsafety::Unsafe,
+ Abi::Rust,
+ )));
+ // `unsafe fn(*mut i8, *mut i8) -> ()`
+ let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
+ [i8p, i8p].iter().cloned(),
+ tcx.mk_unit(),
+ false,
+ hir::Unsafety::Unsafe,
+ Abi::Rust,
+ )));
+ // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
+ let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
+ [try_fn_ty, i8p, catch_fn_ty].into_iter(),
+ tcx.types.i32,
+ false,
+ hir::Unsafety::Unsafe,
+ Abi::Rust,
+ ));
+ let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
+ cx.rust_try_fn.set(Some(rust_try));
+ rust_try
+}
+
+fn generic_simd_intrinsic<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ name: Symbol,
+ callee_ty: Ty<'tcx>,
+ args: &[OperandRef<'tcx, &'ll Value>],
+ ret_ty: Ty<'tcx>,
+ llret_ty: &'ll Type,
+ span: Span,
+) -> Result<&'ll Value, ()> {
+ // macros for error handling:
+ #[allow(unused_macro_rules)]
+ macro_rules! emit_error {
+ ($msg: tt) => {
+ emit_error!($msg, )
+ };
+ ($msg: tt, $($fmt: tt)*) => {
+ span_invalid_monomorphization_error(
+ bx.sess(), span,
+ &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
+ name, $($fmt)*));
+ }
+ }
+
+ macro_rules! return_error {
+ ($($fmt: tt)*) => {
+ {
+ emit_error!($($fmt)*);
+ return Err(());
+ }
+ }
+ }
+
+ macro_rules! require {
+ ($cond: expr, $($fmt: tt)*) => {
+ if !$cond {
+ return_error!($($fmt)*);
+ }
+ };
+ }
+
+ macro_rules! require_simd {
+ ($ty: expr, $position: expr) => {
+ require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
+ };
+ }
+
+ let tcx = bx.tcx();
+ let sig =
+ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
+ let arg_tys = sig.inputs();
+
+ if name == sym::simd_select_bitmask {
+ require_simd!(arg_tys[1], "argument");
+ let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+
+ let expected_int_bits = (len.max(8) - 1).next_power_of_two();
+ let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
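+        // e.g., a 4-lane mask expects a `u8` (or `[u8; 1]`) and a 16-lane
+        // mask a `u16` (or `[u8; 2]`).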
+
+ let mask_ty = arg_tys[0];
+ let mask = match mask_ty.kind() {
+ ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
+ ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
+ ty::Array(elem, len)
+ if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
+ && len.try_eval_usize(bx.tcx, ty::ParamEnv::reveal_all())
+ == Some(expected_bytes) =>
+ {
+ let place = PlaceRef::alloca(bx, args[0].layout);
+ args[0].val.store(bx, place);
+ let int_ty = bx.type_ix(expected_bytes * 8);
+ let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
+ bx.load(int_ty, ptr, Align::ONE)
+ }
+ _ => return_error!(
+ "invalid bitmask `{}`, expected `u{}` or `[u8; {}]`",
+ mask_ty,
+ expected_int_bits,
+ expected_bytes
+ ),
+ };
+
+ let i1 = bx.type_i1();
+ let im = bx.type_ix(len);
+ let i1xn = bx.type_vector(i1, len);
+ let m_im = bx.trunc(mask, im);
+ let m_i1s = bx.bitcast(m_im, i1xn);
+ return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
+ }
+
+ // every intrinsic below takes a SIMD vector as its first argument
+ require_simd!(arg_tys[0], "input");
+ let in_ty = arg_tys[0];
+
+ let comparison = match name {
+ sym::simd_eq => Some(hir::BinOpKind::Eq),
+ sym::simd_ne => Some(hir::BinOpKind::Ne),
+ sym::simd_lt => Some(hir::BinOpKind::Lt),
+ sym::simd_le => Some(hir::BinOpKind::Le),
+ sym::simd_gt => Some(hir::BinOpKind::Gt),
+ sym::simd_ge => Some(hir::BinOpKind::Ge),
+ _ => None,
+ };
+
+ let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
+ if let Some(cmp_op) = comparison {
+ require_simd!(ret_ty, "return");
+
+ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+ require!(
+ bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
+ "expected return type with integer elements, found `{}` with non-integer `{}`",
+ ret_ty,
+ out_ty
+ );
+
+ return Ok(compare_simd_types(
+ bx,
+ args[0].immediate(),
+ args[1].immediate(),
+ in_elem,
+ llret_ty,
+ cmp_op,
+ ));
+ }
+
+ if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") {
+ // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
+ // If there is no suffix, use the index array length.
+ let n: u64 = if stripped.is_empty() {
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ match args[2].layout.ty.kind() {
+ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
+ len.try_eval_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(|| {
+ span_bug!(span, "could not evaluate shuffle index array length")
+ })
+ }
+ _ => return_error!(
+ "simd_shuffle index must be an array of `u32`, got `{}`",
+ args[2].layout.ty
+ ),
+ }
+ } else {
+ stripped.parse().unwrap_or_else(|_| {
+ span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
+ })
+ };
+
+ require_simd!(ret_ty, "return");
+ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ out_len == n,
+ "expected return type of length {}, found `{}` with length {}",
+ n,
+ ret_ty,
+ out_len
+ );
+ require!(
+ in_elem == out_ty,
+ "expected return element type `{}` (element of input `{}`), \
+ found `{}` with element type `{}`",
+ in_elem,
+ in_ty,
+ ret_ty,
+ out_ty
+ );
+
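+        // A shuffle may pick lanes from either input vector, so valid indices
+        // range over the lanes of both inputs combined.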
+ let total_len = u128::from(in_len) * 2;
+
+ let vector = args[2].immediate();
+
+ let indices: Option<Vec<_>> = (0..n)
+ .map(|i| {
+ let arg_idx = i;
+ let val = bx.const_get_elt(vector, i as u64);
+ match bx.const_to_opt_u128(val, true) {
+ None => {
+ emit_error!("shuffle index #{} is not a constant", arg_idx);
+ None
+ }
+ Some(idx) if idx >= total_len => {
+ emit_error!(
+ "shuffle index #{} is out of bounds (limit {})",
+ arg_idx,
+ total_len
+ );
+ None
+ }
+ Some(idx) => Some(bx.const_i32(idx as i32)),
+ }
+ })
+ .collect();
+ let Some(indices) = indices else {
+ return Ok(bx.const_null(llret_ty));
+ };
+
+ return Ok(bx.shuffle_vector(
+ args[0].immediate(),
+ args[1].immediate(),
+ bx.const_vector(&indices),
+ ));
+ }
+
+ if name == sym::simd_insert {
+ require!(
+ in_elem == arg_tys[2],
+ "expected inserted type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ arg_tys[2]
+ );
+ return Ok(bx.insert_element(
+ args[0].immediate(),
+ args[2].immediate(),
+ args[1].immediate(),
+ ));
+ }
+ if name == sym::simd_extract {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
+ }
+
+ if name == sym::simd_select {
+ let m_elem_ty = in_elem;
+ let m_len = in_len;
+ require_simd!(arg_tys[1], "argument");
+ let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+ require!(
+ m_len == v_len,
+ "mismatched lengths: mask length `{}` != other vector length `{}`",
+ m_len,
+ v_len
+ );
+ match m_elem_ty.kind() {
+ ty::Int(_) => {}
+ _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
+ }
+ // truncate the mask to a vector of i1s
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, m_len as u64);
+ let m_i1s = bx.trunc(args[0].immediate(), i1xn);
+ return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
+ }
+
+ if name == sym::simd_bitmask {
+ // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
+ // vector mask and returns the most significant bit (MSB) of each lane in the form
+ // of either:
+ // * an unsigned integer
+ // * an array of `u8`
+        // If the vector has fewer than 8 lanes, a `u8` is returned with zeroed trailing bits.
+ //
+ // The bit order of the result depends on the byte endianness, LSB-first for little
+ // endian and MSB-first for big endian.
+ let expected_int_bits = in_len.max(8);
+ let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64);
+
+ // Integer vector <i{in_bitwidth} x in_len>:
+ let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
+ ty::Int(i) => (
+ args[0].immediate(),
+ i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
+ ),
+ ty::Uint(i) => (
+ args[0].immediate(),
+ i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
+ ),
+ _ => return_error!(
+ "vector argument `{}`'s element type `{}`, expected integer element type",
+ in_ty,
+ in_elem
+ ),
+ };
+
+ // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
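+        // (e.g., each lane of an `i32` mask is shifted right by 31, leaving
+        // only its sign bit).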
+ let shift_indices =
+ vec![
+ bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
+ in_len as _
+ ];
+ let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
+ // Truncate vector to an <i1 x N>
+ let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
+ // Bitcast <i1 x N> to iN:
+ let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
+
+ match ret_ty.kind() {
+ ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
+ // Zero-extend iN to the bitmask type:
+ return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
+ }
+ ty::Array(elem, len)
+ if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
+ && len.try_eval_usize(bx.tcx, ty::ParamEnv::reveal_all())
+ == Some(expected_bytes) =>
+ {
+ // Zero-extend iN to the array length:
+ let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
+
+ // Convert the integer to a byte array
+ let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
+ bx.store(ze, ptr, Align::ONE);
+ let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
+ let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));
+ return Ok(bx.load(array_ty, ptr, Align::ONE));
+ }
+ _ => return_error!(
+ "cannot return `{}`, expected `u{}` or `[u8; {}]`",
+ ret_ty,
+ expected_int_bits,
+ expected_bytes
+ ),
+ }
+ }
+
+ fn simd_simple_float_intrinsic<'ll, 'tcx>(
+ name: Symbol,
+ in_elem: Ty<'_>,
+ in_ty: Ty<'_>,
+ in_len: u64,
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ span: Span,
+ args: &[OperandRef<'tcx, &'ll Value>],
+ ) -> Result<&'ll Value, ()> {
+ #[allow(unused_macro_rules)]
+ macro_rules! emit_error {
+ ($msg: tt) => {
+ emit_error!($msg, )
+ };
+ ($msg: tt, $($fmt: tt)*) => {
+ span_invalid_monomorphization_error(
+ bx.sess(), span,
+ &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
+ name, $($fmt)*));
+ }
+ }
+ macro_rules! return_error {
+ ($($fmt: tt)*) => {
+ {
+ emit_error!($($fmt)*);
+ return Err(());
+ }
+ }
+ }
+
+ let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
+ let elem_ty = bx.cx.type_float_from_ty(*f);
+ match f.bit_width() {
+ 32 => ("f32", elem_ty),
+ 64 => ("f64", elem_ty),
+ _ => {
+ return_error!(
+ "unsupported element type `{}` of floating-point vector `{}`",
+ f.name_str(),
+ in_ty
+ );
+ }
+ }
+ } else {
+ return_error!("`{}` is not a floating-point type", in_ty);
+ };
+
+ let vec_ty = bx.type_vector(elem_ty, in_len);
+
+ let (intr_name, fn_ty) = match name {
+ sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
+ sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
+ sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
+ sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
+ _ => return_error!("unrecognized intrinsic `{}`", name),
+ };
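+        // e.g., `simd_fsqrt` on a vector of 4 x f32 becomes `llvm.sqrt.v4f32`.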
+ let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
+ let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
+ let c =
+ bx.call(fn_ty, f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
+ Ok(c)
+ }
+
+ if std::matches!(
+ name,
+ sym::simd_ceil
+ | sym::simd_fabs
+ | sym::simd_fcos
+ | sym::simd_fexp2
+ | sym::simd_fexp
+ | sym::simd_flog10
+ | sym::simd_flog2
+ | sym::simd_flog
+ | sym::simd_floor
+ | sym::simd_fma
+ | sym::simd_fpow
+ | sym::simd_fpowi
+ | sym::simd_fsin
+ | sym::simd_fsqrt
+ | sym::simd_round
+ | sym::simd_trunc
+ ) {
+ return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
+ }
+
+ // FIXME: use:
+ // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
+ // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
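+    // Mangles a vector type for an LLVM intrinsic name: e.g.,
+    // `llvm_vector_str(i32, 4, 0, bx)` yields `"v4i32"`, and one level of
+    // pointer indirection yields `"v4p0i32"`.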
+ fn llvm_vector_str(
+ elem_ty: Ty<'_>,
+ vec_len: u64,
+ no_pointers: usize,
+ bx: &Builder<'_, '_, '_>,
+ ) -> String {
+ let p0s: String = "p0".repeat(no_pointers);
+ match *elem_ty.kind() {
+ ty::Int(v) => format!(
+ "v{}{}i{}",
+ vec_len,
+ p0s,
+ // Normalize to prevent crash if v: IntTy::Isize
+ v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
+ ),
+ ty::Uint(v) => format!(
+ "v{}{}i{}",
+ vec_len,
+ p0s,
+ // Normalize to prevent crash if v: UIntTy::Usize
+ v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
+ ),
+ ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
+ _ => unreachable!(),
+ }
+ }
+
+ fn llvm_vector_ty<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ elem_ty: Ty<'_>,
+ vec_len: u64,
+ mut no_pointers: usize,
+ ) -> &'ll Type {
+ // FIXME: use cx.layout_of(ty).llvm_type() ?
+ let mut elem_ty = match *elem_ty.kind() {
+ ty::Int(v) => cx.type_int_from_ty(v),
+ ty::Uint(v) => cx.type_uint_from_ty(v),
+ ty::Float(v) => cx.type_float_from_ty(v),
+ _ => unreachable!(),
+ };
+ while no_pointers > 0 {
+ elem_ty = cx.type_ptr_to(elem_ty);
+ no_pointers -= 1;
+ }
+ cx.type_vector(elem_ty, vec_len)
+ }
+
+ if name == sym::simd_gather {
+ // simd_gather(values: <N x T>, pointers: <N x *_ T>,
+ // mask: <N x i{M}>) -> <N x T>
+ // * N: number of elements in the input vectors
+ // * T: type of the element to load
+ // * M: any integer width is supported, will be truncated to i1
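+        //
+        // e.g., gathering 4 x f32 through 4 pointers lowers to
+        // `llvm.masked.gather.v4f32.v4p0f32` (per the name mangling below).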
+
+ // All types must be simd vector types
+ require_simd!(in_ty, "first");
+ require_simd!(arg_tys[1], "second");
+ require_simd!(arg_tys[2], "third");
+ require_simd!(ret_ty, "return");
+
+ // Of the same length:
+ let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+ let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected {} argument with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ "second",
+ in_len,
+ in_ty,
+ arg_tys[1],
+ out_len
+ );
+ require!(
+ in_len == out_len2,
+ "expected {} argument with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ "third",
+ in_len,
+ in_ty,
+ arg_tys[2],
+ out_len2
+ );
+
+ // The return type must match the first argument type
+ require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
+
+        // Counts the levels of pointer indirection in `t`
+ fn ptr_count(t: Ty<'_>) -> usize {
+ match t.kind() {
+ ty::RawPtr(p) => 1 + ptr_count(p.ty),
+ _ => 0,
+ }
+ }
+
+        // Strips all pointer indirections, returning the underlying type
+ fn non_ptr(t: Ty<'_>) -> Ty<'_> {
+ match t.kind() {
+ ty::RawPtr(p) => non_ptr(p.ty),
+ _ => t,
+ }
+ }
+
+ // The second argument must be a simd vector with an element type that's a pointer
+ // to the element type of the first argument
+ let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
+ let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
+ let (pointer_count, underlying_ty) = match element_ty1.kind() {
+ ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
+ _ => {
+ require!(
+ false,
+ "expected element type `{}` of second argument `{}` \
+ to be a pointer to the element type `{}` of the first \
+ argument `{}`, found `{}` != `*_ {}`",
+ element_ty1,
+ arg_tys[1],
+ in_elem,
+ in_ty,
+ element_ty1,
+ in_elem
+ );
+ unreachable!();
+ }
+ };
+ assert!(pointer_count > 0);
+ assert_eq!(pointer_count - 1, ptr_count(element_ty0));
+ assert_eq!(underlying_ty, non_ptr(element_ty0));
+
+ // The element type of the third argument must be a signed integer type of any width:
+ let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
+ match element_ty2.kind() {
+ ty::Int(_) => (),
+ _ => {
+ require!(
+ false,
+ "expected element type `{}` of third argument `{}` \
+ to be a signed integer type",
+ element_ty2,
+ arg_tys[2]
+ );
+ }
+ }
+
+        // Alignment of T; this must be a constant integer value:
+ let alignment_ty = bx.type_i32();
+ let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
+
+ // Truncate the mask vector to a vector of i1s:
+ let (mask, mask_ty) = {
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, in_len);
+ (bx.trunc(args[2].immediate(), i1xn), i1xn)
+ };
+
+ // Type of the vector of pointers:
+ let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
+ let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+
+ // Type of the vector of elements:
+ let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
+ let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+
+ let llvm_intrinsic =
+ format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
+ let fn_ty = bx.type_func(
+ &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
+ llvm_elem_vec_ty,
+ );
+ let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+ let v =
+ bx.call(fn_ty, f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
+ return Ok(v);
+ }
+
+ if name == sym::simd_scatter {
+ // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
+ // mask: <N x i{M}>) -> ()
+ // * N: number of elements in the input vectors
+        // * T: type of the element to store
+ // * M: any integer width is supported, will be truncated to i1
+
+ // All types must be simd vector types
+ require_simd!(in_ty, "first");
+ require_simd!(arg_tys[1], "second");
+ require_simd!(arg_tys[2], "third");
+
+ // Of the same length:
+ let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+ let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
+ require!(
+ in_len == element_len1,
+ "expected {} argument with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ "second",
+ in_len,
+ in_ty,
+ arg_tys[1],
+ element_len1
+ );
+ require!(
+ in_len == element_len2,
+ "expected {} argument with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ "third",
+ in_len,
+ in_ty,
+ arg_tys[2],
+ element_len2
+ );
+
+        // Counts the levels of pointer indirection in `t`
+ fn ptr_count(t: Ty<'_>) -> usize {
+ match t.kind() {
+ ty::RawPtr(p) => 1 + ptr_count(p.ty),
+ _ => 0,
+ }
+ }
+
+        // Strips all pointer indirections, returning the underlying type
+ fn non_ptr(t: Ty<'_>) -> Ty<'_> {
+ match t.kind() {
+ ty::RawPtr(p) => non_ptr(p.ty),
+ _ => t,
+ }
+ }
+
+ // The second argument must be a simd vector with an element type that's a pointer
+ // to the element type of the first argument
+ let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
+ let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
+ let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
+ let (pointer_count, underlying_ty) = match element_ty1.kind() {
+ ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
+ (ptr_count(element_ty1), non_ptr(element_ty1))
+ }
+ _ => {
+ require!(
+ false,
+ "expected element type `{}` of second argument `{}` \
+ to be a pointer to the element type `{}` of the first \
+ argument `{}`, found `{}` != `*mut {}`",
+ element_ty1,
+ arg_tys[1],
+ in_elem,
+ in_ty,
+ element_ty1,
+ in_elem
+ );
+ unreachable!();
+ }
+ };
+ assert!(pointer_count > 0);
+ assert_eq!(pointer_count - 1, ptr_count(element_ty0));
+ assert_eq!(underlying_ty, non_ptr(element_ty0));
+
+ // The element type of the third argument must be a signed integer type of any width:
+ match element_ty2.kind() {
+ ty::Int(_) => (),
+ _ => {
+ require!(
+ false,
+ "expected element type `{}` of third argument `{}` \
+ be a signed integer type",
+ element_ty2,
+ arg_tys[2]
+ );
+ }
+ }
+
+        // Alignment of T; this must be a constant integer value:
+ let alignment_ty = bx.type_i32();
+ let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
+
+ // Truncate the mask vector to a vector of i1s:
+ let (mask, mask_ty) = {
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, in_len);
+ (bx.trunc(args[2].immediate(), i1xn), i1xn)
+ };
+
+ let ret_t = bx.type_void();
+
+ // Type of the vector of pointers:
+ let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
+ let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+
+ // Type of the vector of elements:
+ let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
+ let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+
+ let llvm_intrinsic =
+ format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
+ let fn_ty =
+ bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
+ let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+ let v =
+ bx.call(fn_ty, f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
+ return Ok(v);
+ }
+
+ macro_rules! arith_red {
+ ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
+ $identity:expr) => {
+ if name == sym::$name {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ return match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let r = bx.$integer_reduce(args[0].immediate());
+ if $ordered {
+ // if overflow occurs, the result is the
+ // mathematical result modulo 2^n:
+ Ok(bx.$op(args[1].immediate(), r))
+ } else {
+ Ok(bx.$integer_reduce(args[0].immediate()))
+ }
+ }
+ ty::Float(f) => {
+ let acc = if $ordered {
+ // ordered arithmetic reductions take an accumulator
+ args[1].immediate()
+ } else {
+ // unordered arithmetic reductions use the identity accumulator
+ match f.bit_width() {
+ 32 => bx.const_real(bx.type_f32(), $identity),
+ 64 => bx.const_real(bx.type_f64(), $identity),
+ v => return_error!(
+ r#"
+unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
+ sym::$name,
+ in_ty,
+ in_elem,
+ v,
+ ret_ty
+ ),
+ }
+ };
+ Ok(bx.$float_reduce(acc, args[0].immediate()))
+ }
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
+ arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
+ arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
+ arith_red!(
+ simd_reduce_add_unordered: vector_reduce_add,
+ vector_reduce_fadd_fast,
+ false,
+ add,
+ 0.0
+ );
+ arith_red!(
+ simd_reduce_mul_unordered: vector_reduce_mul,
+ vector_reduce_fmul_fast,
+ false,
+ mul,
+ 1.0
+ );
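+
+    // e.g., `simd_reduce_add_ordered([1, 2, 3, 4], 10)` yields
+    // `10 + (1 + 2 + 3 + 4) = 20`: the accumulator is combined with the
+    // lane-wise reduction.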
+
+ macro_rules! minmax_red {
+ ($name:ident: $int_red:ident, $float_red:ident) => {
+ if name == sym::$name {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ return match in_elem.kind() {
+ ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
+ ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
+ ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
+ minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
+ minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
+
+ minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast);
+ minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast);
+
+ macro_rules! bitwise_red {
+ ($name:ident : $red:ident, $boolean:expr) => {
+ if name == sym::$name {
+ let input = if !$boolean {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ args[0].immediate()
+ } else {
+ match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {}
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ }
+
+ // boolean reductions operate on vectors of i1s:
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, in_len as u64);
+ bx.trunc(args[0].immediate(), i1xn)
+ };
+ return match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let r = bx.$red(input);
+ Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
+ }
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
+ bitwise_red!(simd_reduce_and: vector_reduce_and, false);
+ bitwise_red!(simd_reduce_or: vector_reduce_or, false);
+ bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
+ bitwise_red!(simd_reduce_all: vector_reduce_and, true);
+ bitwise_red!(simd_reduce_any: vector_reduce_or, true);
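+
+    // e.g., `simd_reduce_all` truncates each lane to `i1`, reduces with `and`,
+    // and zero-extends the resulting bit to a bool.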
+
+ if name == sym::simd_cast || name == sym::simd_as {
+ require_simd!(ret_ty, "return");
+ let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+ // casting cares about nominal type, not just structural type
+ if in_elem == out_elem {
+ return Ok(args[0].immediate());
+ }
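+
+        // e.g., `i32x4 -> f32x4` lowers to `sitofp`, `u8x4 -> u32x4` to `zext`,
+        // and `f64x2 -> f32x2` to `fptrunc`.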
+
+ enum Style {
+ Float,
+ Int(/* is signed? */ bool),
+ Unsupported,
+ }
+
+ let (in_style, in_width) = match in_elem.kind() {
+ // vectors of pointer-sized integers should've been
+ // disallowed before here, so this unwrap is safe.
+ ty::Int(i) => (
+ Style::Int(true),
+ i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Uint(u) => (
+ Style::Int(false),
+ u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Float(f) => (Style::Float, f.bit_width()),
+ _ => (Style::Unsupported, 0),
+ };
+ let (out_style, out_width) = match out_elem.kind() {
+ ty::Int(i) => (
+ Style::Int(true),
+ i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Uint(u) => (
+ Style::Int(false),
+ u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Float(f) => (Style::Float, f.bit_width()),
+ _ => (Style::Unsupported, 0),
+ };
+
+ match (in_style, out_style) {
+ (Style::Int(in_is_signed), Style::Int(_)) => {
+ return Ok(match in_width.cmp(&out_width) {
+ Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
+ Ordering::Equal => args[0].immediate(),
+ Ordering::Less => {
+ if in_is_signed {
+ bx.sext(args[0].immediate(), llret_ty)
+ } else {
+ bx.zext(args[0].immediate(), llret_ty)
+ }
+ }
+ });
+ }
+ (Style::Int(in_is_signed), Style::Float) => {
+ return Ok(if in_is_signed {
+ bx.sitofp(args[0].immediate(), llret_ty)
+ } else {
+ bx.uitofp(args[0].immediate(), llret_ty)
+ });
+ }
+ (Style::Float, Style::Int(out_is_signed)) => {
+ return Ok(match (out_is_signed, name == sym::simd_as) {
+ (false, false) => bx.fptoui(args[0].immediate(), llret_ty),
+ (true, false) => bx.fptosi(args[0].immediate(), llret_ty),
+ (_, true) => bx.cast_float_to_int(out_is_signed, args[0].immediate(), llret_ty),
+ });
+ }
+ (Style::Float, Style::Float) => {
+ return Ok(match in_width.cmp(&out_width) {
+ Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
+ Ordering::Equal => args[0].immediate(),
+ Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
+ });
+ }
+ _ => { /* Unsupported. Fallthrough. */ }
+ }
+ require!(
+ false,
+ "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
+ in_ty,
+ in_elem,
+ ret_ty,
+ out_elem
+ );
+ }
+ macro_rules! arith_binary {
+ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+ $(if name == sym::$name {
+ match in_elem.kind() {
+ $($(ty::$p(_))|* => {
+ return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
+ })*
+ _ => {},
+ }
+ require!(false,
+ "unsupported operation on `{}` with element `{}`",
+ in_ty,
+ in_elem)
+ })*
+ }
+ }
+ arith_binary! {
+ simd_add: Uint, Int => add, Float => fadd;
+ simd_sub: Uint, Int => sub, Float => fsub;
+ simd_mul: Uint, Int => mul, Float => fmul;
+ simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
+ simd_rem: Uint => urem, Int => srem, Float => frem;
+ simd_shl: Uint, Int => shl;
+ simd_shr: Uint => lshr, Int => ashr;
+ simd_and: Uint, Int => and;
+ simd_or: Uint, Int => or;
+ simd_xor: Uint, Int => xor;
+ simd_fmax: Float => maxnum;
+ simd_fmin: Float => minnum;
+ }
+ macro_rules! arith_unary {
+ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+ $(if name == sym::$name {
+ match in_elem.kind() {
+ $($(ty::$p(_))|* => {
+ return Ok(bx.$call(args[0].immediate()))
+ })*
+ _ => {},
+ }
+ require!(false,
+ "unsupported operation on `{}` with element `{}`",
+ in_ty,
+ in_elem)
+ })*
+ }
+ }
+ arith_unary! {
+ simd_neg: Int => neg, Float => fneg;
+ }
+
+ if name == sym::simd_arith_offset {
+ // This also checks that the first operand is a ptr type.
+ let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
+ span_bug!(span, "must be called with a vector of pointer types as first argument")
+ });
+ let layout = bx.layout_of(pointee.ty);
+ let ptrs = args[0].immediate();
+ // The second argument must be a ptr-sized integer.
+ // (We don't care about the signedness, this is wrapping anyway.)
+ let (_offsets_len, offsets_elem) = arg_tys[1].simd_size_and_type(bx.tcx());
+ if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
+ span_bug!(
+ span,
+ "must be called with a vector of pointer-sized integers as second argument"
+ );
+ }
+ let offsets = args[1].immediate();
+
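+        // The vector GEP applies each offset lane-wise, scaled by the pointee
+        // size: result[i] = ptrs[i] + offsets[i] * size_of::<T>() (wrapping).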
+ return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
+ }
+
+ if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
+ let lhs = args[0].immediate();
+ let rhs = args[1].immediate();
+ let is_add = name == sym::simd_saturating_add;
+ let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
+ let (signed, elem_width, elem_ty) = match *in_elem.kind() {
+ ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
+ ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
+ _ => {
+ return_error!(
+ "expected element type `{}` of vector type `{}` \
+ to be a signed or unsigned integer type",
+ arg_tys[0].simd_size_and_type(bx.tcx()).1,
+ arg_tys[0]
+ );
+ }
+ };
+ let llvm_intrinsic = &format!(
+ "llvm.{}{}.sat.v{}i{}",
+ if signed { 's' } else { 'u' },
+ if is_add { "add" } else { "sub" },
+ in_len,
+ elem_width
+ );
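+        // e.g., `llvm.sadd.sat.v4i32` for a signed, 4-lane, 32-bit saturating add.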
+ let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
+
+ let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
+ let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+ let v = bx.call(fn_ty, f, &[lhs, rhs], None);
+ return Ok(v);
+ }
+
+ span_bug!(span, "unknown SIMD intrinsic");
+}
+
+// Returns the width of an int Ty, and whether it's signed.
+// Returns None if the type is not an integer.
+// FIXME: there are multiple versions of this function; investigate reusing one of
+// the existing ones.
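+// e.g., `i32` -> Some((32, true)), `usize` -> Some((pointer_width, false)),
+// `f32` -> None.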
+fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
+ match ty.kind() {
+ ty::Int(t) => {
+ Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), true))
+ }
+ ty::Uint(t) => {
+ Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), false))
+ }
+ _ => None,
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
new file mode 100644
index 000000000..636d689a3
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -0,0 +1,442 @@
+//! The Rust compiler.
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(hash_raw_entry)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(extern_types)]
+#![feature(once_cell)]
+#![feature(iter_intersperse)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate rustc_macros;
+
+use back::write::{create_informational_target_machine, create_target_machine};
+
+pub use llvm_util::target_features;
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use rustc_codegen_ssa::back::write::{
+ CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
+};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::ModuleCodegen;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{ErrorGuaranteed, FatalError, Handler};
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{OptLevel, OutputFilenames, PrintRequest};
+use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+
+use std::any::Any;
+use std::ffi::CStr;
+
+mod back {
+ pub mod archive;
+ pub mod lto;
+ mod profiling;
+ pub mod write;
+}
+
+mod abi;
+mod allocator;
+mod asm;
+mod attributes;
+mod base;
+mod builder;
+mod callee;
+mod common;
+mod consts;
+mod context;
+mod coverageinfo;
+mod debuginfo;
+mod declare;
+mod intrinsic;
+
+// The following is a workaround that replaces `pub mod llvm;` and that fixes issue 53912.
+#[path = "llvm/mod.rs"]
+mod llvm_;
+pub mod llvm {
+ pub use super::llvm_::*;
+}
+
+mod llvm_util;
+mod mono_item;
+mod type_;
+mod type_of;
+mod va_arg;
+mod value;
+
+#[derive(Clone)]
+pub struct LlvmCodegenBackend(());
+
+struct TimeTraceProfiler {
+ enabled: bool,
+}
+
+impl TimeTraceProfiler {
+ fn new(enabled: bool) -> Self {
+ if enabled {
+ unsafe { llvm::LLVMTimeTraceProfilerInitialize() }
+ }
+ TimeTraceProfiler { enabled }
+ }
+}
+
+impl Drop for TimeTraceProfiler {
+ fn drop(&mut self) {
+ if self.enabled {
+ unsafe { llvm::LLVMTimeTraceProfilerFinishThread() }
+ }
+ }
+}
+
+impl ExtraBackendMethods for LlvmCodegenBackend {
+ fn codegen_allocator<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ module_name: &str,
+ kind: AllocatorKind,
+ has_alloc_error_handler: bool,
+ ) -> ModuleLlvm {
+ let mut module_llvm = ModuleLlvm::new_metadata(tcx, module_name);
+ unsafe {
+ allocator::codegen(tcx, &mut module_llvm, module_name, kind, has_alloc_error_handler);
+ }
+ module_llvm
+ }
+ fn compile_codegen_unit(
+ &self,
+ tcx: TyCtxt<'_>,
+ cgu_name: Symbol,
+ ) -> (ModuleCodegen<ModuleLlvm>, u64) {
+ base::compile_codegen_unit(tcx, cgu_name)
+ }
+ fn target_machine_factory(
+ &self,
+ sess: &Session,
+ optlvl: OptLevel,
+ target_features: &[String],
+ ) -> TargetMachineFactoryFn<Self> {
+ back::write::target_machine_factory(sess, optlvl, target_features)
+ }
+ fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
+ llvm_util::target_cpu(sess)
+ }
+ fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str> {
+ llvm_util::tune_cpu(sess)
+ }
+
+ fn spawn_thread<F, T>(time_trace: bool, f: F) -> std::thread::JoinHandle<T>
+ where
+ F: FnOnce() -> T,
+ F: Send + 'static,
+ T: Send + 'static,
+ {
+ std::thread::spawn(move || {
+ let _profiler = TimeTraceProfiler::new(time_trace);
+ f()
+ })
+ }
+
+ fn spawn_named_thread<F, T>(
+ time_trace: bool,
+ name: String,
+ f: F,
+ ) -> std::io::Result<std::thread::JoinHandle<T>>
+ where
+ F: FnOnce() -> T,
+ F: Send + 'static,
+ T: Send + 'static,
+ {
+ std::thread::Builder::new().name(name).spawn(move || {
+ let _profiler = TimeTraceProfiler::new(time_trace);
+ f()
+ })
+ }
+}
+
+impl WriteBackendMethods for LlvmCodegenBackend {
+ type Module = ModuleLlvm;
+ type ModuleBuffer = back::lto::ModuleBuffer;
+ type Context = llvm::Context;
+ type TargetMachine = &'static mut llvm::TargetMachine;
+ type ThinData = back::lto::ThinData;
+ type ThinBuffer = back::lto::ThinBuffer;
+ fn print_pass_timings(&self) {
+ unsafe {
+ llvm::LLVMRustPrintPassTimings();
+ }
+ }
+ fn run_link(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ modules: Vec<ModuleCodegen<Self::Module>>,
+ ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+ back::write::link(cgcx, diag_handler, modules)
+ }
+ fn run_fat_lto(
+ cgcx: &CodegenContext<Self>,
+ modules: Vec<FatLTOInput<Self>>,
+ cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+ ) -> Result<LtoModuleCodegen<Self>, FatalError> {
+ back::lto::run_fat(cgcx, modules, cached_modules)
+ }
+ fn run_thin_lto(
+ cgcx: &CodegenContext<Self>,
+ modules: Vec<(String, Self::ThinBuffer)>,
+ cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+ ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
+ back::lto::run_thin(cgcx, modules, cached_modules)
+ }
+ unsafe fn optimize(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ module: &ModuleCodegen<Self::Module>,
+ config: &ModuleConfig,
+ ) -> Result<(), FatalError> {
+ back::write::optimize(cgcx, diag_handler, module, config)
+ }
+ fn optimize_fat(
+ cgcx: &CodegenContext<Self>,
+ module: &mut ModuleCodegen<Self::Module>,
+ ) -> Result<(), FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+ back::lto::run_pass_manager(cgcx, &diag_handler, module, false)
+ }
+ unsafe fn optimize_thin(
+ cgcx: &CodegenContext<Self>,
+ thin: ThinModule<Self>,
+ ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+ back::lto::optimize_thin_module(thin, cgcx)
+ }
+ unsafe fn codegen(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ module: ModuleCodegen<Self::Module>,
+ config: &ModuleConfig,
+ ) -> Result<CompiledModule, FatalError> {
+ back::write::codegen(cgcx, diag_handler, module, config)
+ }
+ fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
+ back::lto::prepare_thin(module)
+ }
+ fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {
+ (module.name, back::lto::ModuleBuffer::new(module.module_llvm.llmod()))
+ }
+}
+
+unsafe impl Send for LlvmCodegenBackend {} // LLVM is used on a per-thread basis
+unsafe impl Sync for LlvmCodegenBackend {}
+
+impl LlvmCodegenBackend {
+ pub fn new() -> Box<dyn CodegenBackend> {
+ Box::new(LlvmCodegenBackend(()))
+ }
+}
+
+impl CodegenBackend for LlvmCodegenBackend {
+ fn init(&self, sess: &Session) {
+        llvm_util::init(sess); // Make sure LLVM is initialized
+ }
+
+ fn provide(&self, providers: &mut Providers) {
+ providers.global_backend_features =
+ |tcx, ()| llvm_util::global_llvm_features(tcx.sess, true)
+ }
+
+ fn print(&self, req: PrintRequest, sess: &Session) {
+ match req {
+ PrintRequest::RelocationModels => {
+ println!("Available relocation models:");
+ for name in &[
+ "static",
+ "pic",
+ "pie",
+ "dynamic-no-pic",
+ "ropi",
+ "rwpi",
+ "ropi-rwpi",
+ "default",
+ ] {
+ println!(" {}", name);
+ }
+ println!();
+ }
+ PrintRequest::CodeModels => {
+ println!("Available code models:");
+ for name in &["tiny", "small", "kernel", "medium", "large"] {
+ println!(" {}", name);
+ }
+ println!();
+ }
+ PrintRequest::TlsModels => {
+ println!("Available TLS models:");
+ for name in &["global-dynamic", "local-dynamic", "initial-exec", "local-exec"] {
+ println!(" {}", name);
+ }
+ println!();
+ }
+ PrintRequest::StackProtectorStrategies => {
+ println!(
+ r#"Available stack protector strategies:
+ all
+ Generate stack canaries in all functions.
+
+ strong
+ Generate stack canaries in a function if it either:
+ - has a local variable of `[T; N]` type, regardless of `T` and `N`
+ - takes the address of a local variable.
+
+ (Note that a local variable being borrowed is not equivalent to its
+ address being taken: e.g. some borrows may be removed by optimization,
+ while by-value argument passing may be implemented with reference to a
+ local stack variable in the ABI.)
+
+ basic
+ Generate stack canaries in functions with local variables of `[T; N]`
+ type, where `T` is byte-sized and `N` >= 8.
+
+ none
+ Do not generate stack canaries.
+"#
+ );
+ }
+ req => llvm_util::print(req, sess),
+ }
+ }
+
+ fn print_passes(&self) {
+ llvm_util::print_passes();
+ }
+
+ fn print_version(&self) {
+ llvm_util::print_version();
+ }
+
+ fn target_features(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
+ target_features(sess, allow_unstable)
+ }
+
+ fn codegen_crate<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+ ) -> Box<dyn Any> {
+ Box::new(rustc_codegen_ssa::base::codegen_crate(
+ LlvmCodegenBackend(()),
+ tcx,
+ crate::llvm_util::target_cpu(tcx.sess).to_string(),
+ metadata,
+ need_metadata_module,
+ ))
+ }
+
+ fn join_codegen(
+ &self,
+ ongoing_codegen: Box<dyn Any>,
+ sess: &Session,
+ outputs: &OutputFilenames,
+ ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+ let (codegen_results, work_products) = ongoing_codegen
+ .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
+ .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
+ .join(sess);
+
+ sess.time("llvm_dump_timing_file", || {
+ if sess.opts.unstable_opts.llvm_time_trace {
+ let file_name = outputs.with_extension("llvm_timings.json");
+ llvm_util::time_trace_profiler_finish(&file_name);
+ }
+ });
+
+ Ok((codegen_results, work_products))
+ }
+
+ fn link(
+ &self,
+ sess: &Session,
+ codegen_results: CodegenResults,
+ outputs: &OutputFilenames,
+ ) -> Result<(), ErrorGuaranteed> {
+ use crate::back::archive::LlvmArchiveBuilderBuilder;
+ use rustc_codegen_ssa::back::link::link_binary;
+
+ // Run the linker on any artifacts that resulted from the LLVM run.
+ // This should produce either a finished executable or library.
+ link_binary(sess, &LlvmArchiveBuilderBuilder, &codegen_results, outputs)
+ }
+}
+
+pub struct ModuleLlvm {
+ llcx: &'static mut llvm::Context,
+ llmod_raw: *const llvm::Module,
+ tm: &'static mut llvm::TargetMachine,
+}
+
+unsafe impl Send for ModuleLlvm {}
+unsafe impl Sync for ModuleLlvm {}
+
+impl ModuleLlvm {
+ fn new(tcx: TyCtxt<'_>, mod_name: &str) -> Self {
+ unsafe {
+ let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
+ let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
+ ModuleLlvm { llmod_raw, llcx, tm: create_target_machine(tcx, mod_name) }
+ }
+ }
+
+ fn new_metadata(tcx: TyCtxt<'_>, mod_name: &str) -> Self {
+ unsafe {
+ let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
+ let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
+ ModuleLlvm { llmod_raw, llcx, tm: create_informational_target_machine(tcx.sess) }
+ }
+ }
+
+ fn parse(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ name: &CStr,
+ buffer: &[u8],
+ handler: &Handler,
+ ) -> Result<Self, FatalError> {
+ unsafe {
+ let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
+ let llmod_raw = back::lto::parse_module(llcx, name, buffer, handler)?;
+ let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, name.to_str().unwrap());
+ let tm = match (cgcx.tm_factory)(tm_factory_config) {
+ Ok(m) => m,
+ Err(e) => {
+ handler.struct_err(&e).emit();
+ return Err(FatalError);
+ }
+ };
+
+ Ok(ModuleLlvm { llmod_raw, llcx, tm })
+ }
+ }
+
+ fn llmod(&self) -> &llvm::Module {
+ unsafe { &*self.llmod_raw }
+ }
+}
+
+impl Drop for ModuleLlvm {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
+ llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
new file mode 100644
index 000000000..64db4f746
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
@@ -0,0 +1,105 @@
+//! A wrapper around LLVM's archive (.a) code
+
+use rustc_fs_util::path_to_c_string;
+use std::path::Path;
+use std::slice;
+use std::str;
+
+pub struct ArchiveRO {
+ pub raw: &'static mut super::Archive,
+}
+
+unsafe impl Send for ArchiveRO {}
+
+pub struct Iter<'a> {
+ raw: &'a mut super::ArchiveIterator<'a>,
+}
+
+pub struct Child<'a> {
+ pub raw: &'a mut super::ArchiveChild<'a>,
+}
+
+impl ArchiveRO {
+    /// Opens a static archive for read-only purposes. This uses LLVM's
+    /// internal `Archive` class rather than shelling out to `ar` for
+    /// everything, which makes it more efficient.
+ ///
+ /// If this archive is used with a mutable method, then an error will be
+ /// raised.
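+    ///
+    /// Illustrative usage:
+    ///
+    /// ```ignore (requires an archive on disk)
+    /// let archive = ArchiveRO::open(Path::new("libfoo.a"))?;
+    /// for child in archive.iter() {
+    ///     let child = child?;
+    ///     println!("{:?} ({} bytes)", child.name(), child.data().len());
+    /// }
+    /// ```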
+ pub fn open(dst: &Path) -> Result<ArchiveRO, String> {
+ unsafe {
+ let s = path_to_c_string(dst);
+ let ar = super::LLVMRustOpenArchive(s.as_ptr()).ok_or_else(|| {
+ super::last_error().unwrap_or_else(|| "failed to open archive".to_owned())
+ })?;
+ Ok(ArchiveRO { raw: ar })
+ }
+ }
+
+ pub fn iter(&self) -> Iter<'_> {
+ unsafe { Iter { raw: super::LLVMRustArchiveIteratorNew(self.raw) } }
+ }
+}
+
+impl Drop for ArchiveRO {
+ fn drop(&mut self) {
+ unsafe {
+ super::LLVMRustDestroyArchive(&mut *(self.raw as *mut _));
+ }
+ }
+}
+
+impl<'a> Iterator for Iter<'a> {
+ type Item = Result<Child<'a>, String>;
+
+ fn next(&mut self) -> Option<Result<Child<'a>, String>> {
+ unsafe {
+ match super::LLVMRustArchiveIteratorNext(self.raw) {
+ Some(raw) => Some(Ok(Child { raw })),
+ None => super::last_error().map(Err),
+ }
+ }
+ }
+}
+
+impl<'a> Drop for Iter<'a> {
+ fn drop(&mut self) {
+ unsafe {
+ super::LLVMRustArchiveIteratorFree(&mut *(self.raw as *mut _));
+ }
+ }
+}
+
+impl<'a> Child<'a> {
+ pub fn name(&self) -> Option<&'a str> {
+ unsafe {
+ let mut name_len = 0;
+ let name_ptr = super::LLVMRustArchiveChildName(self.raw, &mut name_len);
+ if name_ptr.is_null() {
+ None
+ } else {
+ let name = slice::from_raw_parts(name_ptr as *const u8, name_len as usize);
+ str::from_utf8(name).ok().map(|s| s.trim())
+ }
+ }
+ }
+
+ pub fn data(&self) -> &'a [u8] {
+ unsafe {
+ let mut data_len = 0;
+ let data_ptr = super::LLVMRustArchiveChildData(self.raw, &mut data_len);
+ if data_ptr.is_null() {
+ panic!("failed to read data from archive child");
+ }
+ slice::from_raw_parts(data_ptr as *const u8, data_len as usize)
+ }
+ }
+}
+
+impl<'a> Drop for Child<'a> {
+ fn drop(&mut self) {
+ unsafe {
+ super::LLVMRustArchiveChildFree(&mut *(self.raw as *mut _));
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
new file mode 100644
index 000000000..45de284d2
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
@@ -0,0 +1,213 @@
+//! LLVM diagnostic reports.
+
+pub use self::Diagnostic::*;
+pub use self::OptimizationDiagnosticKind::*;
+
+use crate::value::Value;
+use libc::c_uint;
+
+use super::{DiagnosticInfo, SMDiagnostic};
+use rustc_span::InnerSpan;
+
+#[derive(Copy, Clone)]
+pub enum OptimizationDiagnosticKind {
+ OptimizationRemark,
+ OptimizationMissed,
+ OptimizationAnalysis,
+ OptimizationAnalysisFPCommute,
+ OptimizationAnalysisAliasing,
+ OptimizationFailure,
+ OptimizationRemarkOther,
+}
+
+pub struct OptimizationDiagnostic<'ll> {
+ pub kind: OptimizationDiagnosticKind,
+ pub pass_name: String,
+ pub function: &'ll Value,
+ pub line: c_uint,
+ pub column: c_uint,
+ pub filename: String,
+ pub message: String,
+}
+
+impl<'ll> OptimizationDiagnostic<'ll> {
+ unsafe fn unpack(kind: OptimizationDiagnosticKind, di: &'ll DiagnosticInfo) -> Self {
+ let mut function = None;
+ let mut line = 0;
+ let mut column = 0;
+
+ let mut message = None;
+ let mut filename = None;
+ let pass_name = super::build_string(|pass_name| {
+ message = super::build_string(|message| {
+ filename = super::build_string(|filename| {
+ super::LLVMRustUnpackOptimizationDiagnostic(
+ di,
+ pass_name,
+ &mut function,
+ &mut line,
+ &mut column,
+ filename,
+ message,
+ )
+ })
+ .ok()
+ })
+ .ok()
+ })
+ .ok();
+
+ let mut filename = filename.unwrap_or_default();
+ if filename.is_empty() {
+ filename.push_str("<unknown file>");
+ }
+
+ OptimizationDiagnostic {
+ kind,
+ pass_name: pass_name.expect("got a non-UTF8 pass name from LLVM"),
+ function: function.unwrap(),
+ line,
+ column,
+ filename,
+ message: message.expect("got a non-UTF8 OptimizationDiagnostic message from LLVM"),
+ }
+ }
+}
+
+pub struct SrcMgrDiagnostic {
+ pub level: super::DiagnosticLevel,
+ pub message: String,
+ pub source: Option<(String, Vec<InnerSpan>)>,
+}
+
+impl SrcMgrDiagnostic {
+ pub unsafe fn unpack(diag: &SMDiagnostic) -> SrcMgrDiagnostic {
+ // Recover the post-substitution assembly code from LLVM for better
+ // diagnostics.
+ let mut have_source = false;
+ let mut buffer = String::new();
+ let mut level = super::DiagnosticLevel::Error;
+ let mut loc = 0;
+ let mut ranges = [0; 8];
+ let mut num_ranges = ranges.len() / 2;
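+        // `ranges` holds up to 4 (start, end) byte-offset pairs; `num_ranges`
+        // is the capacity in pairs going in, and the actual count coming out.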
+ let message = super::build_string(|message| {
+ buffer = super::build_string(|buffer| {
+ have_source = super::LLVMRustUnpackSMDiagnostic(
+ diag,
+ message,
+ buffer,
+ &mut level,
+ &mut loc,
+ ranges.as_mut_ptr(),
+ &mut num_ranges,
+ );
+ })
+ .expect("non-UTF8 inline asm");
+ })
+ .expect("non-UTF8 SMDiagnostic");
+
+ SrcMgrDiagnostic {
+ message,
+ level,
+ source: have_source.then(|| {
+ let mut spans = vec![InnerSpan::new(loc as usize, loc as usize)];
+ for i in 0..num_ranges {
+ spans.push(InnerSpan::new(ranges[i * 2] as usize, ranges[i * 2 + 1] as usize));
+ }
+ (buffer, spans)
+ }),
+ }
+ }
+}
+
+#[derive(Clone)]
+pub struct InlineAsmDiagnostic {
+ pub level: super::DiagnosticLevel,
+ pub cookie: c_uint,
+ pub message: String,
+ pub source: Option<(String, Vec<InnerSpan>)>,
+}
+
+impl InlineAsmDiagnostic {
+ unsafe fn unpackInlineAsm(di: &DiagnosticInfo) -> Self {
+ let mut cookie = 0;
+ let mut message = None;
+ let mut level = super::DiagnosticLevel::Error;
+
+ super::LLVMRustUnpackInlineAsmDiagnostic(di, &mut level, &mut cookie, &mut message);
+
+ InlineAsmDiagnostic {
+ level,
+ cookie,
+ message: super::twine_to_string(message.unwrap()),
+ source: None,
+ }
+ }
+
+ unsafe fn unpackSrcMgr(di: &DiagnosticInfo) -> Self {
+ let mut cookie = 0;
+ let smdiag = SrcMgrDiagnostic::unpack(super::LLVMRustGetSMDiagnostic(di, &mut cookie));
+ InlineAsmDiagnostic {
+ level: smdiag.level,
+ cookie,
+ message: smdiag.message,
+ source: smdiag.source,
+ }
+ }
+}
+
+pub enum Diagnostic<'ll> {
+ Optimization(OptimizationDiagnostic<'ll>),
+ InlineAsm(InlineAsmDiagnostic),
+ PGO(&'ll DiagnosticInfo),
+ Linker(&'ll DiagnosticInfo),
+ Unsupported(&'ll DiagnosticInfo),
+
+ /// LLVM has other types that we do not wrap here.
+ UnknownDiagnostic(&'ll DiagnosticInfo),
+}
+
+impl<'ll> Diagnostic<'ll> {
+ pub unsafe fn unpack(di: &'ll DiagnosticInfo) -> Self {
+ use super::DiagnosticKind as Dk;
+ let kind = super::LLVMRustGetDiagInfoKind(di);
+
+ match kind {
+ Dk::InlineAsm => InlineAsm(InlineAsmDiagnostic::unpackInlineAsm(di)),
+
+ Dk::OptimizationRemark => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationRemark, di))
+ }
+ Dk::OptimizationRemarkOther => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationRemarkOther, di))
+ }
+ Dk::OptimizationRemarkMissed => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationMissed, di))
+ }
+
+ Dk::OptimizationRemarkAnalysis => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysis, di))
+ }
+
+ Dk::OptimizationRemarkAnalysisFPCommute => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisFPCommute, di))
+ }
+
+ Dk::OptimizationRemarkAnalysisAliasing => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisAliasing, di))
+ }
+
+ Dk::OptimizationFailure => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationFailure, di))
+ }
+
+ Dk::PGOProfile => PGO(di),
+ Dk::Linker => Linker(di),
+ Dk::Unsupported => Unsupported(di),
+
+ Dk::SrcMgr => InlineAsm(InlineAsmDiagnostic::unpackSrcMgr(di)),
+
+ _ => UnknownDiagnostic(di),
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
new file mode 100644
index 000000000..3139f93bf
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -0,0 +1,2547 @@
+#![allow(non_camel_case_types)]
+#![allow(non_upper_case_globals)]
+
+use rustc_codegen_ssa::coverageinfo::map as coverage_map;
+
+use super::debuginfo::{
+ DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator,
+ DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DILocation, DINameSpace,
+ DISPFlags, DIScope, DISubprogram, DISubrange, DITemplateTypeParameter, DIType, DIVariable,
+ DebugEmissionKind,
+};
+
+use libc::{c_char, c_int, c_uint, size_t};
+use libc::{c_ulonglong, c_void};
+
+use std::marker::PhantomData;
+
+use super::RustString;
+
+pub type Bool = c_uint;
+
+pub const True: Bool = 1 as Bool;
+pub const False: Bool = 0 as Bool;
+
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+#[allow(dead_code)] // Variants constructed by C++.
+pub enum LLVMRustResult {
+ Success,
+ Failure,
+}
+
+// Rust version of the C struct with the same name in rustc_llvm/llvm-wrapper/RustWrapper.cpp.
+#[repr(C)]
+pub struct LLVMRustCOFFShortExport {
+ pub name: *const c_char,
+ pub ordinal_present: bool,
+    // The value of `ordinal` is only meaningful when `ordinal_present` is true.
+ pub ordinal: u16,
+}
+
+impl LLVMRustCOFFShortExport {
+ pub fn new(name: *const c_char, ordinal: Option<u16>) -> LLVMRustCOFFShortExport {
+ LLVMRustCOFFShortExport {
+ name,
+ ordinal_present: ordinal.is_some(),
+ ordinal: ordinal.unwrap_or(0),
+ }
+ }
+}
+
+/// Translation of LLVM's MachineTypes enum, defined in llvm\include\llvm\BinaryFormat\COFF.h.
+///
+/// We include only architectures supported on Windows.
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum LLVMMachineType {
+ AMD64 = 0x8664,
+ I386 = 0x14c,
+ ARM64 = 0xaa64,
+ ARM = 0x01c0,
+}
+
+/// LLVM's Module::ModFlagBehavior, defined in llvm/include/llvm/IR/Module.h.
+///
+/// When merging modules (e.g. during LTO), their metadata flags are combined. Conflicts are
+/// resolved according to the merge behaviors specified here. Flags differing only in merge
+/// behavior are still considered to be in conflict.
+///
+/// In order for Rust-C LTO to work, we must specify behaviors compatible with Clang. Notably,
+/// 'Error' and 'Warning' cannot be mixed for a given flag.
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum LLVMModFlagBehavior {
+ Error = 1,
+ Warning = 2,
+ Require = 3,
+ Override = 4,
+ Append = 5,
+ AppendUnique = 6,
+ Max = 7,
+}
+
+// Consts for the LLVM CallConv type, pre-cast to usize.
+
+/// LLVM CallingConv::ID. Should we wrap this?
+#[derive(Copy, Clone, PartialEq, Debug)]
+#[repr(C)]
+pub enum CallConv {
+ CCallConv = 0,
+ FastCallConv = 8,
+ ColdCallConv = 9,
+ X86StdcallCallConv = 64,
+ X86FastcallCallConv = 65,
+ ArmAapcsCallConv = 67,
+ Msp430Intr = 69,
+ X86_ThisCall = 70,
+ PtxKernel = 71,
+ X86_64_SysV = 78,
+ X86_64_Win64 = 79,
+ X86_VectorCall = 80,
+ X86_Intr = 83,
+ AvrNonBlockingInterrupt = 84,
+ AvrInterrupt = 85,
+ AmdGpuKernel = 91,
+}
+
+/// LLVMRustLinkage
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum Linkage {
+ ExternalLinkage = 0,
+ AvailableExternallyLinkage = 1,
+ LinkOnceAnyLinkage = 2,
+ LinkOnceODRLinkage = 3,
+ WeakAnyLinkage = 4,
+ WeakODRLinkage = 5,
+ AppendingLinkage = 6,
+ InternalLinkage = 7,
+ PrivateLinkage = 8,
+ ExternalWeakLinkage = 9,
+ CommonLinkage = 10,
+}
+
+// LLVMRustVisibility
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq)]
+pub enum Visibility {
+ Default = 0,
+ Hidden = 1,
+ Protected = 2,
+}
+
+/// LLVMUnnamedAddr
+#[repr(C)]
+pub enum UnnamedAddr {
+ No,
+ Local,
+ Global,
+}
+
+/// LLVMDLLStorageClass
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum DLLStorageClass {
+ #[allow(dead_code)]
+ Default = 0,
+ DllImport = 1, // Function to be imported from DLL.
+ #[allow(dead_code)]
+ DllExport = 2, // Function to be accessible from DLL.
+}
+
+/// Matches LLVMRustAttribute in LLVMWrapper.h
+/// Semantically a subset of the C++ enum llvm::Attribute::AttrKind,
+/// though it is not ABI compatible (since it's a C++ enum)
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+pub enum AttributeKind {
+ AlwaysInline = 0,
+ ByVal = 1,
+ Cold = 2,
+ InlineHint = 3,
+ MinSize = 4,
+ Naked = 5,
+ NoAlias = 6,
+ NoCapture = 7,
+ NoInline = 8,
+ NonNull = 9,
+ NoRedZone = 10,
+ NoReturn = 11,
+ NoUnwind = 12,
+ OptimizeForSize = 13,
+ ReadOnly = 14,
+ SExt = 15,
+ StructRet = 16,
+ UWTable = 17,
+ ZExt = 18,
+ InReg = 19,
+ SanitizeThread = 20,
+ SanitizeAddress = 21,
+ SanitizeMemory = 22,
+ NonLazyBind = 23,
+ OptimizeNone = 24,
+ ReturnsTwice = 25,
+ ReadNone = 26,
+ InaccessibleMemOnly = 27,
+ SanitizeHWAddress = 28,
+ WillReturn = 29,
+ StackProtectReq = 30,
+ StackProtectStrong = 31,
+ StackProtect = 32,
+ NoUndef = 33,
+ SanitizeMemTag = 34,
+ NoCfCheck = 35,
+ ShadowCallStack = 36,
+ AllocSize = 37,
+ AllocatedPointer = 38,
+ AllocAlign = 39,
+}
+
+/// LLVMIntPredicate
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum IntPredicate {
+ IntEQ = 32,
+ IntNE = 33,
+ IntUGT = 34,
+ IntUGE = 35,
+ IntULT = 36,
+ IntULE = 37,
+ IntSGT = 38,
+ IntSGE = 39,
+ IntSLT = 40,
+ IntSLE = 41,
+}
+
+impl IntPredicate {
+ pub fn from_generic(intpre: rustc_codegen_ssa::common::IntPredicate) -> Self {
+ match intpre {
+ rustc_codegen_ssa::common::IntPredicate::IntEQ => IntPredicate::IntEQ,
+ rustc_codegen_ssa::common::IntPredicate::IntNE => IntPredicate::IntNE,
+ rustc_codegen_ssa::common::IntPredicate::IntUGT => IntPredicate::IntUGT,
+ rustc_codegen_ssa::common::IntPredicate::IntUGE => IntPredicate::IntUGE,
+ rustc_codegen_ssa::common::IntPredicate::IntULT => IntPredicate::IntULT,
+ rustc_codegen_ssa::common::IntPredicate::IntULE => IntPredicate::IntULE,
+ rustc_codegen_ssa::common::IntPredicate::IntSGT => IntPredicate::IntSGT,
+ rustc_codegen_ssa::common::IntPredicate::IntSGE => IntPredicate::IntSGE,
+ rustc_codegen_ssa::common::IntPredicate::IntSLT => IntPredicate::IntSLT,
+ rustc_codegen_ssa::common::IntPredicate::IntSLE => IntPredicate::IntSLE,
+ }
+ }
+}
+
+/// LLVMRealPredicate
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum RealPredicate {
+ RealPredicateFalse = 0,
+ RealOEQ = 1,
+ RealOGT = 2,
+ RealOGE = 3,
+ RealOLT = 4,
+ RealOLE = 5,
+ RealONE = 6,
+ RealORD = 7,
+ RealUNO = 8,
+ RealUEQ = 9,
+ RealUGT = 10,
+ RealUGE = 11,
+ RealULT = 12,
+ RealULE = 13,
+ RealUNE = 14,
+ RealPredicateTrue = 15,
+}
+
+impl RealPredicate {
+ pub fn from_generic(realp: rustc_codegen_ssa::common::RealPredicate) -> Self {
+ match realp {
+ rustc_codegen_ssa::common::RealPredicate::RealPredicateFalse => {
+ RealPredicate::RealPredicateFalse
+ }
+ rustc_codegen_ssa::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ,
+ rustc_codegen_ssa::common::RealPredicate::RealOGT => RealPredicate::RealOGT,
+ rustc_codegen_ssa::common::RealPredicate::RealOGE => RealPredicate::RealOGE,
+ rustc_codegen_ssa::common::RealPredicate::RealOLT => RealPredicate::RealOLT,
+ rustc_codegen_ssa::common::RealPredicate::RealOLE => RealPredicate::RealOLE,
+ rustc_codegen_ssa::common::RealPredicate::RealONE => RealPredicate::RealONE,
+ rustc_codegen_ssa::common::RealPredicate::RealORD => RealPredicate::RealORD,
+ rustc_codegen_ssa::common::RealPredicate::RealUNO => RealPredicate::RealUNO,
+ rustc_codegen_ssa::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ,
+ rustc_codegen_ssa::common::RealPredicate::RealUGT => RealPredicate::RealUGT,
+ rustc_codegen_ssa::common::RealPredicate::RealUGE => RealPredicate::RealUGE,
+ rustc_codegen_ssa::common::RealPredicate::RealULT => RealPredicate::RealULT,
+ rustc_codegen_ssa::common::RealPredicate::RealULE => RealPredicate::RealULE,
+ rustc_codegen_ssa::common::RealPredicate::RealUNE => RealPredicate::RealUNE,
+ rustc_codegen_ssa::common::RealPredicate::RealPredicateTrue => {
+ RealPredicate::RealPredicateTrue
+ }
+ }
+ }
+}
+
+/// LLVMTypeKind
+#[derive(Copy, Clone, PartialEq, Debug)]
+#[repr(C)]
+pub enum TypeKind {
+ Void = 0,
+ Half = 1,
+ Float = 2,
+ Double = 3,
+ X86_FP80 = 4,
+ FP128 = 5,
+ PPC_FP128 = 6,
+ Label = 7,
+ Integer = 8,
+ Function = 9,
+ Struct = 10,
+ Array = 11,
+ Pointer = 12,
+ Vector = 13,
+ Metadata = 14,
+ X86_MMX = 15,
+ Token = 16,
+ ScalableVector = 17,
+ BFloat = 18,
+ X86_AMX = 19,
+}
+
+impl TypeKind {
+ pub fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind {
+ match self {
+ TypeKind::Void => rustc_codegen_ssa::common::TypeKind::Void,
+ TypeKind::Half => rustc_codegen_ssa::common::TypeKind::Half,
+ TypeKind::Float => rustc_codegen_ssa::common::TypeKind::Float,
+ TypeKind::Double => rustc_codegen_ssa::common::TypeKind::Double,
+ TypeKind::X86_FP80 => rustc_codegen_ssa::common::TypeKind::X86_FP80,
+ TypeKind::FP128 => rustc_codegen_ssa::common::TypeKind::FP128,
+ TypeKind::PPC_FP128 => rustc_codegen_ssa::common::TypeKind::PPC_FP128,
+ TypeKind::Label => rustc_codegen_ssa::common::TypeKind::Label,
+ TypeKind::Integer => rustc_codegen_ssa::common::TypeKind::Integer,
+ TypeKind::Function => rustc_codegen_ssa::common::TypeKind::Function,
+ TypeKind::Struct => rustc_codegen_ssa::common::TypeKind::Struct,
+ TypeKind::Array => rustc_codegen_ssa::common::TypeKind::Array,
+ TypeKind::Pointer => rustc_codegen_ssa::common::TypeKind::Pointer,
+ TypeKind::Vector => rustc_codegen_ssa::common::TypeKind::Vector,
+ TypeKind::Metadata => rustc_codegen_ssa::common::TypeKind::Metadata,
+ TypeKind::X86_MMX => rustc_codegen_ssa::common::TypeKind::X86_MMX,
+ TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token,
+ TypeKind::ScalableVector => rustc_codegen_ssa::common::TypeKind::ScalableVector,
+ TypeKind::BFloat => rustc_codegen_ssa::common::TypeKind::BFloat,
+ TypeKind::X86_AMX => rustc_codegen_ssa::common::TypeKind::X86_AMX,
+ }
+ }
+}
+
+/// LLVMAtomicRmwBinOp
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum AtomicRmwBinOp {
+ AtomicXchg = 0,
+ AtomicAdd = 1,
+ AtomicSub = 2,
+ AtomicAnd = 3,
+ AtomicNand = 4,
+ AtomicOr = 5,
+ AtomicXor = 6,
+ AtomicMax = 7,
+ AtomicMin = 8,
+ AtomicUMax = 9,
+ AtomicUMin = 10,
+}
+
+impl AtomicRmwBinOp {
+ pub fn from_generic(op: rustc_codegen_ssa::common::AtomicRmwBinOp) -> Self {
+ match op {
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin,
+ }
+ }
+}
+
+/// LLVMAtomicOrdering
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum AtomicOrdering {
+ #[allow(dead_code)]
+ NotAtomic = 0,
+ Unordered = 1,
+ Monotonic = 2,
+ // Consume = 3, // Not specified yet.
+ Acquire = 4,
+ Release = 5,
+ AcquireRelease = 6,
+ SequentiallyConsistent = 7,
+}
+
+impl AtomicOrdering {
+ pub fn from_generic(ao: rustc_codegen_ssa::common::AtomicOrdering) -> Self {
+ match ao {
+ rustc_codegen_ssa::common::AtomicOrdering::Unordered => AtomicOrdering::Unordered,
+ rustc_codegen_ssa::common::AtomicOrdering::Relaxed => AtomicOrdering::Monotonic,
+ rustc_codegen_ssa::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire,
+ rustc_codegen_ssa::common::AtomicOrdering::Release => AtomicOrdering::Release,
+ rustc_codegen_ssa::common::AtomicOrdering::AcquireRelease => {
+ AtomicOrdering::AcquireRelease
+ }
+ rustc_codegen_ssa::common::AtomicOrdering::SequentiallyConsistent => {
+ AtomicOrdering::SequentiallyConsistent
+ }
+ }
+ }
+}
+
+/// LLVMRustSynchronizationScope
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum SynchronizationScope {
+ SingleThread,
+ CrossThread,
+}
+
+impl SynchronizationScope {
+ pub fn from_generic(sc: rustc_codegen_ssa::common::SynchronizationScope) -> Self {
+ match sc {
+ rustc_codegen_ssa::common::SynchronizationScope::SingleThread => {
+ SynchronizationScope::SingleThread
+ }
+ rustc_codegen_ssa::common::SynchronizationScope::CrossThread => {
+ SynchronizationScope::CrossThread
+ }
+ }
+ }
+}
+
+/// LLVMRustFileType
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum FileType {
+ AssemblyFile,
+ ObjectFile,
+}
+
+/// LLVMMetadataType
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum MetadataType {
+ MD_dbg = 0,
+ MD_tbaa = 1,
+ MD_prof = 2,
+ MD_fpmath = 3,
+ MD_range = 4,
+ MD_tbaa_struct = 5,
+ MD_invariant_load = 6,
+ MD_alias_scope = 7,
+ MD_noalias = 8,
+ MD_nontemporal = 9,
+ MD_mem_parallel_loop_access = 10,
+ MD_nonnull = 11,
+ MD_align = 17,
+ MD_type = 19,
+ MD_vcall_visibility = 28,
+ MD_noundef = 29,
+}
+
+/// LLVMRustAsmDialect
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum AsmDialect {
+ Att,
+ Intel,
+}
+
+/// LLVMRustCodeGenOptLevel
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum CodeGenOptLevel {
+ None,
+ Less,
+ Default,
+ Aggressive,
+}
+
+/// LLVMRustPassBuilderOptLevel
+#[repr(C)]
+pub enum PassBuilderOptLevel {
+ O0,
+ O1,
+ O2,
+ O3,
+ Os,
+ Oz,
+}
+
+/// LLVMRustOptStage
+#[derive(PartialEq)]
+#[repr(C)]
+pub enum OptStage {
+ PreLinkNoLTO,
+ PreLinkThinLTO,
+ PreLinkFatLTO,
+ ThinLTO,
+ FatLTO,
+}
+
+/// LLVMRustSanitizerOptions
+#[repr(C)]
+pub struct SanitizerOptions {
+ pub sanitize_address: bool,
+ pub sanitize_address_recover: bool,
+ pub sanitize_memory: bool,
+ pub sanitize_memory_recover: bool,
+ pub sanitize_memory_track_origins: c_int,
+ pub sanitize_thread: bool,
+ pub sanitize_hwaddress: bool,
+ pub sanitize_hwaddress_recover: bool,
+}
+
+/// LLVMRelocMode
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum RelocModel {
+ Static,
+ PIC,
+ DynamicNoPic,
+ ROPI,
+ RWPI,
+ ROPI_RWPI,
+}
+
+/// LLVMRustCodeModel
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum CodeModel {
+ Tiny,
+ Small,
+ Kernel,
+ Medium,
+ Large,
+ None,
+}
+
+/// LLVMRustDiagnosticKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+#[allow(dead_code)] // Variants constructed by C++.
+pub enum DiagnosticKind {
+ Other,
+ InlineAsm,
+ StackSize,
+ DebugMetadataVersion,
+ SampleProfile,
+ OptimizationRemark,
+ OptimizationRemarkMissed,
+ OptimizationRemarkAnalysis,
+ OptimizationRemarkAnalysisFPCommute,
+ OptimizationRemarkAnalysisAliasing,
+ OptimizationRemarkOther,
+ OptimizationFailure,
+ PGOProfile,
+ Linker,
+ Unsupported,
+ SrcMgr,
+}
+
+/// LLVMRustDiagnosticLevel
+#[derive(Copy, Clone)]
+#[repr(C)]
+#[allow(dead_code)] // Variants constructed by C++.
+pub enum DiagnosticLevel {
+ Error,
+ Warning,
+ Note,
+ Remark,
+}
+
+/// LLVMRustArchiveKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum ArchiveKind {
+ K_GNU,
+ K_BSD,
+ K_DARWIN,
+ K_COFF,
+}
+
+// LLVMRustThinLTOData
+extern "C" {
+ pub type ThinLTOData;
+}
+
+// LLVMRustThinLTOBuffer
+extern "C" {
+ pub type ThinLTOBuffer;
+}
+
+/// LLVMRustThinLTOModule
+#[repr(C)]
+pub struct ThinLTOModule {
+ pub identifier: *const c_char,
+ pub data: *const u8,
+ pub len: usize,
+}
+
+/// LLVMThreadLocalMode
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum ThreadLocalMode {
+ NotThreadLocal,
+ GeneralDynamic,
+ LocalDynamic,
+ InitialExec,
+ LocalExec,
+}
+
+/// LLVMRustChecksumKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum ChecksumKind {
+ None,
+ MD5,
+ SHA1,
+ SHA256,
+}
+
+extern "C" {
+ type Opaque;
+}
+#[repr(C)]
+struct InvariantOpaque<'a> {
+ _marker: PhantomData<&'a mut &'a ()>,
+ _opaque: Opaque,
+}
+
+// Opaque pointer types
+extern "C" {
+ pub type Module;
+}
+extern "C" {
+ pub type Context;
+}
+extern "C" {
+ pub type Type;
+}
+extern "C" {
+ pub type Value;
+}
+extern "C" {
+ pub type ConstantInt;
+}
+extern "C" {
+ pub type Attribute;
+}
+extern "C" {
+ pub type Metadata;
+}
+extern "C" {
+ pub type BasicBlock;
+}
+#[repr(C)]
+pub struct Builder<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct PassManager<'a>(InvariantOpaque<'a>);
+extern "C" {
+ pub type PassManagerBuilder;
+}
+extern "C" {
+ pub type Pass;
+}
+extern "C" {
+ pub type TargetMachine;
+}
+extern "C" {
+ pub type Archive;
+}
+#[repr(C)]
+pub struct ArchiveIterator<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct ArchiveChild<'a>(InvariantOpaque<'a>);
+extern "C" {
+ pub type Twine;
+}
+extern "C" {
+ pub type DiagnosticInfo;
+}
+extern "C" {
+ pub type SMDiagnostic;
+}
+#[repr(C)]
+pub struct RustArchiveMember<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct OperandBundleDef<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct Linker<'a>(InvariantOpaque<'a>);
+
+extern "C" {
+ pub type DiagnosticHandler;
+}
+
+pub type DiagnosticHandlerTy = unsafe extern "C" fn(&DiagnosticInfo, *mut c_void);
+pub type InlineAsmDiagHandlerTy = unsafe extern "C" fn(&SMDiagnostic, *const c_void, c_uint);
+
+pub mod coverageinfo {
+ use super::coverage_map;
+
+ /// Aligns with [llvm::coverage::CounterMappingRegion::RegionKind](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L209-L230)
+ #[derive(Copy, Clone, Debug)]
+ #[repr(C)]
+ pub enum RegionKind {
+        /// A CodeRegion associates some code with a counter.
+ CodeRegion = 0,
+
+ /// An ExpansionRegion represents a file expansion region that associates
+ /// a source range with the expansion of a virtual source file, such as
+ /// for a macro instantiation or #include file.
+ ExpansionRegion = 1,
+
+ /// A SkippedRegion represents a source range with code that was skipped
+ /// by a preprocessor or similar means.
+ SkippedRegion = 2,
+
+        /// A GapRegion is like a CodeRegion, but its count is only set as the
+        /// line execution count when it's the only region on the line.
+ GapRegion = 3,
+
+        /// A BranchRegion represents a leaf-level boolean expression and is
+        /// associated with two counters: one counting the times the expression
+        /// evaluates to true, and one counting the times it evaluates to false.
+ BranchRegion = 4,
+ }
+
+    /// This struct provides LLVM's representation of a "CounterMappingRegion", encoded into the
+ /// coverage map, in accordance with the
+ /// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
+ /// The struct composes fields representing the `Counter` type and value(s) (injected counter
+ /// ID, or expression type and operands), the source file (an indirect index into a "filenames
+ /// array", encoded separately), and source location (start and end positions of the represented
+ /// code region).
+ ///
+ /// Matches LLVMRustCounterMappingRegion.
+ #[derive(Copy, Clone, Debug)]
+ #[repr(C)]
+ pub struct CounterMappingRegion {
+ /// The counter type and type-dependent counter data, if any.
+ counter: coverage_map::Counter,
+
+ /// If the `RegionKind` is a `BranchRegion`, this represents the counter
+ /// for the false branch of the region.
+ false_counter: coverage_map::Counter,
+
+ /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the
+ /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes
+ /// that, in turn, are used to look up the filename for this region.
+ file_id: u32,
+
+ /// If the `RegionKind` is an `ExpansionRegion`, the `expanded_file_id` can be used to find
+ /// the mapping regions created as a result of macro expansion, by checking if their file id
+ /// matches the expanded file id.
+ expanded_file_id: u32,
+
+ /// 1-based starting line of the mapping region.
+ start_line: u32,
+
+ /// 1-based starting column of the mapping region.
+ start_col: u32,
+
+ /// 1-based ending line of the mapping region.
+ end_line: u32,
+
+ /// 1-based ending column of the mapping region. If the high bit is set, the current
+ /// mapping region is a gap area.
+ end_col: u32,
+
+ kind: RegionKind,
+ }
+
+ impl CounterMappingRegion {
+ pub(crate) fn code_region(
+ counter: coverage_map::Counter,
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter,
+ false_counter: coverage_map::Counter::zero(),
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::CodeRegion,
+ }
+ }
+
+ // This function might be used in the future; the LLVM API is still evolving, as is coverage
+ // support.
+ #[allow(dead_code)]
+ pub(crate) fn branch_region(
+ counter: coverage_map::Counter,
+ false_counter: coverage_map::Counter,
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter,
+ false_counter,
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::BranchRegion,
+ }
+ }
+
+ // This function might be used in the future; the LLVM API is still evolving, as is coverage
+ // support.
+ #[allow(dead_code)]
+ pub(crate) fn expansion_region(
+ file_id: u32,
+ expanded_file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter: coverage_map::Counter::zero(),
+ false_counter: coverage_map::Counter::zero(),
+ file_id,
+ expanded_file_id,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::ExpansionRegion,
+ }
+ }
+
+ // This function might be used in the future; the LLVM API is still evolving, as is coverage
+ // support.
+ #[allow(dead_code)]
+ pub(crate) fn skipped_region(
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter: coverage_map::Counter::zero(),
+ false_counter: coverage_map::Counter::zero(),
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::SkippedRegion,
+ }
+ }
+
+ // This function might be used in the future; the LLVM API is still evolving, as is coverage
+ // support.
+ #[allow(dead_code)]
+ pub(crate) fn gap_region(
+ counter: coverage_map::Counter,
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter,
+ false_counter: coverage_map::Counter::zero(),
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col: (1_u32 << 31) | end_col,
+ kind: RegionKind::GapRegion,
+ }
+ }
+ }
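+
+    // Encoding sketch (editorial, not in the original source): `gap_region` is
+    // the only constructor that touches the high bit of `end_col`, which is how
+    // the LLVM coverage mapping marks a gap area:
+    //
+    //     let gap = CounterMappingRegion::gap_region(counter, 0, 5, 1, 5, 10);
+    //     // gap.end_col == (1 << 31) | 10; a reader masks off the top bit to
+    //     // recover the real column, and the bit itself flags the gap.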
+}
+
+pub mod debuginfo {
+ use super::{InvariantOpaque, Metadata};
+ use bitflags::bitflags;
+
+ #[repr(C)]
+ pub struct DIBuilder<'a>(InvariantOpaque<'a>);
+
+ pub type DIDescriptor = Metadata;
+ pub type DILocation = Metadata;
+ pub type DIScope = DIDescriptor;
+ pub type DIFile = DIScope;
+ pub type DILexicalBlock = DIScope;
+ pub type DISubprogram = DIScope;
+ pub type DINameSpace = DIScope;
+ pub type DIType = DIDescriptor;
+ pub type DIBasicType = DIType;
+ pub type DIDerivedType = DIType;
+ pub type DICompositeType = DIDerivedType;
+ pub type DIVariable = DIDescriptor;
+ pub type DIGlobalVariableExpression = DIDescriptor;
+ pub type DIArray = DIDescriptor;
+ pub type DISubrange = DIDescriptor;
+ pub type DIEnumerator = DIDescriptor;
+ pub type DITemplateTypeParameter = DIDescriptor;
+
+    // These values **must** match LLVMRustDIFlags!
+ bitflags! {
+ #[repr(transparent)]
+ #[derive(Default)]
+ pub struct DIFlags: u32 {
+ const FlagZero = 0;
+ const FlagPrivate = 1;
+ const FlagProtected = 2;
+ const FlagPublic = 3;
+ const FlagFwdDecl = (1 << 2);
+ const FlagAppleBlock = (1 << 3);
+ const FlagBlockByrefStruct = (1 << 4);
+ const FlagVirtual = (1 << 5);
+ const FlagArtificial = (1 << 6);
+ const FlagExplicit = (1 << 7);
+ const FlagPrototyped = (1 << 8);
+ const FlagObjcClassComplete = (1 << 9);
+ const FlagObjectPointer = (1 << 10);
+ const FlagVector = (1 << 11);
+ const FlagStaticMember = (1 << 12);
+ const FlagLValueReference = (1 << 13);
+ const FlagRValueReference = (1 << 14);
+ const FlagExternalTypeRef = (1 << 15);
+ const FlagIntroducedVirtual = (1 << 18);
+ const FlagBitField = (1 << 19);
+ const FlagNoReturn = (1 << 20);
+ }
+ }
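+
+    // Caution (editorial note, not in the original source): the three
+    // visibility values form a two-bit field, not independent bits, so
+    // `contains` gives surprising answers:
+    //
+    //     let f = DIFlags::FlagPublic;               // value 3
+    //     assert!(f.contains(DIFlags::FlagPrivate)); // 3 & 1 != 0
+    //
+    // Compare the low two bits as a unit when checking visibility.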
+
+    // These values **must** match LLVMRustDISPFlags!
+ bitflags! {
+ #[repr(transparent)]
+ #[derive(Default)]
+ pub struct DISPFlags: u32 {
+ const SPFlagZero = 0;
+ const SPFlagVirtual = 1;
+ const SPFlagPureVirtual = 2;
+ const SPFlagLocalToUnit = (1 << 2);
+ const SPFlagDefinition = (1 << 3);
+ const SPFlagOptimized = (1 << 4);
+ const SPFlagMainSubprogram = (1 << 5);
+ }
+ }
+
+ /// LLVMRustDebugEmissionKind
+ #[derive(Copy, Clone)]
+ #[repr(C)]
+ pub enum DebugEmissionKind {
+ NoDebug,
+ FullDebug,
+ LineTablesOnly,
+ }
+
+ impl DebugEmissionKind {
+ pub fn from_generic(kind: rustc_session::config::DebugInfo) -> Self {
+ use rustc_session::config::DebugInfo;
+ match kind {
+ DebugInfo::None => DebugEmissionKind::NoDebug,
+ DebugInfo::Limited => DebugEmissionKind::LineTablesOnly,
+ DebugInfo::Full => DebugEmissionKind::FullDebug,
+ }
+ }
+ }
+}
+
+use bitflags::bitflags;
+// These values **must** match LLVMRustAllocKindFlags.
+bitflags! {
+ #[repr(transparent)]
+ #[derive(Default)]
+    pub struct AllocKindFlags: u64 {
+ const Unknown = 0;
+ const Alloc = 1;
+ const Realloc = 1 << 1;
+ const Free = 1 << 2;
+ const Uninitialized = 1 << 3;
+ const Zeroed = 1 << 4;
+ const Aligned = 1 << 5;
+ }
+}
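+
+// Usage sketch (editorial, not in the original source): an allocator entry
+// point such as a zeroed allocation would OR these flags together and pass the
+// raw bits to `LLVMRustCreateAllocKindAttr`:
+//
+//     let kind = AllocKindFlags::Alloc | AllocKindFlags::Zeroed | AllocKindFlags::Aligned;
+//     let attr = unsafe { LLVMRustCreateAllocKindAttr(llcx, kind.bits()) };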
+
+extern "C" {
+ pub type ModuleBuffer;
+}
+
+pub type SelfProfileBeforePassCallback =
+ unsafe extern "C" fn(*mut c_void, *const c_char, *const c_char);
+pub type SelfProfileAfterPassCallback = unsafe extern "C" fn(*mut c_void);
+
+extern "C" {
+ pub fn LLVMRustInstallFatalErrorHandler();
+ pub fn LLVMRustDisableSystemDialogsOnCrash();
+
+ // Create and destroy contexts.
+ pub fn LLVMRustContextCreate(shouldDiscardNames: bool) -> &'static mut Context;
+ pub fn LLVMContextDispose(C: &'static mut Context);
+ pub fn LLVMGetMDKindIDInContext(C: &Context, Name: *const c_char, SLen: c_uint) -> c_uint;
+
+ // Create modules.
+ pub fn LLVMModuleCreateWithNameInContext(ModuleID: *const c_char, C: &Context) -> &Module;
+ pub fn LLVMGetModuleContext(M: &Module) -> &Context;
+ pub fn LLVMCloneModule(M: &Module) -> &Module;
+
+ /// Data layout. See Module::getDataLayout.
+ pub fn LLVMGetDataLayoutStr(M: &Module) -> *const c_char;
+    pub fn LLVMSetDataLayout(M: &Module, DataLayoutStr: *const c_char);
+
+ /// See Module::setModuleInlineAsm.
+ pub fn LLVMRustAppendModuleInlineAsm(M: &Module, Asm: *const c_char, AsmLen: size_t);
+
+ /// See llvm::LLVMTypeKind::getTypeID.
+ pub fn LLVMRustGetTypeKind(Ty: &Type) -> TypeKind;
+
+ // Operations on integer types
+ pub fn LLVMInt1TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMInt8TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMInt16TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMInt32TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMInt64TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMIntTypeInContext(C: &Context, NumBits: c_uint) -> &Type;
+
+ pub fn LLVMGetIntTypeWidth(IntegerTy: &Type) -> c_uint;
+
+ // Operations on real types
+ pub fn LLVMFloatTypeInContext(C: &Context) -> &Type;
+ pub fn LLVMDoubleTypeInContext(C: &Context) -> &Type;
+
+ // Operations on function types
+ pub fn LLVMFunctionType<'a>(
+ ReturnType: &'a Type,
+ ParamTypes: *const &'a Type,
+ ParamCount: c_uint,
+ IsVarArg: Bool,
+ ) -> &'a Type;
+ pub fn LLVMCountParamTypes(FunctionTy: &Type) -> c_uint;
+ pub fn LLVMGetParamTypes<'a>(FunctionTy: &'a Type, Dest: *mut &'a Type);
+
+ // Operations on struct types
+ pub fn LLVMStructTypeInContext<'a>(
+ C: &'a Context,
+ ElementTypes: *const &'a Type,
+ ElementCount: c_uint,
+ Packed: Bool,
+ ) -> &'a Type;
+
+ // Operations on array, pointer, and vector types (sequence types)
+ pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type;
+ pub fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type;
+ pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type;
+
+ pub fn LLVMGetElementType(Ty: &Type) -> &Type;
+ pub fn LLVMGetVectorSize(VectorTy: &Type) -> c_uint;
+
+ // Operations on other types
+ pub fn LLVMVoidTypeInContext(C: &Context) -> &Type;
+ pub fn LLVMRustMetadataTypeInContext(C: &Context) -> &Type;
+
+ // Operations on all values
+ pub fn LLVMTypeOf(Val: &Value) -> &Type;
+ pub fn LLVMGetValueName2(Val: &Value, Length: *mut size_t) -> *const c_char;
+ pub fn LLVMSetValueName2(Val: &Value, Name: *const c_char, NameLen: size_t);
+ pub fn LLVMReplaceAllUsesWith<'a>(OldVal: &'a Value, NewVal: &'a Value);
+ pub fn LLVMSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Node: &'a Value);
+ pub fn LLVMGlobalSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
+ pub fn LLVMRustGlobalAddMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
+ pub fn LLVMValueAsMetadata(Node: &Value) -> &Metadata;
+
+ // Operations on constants of any type
+ pub fn LLVMConstNull(Ty: &Type) -> &Value;
+ pub fn LLVMGetUndef(Ty: &Type) -> &Value;
+
+ // Operations on metadata
+ pub fn LLVMMDStringInContext(C: &Context, Str: *const c_char, SLen: c_uint) -> &Value;
+ pub fn LLVMMDNodeInContext<'a>(
+ C: &'a Context,
+ Vals: *const &'a Value,
+ Count: c_uint,
+ ) -> &'a Value;
+ pub fn LLVMMDNodeInContext2<'a>(
+ C: &'a Context,
+ Vals: *const &'a Metadata,
+ Count: size_t,
+ ) -> &'a Metadata;
+ pub fn LLVMAddNamedMetadataOperand<'a>(M: &'a Module, Name: *const c_char, Val: &'a Value);
+
+ // Operations on scalar constants
+ pub fn LLVMConstInt(IntTy: &Type, N: c_ulonglong, SignExtend: Bool) -> &Value;
+ pub fn LLVMConstIntOfArbitraryPrecision(IntTy: &Type, Wn: c_uint, Ws: *const u64) -> &Value;
+ pub fn LLVMConstReal(RealTy: &Type, N: f64) -> &Value;
+ pub fn LLVMConstIntGetZExtValue(ConstantVal: &ConstantInt) -> c_ulonglong;
+ pub fn LLVMRustConstInt128Get(
+ ConstantVal: &ConstantInt,
+ SExt: bool,
+ high: &mut u64,
+ low: &mut u64,
+ ) -> bool;
+
+ // Operations on composite constants
+ pub fn LLVMConstStringInContext(
+ C: &Context,
+ Str: *const c_char,
+ Length: c_uint,
+ DontNullTerminate: Bool,
+ ) -> &Value;
+ pub fn LLVMConstStructInContext<'a>(
+ C: &'a Context,
+ ConstantVals: *const &'a Value,
+ Count: c_uint,
+ Packed: Bool,
+ ) -> &'a Value;
+
+ pub fn LLVMConstArray<'a>(
+ ElementTy: &'a Type,
+ ConstantVals: *const &'a Value,
+ Length: c_uint,
+ ) -> &'a Value;
+ pub fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value;
+
+ // Constant expressions
+ pub fn LLVMRustConstInBoundsGEP2<'a>(
+ ty: &'a Type,
+ ConstantVal: &'a Value,
+ ConstantIndices: *const &'a Value,
+ NumIndices: c_uint,
+ ) -> &'a Value;
+ pub fn LLVMConstZExt<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMConstPtrToInt<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMConstIntToPtr<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMConstBitCast<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMConstPointerCast<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMGetAggregateElement(ConstantVal: &Value, Idx: c_uint) -> Option<&Value>;
+
+ // Operations on global variables, functions, and aliases (globals)
+ pub fn LLVMIsDeclaration(Global: &Value) -> Bool;
+ pub fn LLVMRustGetLinkage(Global: &Value) -> Linkage;
+ pub fn LLVMRustSetLinkage(Global: &Value, RustLinkage: Linkage);
+ pub fn LLVMSetSection(Global: &Value, Section: *const c_char);
+ pub fn LLVMRustGetVisibility(Global: &Value) -> Visibility;
+ pub fn LLVMRustSetVisibility(Global: &Value, Viz: Visibility);
+ pub fn LLVMRustSetDSOLocal(Global: &Value, is_dso_local: bool);
+ pub fn LLVMGetAlignment(Global: &Value) -> c_uint;
+ pub fn LLVMSetAlignment(Global: &Value, Bytes: c_uint);
+ pub fn LLVMSetDLLStorageClass(V: &Value, C: DLLStorageClass);
+
+ // Operations on global variables
+ pub fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>;
+ pub fn LLVMAddGlobal<'a>(M: &'a Module, Ty: &'a Type, Name: *const c_char) -> &'a Value;
+ pub fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> Option<&Value>;
+ pub fn LLVMRustGetOrInsertGlobal<'a>(
+ M: &'a Module,
+ Name: *const c_char,
+ NameLen: size_t,
+ T: &'a Type,
+ ) -> &'a Value;
+ pub fn LLVMRustInsertPrivateGlobal<'a>(M: &'a Module, T: &'a Type) -> &'a Value;
+ pub fn LLVMGetFirstGlobal(M: &Module) -> Option<&Value>;
+ pub fn LLVMGetNextGlobal(GlobalVar: &Value) -> Option<&Value>;
+ pub fn LLVMDeleteGlobal(GlobalVar: &Value);
+ pub fn LLVMGetInitializer(GlobalVar: &Value) -> Option<&Value>;
+ pub fn LLVMSetInitializer<'a>(GlobalVar: &'a Value, ConstantVal: &'a Value);
+ pub fn LLVMIsThreadLocal(GlobalVar: &Value) -> Bool;
+ pub fn LLVMSetThreadLocalMode(GlobalVar: &Value, Mode: ThreadLocalMode);
+ pub fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool;
+ pub fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool);
+ pub fn LLVMRustGetNamedValue(
+ M: &Module,
+ Name: *const c_char,
+ NameLen: size_t,
+ ) -> Option<&Value>;
+ pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool);
+
+ // Operations on attributes
+ pub fn LLVMRustCreateAttrNoValue(C: &Context, attr: AttributeKind) -> &Attribute;
+ pub fn LLVMCreateStringAttribute(
+ C: &Context,
+ Name: *const c_char,
+ NameLen: c_uint,
+ Value: *const c_char,
+ ValueLen: c_uint,
+ ) -> &Attribute;
+ pub fn LLVMRustCreateAlignmentAttr(C: &Context, bytes: u64) -> &Attribute;
+ pub fn LLVMRustCreateDereferenceableAttr(C: &Context, bytes: u64) -> &Attribute;
+ pub fn LLVMRustCreateDereferenceableOrNullAttr(C: &Context, bytes: u64) -> &Attribute;
+ pub fn LLVMRustCreateByValAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute;
+ pub fn LLVMRustCreateStructRetAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute;
+ pub fn LLVMRustCreateElementTypeAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute;
+ pub fn LLVMRustCreateUWTableAttr(C: &Context, async_: bool) -> &Attribute;
+ pub fn LLVMRustCreateAllocSizeAttr(C: &Context, size_arg: u32) -> &Attribute;
+    pub fn LLVMRustCreateAllocKindAttr(C: &Context, kind_arg: u64) -> &Attribute;
+
+ // Operations on functions
+ pub fn LLVMRustGetOrInsertFunction<'a>(
+ M: &'a Module,
+ Name: *const c_char,
+ NameLen: size_t,
+ FunctionTy: &'a Type,
+ ) -> &'a Value;
+ pub fn LLVMSetFunctionCallConv(Fn: &Value, CC: c_uint);
+ pub fn LLVMRustAddFunctionAttributes<'a>(
+ Fn: &'a Value,
+ index: c_uint,
+ Attrs: *const &'a Attribute,
+ AttrsLen: size_t,
+ );
+
+ // Operations on parameters
+ pub fn LLVMIsAArgument(Val: &Value) -> Option<&Value>;
+ pub fn LLVMCountParams(Fn: &Value) -> c_uint;
+ pub fn LLVMGetParam(Fn: &Value, Index: c_uint) -> &Value;
+
+ // Operations on basic blocks
+ pub fn LLVMGetBasicBlockParent(BB: &BasicBlock) -> &Value;
+ pub fn LLVMAppendBasicBlockInContext<'a>(
+ C: &'a Context,
+ Fn: &'a Value,
+ Name: *const c_char,
+ ) -> &'a BasicBlock;
+
+ // Operations on instructions
+ pub fn LLVMIsAInstruction(Val: &Value) -> Option<&Value>;
+ pub fn LLVMGetFirstBasicBlock(Fn: &Value) -> &BasicBlock;
+
+ // Operations on call sites
+ pub fn LLVMSetInstructionCallConv(Instr: &Value, CC: c_uint);
+ pub fn LLVMRustAddCallSiteAttributes<'a>(
+ Instr: &'a Value,
+ index: c_uint,
+ Attrs: *const &'a Attribute,
+ AttrsLen: size_t,
+ );
+
+ // Operations on load/store instructions (only)
+ pub fn LLVMSetVolatile(MemoryAccessInst: &Value, volatile: Bool);
+
+ // Operations on phi nodes
+ pub fn LLVMAddIncoming<'a>(
+ PhiNode: &'a Value,
+ IncomingValues: *const &'a Value,
+ IncomingBlocks: *const &'a BasicBlock,
+ Count: c_uint,
+ );
+
+ // Instruction builders
+ pub fn LLVMCreateBuilderInContext(C: &Context) -> &mut Builder<'_>;
+ pub fn LLVMPositionBuilderAtEnd<'a>(Builder: &Builder<'a>, Block: &'a BasicBlock);
+ pub fn LLVMGetInsertBlock<'a>(Builder: &Builder<'a>) -> &'a BasicBlock;
+ pub fn LLVMDisposeBuilder<'a>(Builder: &'a mut Builder<'a>);
+
+ // Metadata
+ pub fn LLVMSetCurrentDebugLocation<'a>(Builder: &Builder<'a>, L: &'a Value);
+
+ // Terminators
+ pub fn LLVMBuildRetVoid<'a>(B: &Builder<'a>) -> &'a Value;
+ pub fn LLVMBuildRet<'a>(B: &Builder<'a>, V: &'a Value) -> &'a Value;
+ pub fn LLVMBuildBr<'a>(B: &Builder<'a>, Dest: &'a BasicBlock) -> &'a Value;
+ pub fn LLVMBuildCondBr<'a>(
+ B: &Builder<'a>,
+ If: &'a Value,
+ Then: &'a BasicBlock,
+ Else: &'a BasicBlock,
+ ) -> &'a Value;
+ pub fn LLVMBuildSwitch<'a>(
+ B: &Builder<'a>,
+ V: &'a Value,
+ Else: &'a BasicBlock,
+ NumCases: c_uint,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildInvoke<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ Fn: &'a Value,
+ Args: *const &'a Value,
+ NumArgs: c_uint,
+ Then: &'a BasicBlock,
+ Catch: &'a BasicBlock,
+ Bundle: Option<&OperandBundleDef<'a>>,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildLandingPad<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ PersFn: Option<&'a Value>,
+ NumClauses: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildResume<'a>(B: &Builder<'a>, Exn: &'a Value) -> &'a Value;
+ pub fn LLVMBuildUnreachable<'a>(B: &Builder<'a>) -> &'a Value;
+
+ pub fn LLVMRustBuildCleanupPad<'a>(
+ B: &Builder<'a>,
+ ParentPad: Option<&'a Value>,
+ ArgCnt: c_uint,
+ Args: *const &'a Value,
+ Name: *const c_char,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustBuildCleanupRet<'a>(
+ B: &Builder<'a>,
+ CleanupPad: &'a Value,
+ UnwindBB: Option<&'a BasicBlock>,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustBuildCatchPad<'a>(
+ B: &Builder<'a>,
+ ParentPad: &'a Value,
+ ArgCnt: c_uint,
+ Args: *const &'a Value,
+ Name: *const c_char,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustBuildCatchRet<'a>(
+ B: &Builder<'a>,
+ Pad: &'a Value,
+ BB: &'a BasicBlock,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustBuildCatchSwitch<'a>(
+ Builder: &Builder<'a>,
+ ParentPad: Option<&'a Value>,
+ BB: Option<&'a BasicBlock>,
+ NumHandlers: c_uint,
+ Name: *const c_char,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustAddHandler<'a>(CatchSwitch: &'a Value, Handler: &'a BasicBlock);
+ pub fn LLVMSetPersonalityFn<'a>(Func: &'a Value, Pers: &'a Value);
+
+ // Add a case to the switch instruction
+ pub fn LLVMAddCase<'a>(Switch: &'a Value, OnVal: &'a Value, Dest: &'a BasicBlock);
+
+ // Add a clause to the landing pad instruction
+ pub fn LLVMAddClause<'a>(LandingPad: &'a Value, ClauseVal: &'a Value);
+
+ // Set the cleanup on a landing pad instruction
+ pub fn LLVMSetCleanup(LandingPad: &Value, Val: Bool);
+
+ // Arithmetic
+ pub fn LLVMBuildAdd<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFAdd<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSub<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFSub<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildMul<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFMul<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildUDiv<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildExactUDiv<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSDiv<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildExactSDiv<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFDiv<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildURem<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSRem<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFRem<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildShl<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildLShr<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildAShr<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNSWAdd<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNUWAdd<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNSWSub<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNUWSub<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNSWMul<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNUWMul<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildAnd<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildOr<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildXor<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNeg<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
+ pub fn LLVMBuildFNeg<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
+ pub fn LLVMBuildNot<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
+ pub fn LLVMRustSetFastMath(Instr: &Value);
+
+ // Memory
+ pub fn LLVMBuildAlloca<'a>(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value;
+ pub fn LLVMBuildArrayAlloca<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ Val: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildLoad2<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ PointerVal: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+
+ pub fn LLVMBuildStore<'a>(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value;
+
+ pub fn LLVMBuildGEP2<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ Pointer: &'a Value,
+ Indices: *const &'a Value,
+ NumIndices: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildInBoundsGEP2<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ Pointer: &'a Value,
+ Indices: *const &'a Value,
+ NumIndices: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildStructGEP2<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ Pointer: &'a Value,
+ Idx: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+
+ // Casts
+ pub fn LLVMBuildTrunc<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildZExt<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSExt<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFPToUI<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFPToSI<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildUIToFP<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSIToFP<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFPTrunc<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFPExt<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildPtrToInt<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildIntToPtr<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildBitCast<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildPointerCast<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildIntCast<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ IsSigned: bool,
+ ) -> &'a Value;
+
+ // Comparisons
+ pub fn LLVMBuildICmp<'a>(
+ B: &Builder<'a>,
+ Op: c_uint,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFCmp<'a>(
+ B: &Builder<'a>,
+ Op: c_uint,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+
+ // Miscellaneous instructions
+ pub fn LLVMBuildPhi<'a>(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value;
+ pub fn LLVMRustGetInstrProfIncrementIntrinsic(M: &Module) -> &Value;
+ pub fn LLVMRustBuildCall<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ Fn: &'a Value,
+ Args: *const &'a Value,
+ NumArgs: c_uint,
+ Bundle: Option<&OperandBundleDef<'a>>,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildMemCpy<'a>(
+ B: &Builder<'a>,
+ Dst: &'a Value,
+ DstAlign: c_uint,
+ Src: &'a Value,
+ SrcAlign: c_uint,
+ Size: &'a Value,
+ IsVolatile: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildMemMove<'a>(
+ B: &Builder<'a>,
+ Dst: &'a Value,
+ DstAlign: c_uint,
+ Src: &'a Value,
+ SrcAlign: c_uint,
+ Size: &'a Value,
+ IsVolatile: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildMemSet<'a>(
+ B: &Builder<'a>,
+ Dst: &'a Value,
+ DstAlign: c_uint,
+ Val: &'a Value,
+ Size: &'a Value,
+ IsVolatile: bool,
+ ) -> &'a Value;
+ pub fn LLVMBuildSelect<'a>(
+ B: &Builder<'a>,
+ If: &'a Value,
+ Then: &'a Value,
+ Else: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildVAArg<'a>(
+ B: &Builder<'a>,
+ list: &'a Value,
+ Ty: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildExtractElement<'a>(
+ B: &Builder<'a>,
+ VecVal: &'a Value,
+ Index: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildInsertElement<'a>(
+ B: &Builder<'a>,
+ VecVal: &'a Value,
+ EltVal: &'a Value,
+ Index: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildShuffleVector<'a>(
+ B: &Builder<'a>,
+ V1: &'a Value,
+ V2: &'a Value,
+ Mask: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildExtractValue<'a>(
+ B: &Builder<'a>,
+ AggVal: &'a Value,
+ Index: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildInsertValue<'a>(
+ B: &Builder<'a>,
+ AggVal: &'a Value,
+ EltVal: &'a Value,
+ Index: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+
+ pub fn LLVMRustBuildVectorReduceFAdd<'a>(
+ B: &Builder<'a>,
+ Acc: &'a Value,
+ Src: &'a Value,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceFMul<'a>(
+ B: &Builder<'a>,
+ Acc: &'a Value,
+ Src: &'a Value,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceAdd<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceMul<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceAnd<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceOr<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceXor<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceMin<'a>(
+ B: &Builder<'a>,
+ Src: &'a Value,
+ IsSigned: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceMax<'a>(
+ B: &Builder<'a>,
+ Src: &'a Value,
+ IsSigned: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceFMin<'a>(
+ B: &Builder<'a>,
+ Src: &'a Value,
+ IsNaN: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceFMax<'a>(
+ B: &Builder<'a>,
+ Src: &'a Value,
+ IsNaN: bool,
+ ) -> &'a Value;
+
+    pub fn LLVMRustBuildMinNum<'a>(B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value) -> &'a Value;
+    pub fn LLVMRustBuildMaxNum<'a>(B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value) -> &'a Value;
+
+ // Atomic Operations
+ pub fn LLVMRustBuildAtomicLoad<'a>(
+ B: &Builder<'a>,
+ ElementType: &'a Type,
+ PointerVal: &'a Value,
+ Name: *const c_char,
+ Order: AtomicOrdering,
+ ) -> &'a Value;
+
+ pub fn LLVMRustBuildAtomicStore<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ Ptr: &'a Value,
+ Order: AtomicOrdering,
+ ) -> &'a Value;
+
+ pub fn LLVMRustBuildAtomicCmpXchg<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ CMP: &'a Value,
+ RHS: &'a Value,
+ Order: AtomicOrdering,
+ FailureOrder: AtomicOrdering,
+ Weak: Bool,
+ ) -> &'a Value;
+
+ pub fn LLVMBuildAtomicRMW<'a>(
+ B: &Builder<'a>,
+ Op: AtomicRmwBinOp,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Order: AtomicOrdering,
+ SingleThreaded: Bool,
+ ) -> &'a Value;
+
+ pub fn LLVMRustBuildAtomicFence(
+ B: &Builder<'_>,
+ Order: AtomicOrdering,
+ Scope: SynchronizationScope,
+ );
+
+ /// Writes a module to the specified path. Returns 0 on success.
+ pub fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int;
+
+ /// Creates a pass manager.
+ pub fn LLVMCreatePassManager<'a>() -> &'a mut PassManager<'a>;
+
+    /// Creates a function-by-function pass manager.
+ pub fn LLVMCreateFunctionPassManagerForModule(M: &Module) -> &mut PassManager<'_>;
+
+ /// Disposes a pass manager.
+ pub fn LLVMDisposePassManager<'a>(PM: &'a mut PassManager<'a>);
+
+ /// Runs a pass manager on a module.
+ pub fn LLVMRunPassManager<'a>(PM: &PassManager<'a>, M: &'a Module) -> Bool;
+
+ pub fn LLVMInitializePasses();
+
+ pub fn LLVMTimeTraceProfilerInitialize();
+
+ pub fn LLVMTimeTraceProfilerFinishThread();
+
+ pub fn LLVMTimeTraceProfilerFinish(FileName: *const c_char);
+
+ pub fn LLVMAddAnalysisPasses<'a>(T: &'a TargetMachine, PM: &PassManager<'a>);
+
+ pub fn LLVMRustPassManagerBuilderCreate() -> &'static mut PassManagerBuilder;
+ pub fn LLVMRustPassManagerBuilderDispose(PMB: &'static mut PassManagerBuilder);
+ pub fn LLVMRustPassManagerBuilderUseInlinerWithThreshold(
+ PMB: &PassManagerBuilder,
+ threshold: c_uint,
+ );
+ pub fn LLVMRustPassManagerBuilderPopulateModulePassManager(
+ PMB: &PassManagerBuilder,
+ PM: &PassManager<'_>,
+ );
+
+ pub fn LLVMRustPassManagerBuilderPopulateFunctionPassManager(
+ PMB: &PassManagerBuilder,
+ PM: &PassManager<'_>,
+ );
+ pub fn LLVMRustPassManagerBuilderPopulateLTOPassManager(
+ PMB: &PassManagerBuilder,
+ PM: &PassManager<'_>,
+ Internalize: Bool,
+ RunInliner: Bool,
+ );
+ pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
+ PMB: &PassManagerBuilder,
+ PM: &PassManager<'_>,
+ );
+
+ pub fn LLVMGetHostCPUFeatures() -> *mut c_char;
+
+ pub fn LLVMDisposeMessage(message: *mut c_char);
+
+ pub fn LLVMIsMultithreaded() -> Bool;
+
+ /// Returns a string describing the last error caused by an LLVMRust* call.
+ pub fn LLVMRustGetLastError() -> *const c_char;
+
+    /// Prints the pass timings, since static dtors aren't picking them up.
+ pub fn LLVMRustPrintPassTimings();
+
+ pub fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> &Type;
+
+ pub fn LLVMStructSetBody<'a>(
+ StructTy: &'a Type,
+ ElementTypes: *const &'a Type,
+ ElementCount: c_uint,
+ Packed: Bool,
+ );
+
+ /// Prepares inline assembly.
+ pub fn LLVMRustInlineAsm(
+ Ty: &Type,
+ AsmString: *const c_char,
+ AsmStringLen: size_t,
+ Constraints: *const c_char,
+ ConstraintsLen: size_t,
+ SideEffects: Bool,
+ AlignStack: Bool,
+ Dialect: AsmDialect,
+ CanThrow: Bool,
+ ) -> &Value;
+ pub fn LLVMRustInlineAsmVerify(
+ Ty: &Type,
+ Constraints: *const c_char,
+ ConstraintsLen: size_t,
+ ) -> bool;
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteFilenamesSectionToBuffer(
+ Filenames: *const *const c_char,
+ FilenamesLen: size_t,
+ BufferOut: &RustString,
+ );
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteMappingToBuffer(
+ VirtualFileMappingIDs: *const c_uint,
+ NumVirtualFileMappingIDs: c_uint,
+ Expressions: *const coverage_map::CounterExpression,
+ NumExpressions: c_uint,
+ MappingRegions: *const coverageinfo::CounterMappingRegion,
+ NumMappingRegions: c_uint,
+ BufferOut: &RustString,
+ );
+
+ pub fn LLVMRustCoverageCreatePGOFuncNameVar(F: &Value, FuncName: *const c_char) -> &Value;
+ pub fn LLVMRustCoverageHashCString(StrVal: *const c_char) -> u64;
+ pub fn LLVMRustCoverageHashByteArray(Bytes: *const c_char, NumBytes: size_t) -> u64;
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteMapSectionNameToString(M: &Module, Str: &RustString);
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteFuncSectionNameToString(M: &Module, Str: &RustString);
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteMappingVarNameToString(Str: &RustString);
+
+ pub fn LLVMRustCoverageMappingVersion() -> u32;
+ pub fn LLVMRustDebugMetadataVersion() -> u32;
+ pub fn LLVMRustVersionMajor() -> u32;
+ pub fn LLVMRustVersionMinor() -> u32;
+ pub fn LLVMRustVersionPatch() -> u32;
+
+    /// Adds an LLVM module flag (one name/value pair per call).
+    ///
+    /// For cross-language (Rust/C) LTO to work, module flags must be compatible with Clang.
+    /// What "compatible" means depends on the merge behaviors involved.
+ pub fn LLVMRustAddModuleFlag(
+ M: &Module,
+ merge_behavior: LLVMModFlagBehavior,
+ name: *const c_char,
+ value: u32,
+ );
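+    // Illustrative call (editorial sketch, not in the original source; mirrors
+    // how the debug-info version flag is commonly set, with `Warning` merge
+    // behavior matching what Clang emits):
+    //
+    //     LLVMRustAddModuleFlag(
+    //         llmod,
+    //         LLVMModFlagBehavior::Warning,
+    //         cstr!("Debug Info Version").as_ptr(),
+    //         LLVMRustDebugMetadataVersion(),
+    //     );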
+ pub fn LLVMRustHasModuleFlag(M: &Module, name: *const c_char, len: size_t) -> bool;
+
+ pub fn LLVMRustMetadataAsValue<'a>(C: &'a Context, MD: &'a Metadata) -> &'a Value;
+
+ pub fn LLVMRustDIBuilderCreate(M: &Module) -> &mut DIBuilder<'_>;
+
+ pub fn LLVMRustDIBuilderDispose<'a>(Builder: &'a mut DIBuilder<'a>);
+
+ pub fn LLVMRustDIBuilderFinalize(Builder: &DIBuilder<'_>);
+
+ pub fn LLVMRustDIBuilderCreateCompileUnit<'a>(
+ Builder: &DIBuilder<'a>,
+ Lang: c_uint,
+ File: &'a DIFile,
+ Producer: *const c_char,
+ ProducerLen: size_t,
+ isOptimized: bool,
+ Flags: *const c_char,
+ RuntimeVer: c_uint,
+ SplitName: *const c_char,
+ SplitNameLen: size_t,
+ kind: DebugEmissionKind,
+ DWOId: u64,
+ SplitDebugInlining: bool,
+ ) -> &'a DIDescriptor;
+
+ pub fn LLVMRustDIBuilderCreateFile<'a>(
+ Builder: &DIBuilder<'a>,
+ Filename: *const c_char,
+ FilenameLen: size_t,
+ Directory: *const c_char,
+ DirectoryLen: size_t,
+ CSKind: ChecksumKind,
+ Checksum: *const c_char,
+ ChecksumLen: size_t,
+ ) -> &'a DIFile;
+
+ pub fn LLVMRustDIBuilderCreateSubroutineType<'a>(
+ Builder: &DIBuilder<'a>,
+ ParameterTypes: &'a DIArray,
+ ) -> &'a DICompositeType;
+
+ pub fn LLVMRustDIBuilderCreateFunction<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIDescriptor,
+ Name: *const c_char,
+ NameLen: size_t,
+ LinkageName: *const c_char,
+ LinkageNameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ Ty: &'a DIType,
+ ScopeLine: c_uint,
+ Flags: DIFlags,
+ SPFlags: DISPFlags,
+ MaybeFn: Option<&'a Value>,
+ TParam: &'a DIArray,
+ Decl: Option<&'a DIDescriptor>,
+ ) -> &'a DISubprogram;
+
+ pub fn LLVMRustDIBuilderCreateBasicType<'a>(
+ Builder: &DIBuilder<'a>,
+ Name: *const c_char,
+ NameLen: size_t,
+ SizeInBits: u64,
+ Encoding: c_uint,
+ ) -> &'a DIBasicType;
+
+ pub fn LLVMRustDIBuilderCreateTypedef<'a>(
+ Builder: &DIBuilder<'a>,
+ Type: &'a DIBasicType,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ Scope: Option<&'a DIScope>,
+ ) -> &'a DIDerivedType;
+
+ pub fn LLVMRustDIBuilderCreatePointerType<'a>(
+ Builder: &DIBuilder<'a>,
+ PointeeTy: &'a DIType,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ AddressSpace: c_uint,
+ Name: *const c_char,
+ NameLen: size_t,
+ ) -> &'a DIDerivedType;
+
+ pub fn LLVMRustDIBuilderCreateStructType<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: Option<&'a DIDescriptor>,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNumber: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ Flags: DIFlags,
+ DerivedFrom: Option<&'a DIType>,
+ Elements: &'a DIArray,
+ RunTimeLang: c_uint,
+ VTableHolder: Option<&'a DIType>,
+ UniqueId: *const c_char,
+ UniqueIdLen: size_t,
+ ) -> &'a DICompositeType;
+
+ pub fn LLVMRustDIBuilderCreateMemberType<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIDescriptor,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ OffsetInBits: u64,
+ Flags: DIFlags,
+ Ty: &'a DIType,
+ ) -> &'a DIDerivedType;
+
+ pub fn LLVMRustDIBuilderCreateVariantMemberType<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNumber: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ OffsetInBits: u64,
+ Discriminant: Option<&'a Value>,
+ Flags: DIFlags,
+ Ty: &'a DIType,
+ ) -> &'a DIType;
+
+ pub fn LLVMRustDIBuilderCreateLexicalBlock<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ File: &'a DIFile,
+ Line: c_uint,
+ Col: c_uint,
+ ) -> &'a DILexicalBlock;
+
+ pub fn LLVMRustDIBuilderCreateLexicalBlockFile<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ File: &'a DIFile,
+ ) -> &'a DILexicalBlock;
+
+ pub fn LLVMRustDIBuilderCreateStaticVariable<'a>(
+ Builder: &DIBuilder<'a>,
+ Context: Option<&'a DIScope>,
+ Name: *const c_char,
+ NameLen: size_t,
+ LinkageName: *const c_char,
+ LinkageNameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ Ty: &'a DIType,
+ isLocalToUnit: bool,
+ Val: &'a Value,
+ Decl: Option<&'a DIDescriptor>,
+ AlignInBits: u32,
+ ) -> &'a DIGlobalVariableExpression;
+
+ pub fn LLVMRustDIBuilderCreateVariable<'a>(
+ Builder: &DIBuilder<'a>,
+ Tag: c_uint,
+ Scope: &'a DIDescriptor,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ Ty: &'a DIType,
+ AlwaysPreserve: bool,
+ Flags: DIFlags,
+ ArgNo: c_uint,
+ AlignInBits: u32,
+ ) -> &'a DIVariable;
+
+ pub fn LLVMRustDIBuilderCreateArrayType<'a>(
+ Builder: &DIBuilder<'a>,
+ Size: u64,
+ AlignInBits: u32,
+ Ty: &'a DIType,
+ Subscripts: &'a DIArray,
+ ) -> &'a DIType;
+
+ pub fn LLVMRustDIBuilderGetOrCreateSubrange<'a>(
+ Builder: &DIBuilder<'a>,
+ Lo: i64,
+ Count: i64,
+ ) -> &'a DISubrange;
+
+ pub fn LLVMRustDIBuilderGetOrCreateArray<'a>(
+ Builder: &DIBuilder<'a>,
+ Ptr: *const Option<&'a DIDescriptor>,
+ Count: c_uint,
+ ) -> &'a DIArray;
+
+ pub fn LLVMRustDIBuilderInsertDeclareAtEnd<'a>(
+ Builder: &DIBuilder<'a>,
+ Val: &'a Value,
+ VarInfo: &'a DIVariable,
+ AddrOps: *const u64,
+ AddrOpsCount: c_uint,
+ DL: &'a DILocation,
+ InsertAtEnd: &'a BasicBlock,
+ ) -> &'a Value;
+
+ pub fn LLVMRustDIBuilderCreateEnumerator<'a>(
+ Builder: &DIBuilder<'a>,
+ Name: *const c_char,
+ NameLen: size_t,
+ Value: i64,
+ IsUnsigned: bool,
+ ) -> &'a DIEnumerator;
+
+ pub fn LLVMRustDIBuilderCreateEnumerationType<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNumber: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ Elements: &'a DIArray,
+ ClassType: &'a DIType,
+ IsScoped: bool,
+ ) -> &'a DIType;
+
+ pub fn LLVMRustDIBuilderCreateUnionType<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: Option<&'a DIScope>,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNumber: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ Flags: DIFlags,
+ Elements: Option<&'a DIArray>,
+ RunTimeLang: c_uint,
+ UniqueId: *const c_char,
+ UniqueIdLen: size_t,
+ ) -> &'a DIType;
+
+ pub fn LLVMRustDIBuilderCreateVariantPart<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ Flags: DIFlags,
+ Discriminator: Option<&'a DIDerivedType>,
+ Elements: &'a DIArray,
+ UniqueId: *const c_char,
+ UniqueIdLen: size_t,
+ ) -> &'a DIDerivedType;
+
+ pub fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr);
+
+ pub fn LLVMRustDIBuilderCreateTemplateTypeParameter<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: Option<&'a DIScope>,
+ Name: *const c_char,
+ NameLen: size_t,
+ Ty: &'a DIType,
+ ) -> &'a DITemplateTypeParameter;
+
+ pub fn LLVMRustDIBuilderCreateNameSpace<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: Option<&'a DIScope>,
+ Name: *const c_char,
+ NameLen: size_t,
+ ExportSymbols: bool,
+ ) -> &'a DINameSpace;
+
+ pub fn LLVMRustDICompositeTypeReplaceArrays<'a>(
+ Builder: &DIBuilder<'a>,
+ CompositeType: &'a DIType,
+ Elements: Option<&'a DIArray>,
+ Params: Option<&'a DIArray>,
+ );
+
+ pub fn LLVMRustDIBuilderCreateDebugLocation<'a>(
+ Line: c_uint,
+ Column: c_uint,
+ Scope: &'a DIScope,
+ InlinedAt: Option<&'a DILocation>,
+ ) -> &'a DILocation;
+ pub fn LLVMRustDIBuilderCreateOpDeref() -> u64;
+ pub fn LLVMRustDIBuilderCreateOpPlusUconst() -> u64;
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustWriteTypeToString(Type: &Type, s: &RustString);
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustWriteValueToString(value_ref: &Value, s: &RustString);
+
+ pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>;
+
+ pub fn LLVMRustFindAndCreatePass(Pass: *const c_char) -> Option<&'static mut Pass>;
+ pub fn LLVMRustCreateAddressSanitizerFunctionPass(Recover: bool) -> &'static mut Pass;
+ pub fn LLVMRustCreateModuleAddressSanitizerPass(Recover: bool) -> &'static mut Pass;
+ pub fn LLVMRustCreateMemorySanitizerPass(
+ TrackOrigins: c_int,
+ Recover: bool,
+ ) -> &'static mut Pass;
+ pub fn LLVMRustCreateThreadSanitizerPass() -> &'static mut Pass;
+ pub fn LLVMRustCreateHWAddressSanitizerPass(Recover: bool) -> &'static mut Pass;
+ pub fn LLVMRustAddPass(PM: &PassManager<'_>, Pass: &'static mut Pass);
+ pub fn LLVMRustAddLastExtensionPasses(
+ PMB: &PassManagerBuilder,
+ Passes: *const &'static mut Pass,
+ NumPasses: size_t,
+ );
+
+ pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool;
+
+ pub fn LLVMRustPrintTargetCPUs(T: &TargetMachine);
+ pub fn LLVMRustGetTargetFeaturesCount(T: &TargetMachine) -> size_t;
+ pub fn LLVMRustGetTargetFeature(
+ T: &TargetMachine,
+ Index: size_t,
+ Feature: &mut *const c_char,
+ Desc: &mut *const c_char,
+ );
+
+ pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char;
+ pub fn LLVMRustCreateTargetMachine(
+ Triple: *const c_char,
+ CPU: *const c_char,
+ Features: *const c_char,
+ Abi: *const c_char,
+ Model: CodeModel,
+ Reloc: RelocModel,
+ Level: CodeGenOptLevel,
+ UseSoftFP: bool,
+ FunctionSections: bool,
+ DataSections: bool,
+ UniqueSectionNames: bool,
+ TrapUnreachable: bool,
+ Singlethread: bool,
+ AsmComments: bool,
+ EmitStackSizeSection: bool,
+ RelaxELFRelocations: bool,
+ UseInitArray: bool,
+ SplitDwarfFile: *const c_char,
+ ) -> Option<&'static mut TargetMachine>;
+ pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
+ pub fn LLVMRustAddBuilderLibraryInfo<'a>(
+ PMB: &'a PassManagerBuilder,
+ M: &'a Module,
+ DisableSimplifyLibCalls: bool,
+ );
+ pub fn LLVMRustConfigurePassManagerBuilder(
+ PMB: &PassManagerBuilder,
+ OptLevel: CodeGenOptLevel,
+ MergeFunctions: bool,
+ SLPVectorize: bool,
+ LoopVectorize: bool,
+ PrepareForThinLTO: bool,
+ PGOGenPath: *const c_char,
+ PGOUsePath: *const c_char,
+ PGOSampleUsePath: *const c_char,
+ SizeLevel: c_int,
+ );
+ pub fn LLVMRustAddLibraryInfo<'a>(
+ PM: &PassManager<'a>,
+ M: &'a Module,
+ DisableSimplifyLibCalls: bool,
+ );
+ pub fn LLVMRustRunFunctionPassManager<'a>(PM: &PassManager<'a>, M: &'a Module);
+ pub fn LLVMRustWriteOutputFile<'a>(
+ T: &'a TargetMachine,
+ PM: &PassManager<'a>,
+ M: &'a Module,
+ Output: *const c_char,
+ DwoOutput: *const c_char,
+ FileType: FileType,
+ ) -> LLVMRustResult;
+ pub fn LLVMRustOptimizeWithNewPassManager<'a>(
+ M: &'a Module,
+ TM: &'a TargetMachine,
+ OptLevel: PassBuilderOptLevel,
+ OptStage: OptStage,
+ NoPrepopulatePasses: bool,
+ VerifyIR: bool,
+ UseThinLTOBuffers: bool,
+ MergeFunctions: bool,
+ UnrollLoops: bool,
+ SLPVectorize: bool,
+ LoopVectorize: bool,
+ DisableSimplifyLibCalls: bool,
+ EmitLifetimeMarkers: bool,
+ SanitizerOptions: Option<&SanitizerOptions>,
+ PGOGenPath: *const c_char,
+ PGOUsePath: *const c_char,
+ InstrumentCoverage: bool,
+ InstrumentGCOV: bool,
+ PGOSampleUsePath: *const c_char,
+ DebugInfoForProfiling: bool,
+ llvm_selfprofiler: *mut c_void,
+ begin_callback: SelfProfileBeforePassCallback,
+ end_callback: SelfProfileAfterPassCallback,
+ ExtraPasses: *const c_char,
+ ExtraPassesLen: size_t,
+ LLVMPlugins: *const c_char,
+ LLVMPluginsLen: size_t,
+ ) -> LLVMRustResult;
+ pub fn LLVMRustPrintModule(
+ M: &Module,
+ Output: *const c_char,
+ Demangle: extern "C" fn(*const c_char, size_t, *mut c_char, size_t) -> size_t,
+ ) -> LLVMRustResult;
+ pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char);
+ pub fn LLVMRustPrintPasses();
+ pub fn LLVMRustSetNormalizedTarget(M: &Module, triple: *const c_char);
+ pub fn LLVMRustAddAlwaysInlinePass(P: &PassManagerBuilder, AddLifetimes: bool);
+ pub fn LLVMRustRunRestrictionPass(M: &Module, syms: *const *const c_char, len: size_t);
+
+ pub fn LLVMRustOpenArchive(path: *const c_char) -> Option<&'static mut Archive>;
+ pub fn LLVMRustArchiveIteratorNew(AR: &Archive) -> &mut ArchiveIterator<'_>;
+ pub fn LLVMRustArchiveIteratorNext<'a>(
+ AIR: &ArchiveIterator<'a>,
+ ) -> Option<&'a mut ArchiveChild<'a>>;
+ pub fn LLVMRustArchiveChildName(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char;
+ pub fn LLVMRustArchiveChildData(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char;
+ pub fn LLVMRustArchiveChildFree<'a>(ACR: &'a mut ArchiveChild<'a>);
+ pub fn LLVMRustArchiveIteratorFree<'a>(AIR: &'a mut ArchiveIterator<'a>);
+ pub fn LLVMRustDestroyArchive(AR: &'static mut Archive);
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustWriteTwineToString(T: &Twine, s: &RustString);
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustUnpackOptimizationDiagnostic<'a>(
+ DI: &'a DiagnosticInfo,
+ pass_name_out: &RustString,
+ function_out: &mut Option<&'a Value>,
+ loc_line_out: &mut c_uint,
+ loc_column_out: &mut c_uint,
+ loc_filename_out: &RustString,
+ message_out: &RustString,
+ );
+
+ pub fn LLVMRustUnpackInlineAsmDiagnostic<'a>(
+ DI: &'a DiagnosticInfo,
+ level_out: &mut DiagnosticLevel,
+ cookie_out: &mut c_uint,
+ message_out: &mut Option<&'a Twine>,
+ );
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustWriteDiagnosticInfoToString(DI: &DiagnosticInfo, s: &RustString);
+ pub fn LLVMRustGetDiagInfoKind(DI: &DiagnosticInfo) -> DiagnosticKind;
+
+ pub fn LLVMRustGetSMDiagnostic<'a>(
+ DI: &'a DiagnosticInfo,
+ cookie_out: &mut c_uint,
+ ) -> &'a SMDiagnostic;
+
+ pub fn LLVMRustSetInlineAsmDiagnosticHandler(
+ C: &Context,
+ H: InlineAsmDiagHandlerTy,
+ CX: *mut c_void,
+ );
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustUnpackSMDiagnostic(
+ d: &SMDiagnostic,
+ message_out: &RustString,
+ buffer_out: &RustString,
+ level_out: &mut DiagnosticLevel,
+ loc_out: &mut c_uint,
+ ranges_out: *mut c_uint,
+ num_ranges: &mut usize,
+ ) -> bool;
+
+ pub fn LLVMRustWriteArchive(
+ Dst: *const c_char,
+ NumMembers: size_t,
+ Members: *const &RustArchiveMember<'_>,
+ WriteSymbtab: bool,
+ Kind: ArchiveKind,
+ ) -> LLVMRustResult;
+ pub fn LLVMRustArchiveMemberNew<'a>(
+ Filename: *const c_char,
+ Name: *const c_char,
+ Child: Option<&ArchiveChild<'a>>,
+ ) -> &'a mut RustArchiveMember<'a>;
+ pub fn LLVMRustArchiveMemberFree<'a>(Member: &'a mut RustArchiveMember<'a>);
+
+ pub fn LLVMRustWriteImportLibrary(
+ ImportName: *const c_char,
+ Path: *const c_char,
+ Exports: *const LLVMRustCOFFShortExport,
+ NumExports: usize,
+ Machine: u16,
+ MinGW: bool,
+ ) -> LLVMRustResult;
+
+ pub fn LLVMRustSetDataLayoutFromTargetMachine<'a>(M: &'a Module, TM: &'a TargetMachine);
+
+ pub fn LLVMRustBuildOperandBundleDef<'a>(
+ Name: *const c_char,
+ Inputs: *const &'a Value,
+ NumInputs: c_uint,
+ ) -> &'a mut OperandBundleDef<'a>;
+ pub fn LLVMRustFreeOperandBundleDef<'a>(Bundle: &'a mut OperandBundleDef<'a>);
+
+ pub fn LLVMRustPositionBuilderAtStart<'a>(B: &Builder<'a>, BB: &'a BasicBlock);
+
+ pub fn LLVMRustSetComdat<'a>(M: &'a Module, V: &'a Value, Name: *const c_char, NameLen: size_t);
+ pub fn LLVMRustSetModulePICLevel(M: &Module);
+ pub fn LLVMRustSetModulePIELevel(M: &Module);
+ pub fn LLVMRustSetModuleCodeModel(M: &Module, Model: CodeModel);
+ pub fn LLVMRustModuleBufferCreate(M: &Module) -> &'static mut ModuleBuffer;
+ pub fn LLVMRustModuleBufferPtr(p: &ModuleBuffer) -> *const u8;
+ pub fn LLVMRustModuleBufferLen(p: &ModuleBuffer) -> usize;
+ pub fn LLVMRustModuleBufferFree(p: &'static mut ModuleBuffer);
+ pub fn LLVMRustModuleCost(M: &Module) -> u64;
+
+ pub fn LLVMRustThinLTOBufferCreate(M: &Module, is_thin: bool) -> &'static mut ThinLTOBuffer;
+ pub fn LLVMRustThinLTOBufferFree(M: &'static mut ThinLTOBuffer);
+ pub fn LLVMRustThinLTOBufferPtr(M: &ThinLTOBuffer) -> *const c_char;
+ pub fn LLVMRustThinLTOBufferLen(M: &ThinLTOBuffer) -> size_t;
+ pub fn LLVMRustCreateThinLTOData(
+ Modules: *const ThinLTOModule,
+ NumModules: c_uint,
+ PreservedSymbols: *const *const c_char,
+ PreservedSymbolsLen: c_uint,
+ ) -> Option<&'static mut ThinLTOData>;
+ pub fn LLVMRustPrepareThinLTORename(
+ Data: &ThinLTOData,
+ Module: &Module,
+ Target: &TargetMachine,
+ ) -> bool;
+ pub fn LLVMRustPrepareThinLTOResolveWeak(Data: &ThinLTOData, Module: &Module) -> bool;
+ pub fn LLVMRustPrepareThinLTOInternalize(Data: &ThinLTOData, Module: &Module) -> bool;
+ pub fn LLVMRustPrepareThinLTOImport(
+ Data: &ThinLTOData,
+ Module: &Module,
+ Target: &TargetMachine,
+ ) -> bool;
+ pub fn LLVMRustFreeThinLTOData(Data: &'static mut ThinLTOData);
+ pub fn LLVMRustParseBitcodeForLTO(
+ Context: &Context,
+ Data: *const u8,
+ len: usize,
+ Identifier: *const c_char,
+ ) -> Option<&Module>;
+ pub fn LLVMRustGetBitcodeSliceFromObjectData(
+ Data: *const u8,
+ len: usize,
+ out_len: &mut usize,
+ ) -> *const u8;
+ pub fn LLVMRustThinLTOGetDICompileUnit(
+ M: &Module,
+ CU1: &mut *mut c_void,
+ CU2: &mut *mut c_void,
+ );
+ pub fn LLVMRustThinLTOPatchDICompileUnit(M: &Module, CU: *mut c_void);
+
+ pub fn LLVMRustLinkerNew(M: &Module) -> &mut Linker<'_>;
+ pub fn LLVMRustLinkerAdd(
+ linker: &Linker<'_>,
+ bytecode: *const c_char,
+ bytecode_len: usize,
+ ) -> bool;
+ pub fn LLVMRustLinkerFree<'a>(linker: &'a mut Linker<'a>);
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustComputeLTOCacheKey(
+ key_out: &RustString,
+ mod_id: *const c_char,
+ data: &ThinLTOData,
+ );
+
+ pub fn LLVMRustContextGetDiagnosticHandler(Context: &Context) -> Option<&DiagnosticHandler>;
+ pub fn LLVMRustContextSetDiagnosticHandler(
+ context: &Context,
+ diagnostic_handler: Option<&DiagnosticHandler>,
+ );
+ pub fn LLVMRustContextConfigureDiagnosticHandler(
+ context: &Context,
+ diagnostic_handler_callback: DiagnosticHandlerTy,
+ diagnostic_handler_context: *mut c_void,
+ remark_all_passes: bool,
+ remark_passes: *const *const c_char,
+ remark_passes_len: usize,
+ );
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustGetMangledName(V: &Value, out: &RustString);
+
+ pub fn LLVMRustGetElementTypeArgIndex(CallSite: &Value) -> i32;
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
new file mode 100644
index 000000000..6602a4ab8
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -0,0 +1,318 @@
+#![allow(non_snake_case)]
+
+pub use self::AtomicRmwBinOp::*;
+pub use self::CallConv::*;
+pub use self::CodeGenOptSize::*;
+pub use self::IntPredicate::*;
+pub use self::Linkage::*;
+pub use self::MetadataType::*;
+pub use self::RealPredicate::*;
+
+use libc::c_uint;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_llvm::RustString;
+use std::cell::RefCell;
+use std::ffi::{CStr, CString};
+use std::str::FromStr;
+use std::string::FromUtf8Error;
+
+pub mod archive_ro;
+pub mod diagnostic;
+mod ffi;
+
+pub use self::ffi::*;
+
+impl LLVMRustResult {
+ pub fn into_result(self) -> Result<(), ()> {
+ match self {
+ LLVMRustResult::Success => Ok(()),
+ LLVMRustResult::Failure => Err(()),
+ }
+ }
+}
+
+pub fn AddFunctionAttributes<'ll>(llfn: &'ll Value, idx: AttributePlace, attrs: &[&'ll Attribute]) {
+ unsafe {
+ LLVMRustAddFunctionAttributes(llfn, idx.as_uint(), attrs.as_ptr(), attrs.len());
+ }
+}
+
+pub fn AddCallSiteAttributes<'ll>(
+ callsite: &'ll Value,
+ idx: AttributePlace,
+ attrs: &[&'ll Attribute],
+) {
+ unsafe {
+ LLVMRustAddCallSiteAttributes(callsite, idx.as_uint(), attrs.as_ptr(), attrs.len());
+ }
+}
+
+pub fn CreateAttrStringValue<'ll>(llcx: &'ll Context, attr: &str, value: &str) -> &'ll Attribute {
+ unsafe {
+ LLVMCreateStringAttribute(
+ llcx,
+ attr.as_ptr().cast(),
+ attr.len().try_into().unwrap(),
+ value.as_ptr().cast(),
+ value.len().try_into().unwrap(),
+ )
+ }
+}
+
+pub fn CreateAttrString<'ll>(llcx: &'ll Context, attr: &str) -> &'ll Attribute {
+ unsafe {
+ LLVMCreateStringAttribute(
+ llcx,
+ attr.as_ptr().cast(),
+ attr.len().try_into().unwrap(),
+ std::ptr::null(),
+ 0,
+ )
+ }
+}
+
+pub fn CreateAlignmentAttr(llcx: &Context, bytes: u64) -> &Attribute {
+ unsafe { LLVMRustCreateAlignmentAttr(llcx, bytes) }
+}
+
+pub fn CreateDereferenceableAttr(llcx: &Context, bytes: u64) -> &Attribute {
+ unsafe { LLVMRustCreateDereferenceableAttr(llcx, bytes) }
+}
+
+pub fn CreateDereferenceableOrNullAttr(llcx: &Context, bytes: u64) -> &Attribute {
+ unsafe { LLVMRustCreateDereferenceableOrNullAttr(llcx, bytes) }
+}
+
+pub fn CreateByValAttr<'ll>(llcx: &'ll Context, ty: &'ll Type) -> &'ll Attribute {
+ unsafe { LLVMRustCreateByValAttr(llcx, ty) }
+}
+
+pub fn CreateStructRetAttr<'ll>(llcx: &'ll Context, ty: &'ll Type) -> &'ll Attribute {
+ unsafe { LLVMRustCreateStructRetAttr(llcx, ty) }
+}
+
+pub fn CreateUWTableAttr(llcx: &Context, async_: bool) -> &Attribute {
+ unsafe { LLVMRustCreateUWTableAttr(llcx, async_) }
+}
+
+pub fn CreateAllocSizeAttr(llcx: &Context, size_arg: u32) -> &Attribute {
+ unsafe { LLVMRustCreateAllocSizeAttr(llcx, size_arg) }
+}
+
+pub fn CreateAllocKindAttr(llcx: &Context, kind_arg: AllocKindFlags) -> &Attribute {
+ unsafe { LLVMRustCreateAllocKindAttr(llcx, kind_arg.bits()) }
+}
+
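+/// Where an attribute attaches: the return value, a numbered argument, or the
+/// function itself. `as_uint` mirrors LLVM's attribute-index convention:
+/// index 0 is the return value, `1 + i` is argument `i`, and `!0` (~0U) is
+/// the function itself.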
+#[derive(Copy, Clone)]
+pub enum AttributePlace {
+ ReturnValue,
+ Argument(u32),
+ Function,
+}
+
+impl AttributePlace {
+ pub fn as_uint(self) -> c_uint {
+ match self {
+ AttributePlace::ReturnValue => 0,
+ AttributePlace::Argument(i) => 1 + i,
+ AttributePlace::Function => !0,
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum CodeGenOptSize {
+ CodeGenOptSizeNone = 0,
+ CodeGenOptSizeDefault = 1,
+ CodeGenOptSizeAggressive = 2,
+}
+
+impl FromStr for ArchiveKind {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "gnu" => Ok(ArchiveKind::K_GNU),
+ "bsd" => Ok(ArchiveKind::K_BSD),
+ "darwin" => Ok(ArchiveKind::K_DARWIN),
+ "coff" => Ok(ArchiveKind::K_COFF),
+ _ => Err(()),
+ }
+ }
+}
+
+pub fn SetInstructionCallConv(instr: &Value, cc: CallConv) {
+ unsafe {
+ LLVMSetInstructionCallConv(instr, cc as c_uint);
+ }
+}
+pub fn SetFunctionCallConv(fn_: &Value, cc: CallConv) {
+ unsafe {
+ LLVMSetFunctionCallConv(fn_, cc as c_uint);
+ }
+}
+
+// Externally visible symbols that might appear in multiple codegen units need to appear in
+// their own comdat section so that the duplicates can be discarded at link time. This can
+// happen, for example, with generics when using multiple codegen units. This function simply
+// uses the value's name as the comdat name, to make sure it is in a 1-to-1 relationship with
+// the function.
+// For more details on COMDAT sections see, e.g., https://www.airs.com/blog/archives/52
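+//
+// Illustrative use (a sketch; `predefine_fn` in mono_item.rs does exactly this
+// for `LinkOnceODR`/`WeakODR` items):
+//
+//     if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR {
+//         llvm::SetUniqueComdat(self.llmod, lldecl);
+//     }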
+pub fn SetUniqueComdat(llmod: &Module, val: &Value) {
+ unsafe {
+ let name = get_value_name(val);
+ LLVMRustSetComdat(llmod, val, name.as_ptr().cast(), name.len());
+ }
+}
+
+pub fn SetUnnamedAddress(global: &Value, unnamed: UnnamedAddr) {
+ unsafe {
+ LLVMSetUnnamedAddress(global, unnamed);
+ }
+}
+
+pub fn set_thread_local_mode(global: &Value, mode: ThreadLocalMode) {
+ unsafe {
+ LLVMSetThreadLocalMode(global, mode);
+ }
+}
+
+impl AttributeKind {
+ /// Create an LLVM Attribute with no associated value.
+ pub fn create_attr(self, llcx: &Context) -> &Attribute {
+ unsafe { LLVMRustCreateAttrNoValue(llcx, self) }
+ }
+}
+
+pub fn set_section(llglobal: &Value, section_name: &str) {
+ let section_name_cstr = CString::new(section_name).expect("unexpected CString error");
+ unsafe {
+ LLVMSetSection(llglobal, section_name_cstr.as_ptr());
+ }
+}
+
+pub fn add_global<'a>(llmod: &'a Module, ty: &'a Type, name: &str) -> &'a Value {
+ let name_cstr = CString::new(name).expect("unexpected CString error");
+ unsafe { LLVMAddGlobal(llmod, ty, name_cstr.as_ptr()) }
+}
+
+pub fn set_initializer(llglobal: &Value, constant_val: &Value) {
+ unsafe {
+ LLVMSetInitializer(llglobal, constant_val);
+ }
+}
+
+pub fn set_global_constant(llglobal: &Value, is_constant: bool) {
+ unsafe {
+ LLVMSetGlobalConstant(llglobal, if is_constant { ffi::True } else { ffi::False });
+ }
+}
+
+pub fn set_linkage(llglobal: &Value, linkage: Linkage) {
+ unsafe {
+ LLVMRustSetLinkage(llglobal, linkage);
+ }
+}
+
+pub fn set_visibility(llglobal: &Value, visibility: Visibility) {
+ unsafe {
+ LLVMRustSetVisibility(llglobal, visibility);
+ }
+}
+
+pub fn set_alignment(llglobal: &Value, bytes: usize) {
+ unsafe {
+ ffi::LLVMSetAlignment(llglobal, bytes as c_uint);
+ }
+}
+
+pub fn set_comdat(llmod: &Module, llglobal: &Value, name: &str) {
+ unsafe {
+ LLVMRustSetComdat(llmod, llglobal, name.as_ptr().cast(), name.len());
+ }
+}
+
+/// Safe wrapper around `LLVMGetParam`, because segfaults are no fun.
+pub fn get_param(llfn: &Value, index: c_uint) -> &Value {
+ unsafe {
+ assert!(
+ index < LLVMCountParams(llfn),
+ "out of bounds argument access: {} out of {} arguments",
+ index,
+ LLVMCountParams(llfn)
+ );
+ LLVMGetParam(llfn, index)
+ }
+}
+
+/// Safe wrapper for `LLVMGetValueName2` into a byte slice.
+pub fn get_value_name(value: &Value) -> &[u8] {
+ unsafe {
+ let mut len = 0;
+ let data = LLVMGetValueName2(value, &mut len);
+ std::slice::from_raw_parts(data.cast(), len)
+ }
+}
+
+/// Safe wrapper for `LLVMSetValueName2` from a byte slice.
+pub fn set_value_name(value: &Value, name: &[u8]) {
+ unsafe {
+ let data = name.as_ptr().cast();
+ LLVMSetValueName2(value, data, name.len());
+ }
+}
+
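+/// Collects the bytes LLVM writes into a `RustString` and converts them to a
+/// `String`. For example (a sketch; `twine_to_string` below does exactly
+/// this):
+///
+/// ```ignore (illustrative)
+/// let s = build_string(|s| unsafe { LLVMRustWriteTwineToString(tr, s) })
+///     .expect("got a non-UTF8 Twine from LLVM");
+/// ```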
+pub fn build_string(f: impl FnOnce(&RustString)) -> Result<String, FromUtf8Error> {
+ let sr = RustString { bytes: RefCell::new(Vec::new()) };
+ f(&sr);
+ String::from_utf8(sr.bytes.into_inner())
+}
+
+pub fn build_byte_buffer(f: impl FnOnce(&RustString)) -> Vec<u8> {
+ let sr = RustString { bytes: RefCell::new(Vec::new()) };
+ f(&sr);
+ sr.bytes.into_inner()
+}
+
+pub fn twine_to_string(tr: &Twine) -> String {
+ unsafe {
+ build_string(|s| LLVMRustWriteTwineToString(tr, s)).expect("got a non-UTF8 Twine from LLVM")
+ }
+}
+
+pub fn last_error() -> Option<String> {
+ unsafe {
+ let cstr = LLVMRustGetLastError();
+ if cstr.is_null() {
+ None
+ } else {
+ let err = CStr::from_ptr(cstr).to_bytes();
+ let err = String::from_utf8_lossy(err).to_string();
+ libc::free(cstr as *mut _);
+ Some(err)
+ }
+ }
+}
+
+pub struct OperandBundleDef<'a> {
+ pub raw: &'a mut ffi::OperandBundleDef<'a>,
+}
+
+impl<'a> OperandBundleDef<'a> {
+ pub fn new(name: &str, vals: &[&'a Value]) -> Self {
+ let name = SmallCStr::new(name);
+ let def = unsafe {
+ LLVMRustBuildOperandBundleDef(name.as_ptr(), vals.as_ptr(), vals.len() as c_uint)
+ };
+ OperandBundleDef { raw: def }
+ }
+}
+
+impl Drop for OperandBundleDef<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ LLVMRustFreeOperandBundleDef(&mut *(self.raw as *mut _));
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
new file mode 100644
index 000000000..a0a640473
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -0,0 +1,562 @@
+use crate::back::write::create_informational_target_machine;
+use crate::{llvm, llvm_util};
+use libc::c_int;
+use libloading::Library;
+use rustc_codegen_ssa::target_features::{
+ supported_target_features, tied_target_features, RUSTC_SPECIFIC_FEATURES,
+};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_fs_util::path_to_c_string;
+use rustc_middle::bug;
+use rustc_session::config::PrintRequest;
+use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::{MergeFunctions, PanicStrategy};
+use smallvec::{smallvec, SmallVec};
+use std::ffi::{CStr, CString};
+use tracing::debug;
+
+use std::mem;
+use std::path::Path;
+use std::ptr;
+use std::slice;
+use std::str;
+use std::sync::Once;
+
+static INIT: Once = Once::new();
+
+pub(crate) fn init(sess: &Session) {
+ unsafe {
+ // Before we touch LLVM, make sure that multithreading is enabled.
+ if llvm::LLVMIsMultithreaded() != 1 {
+ bug!("LLVM compiled without support for threads");
+ }
+ INIT.call_once(|| {
+ configure_llvm(sess);
+ });
+ }
+}
+
+fn require_inited() {
+ if !INIT.is_completed() {
+ bug!("LLVM is not initialized");
+ }
+}
+
+unsafe fn configure_llvm(sess: &Session) {
+ let n_args = sess.opts.cg.llvm_args.len() + sess.target.llvm_args.len();
+ let mut llvm_c_strs = Vec::with_capacity(n_args + 1);
+ let mut llvm_args = Vec::with_capacity(n_args + 1);
+
+ llvm::LLVMRustInstallFatalErrorHandler();
+ // On Windows, an LLVM assertion will open an Abort/Retry/Ignore dialog
+ // box for the purpose of launching a debugger. However, on CI this will
+ // cause it to hang until it times out, which can take several hours.
+ if std::env::var_os("CI").is_some() {
+ llvm::LLVMRustDisableSystemDialogsOnCrash();
+ }
+
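+ // Returns the name an LLVM option is keyed on, so user-specified flags can
+ // be matched against the defaults added below; e.g. both
+ // "-enable-machine-outliner=never" and "-enable-machine-outliner" map to
+ // "-enable-machine-outliner".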
+ fn llvm_arg_to_arg_name(full_arg: &str) -> &str {
+ full_arg.trim().split(|c: char| c == '=' || c.is_whitespace()).next().unwrap_or("")
+ }
+
+ let cg_opts = sess.opts.cg.llvm_args.iter().map(AsRef::as_ref);
+ let tg_opts = sess.target.llvm_args.iter().map(AsRef::as_ref);
+ let sess_args = cg_opts.chain(tg_opts);
+
+ let user_specified_args: FxHashSet<_> =
+ sess_args.clone().map(|s| llvm_arg_to_arg_name(s)).filter(|s| !s.is_empty()).collect();
+
+ {
+ // This adds the given argument to LLVM. Unless `force` is true,
+ // user-specified arguments are *not* overridden.
+ let mut add = |arg: &str, force: bool| {
+ if force || !user_specified_args.contains(llvm_arg_to_arg_name(arg)) {
+ let s = CString::new(arg).unwrap();
+ llvm_args.push(s.as_ptr());
+ llvm_c_strs.push(s);
+ }
+ };
+ // Set the LLVM "program name" to make usage and invalid-argument messages clearer.
+ add("rustc -Cllvm-args=\"...\" with", true);
+ if sess.time_llvm_passes() {
+ add("-time-passes", false);
+ }
+ if sess.print_llvm_passes() {
+ add("-debug-pass=Structure", false);
+ }
+ if sess.target.generate_arange_section
+ && !sess.opts.unstable_opts.no_generate_arange_section
+ {
+ add("-generate-arange-section", false);
+ }
+
+ // Disable the machine outliner by default in LLVM versions 11 and 12,
+ // where it leads to miscompilation.
+ //
+ // Ref:
+ // - https://github.com/rust-lang/rust/issues/85351
+ // - https://reviews.llvm.org/D103167
+ if llvm_util::get_version() < (13, 0, 0) {
+ add("-enable-machine-outliner=never", false);
+ }
+
+ match sess.opts.unstable_opts.merge_functions.unwrap_or(sess.target.merge_functions) {
+ MergeFunctions::Disabled | MergeFunctions::Trampolines => {}
+ MergeFunctions::Aliases => {
+ add("-mergefunc-use-aliases", false);
+ }
+ }
+
+ if sess.target.os == "emscripten" && sess.panic_strategy() == PanicStrategy::Unwind {
+ add("-enable-emscripten-cxx-exceptions", false);
+ }
+
+ // HACK(eddyb) LLVM inserts `llvm.assume` calls to preserve align attributes
+ // during inlining. Unfortunately these may block other optimizations.
+ add("-preserve-alignment-assumptions-during-inlining=false", false);
+
+ // Use non-zero `import-instr-limit` multiplier for cold callsites.
+ add("-import-cold-multiplier=0.1", false);
+
+ for arg in sess_args {
+ add(&(*arg), true);
+ }
+ }
+
+ if sess.opts.unstable_opts.llvm_time_trace {
+ llvm::LLVMTimeTraceProfilerInitialize();
+ }
+
+ llvm::LLVMInitializePasses();
+
+ // Use the legacy plugin registration if we don't use the new pass manager
+ if !should_use_new_llvm_pass_manager(
+ &sess.opts.unstable_opts.new_llvm_pass_manager,
+ &sess.target.arch,
+ ) {
+ // Register LLVM plugins by loading them into the compiler process.
+ for plugin in &sess.opts.unstable_opts.llvm_plugins {
+ let lib = Library::new(plugin).unwrap_or_else(|e| bug!("couldn't load plugin: {}", e));
+ debug!("LLVM plugin loaded successfully {:?} ({})", lib, plugin);
+
+ // Intentionally leak the dynamic library. We can't ever unload it
+ // since the library can make things that will live arbitrarily long.
+ mem::forget(lib);
+ }
+ }
+
+ rustc_llvm::initialize_available_targets();
+
+ llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr());
+}
+
+pub fn time_trace_profiler_finish(file_name: &Path) {
+ unsafe {
+ let file_name = path_to_c_string(file_name);
+ llvm::LLVMTimeTraceProfilerFinish(file_name.as_ptr());
+ }
+}
+
+// WARNING: the features after applying `to_llvm_features` must be known
+// to LLVM or the feature detection code will walk past the end of the feature
+// array, leading to crashes.
+//
+// To find a list of LLVM's names, check llvm-project/llvm/include/llvm/Support/*TargetParser.def
+// where the * matches the architecture's name.
+// Beware not to use the LLVM GitHub project for this; check the git submodule
+// found in src/llvm-project instead.
+// Note, though, that Rust can also be built with an external precompiled version
+// of LLVM, which might lead to failures if the oldest tested / supported LLVM
+// version doesn't yet support the relevant intrinsics.
+pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]> {
+ let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch };
+ match (arch, s) {
+ ("x86", "sse4.2") => {
+ if get_version() >= (14, 0, 0) {
+ smallvec!["sse4.2", "crc32"]
+ } else {
+ smallvec!["sse4.2"]
+ }
+ }
+ ("x86", "pclmulqdq") => smallvec!["pclmul"],
+ ("x86", "rdrand") => smallvec!["rdrnd"],
+ ("x86", "bmi1") => smallvec!["bmi"],
+ ("x86", "cmpxchg16b") => smallvec!["cx16"],
+ ("x86", "avx512vaes") => smallvec!["vaes"],
+ ("x86", "avx512gfni") => smallvec!["gfni"],
+ ("x86", "avx512vpclmulqdq") => smallvec!["vpclmulqdq"],
+ ("aarch64", "rcpc2") => smallvec!["rcpc-immo"],
+ ("aarch64", "dpb") => smallvec!["ccpp"],
+ ("aarch64", "dpb2") => smallvec!["ccdp"],
+ ("aarch64", "frintts") => smallvec!["fptoint"],
+ ("aarch64", "fcma") => smallvec!["complxnum"],
+ ("aarch64", "pmuv3") => smallvec!["perfmon"],
+ ("aarch64", "paca") => smallvec!["pauth"],
+ ("aarch64", "pacg") => smallvec!["pauth"],
+ // Rust ties fp and neon together. In LLVM neon implicitly enables fp,
+ // but we manually enable neon when a feature only implicitly enables fp
+ ("aarch64", "f32mm") => smallvec!["f32mm", "neon"],
+ ("aarch64", "f64mm") => smallvec!["f64mm", "neon"],
+ ("aarch64", "fhm") => smallvec!["fp16fml", "neon"],
+ ("aarch64", "fp16") => smallvec!["fullfp16", "neon"],
+ ("aarch64", "jsconv") => smallvec!["jsconv", "neon"],
+ ("aarch64", "sve") => smallvec!["sve", "neon"],
+ ("aarch64", "sve2") => smallvec!["sve2", "neon"],
+ ("aarch64", "sve2-aes") => smallvec!["sve2-aes", "neon"],
+ ("aarch64", "sve2-sm4") => smallvec!["sve2-sm4", "neon"],
+ ("aarch64", "sve2-sha3") => smallvec!["sve2-sha3", "neon"],
+ ("aarch64", "sve2-bitperm") => smallvec!["sve2-bitperm", "neon"],
+ (_, s) => smallvec![s],
+ }
+}
+
+// Given a map from target features to whether they are enabled or disabled, ensure that
+// only valid combinations are allowed.
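+//
+// For example (a sketch; on aarch64 `paca` and `pacg` are tied, as both map
+// to LLVM's `pauth`):
+//
+//     let mut features = FxHashMap::default();
+//     features.insert("paca", true);
+//     features.insert("pacg", false);
+//     // Returns `Some(["paca", "pacg"])` so the caller can report an error.
+//     assert!(check_tied_features(sess, &features).is_some());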
+pub fn check_tied_features(
+ sess: &Session,
+ features: &FxHashMap<&str, bool>,
+) -> Option<&'static [&'static str]> {
+ if !features.is_empty() {
+ for tied in tied_target_features(sess) {
+ // Tied features must be set to the same value, or not set at all
+ let mut tied_iter = tied.iter();
+ let enabled = features.get(tied_iter.next().unwrap());
+ if tied_iter.any(|f| enabled != features.get(f)) {
+ return Some(tied);
+ }
+ }
+ }
+ return None;
+}
+
+// Used to generate cfg variables and apply features.
+// Must express features in the way Rust understands them.
+pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
+ let target_machine = create_informational_target_machine(sess);
+ let mut features: Vec<Symbol> = supported_target_features(sess)
+ .iter()
+ .filter_map(|&(feature, gate)| {
+ if sess.is_nightly_build() || allow_unstable || gate.is_none() {
+ Some(feature)
+ } else {
+ None
+ }
+ })
+ .filter(|feature| {
+ // check that all features in a given smallvec are enabled
+ for llvm_feature in to_llvm_features(sess, feature) {
+ let cstr = SmallCStr::new(llvm_feature);
+ if !unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) } {
+ return false;
+ }
+ }
+ true
+ })
+ .map(|feature| Symbol::intern(feature))
+ .collect();
+
+ // LLVM 14 changed the ABI for i128 arguments to __float/__fix builtins on Win64
+ // (see https://reviews.llvm.org/D110413). This unstable target feature is intended for use
+ // by compiler-builtins, to export the builtins with the expected, LLVM-version-dependent ABI.
+ // The target feature can be dropped once we no longer support older LLVM versions.
+ if sess.is_nightly_build() && get_version() >= (14, 0, 0) {
+ features.push(Symbol::intern("llvm14-builtins-abi"));
+ }
+ features
+}
+
+pub fn print_version() {
+ let (major, minor, patch) = get_version();
+ println!("LLVM version: {}.{}.{}", major, minor, patch);
+}
+
+pub fn get_version() -> (u32, u32, u32) {
+ // Can be called without initializing LLVM
+ unsafe {
+ (llvm::LLVMRustVersionMajor(), llvm::LLVMRustVersionMinor(), llvm::LLVMRustVersionPatch())
+ }
+}
+
+pub fn print_passes() {
+ // Can be called without initializing LLVM
+ unsafe {
+ llvm::LLVMRustPrintPasses();
+ }
+}
+
+fn llvm_target_features(tm: &llvm::TargetMachine) -> Vec<(&str, &str)> {
+ let len = unsafe { llvm::LLVMRustGetTargetFeaturesCount(tm) };
+ let mut ret = Vec::with_capacity(len);
+ for i in 0..len {
+ unsafe {
+ let mut feature = ptr::null();
+ let mut desc = ptr::null();
+ llvm::LLVMRustGetTargetFeature(tm, i, &mut feature, &mut desc);
+ if feature.is_null() || desc.is_null() {
+ bug!("LLVM returned a `null` target feature string");
+ }
+ let feature = CStr::from_ptr(feature).to_str().unwrap_or_else(|e| {
+ bug!("LLVM returned a non-utf8 feature string: {}", e);
+ });
+ let desc = CStr::from_ptr(desc).to_str().unwrap_or_else(|e| {
+ bug!("LLVM returned a non-utf8 feature string: {}", e);
+ });
+ ret.push((feature, desc));
+ }
+ }
+ ret
+}
+
+fn print_target_features(sess: &Session, tm: &llvm::TargetMachine) {
+ let mut target_features = llvm_target_features(tm);
+ let mut rustc_target_features = supported_target_features(sess)
+ .iter()
+ .filter_map(|(feature, _gate)| {
+ for llvm_feature in to_llvm_features(sess, *feature) {
+ // LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these strings.
+ match target_features.binary_search_by_key(&llvm_feature, |(f, _d)| f).ok().map(
+ |index| {
+ let (_f, desc) = target_features.remove(index);
+ (*feature, desc)
+ },
+ ) {
+ Some(v) => return Some(v),
+ None => {}
+ }
+ }
+ None
+ })
+ .collect::<Vec<_>>();
+ rustc_target_features.extend_from_slice(&[(
+ "crt-static",
+ "Enables C Run-time Libraries to be statically linked",
+ )]);
+ let max_feature_len = target_features
+ .iter()
+ .chain(rustc_target_features.iter())
+ .map(|(feature, _desc)| feature.len())
+ .max()
+ .unwrap_or(0);
+
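+ // `{1:0$}` pads each feature name to `max_feature_len` columns so the
+ // descriptions line up.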
+ println!("Features supported by rustc for this target:");
+ for (feature, desc) in &rustc_target_features {
+ println!(" {1:0$} - {2}.", max_feature_len, feature, desc);
+ }
+ println!("\nCode-generation features supported by LLVM for this target:");
+ for (feature, desc) in &target_features {
+ println!(" {1:0$} - {2}.", max_feature_len, feature, desc);
+ }
+ if target_features.is_empty() {
+ println!(" Target features listing is not supported by this LLVM version.");
+ }
+ println!("\nUse +feature to enable a feature, or -feature to disable it.");
+ println!("For example, rustc -C target-cpu=mycpu -C target-feature=+feature1,-feature2\n");
+ println!("Code-generation features cannot be used in cfg or #[target_feature],");
+ println!("and may be renamed or removed in a future version of LLVM or rustc.\n");
+}
+
+pub(crate) fn print(req: PrintRequest, sess: &Session) {
+ require_inited();
+ let tm = create_informational_target_machine(sess);
+ match req {
+ PrintRequest::TargetCPUs => unsafe { llvm::LLVMRustPrintTargetCPUs(tm) },
+ PrintRequest::TargetFeatures => print_target_features(sess, tm),
+ _ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
+ }
+}
+
+fn handle_native(name: &str) -> &str {
+ if name != "native" {
+ return name;
+ }
+
+ unsafe {
+ let mut len = 0;
+ let ptr = llvm::LLVMRustGetHostCPUName(&mut len);
+ str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap()
+ }
+}
+
+pub fn target_cpu(sess: &Session) -> &str {
+ match sess.opts.cg.target_cpu {
+ Some(ref name) => handle_native(name),
+ None => handle_native(sess.target.cpu.as_ref()),
+ }
+}
+
+/// The list of LLVM features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`,
+/// `--target` and similar).
+pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<String> {
+ // Features that come earlier are overridden by conflicting features later in the string.
+ // Typically we'll want more explicit settings to override the implicit ones, so:
+ //
+ // * Features from -Ctarget-cpu=*; are overridden by [^1]
+ // * Features implied by --target; are overridden by
+ // * Features from -Ctarget-feature; are overridden by
+ // * function specific features.
+ //
+ // [^1]: target-cpu=native is handled here, other target-cpu values are handled implicitly
+ // through LLVM TargetMachine implementation.
+ //
+ // FIXME(nagisa): it isn't clear what the best interaction between features implied by
+ // `-Ctarget-cpu` and `--target` is. On one hand, you'd expect CLI arguments to always
+ // override anything that's implicit, so e.g. when there's no `--target` flag, features implied
+ // by the host target are overridden by `-Ctarget-cpu=*`. On the other hand, what about when
+ // both `--target` and `-Ctarget-cpu=*` are specified? Both then imply some target features and
+ // both flags are specified by the user on the CLI. It isn't as clear-cut which order of
+ // precedence should be taken in cases like these.
+ let mut features = vec![];
+
+ // -Ctarget-cpu=native
+ match sess.opts.cg.target_cpu {
+ Some(ref s) if s == "native" => {
+ let features_string = unsafe {
+ let ptr = llvm::LLVMGetHostCPUFeatures();
+ let features_string = if !ptr.is_null() {
+ CStr::from_ptr(ptr)
+ .to_str()
+ .unwrap_or_else(|e| {
+ bug!("LLVM returned a non-utf8 features string: {}", e);
+ })
+ .to_owned()
+ } else {
+ bug!("could not allocate host CPU features, LLVM returned a `null` string");
+ };
+
+ llvm::LLVMDisposeMessage(ptr);
+
+ features_string
+ };
+ features.extend(features_string.split(',').map(String::from));
+ }
+ Some(_) | None => {}
+ };
+
+ // Features implied by an implicit or explicit `--target`.
+ features.extend(
+ sess.target
+ .features
+ .split(',')
+ .filter(|v| !v.is_empty() && backend_feature_name(v).is_some())
+ .map(String::from),
+ );
+
+ // -Ctarget-feature
+ let supported_features = supported_target_features(sess);
+ let mut featsmap = FxHashMap::default();
+ let feats = sess
+ .opts
+ .cg
+ .target_feature
+ .split(',')
+ .filter_map(|s| {
+ let enable_disable = match s.chars().next() {
+ None => return None,
+ Some(c @ '+' | c @ '-') => c,
+ Some(_) => {
+ if diagnostics {
+ let mut diag = sess.struct_warn(&format!(
+ "unknown feature specified for `-Ctarget-feature`: `{}`",
+ s
+ ));
+ diag.note("features must begin with a `+` to enable or `-` to disable it");
+ diag.emit();
+ }
+ return None;
+ }
+ };
+
+ let feature = backend_feature_name(s)?;
+ // Warn against use of LLVM specific feature names on the CLI.
+ if diagnostics && !supported_features.iter().any(|&(v, _)| v == feature) {
+ let rust_feature = supported_features.iter().find_map(|&(rust_feature, _)| {
+ let llvm_features = to_llvm_features(sess, rust_feature);
+ if llvm_features.contains(&feature) && !llvm_features.contains(&rust_feature) {
+ Some(rust_feature)
+ } else {
+ None
+ }
+ });
+ let mut diag = sess.struct_warn(&format!(
+ "unknown feature specified for `-Ctarget-feature`: `{}`",
+ feature
+ ));
+ diag.note("it is still passed through to the codegen backend");
+ if let Some(rust_feature) = rust_feature {
+ diag.help(&format!("you might have meant: `{}`", rust_feature));
+ } else {
+ diag.note("consider filing a feature request");
+ }
+ diag.emit();
+ }
+
+ if diagnostics {
+ // FIXME(nagisa): figure out how to not allocate a full hashset here.
+ featsmap.insert(feature, enable_disable == '+');
+ }
+
+ // rustc-specific features do not get passed down to LLVM…
+ if RUSTC_SPECIFIC_FEATURES.contains(&feature) {
+ return None;
+ }
+ // ... otherwise though we run through `to_llvm_features` when
+ // passing requests down to LLVM. This means that all in-language
+ // features also work on the command line instead of having two
+ // different names when the LLVM name and the Rust name differ.
+ Some(
+ to_llvm_features(sess, feature)
+ .into_iter()
+ .map(move |f| format!("{}{}", enable_disable, f)),
+ )
+ })
+ .flatten();
+ features.extend(feats);
+
+ if diagnostics && let Some(f) = check_tied_features(sess, &featsmap) {
+ sess.err(&format!(
+ "target features {} must all be enabled or disabled together",
+ f.join(", ")
+ ));
+ }
+
+ features
+}
+
+/// Returns a feature name for the given `+feature` or `-feature` string.
+///
+/// Only allows features that are backend specific (i.e., not [`RUSTC_SPECIFIC_FEATURES`]).
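+///
+/// For example, `backend_feature_name("+sse4.2")` returns `Some("sse4.2")`,
+/// while `backend_feature_name("+crt-static")` returns `None`, since
+/// `crt-static` is a rustc-specific feature that is never passed to LLVM.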
+fn backend_feature_name(s: &str) -> Option<&str> {
+ // features must start with a `+` or `-`.
+ let feature = s.strip_prefix(&['+', '-'][..]).unwrap_or_else(|| {
+ bug!("target feature `{}` must begin with a `+` or `-`", s);
+ });
+ // Rustc-specific feature requests like `+crt-static` or `-crt-static`
+ // are not passed down to LLVM.
+ if RUSTC_SPECIFIC_FEATURES.contains(&feature) {
+ return None;
+ }
+ Some(feature)
+}
+
+pub fn tune_cpu(sess: &Session) -> Option<&str> {
+ let name = sess.opts.unstable_opts.tune_cpu.as_ref()?;
+ Some(handle_native(name))
+}
+
+pub(crate) fn should_use_new_llvm_pass_manager(user_opt: &Option<bool>, target_arch: &str) -> bool {
+ // The new pass manager is enabled by default for LLVM >= 13.
+ // This matches Clang, which also enables it since Clang 13.
+
+ // Since LLVM 15, the legacy pass manager is no longer supported.
+ if llvm_util::get_version() >= (15, 0, 0) {
+ return true;
+ }
+
+ // There are some perf issues with the new pass manager when targeting
+ // s390x with LLVM 13, so enable the new pass manager only with LLVM 14.
+ // See https://github.com/rust-lang/rust/issues/89609.
+ let min_version = if target_arch == "s390x" { 14 } else { 13 };
+ user_opt.unwrap_or_else(|| llvm_util::get_version() >= (min_version, 0, 0))
+}
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
new file mode 100644
index 000000000..6e9428485
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -0,0 +1,150 @@
+use crate::attributes;
+use crate::base;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::type_of::LayoutLlvmExt;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+pub use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
+use rustc_middle::ty::{self, Instance, TypeVisitable};
+use rustc_session::config::CrateType;
+use rustc_target::spec::RelocModel;
+use tracing::debug;
+
+impl<'tcx> PreDefineMethods<'tcx> for CodegenCx<'_, 'tcx> {
+ fn predefine_static(
+ &self,
+ def_id: DefId,
+ linkage: Linkage,
+ visibility: Visibility,
+ symbol_name: &str,
+ ) {
+ let instance = Instance::mono(self.tcx, def_id);
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let llty = self.layout_of(ty).llvm_type(self);
+
+ let g = self.define_global(symbol_name, llty).unwrap_or_else(|| {
+ self.sess().span_fatal(
+ self.tcx.def_span(def_id),
+ &format!("symbol `{}` is already defined", symbol_name),
+ )
+ });
+
+ unsafe {
+ llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage));
+ llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility));
+ if self.should_assume_dso_local(g, false) {
+ llvm::LLVMRustSetDSOLocal(g, true);
+ }
+ }
+
+ self.instances.borrow_mut().insert(instance, g);
+ }
+
+ fn predefine_fn(
+ &self,
+ instance: Instance<'tcx>,
+ linkage: Linkage,
+ visibility: Visibility,
+ symbol_name: &str,
+ ) {
+ assert!(!instance.substs.needs_infer());
+
+ let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
+ let lldecl = self.declare_fn(symbol_name, fn_abi);
+ unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) };
+ let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+ base::set_link_section(lldecl, attrs);
+ if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR {
+ llvm::SetUniqueComdat(self.llmod, lldecl);
+ }
+
+ // If we're compiling the compiler-builtins crate, e.g., the equivalent of
+ // compiler-rt, then we want to implicitly compile everything with hidden
+ // visibility as we're going to link this object all over the place but
+ // don't want the symbols to get exported.
+ if linkage != Linkage::Internal
+ && linkage != Linkage::Private
+ && self.tcx.is_compiler_builtins(LOCAL_CRATE)
+ {
+ unsafe {
+ llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden);
+ }
+ } else {
+ unsafe {
+ llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility));
+ }
+ }
+
+ debug!("predefine_fn: instance = {:?}", instance);
+
+ attributes::from_fn_attrs(self, lldecl, instance);
+
+ unsafe {
+ if self.should_assume_dso_local(lldecl, false) {
+ llvm::LLVMRustSetDSOLocal(lldecl, true);
+ }
+ }
+
+ self.instances.borrow_mut().insert(instance, lldecl);
+ }
+}
+
+impl CodegenCx<'_, '_> {
+ /// Whether a definition or declaration can be assumed to be local to a group of
+ /// libraries that form a single DSO or executable.
+ pub(crate) unsafe fn should_assume_dso_local(
+ &self,
+ llval: &llvm::Value,
+ is_declaration: bool,
+ ) -> bool {
+ let linkage = llvm::LLVMRustGetLinkage(llval);
+ let visibility = llvm::LLVMRustGetVisibility(llval);
+
+ if matches!(linkage, llvm::Linkage::InternalLinkage | llvm::Linkage::PrivateLinkage) {
+ return true;
+ }
+
+ if visibility != llvm::Visibility::Default && linkage != llvm::Linkage::ExternalWeakLinkage
+ {
+ return true;
+ }
+
+ // Symbols from executables can't really be imported any further.
+ let all_exe = self.tcx.sess.crate_types().iter().all(|ty| *ty == CrateType::Executable);
+ let is_declaration_for_linker =
+ is_declaration || linkage == llvm::Linkage::AvailableExternallyLinkage;
+ if all_exe && !is_declaration_for_linker {
+ return true;
+ }
+
+ // PowerPC64 prefers TOC indirection to avoid copy relocations.
+ if matches!(&*self.tcx.sess.target.arch, "powerpc64" | "powerpc64le") {
+ return false;
+ }
+
+ // Thread-local variables generally don't support copy relocations.
+ let is_thread_local_var = llvm::LLVMIsAGlobalVariable(llval)
+ .map(|v| llvm::LLVMIsThreadLocal(v) == llvm::True)
+ .unwrap_or(false);
+ if is_thread_local_var {
+ return false;
+ }
+
+ // Match clang by only supporting COFF and ELF for now.
+ if self.tcx.sess.target.is_like_osx {
+ return false;
+ }
+
+ // Static relocation model should force copy relocations everywhere.
+ if self.tcx.sess.relocation_model() == RelocModel::Static {
+ return true;
+ }
+
+ // With pie relocation model calls of functions defined in the translation
+ // unit can use copy relocations.
+ self.tcx.sess.relocation_model() == RelocModel::Pie && !is_declaration
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
new file mode 100644
index 000000000..eeb38d4ec
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -0,0 +1,319 @@
+pub use crate::llvm::Type;
+
+use crate::abi::{FnAbiLlvmExt, LlvmType};
+use crate::common;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::llvm::{Bool, False, True};
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use rustc_codegen_ssa::common::TypeKind;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_middle::bug;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
+use rustc_target::abi::{AddressSpace, Align, Integer, Size};
+
+use std::fmt;
+use std::ptr;
+
+use libc::{c_char, c_uint};
+
+impl PartialEq for Type {
+ fn eq(&self, other: &Self) -> bool {
+ ptr::eq(self, other)
+ }
+}
+
+impl fmt::Debug for Type {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(
+ &llvm::build_string(|s| unsafe {
+ llvm::LLVMRustWriteTypeToString(self, s);
+ })
+ .expect("non-UTF8 type description from LLVM"),
+ )
+ }
+}
+
+impl<'ll> CodegenCx<'ll, '_> {
+ pub(crate) fn type_named_struct(&self, name: &str) -> &'ll Type {
+ let name = SmallCStr::new(name);
+ unsafe { llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr()) }
+ }
+
+ pub(crate) fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) {
+ unsafe { llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed as Bool) }
+ }
+
+ pub(crate) fn type_void(&self) -> &'ll Type {
+ unsafe { llvm::LLVMVoidTypeInContext(self.llcx) }
+ }
+
+ pub(crate) fn type_metadata(&self) -> &'ll Type {
+ unsafe { llvm::LLVMRustMetadataTypeInContext(self.llcx) }
+ }
+
+ /// Creates an integer type with the given number of bits, e.g., i24.
+ pub(crate) fn type_ix(&self, num_bits: u64) -> &'ll Type {
+ unsafe { llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) }
+ }
+
+ pub(crate) fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+ unsafe { llvm::LLVMVectorType(ty, len as c_uint) }
+ }
+
+ pub(crate) fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> {
+ unsafe {
+ let n_args = llvm::LLVMCountParamTypes(ty) as usize;
+ let mut args = Vec::with_capacity(n_args);
+ llvm::LLVMGetParamTypes(ty, args.as_mut_ptr());
+ args.set_len(n_args);
+ args
+ }
+ }
+
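+ /// `bool` is `i8` in memory; it is only `i1` as an immediate (see
+ /// `immediate_llvm_type` in type_of.rs).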
+ pub(crate) fn type_bool(&self) -> &'ll Type {
+ self.type_i8()
+ }
+
+ pub(crate) fn type_int_from_ty(&self, t: ty::IntTy) -> &'ll Type {
+ match t {
+ ty::IntTy::Isize => self.type_isize(),
+ ty::IntTy::I8 => self.type_i8(),
+ ty::IntTy::I16 => self.type_i16(),
+ ty::IntTy::I32 => self.type_i32(),
+ ty::IntTy::I64 => self.type_i64(),
+ ty::IntTy::I128 => self.type_i128(),
+ }
+ }
+
+ pub(crate) fn type_uint_from_ty(&self, t: ty::UintTy) -> &'ll Type {
+ match t {
+ ty::UintTy::Usize => self.type_isize(),
+ ty::UintTy::U8 => self.type_i8(),
+ ty::UintTy::U16 => self.type_i16(),
+ ty::UintTy::U32 => self.type_i32(),
+ ty::UintTy::U64 => self.type_i64(),
+ ty::UintTy::U128 => self.type_i128(),
+ }
+ }
+
+ pub(crate) fn type_float_from_ty(&self, t: ty::FloatTy) -> &'ll Type {
+ match t {
+ ty::FloatTy::F32 => self.type_f32(),
+ ty::FloatTy::F64 => self.type_f64(),
+ }
+ }
+
+ pub(crate) fn type_pointee_for_align(&self, align: Align) -> &'ll Type {
+ // FIXME(eddyb) We could find a better approximation if ity.align < align.
+ let ity = Integer::approximate_align(self, align);
+ self.type_from_integer(ity)
+ }
+
+ /// Return a LLVM type that has at most the required alignment,
+ /// and exactly the required size, as a best-effort padding array.
+ pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
+ let unit = Integer::approximate_align(self, align);
+ let size = size.bytes();
+ let unit_size = unit.size().bytes();
+ assert_eq!(size % unit_size, 0);
+ self.type_array(self.type_from_integer(unit), size / unit_size)
+ }
+
+ pub(crate) fn type_variadic_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type {
+ unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) }
+ }
+
+ pub(crate) fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+ unsafe { llvm::LLVMRustArrayType(ty, len) }
+ }
+}
+
+impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn type_i1(&self) -> &'ll Type {
+ unsafe { llvm::LLVMInt1TypeInContext(self.llcx) }
+ }
+
+ fn type_i8(&self) -> &'ll Type {
+ unsafe { llvm::LLVMInt8TypeInContext(self.llcx) }
+ }
+
+ fn type_i16(&self) -> &'ll Type {
+ unsafe { llvm::LLVMInt16TypeInContext(self.llcx) }
+ }
+
+ fn type_i32(&self) -> &'ll Type {
+ unsafe { llvm::LLVMInt32TypeInContext(self.llcx) }
+ }
+
+ fn type_i64(&self) -> &'ll Type {
+ unsafe { llvm::LLVMInt64TypeInContext(self.llcx) }
+ }
+
+ fn type_i128(&self) -> &'ll Type {
+ unsafe { llvm::LLVMIntTypeInContext(self.llcx, 128) }
+ }
+
+ fn type_isize(&self) -> &'ll Type {
+ self.isize_ty
+ }
+
+ fn type_f32(&self) -> &'ll Type {
+ unsafe { llvm::LLVMFloatTypeInContext(self.llcx) }
+ }
+
+ fn type_f64(&self) -> &'ll Type {
+ unsafe { llvm::LLVMDoubleTypeInContext(self.llcx) }
+ }
+
+ fn type_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type {
+ unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, False) }
+ }
+
+ fn type_struct(&self, els: &[&'ll Type], packed: bool) -> &'ll Type {
+ unsafe {
+ llvm::LLVMStructTypeInContext(
+ self.llcx,
+ els.as_ptr(),
+ els.len() as c_uint,
+ packed as Bool,
+ )
+ }
+ }
+
+ fn type_kind(&self, ty: &'ll Type) -> TypeKind {
+ unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() }
+ }
+
+ fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
+ assert_ne!(
+ self.type_kind(ty),
+ TypeKind::Function,
+ "don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense"
+ );
+ ty.ptr_to(AddressSpace::DATA)
+ }
+
+ fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type {
+ ty.ptr_to(address_space)
+ }
+
+ fn element_type(&self, ty: &'ll Type) -> &'ll Type {
+ match self.type_kind(ty) {
+ TypeKind::Array | TypeKind::Vector => unsafe { llvm::LLVMGetElementType(ty) },
+ TypeKind::Pointer => bug!("element_type is not supported for opaque pointers"),
+ other => bug!("element_type called on unsupported type {:?}", other),
+ }
+ }
+
+ fn vector_length(&self, ty: &'ll Type) -> usize {
+ unsafe { llvm::LLVMGetVectorSize(ty) as usize }
+ }
+
+ fn float_width(&self, ty: &'ll Type) -> usize {
+ match self.type_kind(ty) {
+ TypeKind::Float => 32,
+ TypeKind::Double => 64,
+ TypeKind::X86_FP80 => 80,
+ TypeKind::FP128 | TypeKind::PPC_FP128 => 128,
+ _ => bug!("llvm_float_width called on a non-float type"),
+ }
+ }
+
+ fn int_width(&self, ty: &'ll Type) -> u64 {
+ unsafe { llvm::LLVMGetIntTypeWidth(ty) as u64 }
+ }
+
+ fn val_ty(&self, v: &'ll Value) -> &'ll Type {
+ common::val_ty(v)
+ }
+}
+
+impl Type {
+ pub fn i8_llcx(llcx: &llvm::Context) -> &Type {
+ unsafe { llvm::LLVMInt8TypeInContext(llcx) }
+ }
+
+ // Creates an integer type with the given number of bits, e.g., i24
+ pub fn ix_llcx(llcx: &llvm::Context, num_bits: u64) -> &Type {
+ unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) }
+ }
+
+ pub fn i8p_llcx(llcx: &llvm::Context) -> &Type {
+ Type::i8_llcx(llcx).ptr_to(AddressSpace::DATA)
+ }
+
+ fn ptr_to(&self, address_space: AddressSpace) -> &Type {
+ unsafe { llvm::LLVMPointerType(self, address_space.0) }
+ }
+}
+
+impl<'ll, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn backend_type(&self, layout: TyAndLayout<'tcx>) -> &'ll Type {
+ layout.llvm_type(self)
+ }
+ fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> &'ll Type {
+ layout.immediate_llvm_type(self)
+ }
+ fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool {
+ layout.is_llvm_immediate()
+ }
+ fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
+ layout.is_llvm_scalar_pair()
+ }
+ fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
+ layout.llvm_field_index(self, index)
+ }
+ fn scalar_pair_element_backend_type(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ index: usize,
+ immediate: bool,
+ ) -> &'ll Type {
+ layout.scalar_pair_element_llvm_type(self, index, immediate)
+ }
+ fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type {
+ ty.llvm_type(self)
+ }
+ fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
+ fn_abi.llvm_type(self)
+ }
+ fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
+ fn_abi.ptr_to_llvm_type(self)
+ }
+ fn reg_backend_type(&self, ty: &Reg) -> &'ll Type {
+ ty.llvm_type(self)
+ }
+}
+
+impl<'ll, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn set_type_metadata(&self, function: &'ll Value, typeid: String) {
+ let typeid_metadata = self.typeid_metadata(typeid);
+ let v = [self.const_usize(0), typeid_metadata];
+ unsafe {
+ llvm::LLVMGlobalSetMetadata(
+ function,
+ llvm::MD_type as c_uint,
+ llvm::LLVMValueAsMetadata(llvm::LLVMMDNodeInContext(
+ self.llcx,
+ v.as_ptr(),
+ v.len() as c_uint,
+ )),
+ )
+ }
+ }
+
+ fn typeid_metadata(&self, typeid: String) -> &'ll Value {
+ unsafe {
+ llvm::LLVMMDStringInContext(
+ self.llcx,
+ typeid.as_ptr() as *const c_char,
+ typeid.len() as c_uint,
+ )
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
new file mode 100644
index 000000000..9f0e6c80b
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -0,0 +1,418 @@
+use crate::common::*;
+use crate::context::TypeLowering;
+use crate::llvm_util::get_version;
+use crate::type_::Type;
+use rustc_codegen_ssa::traits::*;
+use rustc_middle::bug;
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
+use rustc_middle::ty::{self, Ty, TypeVisitable};
+use rustc_target::abi::{Abi, AddressSpace, Align, FieldsShape};
+use rustc_target::abi::{Int, Pointer, F32, F64};
+use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants};
+use smallvec::{smallvec, SmallVec};
+use tracing::debug;
+
+use std::fmt::Write;
+
+fn uncached_llvm_type<'a, 'tcx>(
+ cx: &CodegenCx<'a, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
+ field_remapping: &mut Option<SmallVec<[u32; 4]>>,
+) -> &'a Type {
+ match layout.abi {
+ Abi::Scalar(_) => bug!("handled elsewhere"),
+ Abi::Vector { element, count } => {
+ let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
+ return cx.type_vector(element, count);
+ }
+ Abi::ScalarPair(..) => {
+ return cx.type_struct(
+ &[
+ layout.scalar_pair_element_llvm_type(cx, 0, false),
+ layout.scalar_pair_element_llvm_type(cx, 1, false),
+ ],
+ false,
+ );
+ }
+ Abi::Uninhabited | Abi::Aggregate { .. } => {}
+ }
+
+ let name = match layout.ty.kind() {
+ // FIXME(eddyb) producing readable type names for trait objects can result
+ // in problematically distinct types due to HRTB and subtyping (see #47638).
+ // ty::Dynamic(..) |
+ ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
+ // For performance reasons we use names only when emitting LLVM IR. Unless we are on
+ // LLVM < 14, where the use of unnamed types resulted in various issues, e.g., #76213,
+ // #79564, and #79246.
+ if get_version() < (14, 0, 0) || !cx.sess().fewer_names() =>
+ {
+ let mut name = with_no_visible_paths!(with_no_trimmed_paths!(layout.ty.to_string()));
+ if let (&ty::Adt(def, _), &Variants::Single { index }) =
+ (layout.ty.kind(), &layout.variants)
+ {
+ if def.is_enum() && !def.variants().is_empty() {
+ write!(&mut name, "::{}", def.variant(index).name).unwrap();
+ }
+ }
+ if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
+ (layout.ty.kind(), &layout.variants)
+ {
+ write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
+ }
+ Some(name)
+ }
+ // Use identified structure types for ADT. Due to pointee types in LLVM IR their definition
+ // might be recursive. Other cases are non-recursive and we can use literal structure types.
+ ty::Adt(..) => Some(String::new()),
+ _ => None,
+ };
+
+ match layout.fields {
+ FieldsShape::Primitive | FieldsShape::Union(_) => {
+ let fill = cx.type_padding_filler(layout.size, layout.align.abi);
+ let packed = false;
+ match name {
+ None => cx.type_struct(&[fill], packed),
+ Some(ref name) => {
+ let llty = cx.type_named_struct(name);
+ cx.set_struct_body(llty, &[fill], packed);
+ llty
+ }
+ }
+ }
+ FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).llvm_type(cx), count),
+ FieldsShape::Arbitrary { .. } => match name {
+ None => {
+ let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
+ *field_remapping = new_field_remapping;
+ cx.type_struct(&llfields, packed)
+ }
+ Some(ref name) => {
+ let llty = cx.type_named_struct(name);
+ *defer = Some((llty, layout));
+ llty
+ }
+ },
+ }
+}
+
+fn struct_llfields<'a, 'tcx>(
+ cx: &CodegenCx<'a, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+) -> (Vec<&'a Type>, bool, Option<SmallVec<[u32; 4]>>) {
+ debug!("struct_llfields: {:#?}", layout);
+ let field_count = layout.fields.count();
+
+ let mut packed = false;
+ let mut offset = Size::ZERO;
+ let mut prev_effective_align = layout.align.abi;
+ let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
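+ // Maps each source-order field index to its LLVM struct field index; the
+ // two diverge once padding fillers are interleaved below.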
+ let mut field_remapping = smallvec![0; field_count];
+ for i in layout.fields.index_by_increasing_offset() {
+ let target_offset = layout.fields.offset(i as usize);
+ let field = layout.field(cx, i);
+ let effective_field_align =
+ layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
+ packed |= effective_field_align < field.align.abi;
+
+ debug!(
+ "struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
+ effective_field_align: {}",
+ i,
+ field,
+ offset,
+ target_offset,
+ effective_field_align.bytes()
+ );
+ assert!(target_offset >= offset);
+ let padding = target_offset - offset;
+ if padding != Size::ZERO {
+ let padding_align = prev_effective_align.min(effective_field_align);
+ assert_eq!(offset.align_to(padding_align) + padding, target_offset);
+ result.push(cx.type_padding_filler(padding, padding_align));
+ debug!(" padding before: {:?}", padding);
+ }
+ field_remapping[i] = result.len() as u32;
+ result.push(field.llvm_type(cx));
+ offset = target_offset + field.size;
+ prev_effective_align = effective_field_align;
+ }
+ let padding_used = result.len() > field_count;
+ if !layout.is_unsized() && field_count > 0 {
+ if offset > layout.size {
+ bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
+ }
+ let padding = layout.size - offset;
+ if padding != Size::ZERO {
+ let padding_align = prev_effective_align;
+ assert_eq!(offset.align_to(padding_align) + padding, layout.size);
+ debug!(
+ "struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
+ padding, offset, layout.size
+ );
+ result.push(cx.type_padding_filler(padding, padding_align));
+ }
+ } else {
+ debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
+ }
+ let field_remapping = if padding_used { Some(field_remapping) } else { None };
+ (result, packed, field_remapping)
+}
+
+impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
+ pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
+ self.layout_of(ty).align.abi
+ }
+
+ pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
+ self.layout_of(ty).size
+ }
+
+ pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
+ let layout = self.layout_of(ty);
+ (layout.size, layout.align.abi)
+ }
+}
+
+pub trait LayoutLlvmExt<'tcx> {
+ fn is_llvm_immediate(&self) -> bool;
+ fn is_llvm_scalar_pair(&self) -> bool;
+ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
+ fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
+ fn scalar_llvm_type_at<'a>(
+ &self,
+ cx: &CodegenCx<'a, 'tcx>,
+ scalar: Scalar,
+ offset: Size,
+ ) -> &'a Type;
+ fn scalar_pair_element_llvm_type<'a>(
+ &self,
+ cx: &CodegenCx<'a, 'tcx>,
+ index: usize,
+ immediate: bool,
+ ) -> &'a Type;
+ fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64;
+ fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo>;
+}
+
+impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
+ fn is_llvm_immediate(&self) -> bool {
+ match self.abi {
+ Abi::Scalar(_) | Abi::Vector { .. } => true,
+ Abi::ScalarPair(..) => false,
+ Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(),
+ }
+ }
+
+ fn is_llvm_scalar_pair(&self) -> bool {
+ match self.abi {
+ Abi::ScalarPair(..) => true,
+ Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+ }
+ }
+
+ /// Gets the LLVM type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
+ /// The pointee type of the pointer in `PlaceRef` is always this type.
+ /// For sized types, it is also the right LLVM type for an `alloca`
+ /// containing a value of that type, and most immediates (except `bool`).
+ /// Unsized types, however, are represented by a "minimal unit", e.g.
+ /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
+ /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
+ /// If the type is an unsized struct, the regular layout is generated,
+ /// with the inner-most trailing unsized field using the "minimal unit"
+ /// of that field's type - this is useful for taking the address of
+ /// that field and ensuring the struct has the right alignment.
+ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
+ if let Abi::Scalar(scalar) = self.abi {
+ // Use a different cache for scalars because pointers to DSTs
+ // can be either fat or thin (data pointers of fat pointers).
+ if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
+ return llty;
+ }
+ let llty = match *self.ty.kind() {
+ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
+ cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
+ }
+ ty::Adt(def, _) if def.is_box() => {
+ cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
+ }
+ ty::FnPtr(sig) => {
+ cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
+ }
+ _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO),
+ };
+ cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
+ return llty;
+ }
+
+ // Check the cache.
+ let variant_index = match self.variants {
+ Variants::Single { index } => Some(index),
+ _ => None,
+ };
+ if let Some(llty) = cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
+ return llty.lltype;
+ }
+
+ debug!("llvm_type({:#?})", self);
+
+ assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
+
+ // Make sure lifetimes are erased, to avoid generating distinct LLVM
+ // types for Rust types that only differ in the choice of lifetimes.
+ let normal_ty = cx.tcx.erase_regions(self.ty);
+
+ let mut defer = None;
+ let mut field_remapping = None;
+ let llty = if self.ty != normal_ty {
+ let mut layout = cx.layout_of(normal_ty);
+ if let Some(v) = variant_index {
+ layout = layout.for_variant(cx, v);
+ }
+ layout.llvm_type(cx)
+ } else {
+ uncached_llvm_type(cx, *self, &mut defer, &mut field_remapping)
+ };
+ debug!("--> mapped {:#?} to llty={:?}", self, llty);
+
+ cx.type_lowering
+ .borrow_mut()
+ .insert((self.ty, variant_index), TypeLowering { lltype: llty, field_remapping });
+
+ if let Some((llty, layout)) = defer {
+ let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
+ cx.set_struct_body(llty, &llfields, packed);
+ cx.type_lowering
+ .borrow_mut()
+ .get_mut(&(self.ty, variant_index))
+ .unwrap()
+ .field_remapping = new_field_remapping;
+ }
+ llty
+ }
+
+ fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
+ if let Abi::Scalar(scalar) = self.abi {
+ if scalar.is_bool() {
+ return cx.type_i1();
+ }
+ }
+ self.llvm_type(cx)
+ }
+
+ fn scalar_llvm_type_at<'a>(
+ &self,
+ cx: &CodegenCx<'a, 'tcx>,
+ scalar: Scalar,
+ offset: Size,
+ ) -> &'a Type {
+ match scalar.primitive() {
+ Int(i, _) => cx.type_from_integer(i),
+ F32 => cx.type_f32(),
+ F64 => cx.type_f64(),
+ Pointer => {
+ // If we know the alignment, pick something better than i8.
+ let (pointee, address_space) =
+ if let Some(pointee) = self.pointee_info_at(cx, offset) {
+ (cx.type_pointee_for_align(pointee.align), pointee.address_space)
+ } else {
+ (cx.type_i8(), AddressSpace::DATA)
+ };
+ cx.type_ptr_to_ext(pointee, address_space)
+ }
+ }
+ }
+
+ fn scalar_pair_element_llvm_type<'a>(
+ &self,
+ cx: &CodegenCx<'a, 'tcx>,
+ index: usize,
+ immediate: bool,
+ ) -> &'a Type {
+ // HACK(eddyb) special-case fat pointers until LLVM removes
+ // pointee types, to avoid bitcasting every `OperandRef::deref`.
+ match self.ty.kind() {
+ ty::Ref(..) | ty::RawPtr(_) => {
+ return self.field(cx, index).llvm_type(cx);
+ }
+ // Only wide-pointer boxes are handled as pointers; thin-pointer boxes with
+ // scalar allocators are handled by the general logic below.
+ ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
+ let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
+ return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
+ }
+ _ => {}
+ }
+
+ let Abi::ScalarPair(a, b) = self.abi else {
+ bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
+ };
+ let scalar = [a, b][index];
+
+ // Make sure to return the same type `immediate_llvm_type` would when
+ // dealing with an immediate pair. This means that `(bool, bool)` is
+ // effectively represented as `{i8, i8}` in memory and two `i1`s as an
+ // immediate, just like `bool` is typically `i8` in memory and only `i1`
+ // when immediate. We need to load/store `bool` as `i8` to avoid
+ // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
+ if immediate && scalar.is_bool() {
+ return cx.type_i1();
+ }
+
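+ // The second element starts at the first element's size rounded up to the
+ // second element's ABI alignment.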
+ let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
+ self.scalar_llvm_type_at(cx, scalar, offset)
+ }
+
+ fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64 {
+ match self.abi {
+ Abi::Scalar(_) | Abi::ScalarPair(..) => {
+ bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
+ }
+ _ => {}
+ }
+ match self.fields {
+ FieldsShape::Primitive | FieldsShape::Union(_) => {
+ bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
+ }
+
+ FieldsShape::Array { .. } => index as u64,
+
+ FieldsShape::Arbitrary { .. } => {
+ let variant_index = match self.variants {
+ Variants::Single { index } => Some(index),
+ _ => None,
+ };
+
+ // Look up llvm field if indexes do not match memory order due to padding. If
+ // `field_remapping` is `None` no padding was used and the llvm field index
+ // matches the memory index.
+ match cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
+ Some(TypeLowering { field_remapping: Some(ref remap), .. }) => {
+ remap[index] as u64
+ }
+ Some(_) => self.fields.memory_index(index) as u64,
+ None => {
+ bug!("TyAndLayout::llvm_field_index({:?}): type info not found", self)
+ }
+ }
+ }
+ }
+ }
+
+ // FIXME(eddyb) this having the same name as `TyAndLayout::pointee_info_at`
+ // (the inherent method, which is lacking this caching logic) can result in
+ // the uncached version being called - not wrong, but potentially inefficient.
+ fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
+ if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
+ return pointee;
+ }
+
+ let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset);
+
+ cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
+ result
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
new file mode 100644
index 000000000..ceb3d5a84
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -0,0 +1,214 @@
+use crate::builder::Builder;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::{
+ common::IntPredicate,
+ traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
+};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::Ty;
+use rustc_target::abi::{Align, Endian, HasDataLayout, Size};
+
+fn round_pointer_up_to_alignment<'ll>(
+ bx: &mut Builder<'_, 'll, '_>,
+ addr: &'ll Value,
+ align: Align,
+ ptr_ty: &'ll Type,
+) -> &'ll Value {
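+    // Round `addr` up to the next multiple of `align` via the classic
+    // `(addr + align - 1) & -align` trick, which relies on `align` being a
+    // power of two (e.g. addr = 0x1003, align = 8: (0x1003 + 7) & !7 = 0x1008).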
+ let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
+ ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
+ ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
+ bx.inttoptr(ptr_as_int, ptr_ty)
+}
+
+fn emit_direct_ptr_va_arg<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ list: OperandRef<'tcx, &'ll Value>,
+ llty: &'ll Type,
+ size: Size,
+ align: Align,
+ slot_size: Align,
+ allow_higher_align: bool,
+) -> (&'ll Value, Align) {
+ let va_list_ty = bx.type_i8p();
+ let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
+ let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
+ bx.bitcast(list.immediate(), va_list_ptr_ty)
+ } else {
+ list.immediate()
+ };
+
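+    // On these targets `va_list` is just a cursor into the argument area:
+    // load the current argument pointer, then store back a cursor advanced
+    // past this argument (below).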
+ let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+
+ let (addr, addr_align) = if allow_higher_align && align > slot_size {
+ (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
+ } else {
+ (ptr, slot_size)
+ };
+
+ let aligned_size = size.align_to(slot_size).bytes() as i32;
+ let full_direct_size = bx.cx().const_i32(aligned_size);
+ let next = bx.inbounds_gep(bx.type_i8(), addr, &[full_direct_size]);
+ bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+
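+    // On big-endian targets, a value smaller than its slot occupies the
+    // high-addressed end of the slot, so advance the pointer past the
+    // padding before loading.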
+ if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
+ let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
+ let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]);
+ (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
+ } else {
+ (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
+ }
+}
+
+fn emit_ptr_va_arg<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ list: OperandRef<'tcx, &'ll Value>,
+ target_ty: Ty<'tcx>,
+ indirect: bool,
+ slot_size: Align,
+ allow_higher_align: bool,
+) -> &'ll Value {
+ let layout = bx.cx.layout_of(target_ty);
+ let (llty, size, align) = if indirect {
+ (
+ bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx),
+ bx.cx.data_layout().pointer_size,
+ bx.cx.data_layout().pointer_align,
+ )
+ } else {
+ (layout.llvm_type(bx.cx), layout.size, layout.align)
+ };
+ let (addr, addr_align) =
+ emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
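+    // An indirect argument's slot holds a pointer to the value, so two loads
+    // are needed: first the pointer, then the value it points to.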
+ if indirect {
+ let tmp_ret = bx.load(llty, addr, addr_align);
+ bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
+ } else {
+ bx.load(llty, addr, addr_align)
+ }
+}
+
+fn emit_aapcs_va_arg<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ list: OperandRef<'tcx, &'ll Value>,
+ target_ty: Ty<'tcx>,
+) -> &'ll Value {
+    // Implementation of the AAPCS64 calling convention for va_arg; see
+    // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
+ let va_list_addr = list.immediate();
+ let va_list_layout = list.deref(bx.cx).layout;
+ let va_list_ty = va_list_layout.llvm_type(bx);
+ let layout = bx.cx.layout_of(target_ty);
+
+ let maybe_reg = bx.append_sibling_block("va_arg.maybe_reg");
+ let in_reg = bx.append_sibling_block("va_arg.in_reg");
+ let on_stack = bx.append_sibling_block("va_arg.on_stack");
+ let end = bx.append_sibling_block("va_arg.end");
+ let zero = bx.const_i32(0);
+ let offset_align = Align::from_bytes(4).unwrap();
+
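+    // AAPCS64 defines `va_list` as
+    //   struct { void *stack; void *gr_top; void *vr_top; int gr_offs; int vr_offs; }
+    // so fields 1 and 2 are the tops of the general-purpose and SIMD register
+    // save areas, and fields 3 and 4 the (negative) offsets into them. GP
+    // slots are 8 bytes, SIMD slots 16; `offset_align` is 4 because the
+    // offset fields are `int`s.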
+ let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
+ let (reg_off, reg_top_index, slot_size) = if gr_type {
+ let gr_offs =
+ bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3));
+ let nreg = (layout.size.bytes() + 7) / 8;
+ (gr_offs, va_list_layout.llvm_field_index(bx.cx, 1), nreg * 8)
+ } else {
+ let vr_off =
+ bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 4));
+ let nreg = (layout.size.bytes() + 15) / 16;
+ (vr_off, va_list_layout.llvm_field_index(bx.cx, 2), nreg * 16)
+ };
+
+    // If the offset is >= 0, the register save area is exhausted and the
+    // value is on the stack.
+ let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
+ let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
+ bx.cond_br(use_stack, on_stack, maybe_reg);
+
+    // At this point the value might be in a register, but it could also be on
+    // the stack, so we have to update the offset and then check it again.
+
+ bx.switch_to_block(maybe_reg);
+ if gr_type && layout.align.abi.bytes() > 8 {
+ reg_off_v = bx.add(reg_off_v, bx.const_i32(15));
+ reg_off_v = bx.and(reg_off_v, bx.const_i32(-16));
+ }
+ let new_reg_off_v = bx.add(reg_off_v, bx.const_i32(slot_size as i32));
+
+ bx.store(new_reg_off_v, reg_off, offset_align);
+
+    // Check whether bumping the offset overflowed the register save area;
+    // if it did, the value must be read from the stack instead.
+ let use_stack = bx.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
+ bx.cond_br(use_stack, on_stack, in_reg);
+
+ bx.switch_to_block(in_reg);
+ let top_type = bx.type_i8p();
+ let top = bx.struct_gep(va_list_ty, va_list_addr, reg_top_index);
+ let top = bx.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
+
+ // reg_value = *(@top + reg_off_v);
+ let mut reg_addr = bx.gep(bx.type_i8(), top, &[reg_off_v]);
+ if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
+ // On big-endian systems the value is right-aligned in its slot.
+ let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
+ reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]);
+ }
+ let reg_type = layout.llvm_type(bx);
+ let reg_addr = bx.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
+ let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
+ bx.br(end);
+
+    // "On stack" block: the value is read from the stack argument area.
+ bx.switch_to_block(on_stack);
+ let stack_value =
+ emit_ptr_va_arg(bx, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
+ bx.br(end);
+
+ bx.switch_to_block(end);
+    bx.phi(layout.immediate_llvm_type(bx), &[reg_value, stack_value], &[in_reg, on_stack])
+}
+
+pub(super) fn emit_va_arg<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ addr: OperandRef<'tcx, &'ll Value>,
+ target_ty: Ty<'tcx>,
+) -> &'ll Value {
+    // Determine the va_arg implementation to use. LLVM's `va_arg` instruction
+    // is not fully supported on all targets, so we only use it as a fallback.
+ let target = &bx.cx.tcx.sess.target;
+ let arch = &bx.cx.tcx.sess.target.arch;
+ match &**arch {
+ // Windows x86
+ "x86" if target.is_like_windows => {
+ emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
+ }
+ // Generic x86
+ "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true),
+ // Windows AArch64
+ "aarch64" if target.is_like_windows => {
+ emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
+ }
+ // macOS / iOS AArch64
+ "aarch64" if target.is_like_osx => {
+ emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
+ }
+ "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
+ // Windows x86_64
+ "x86_64" if target.is_like_windows => {
+ let target_ty_size = bx.cx.size_of(target_ty).bytes();
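+            // The Win64 ABI passes arguments by value only if they are 1, 2,
+            // 4, or 8 bytes; anything larger or oddly sized is passed by
+            // reference and must be read through a pointer.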
+ let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
+ emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
+ }
+        // For all other architecture/OS combinations, fall back to the LLVM
+        // `va_arg` instruction:
+        // https://llvm.org/docs/LangRef.html#va-arg-instruction
+ _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).llvm_type(bx.cx)),
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/value.rs b/compiler/rustc_codegen_llvm/src/value.rs
new file mode 100644
index 000000000..1338a2295
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/value.rs
@@ -0,0 +1,32 @@
+pub use crate::llvm::Value;
+
+use crate::llvm;
+
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::ptr;
+
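+// `Value` is an opaque handle into LLVM's arenas, so identity (and therefore
+// equality and hashing) is by address.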
+impl PartialEq for Value {
+ fn eq(&self, other: &Self) -> bool {
+ ptr::eq(self, other)
+ }
+}
+
+impl Eq for Value {}
+
+impl Hash for Value {
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ (self as *const Self).hash(hasher);
+ }
+}
+
+impl fmt::Debug for Value {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(
+ &llvm::build_string(|s| unsafe {
+ llvm::LLVMRustWriteValueToString(self, s);
+ })
+ .expect("non-UTF8 value description from LLVM"),
+ )
+ }
+}