author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
commit     c23a457e72abe608715ac76f076f47dc42af07a5 (patch)
tree       2772049aaf84b5c9d0ed12ec8d86812f7a7904b6 /compiler/rustc_codegen_llvm/src
parent     Releasing progress-linux version 1.73.0+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.74.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs | 99
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/archive.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs | 52
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs | 103
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs | 146
-rw-r--r--  compiler/rustc_codegen_llvm/src/callee.rs | 3
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs | 7
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs | 46
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs | 248
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs | 200
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs | 169
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs | 38
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 15
-rw-r--r--  compiler/rustc_codegen_llvm/src/errors.rs | 10
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 61
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs | 29
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 26
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs | 6
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs | 38
20 files changed, 765 insertions, 537 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index c6a7dc95d..9e834b83d 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -211,7 +211,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
} else if self.is_unsized_indirect() {
bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
- } else if let PassMode::Cast(cast, _) = &self.mode {
+ } else if let PassMode::Cast { cast, pad_i32: _ } = &self.mode {
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
@@ -274,12 +274,12 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
PassMode::Pair(..) => {
OperandValue::Pair(next(), next()).store(bx, dst);
}
- PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
}
PassMode::Direct(_)
- | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
- | PassMode::Cast(..) => {
+ | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
+ | PassMode::Cast { .. } => {
let next_arg = next();
self.store(bx, next_arg, dst);
}
@@ -332,7 +332,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
let llreturn_ty = match &self.ret.mode {
PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
- PassMode::Cast(cast, _) => cast.llvm_type(cx),
+ PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx),
PassMode::Indirect { .. } => {
llargument_tys.push(cx.type_ptr());
cx.type_void()
@@ -340,29 +340,78 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
};
for arg in args {
+ // Note that the exact number of arguments pushed here is carefully synchronized with
+ // code all over the place, both in the codegen_llvm and codegen_ssa crates. That's how
+ // other code then knows which LLVM argument(s) correspond to the n-th Rust argument.
let llarg_ty = match &arg.mode {
PassMode::Ignore => continue,
- PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
+ PassMode::Direct(_) => {
+ // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+ // and for Scalar ABIs the LLVM type is fully determined by `layout.abi`,
+ // guaranteeing that we generate ABI-compatible LLVM IR. Things get tricky for
+ // aggregates...
+ if matches!(arg.layout.abi, abi::Abi::Aggregate { .. }) {
+ assert!(
+ arg.layout.is_sized(),
+ "`PassMode::Direct` for unsized type: {}",
+ arg.layout.ty
+ );
+ // This really shouldn't happen, since `immediate_llvm_type` will use
+ // `layout.fields` to turn this Rust type into an LLVM type. This means all
+ // sorts of Rust type details leak into the ABI. However wasm sadly *does*
+ // currently use this mode so we have to allow it -- but we absolutely
+ // shouldn't let any more targets do that.
+ // (Also see <https://github.com/rust-lang/rust/issues/115666>.)
+ assert!(
+ matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64"),
+ "`PassMode::Direct` for aggregates only allowed on wasm targets\nProblematic type: {:#?}",
+ arg.layout,
+ );
+ }
+ arg.layout.immediate_llvm_type(cx)
+ }
PassMode::Pair(..) => {
+ // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+ // so for ScalarPair we can easily be sure that we are generating ABI-compatible
+ // LLVM IR.
+ assert!(
+ matches!(arg.layout.abi, abi::Abi::ScalarPair(..)),
+ "PassMode::Pair for type {}",
+ arg.layout.ty
+ );
llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
continue;
}
- PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack } => {
+ // `Indirect` with metadata is only for unsized types, and doesn't work with
+ // on-stack passing.
+ assert!(arg.layout.is_unsized() && !on_stack);
+ // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
+ // Any two ABI-compatible unsized types have the same metadata type and
+ // moreover the same metadata value leads to the same dynamic size and
+ // alignment, so this respects ABI compatibility.
let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
let ptr_layout = cx.layout_of(ptr_ty);
llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
continue;
}
- PassMode::Cast(cast, pad_i32) => {
+ PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
+ assert!(arg.layout.is_sized());
+ cx.type_ptr()
+ }
+ PassMode::Cast { cast, pad_i32 } => {
+ // `Cast` means "transmute to `CastType`"; that only makes sense for sized types.
+ assert!(arg.layout.is_sized());
// add padding
if *pad_i32 {
llargument_tys.push(Reg::i32().llvm_type(cx));
}
+ // Compute the LLVM type we use for this function from the cast type.
+ // We assume here that ABI-compatible Rust types have the same cast type.
cast.llvm_type(cx)
}
- PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => cx.type_ptr(),
};
llargument_tys.push(llarg_ty);
}
@@ -405,13 +454,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Direct(attrs) => {
attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
}
- PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+ PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(attrs);
let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
}
- PassMode::Cast(cast, _) => {
+ PassMode::Cast { cast, pad_i32: _ } => {
cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
}
_ => {}
@@ -419,25 +468,25 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
for arg in self.args.iter() {
match &arg.mode {
PassMode::Ignore => {}
- PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+ PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
let i = apply(attrs);
let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
}
PassMode::Direct(attrs)
- | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+ | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
apply(attrs);
}
- PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+ PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
assert!(!on_stack);
apply(attrs);
- apply(extra_attrs);
+ apply(meta_attrs);
}
PassMode::Pair(a, b) => {
apply(a);
apply(b);
}
- PassMode::Cast(cast, pad_i32) => {
+ PassMode::Cast { cast, pad_i32 } => {
if *pad_i32 {
apply(&ArgAttributes::new());
}
@@ -467,13 +516,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Direct(attrs) => {
attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
}
- PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+ PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(bx.cx, attrs);
let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
}
- PassMode::Cast(cast, _) => {
+ PassMode::Cast { cast, pad_i32: _ } => {
cast.attrs.apply_attrs_to_callsite(
llvm::AttributePlace::ReturnValue,
&bx.cx,
@@ -495,7 +544,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
for arg in self.args.iter() {
match &arg.mode {
PassMode::Ignore => {}
- PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+ PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
let i = apply(bx.cx, attrs);
let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
attributes::apply_to_callsite(
@@ -505,18 +554,18 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
);
}
PassMode::Direct(attrs)
- | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+ | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
apply(bx.cx, attrs);
}
- PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack: _ } => {
+ PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack: _ } => {
apply(bx.cx, attrs);
- apply(bx.cx, extra_attrs);
+ apply(bx.cx, meta_attrs);
}
PassMode::Pair(a, b) => {
apply(bx.cx, a);
apply(bx.cx, b);
}
- PassMode::Cast(cast, pad_i32) => {
+ PassMode::Cast { cast, pad_i32 } => {
if *pad_i32 {
apply(bx.cx, &ArgAttributes::new());
}
@@ -571,7 +620,9 @@ impl From<Conv> for llvm::CallConv {
Conv::C | Conv::Rust | Conv::CCmseNonSecureCall | Conv::RiscvInterrupt { .. } => {
llvm::CCallConv
}
- Conv::RustCold => llvm::ColdCallConv,
+ Conv::Cold => llvm::ColdCallConv,
+ Conv::PreserveMost => llvm::PreserveMost,
+ Conv::PreserveAll => llvm::PreserveAll,
Conv::AmdGpuKernel => llvm::AmdGpuKernel,
Conv::AvrInterrupt => llvm::AvrInterrupt,
Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
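Most of the abi.rs changes above are one mechanical refactor applied at every use site: `PassMode::Cast` goes from a tuple variant to a struct variant, and the `extra_attrs` field of `PassMode::Indirect` is renamed to `meta_attrs`. Below is a minimal, self-contained sketch of the two shapes, using placeholder types rather than the real `rustc_target::abi::call` definitions:

    // Placeholder types; the real ones are rustc_target::abi::call::{CastTarget, ArgAttributes}.
    #[derive(Debug)]
    struct CastTarget;
    #[allow(dead_code)]
    #[derive(Debug)]
    struct ArgAttributes;

    // 1.73 shape: positional fields, so `PassMode::Cast(cast, _)` hides what `_` means.
    #[allow(dead_code)]
    enum PassModeOld {
        Cast(Box<CastTarget>, bool),
        Indirect { attrs: ArgAttributes, extra_attrs: Option<ArgAttributes>, on_stack: bool },
    }

    // 1.74 shape: named fields, so every match site spells out `pad_i32` and `meta_attrs`.
    #[allow(dead_code)]
    enum PassModeNew {
        Cast { cast: Box<CastTarget>, pad_i32: bool },
        Indirect { attrs: ArgAttributes, meta_attrs: Option<ArgAttributes>, on_stack: bool },
    }

    fn main() {
        let mode = PassModeNew::Cast { cast: Box::new(CastTarget), pad_i32: false };
        // The pattern now names the ignored field, as in the diff above.
        if let PassModeNew::Cast { cast, pad_i32: _ } = &mode {
            println!("cast = {cast:?}");
        }
    }

Named fields make the many `_` patterns in this diff self-documenting, which appears to be the point of the change.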
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index a82d2c577..f33075a88 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -367,7 +367,7 @@ impl<'a> LlvmArchiveBuilder<'a> {
match addition {
Addition::File { path, name_in_archive } => {
let path = CString::new(path.to_str().unwrap())?;
- let name = CString::new(name_in_archive.clone())?;
+ let name = CString::new(name_in_archive.as_bytes())?;
members.push(llvm::LLVMRustArchiveMemberNew(
path.as_ptr(),
name.as_ptr(),
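The one-line archive.rs change works because `CString::new` is generic over `Into<Vec<u8>>`: passing `name_in_archive.as_bytes()` lets `CString::new` copy the bytes once, without cloning the `String` first. A tiny sketch showing the two forms are equivalent:

    use std::ffi::CString;

    fn main() {
        let name_in_archive = String::from("libfoo.o");
        // Both compile because CString::new accepts any Into<Vec<u8>>.
        let cloned = CString::new(name_in_archive.clone()).unwrap();
        let borrowed = CString::new(name_in_archive.as_bytes()).unwrap();
        assert_eq!(cloned, borrowed);
    }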
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index b2d28cef8..a3b0dc6b6 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -1,6 +1,8 @@
-use crate::back::write::{self, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers};
+use crate::back::write::{
+ self, bitcode_section_name, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers,
+};
use crate::errors::{
- DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
+ DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro,
};
use crate::llvm::{self, build_string};
use crate::{LlvmCodegenBackend, ModuleLlvm};
@@ -24,6 +26,7 @@ use std::ffi::{CStr, CString};
use std::fs::File;
use std::io;
use std::iter;
+use std::mem::ManuallyDrop;
use std::path::Path;
use std::slice;
use std::sync::Arc;
@@ -34,8 +37,12 @@ pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
match crate_type {
- CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true,
- CrateType::Rlib | CrateType::ProcMacro => false,
+ CrateType::Executable
+ | CrateType::Dylib
+ | CrateType::Staticlib
+ | CrateType::Cdylib
+ | CrateType::ProcMacro => true,
+ CrateType::Rlib => false,
}
}
@@ -85,6 +92,11 @@ fn prepare_lto(
diag_handler.emit_err(LtoDylib);
return Err(FatalError);
}
+ } else if *crate_type == CrateType::ProcMacro {
+ if !cgcx.opts.unstable_opts.dylib_lto {
+ diag_handler.emit_err(LtoProcMacro);
+ return Err(FatalError);
+ }
}
}
@@ -120,6 +132,7 @@ fn prepare_lto(
info!("adding bitcode from {}", name);
match get_bitcode_slice_from_object_data(
child.data(&*archive_data).expect("corrupt rlib"),
+ cgcx,
) {
Ok(data) => {
let module = SerializedModule::FromRlib(data.to_vec());
@@ -141,10 +154,29 @@ fn prepare_lto(
Ok((symbols_below_threshold, upstream_modules))
}
-fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], LtoBitcodeFromRlib> {
+fn get_bitcode_slice_from_object_data<'a>(
+ obj: &'a [u8],
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+) -> Result<&'a [u8], LtoBitcodeFromRlib> {
+ // We're about to assume the data here is an object file with sections, but if it's raw LLVM IR that
+ // won't work. Fortunately, if that's what we have we can just return the object directly, so we sniff
+ // the relevant magic strings here and return.
+ if obj.starts_with(b"\xDE\xC0\x17\x0B") || obj.starts_with(b"BC\xC0\xDE") {
+ return Ok(obj);
+ }
+ // We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment name"
+ // which in the public API for sections gets treated as part of the section name, but internally
+ // in MachOObjectFile.cpp gets treated separately.
+ let section_name = bitcode_section_name(cgcx).trim_start_matches("__LLVM,");
let mut len = 0;
- let data =
- unsafe { llvm::LLVMRustGetBitcodeSliceFromObjectData(obj.as_ptr(), obj.len(), &mut len) };
+ let data = unsafe {
+ llvm::LLVMRustGetSliceFromObjectDataByName(
+ obj.as_ptr(),
+ obj.len(),
+ section_name.as_ptr(),
+ &mut len,
+ )
+ };
if !data.is_null() {
assert!(len != 0);
let bc = unsafe { slice::from_raw_parts(data, len) };
@@ -441,7 +473,7 @@ fn thin_lto(
for (i, (name, buffer)) in modules.into_iter().enumerate() {
info!("local module: {} - {}", i, name);
- let cname = CString::new(name.clone()).unwrap();
+ let cname = CString::new(name.as_bytes()).unwrap();
thin_modules.push(llvm::ThinLTOModule {
identifier: cname.as_ptr(),
data: buffer.data().as_ptr(),
@@ -583,7 +615,7 @@ pub(crate) fn run_pass_manager(
module: &mut ModuleCodegen<ModuleLlvm>,
thin: bool,
) -> Result<(), FatalError> {
- let _timer = cgcx.prof.verbose_generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
let config = cgcx.config(module.kind);
// Now we have one massive module inside of llmod. Time to run the
@@ -705,7 +737,7 @@ pub unsafe fn optimize_thin_module(
let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &diag_handler)? as *const _;
let mut module = ModuleCodegen {
- module_llvm: ModuleLlvm { llmod_raw, llcx, tm },
+ module_llvm: ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) },
name: thin_module.name().to_string(),
kind: ModuleKind::Regular,
};
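The magic numbers sniffed in `get_bitcode_slice_from_object_data` above are the two on-disk forms of LLVM bitcode: raw bitcode starts with the bytes `BC\xC0\xDE`, and the bitcode wrapper format starts with the 32-bit magic 0x0B17C0DE, which appears byte-reversed here because it is stored little-endian. A standalone sketch of just that check:

    // Standalone sketch of the magic-number check added above.
    fn looks_like_llvm_bitcode(data: &[u8]) -> bool {
        data.starts_with(b"\xDE\xC0\x17\x0B") // bitcode wrapper magic, little-endian
            || data.starts_with(b"BC\xC0\xDE") // raw bitcode magic
    }

    fn main() {
        assert!(looks_like_llvm_bitcode(b"BC\xC0\xDExxxx"));
        assert!(looks_like_llvm_bitcode(b"\xDE\xC0\x17\x0Bxxxx"));
        assert!(!looks_like_llvm_bitcode(b"\x7fELF"));
    }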
diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
new file mode 100644
index 000000000..36484c3c3
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
@@ -0,0 +1,103 @@
+use std::{
+ ffi::{c_char, CStr},
+ marker::PhantomData,
+ ops::Deref,
+ ptr::NonNull,
+};
+
+use rustc_data_structures::small_c_str::SmallCStr;
+
+use crate::{errors::LlvmError, llvm};
+
+/// Responsible for safely creating and disposing llvm::TargetMachine via ffi functions.
+/// Not cloneable as there is no clone function for llvm::TargetMachine.
+#[repr(transparent)]
+pub struct OwnedTargetMachine {
+ tm_unique: NonNull<llvm::TargetMachine>,
+ phantom: PhantomData<llvm::TargetMachine>,
+}
+
+impl OwnedTargetMachine {
+ pub fn new(
+ triple: &CStr,
+ cpu: &CStr,
+ features: &CStr,
+ abi: &CStr,
+ model: llvm::CodeModel,
+ reloc: llvm::RelocModel,
+ level: llvm::CodeGenOptLevel,
+ use_soft_fp: bool,
+ function_sections: bool,
+ data_sections: bool,
+ unique_section_names: bool,
+ trap_unreachable: bool,
+ singlethread: bool,
+ asm_comments: bool,
+ emit_stack_size_section: bool,
+ relax_elf_relocations: bool,
+ use_init_array: bool,
+ split_dwarf_file: &CStr,
+ output_obj_file: &CStr,
+ debug_info_compression: &CStr,
+ force_emulated_tls: bool,
+ args_cstr_buff: &[u8],
+ ) -> Result<Self, LlvmError<'static>> {
+ assert!(args_cstr_buff.len() > 0);
+ assert!(
+ *args_cstr_buff.last().unwrap() == 0,
+ "The last character must be a null terminator."
+ );
+
+ // SAFETY: llvm::LLVMRustCreateTargetMachine copies pointed to data
+ let tm_ptr = unsafe {
+ llvm::LLVMRustCreateTargetMachine(
+ triple.as_ptr(),
+ cpu.as_ptr(),
+ features.as_ptr(),
+ abi.as_ptr(),
+ model,
+ reloc,
+ level,
+ use_soft_fp,
+ function_sections,
+ data_sections,
+ unique_section_names,
+ trap_unreachable,
+ singlethread,
+ asm_comments,
+ emit_stack_size_section,
+ relax_elf_relocations,
+ use_init_array,
+ split_dwarf_file.as_ptr(),
+ output_obj_file.as_ptr(),
+ debug_info_compression.as_ptr(),
+ force_emulated_tls,
+ args_cstr_buff.as_ptr() as *const c_char,
+ args_cstr_buff.len(),
+ )
+ };
+
+ NonNull::new(tm_ptr)
+ .map(|tm_unique| Self { tm_unique, phantom: PhantomData })
+ .ok_or_else(|| LlvmError::CreateTargetMachine { triple: SmallCStr::from(triple) })
+ }
+}
+
+impl Deref for OwnedTargetMachine {
+ type Target = llvm::TargetMachine;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
+ unsafe { self.tm_unique.as_ref() }
+ }
+}
+
+impl Drop for OwnedTargetMachine {
+ fn drop(&mut self) {
+ // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
+ // OwnedTargetMachine is not copyable so there is no double free or use after free
+ unsafe {
+ llvm::LLVMRustDisposeTargetMachine(self.tm_unique.as_mut());
+ }
+ }
+}
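The new owned_target_machine.rs is an instance of a general RAII pattern for owning a raw FFI pointer: `NonNull` encodes that the pointer is never null once constructed, `PhantomData` marks ownership of the pointee, and `Drop` is the single place the handle is released. Here is a minimal sketch of the same structure against a stand-in C API (`create_handle` and `destroy_handle` are hypothetical, not LLVM functions):

    use std::marker::PhantomData;
    use std::ptr::NonNull;

    struct Handle; // stand-in for an opaque FFI type like llvm::TargetMachine

    // Hypothetical stand-ins for LLVMRustCreateTargetMachine / LLVMRustDisposeTargetMachine.
    unsafe fn create_handle() -> *mut Handle {
        Box::into_raw(Box::new(Handle))
    }
    unsafe fn destroy_handle(h: *mut Handle) {
        drop(Box::from_raw(h));
    }

    struct OwnedHandle {
        raw: NonNull<Handle>,         // never null once constructed
        _marker: PhantomData<Handle>, // signals ownership of the pointee
    }

    impl OwnedHandle {
        fn new() -> Option<Self> {
            // NonNull::new maps a null return to None, mirroring how
            // OwnedTargetMachine::new turns null into an error.
            NonNull::new(unsafe { create_handle() }).map(|raw| Self { raw, _marker: PhantomData })
        }
    }

    impl Drop for OwnedHandle {
        fn drop(&mut self) {
            // Sound because `raw` came from create_handle and OwnedHandle is
            // neither Copy nor Clone, so no double free is possible.
            unsafe { destroy_handle(self.raw.as_ptr()) }
        }
    }

    fn main() {
        let _h = OwnedHandle::new().expect("creation failed");
    } // `_h` dropped here; destroy_handle runs exactly once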
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 47cc5bd52..c778a6e01 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -1,17 +1,22 @@
use crate::back::lto::ThinBuffer;
+use crate::back::owned_target_machine::OwnedTargetMachine;
use crate::back::profiling::{
selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler,
};
use crate::base;
use crate::common;
use crate::errors::{
- CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, WithLlvmError, WriteBytecode,
+ CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression,
+ WithLlvmError, WriteBytecode,
};
use crate::llvm::{self, DiagnosticInfo, PassManager};
use crate::llvm_util;
use crate::type_::Type;
use crate::LlvmCodegenBackend;
use crate::ModuleLlvm;
+use llvm::{
+ LLVMRustLLVMHasZlibCompressionForDebugSymbols, LLVMRustLLVMHasZstdCompressionForDebugSymbols,
+};
use rustc_codegen_ssa::back::link::ensure_removed;
use rustc_codegen_ssa::back::write::{
BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
@@ -94,8 +99,8 @@ pub fn write_output_file<'ll>(
}
}
-pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm::TargetMachine {
- let config = TargetMachineFactoryConfig { split_dwarf_file: None };
+pub fn create_informational_target_machine(sess: &Session) -> OwnedTargetMachine {
+ let config = TargetMachineFactoryConfig { split_dwarf_file: None, output_obj_file: None };
// Can't use query system here quite yet because this function is invoked before the query
// system/tcx is set up.
let features = llvm_util::global_llvm_features(sess, false);
@@ -103,7 +108,7 @@ pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm:
.unwrap_or_else(|err| llvm_err(sess.diagnostic(), err).raise())
}
-pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut llvm::TargetMachine {
+pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine {
let split_dwarf_file = if tcx.sess.target_can_use_split_dwarf() {
tcx.output_filenames(()).split_dwarf_path(
tcx.sess.split_debuginfo(),
@@ -113,7 +118,11 @@ pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut ll
} else {
None
};
- let config = TargetMachineFactoryConfig { split_dwarf_file };
+
+ let output_obj_file =
+ Some(tcx.output_filenames(()).temp_path(OutputType::Object, Some(mod_name)));
+ let config = TargetMachineFactoryConfig { split_dwarf_file, output_obj_file };
+
target_machine_factory(
&tcx.sess,
tcx.backend_optimization_level(()),
@@ -216,36 +225,73 @@ pub fn target_machine_factory(
let force_emulated_tls = sess.target.force_emulated_tls;
+ // Copy the exe path, followed by each arg, all into one buffer,
+ // null-terminating them so we can use them as null-terminated strings.
+ let args_cstr_buff = {
+ let mut args_cstr_buff: Vec<u8> = Vec::new();
+ let exe_path = std::env::current_exe().unwrap_or_default();
+ let exe_path_str = exe_path.into_os_string().into_string().unwrap_or_default();
+
+ args_cstr_buff.extend_from_slice(exe_path_str.as_bytes());
+ args_cstr_buff.push(0);
+
+ for arg in sess.expanded_args.iter() {
+ args_cstr_buff.extend_from_slice(arg.as_bytes());
+ args_cstr_buff.push(0);
+ }
+
+ args_cstr_buff
+ };
+
+ let debuginfo_compression = sess.opts.debuginfo_compression.to_string();
+ match sess.opts.debuginfo_compression {
+ rustc_session::config::DebugInfoCompression::Zlib => {
+ if !unsafe { LLVMRustLLVMHasZlibCompressionForDebugSymbols() } {
+ sess.emit_warning(UnknownCompression { algorithm: "zlib" });
+ }
+ }
+ rustc_session::config::DebugInfoCompression::Zstd => {
+ if !unsafe { LLVMRustLLVMHasZstdCompressionForDebugSymbols() } {
+ sess.emit_warning(UnknownCompression { algorithm: "zstd" });
+ }
+ }
+ rustc_session::config::DebugInfoCompression::None => {}
+ };
+ let debuginfo_compression = SmallCStr::new(&debuginfo_compression);
+
Arc::new(move |config: TargetMachineFactoryConfig| {
- let split_dwarf_file =
- path_mapping.map_prefix(config.split_dwarf_file.unwrap_or_default()).0;
- let split_dwarf_file = CString::new(split_dwarf_file.to_str().unwrap()).unwrap();
-
- let tm = unsafe {
- llvm::LLVMRustCreateTargetMachine(
- triple.as_ptr(),
- cpu.as_ptr(),
- features.as_ptr(),
- abi.as_ptr(),
- code_model,
- reloc_model,
- opt_level,
- use_softfp,
- ffunction_sections,
- fdata_sections,
- funique_section_names,
- trap_unreachable,
- singlethread,
- asm_comments,
- emit_stack_size_section,
- relax_elf_relocations,
- use_init_array,
- split_dwarf_file.as_ptr(),
- force_emulated_tls,
- )
+ let path_to_cstring_helper = |path: Option<PathBuf>| -> CString {
+ let path = path_mapping.map_prefix(path.unwrap_or_default()).0;
+ CString::new(path.to_str().unwrap()).unwrap()
};
- tm.ok_or_else(|| LlvmError::CreateTargetMachine { triple: triple.clone() })
+ let split_dwarf_file = path_to_cstring_helper(config.split_dwarf_file);
+ let output_obj_file = path_to_cstring_helper(config.output_obj_file);
+
+ OwnedTargetMachine::new(
+ &triple,
+ &cpu,
+ &features,
+ &abi,
+ code_model,
+ reloc_model,
+ opt_level,
+ use_softfp,
+ ffunction_sections,
+ fdata_sections,
+ funique_section_names,
+ trap_unreachable,
+ singlethread,
+ asm_comments,
+ emit_stack_size_section,
+ relax_elf_relocations,
+ use_init_array,
+ &split_dwarf_file,
+ &output_obj_file,
+ &debuginfo_compression,
+ force_emulated_tls,
+ &args_cstr_buff,
+ )
})
}
@@ -853,6 +899,27 @@ fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data:
asm
}
+fn target_is_apple(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
+ cgcx.opts.target_triple.triple().contains("-ios")
+ || cgcx.opts.target_triple.triple().contains("-darwin")
+ || cgcx.opts.target_triple.triple().contains("-tvos")
+ || cgcx.opts.target_triple.triple().contains("-watchos")
+}
+
+fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
+ cgcx.opts.target_triple.triple().contains("-aix")
+}
+
+pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static str {
+ if target_is_apple(cgcx) {
+ "__LLVM,__bitcode\0"
+ } else if target_is_aix(cgcx) {
+ ".ipa\0"
+ } else {
+ ".llvmbc\0"
+ }
+}
+
/// Embed the bitcode of an LLVM module in the LLVM module itself.
///
/// This is done primarily for iOS where it appears to be standard to compile C
@@ -913,11 +980,8 @@ unsafe fn embed_bitcode(
// Unfortunately, LLVM provides no way to set custom section flags. For ELF
// and COFF we emit the sections using module level inline assembly for that
// reason (see issue #90326 for historical background).
- let is_aix = cgcx.opts.target_triple.triple().contains("-aix");
- let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
- || cgcx.opts.target_triple.triple().contains("-darwin")
- || cgcx.opts.target_triple.triple().contains("-tvos")
- || cgcx.opts.target_triple.triple().contains("-watchos");
+ let is_aix = target_is_aix(cgcx);
+ let is_apple = target_is_apple(cgcx);
if is_apple
|| is_aix
|| cgcx.opts.target_triple.triple().starts_with("wasm")
@@ -932,13 +996,7 @@ unsafe fn embed_bitcode(
);
llvm::LLVMSetInitializer(llglobal, llconst);
- let section = if is_apple {
- "__LLVM,__bitcode\0"
- } else if is_aix {
- ".ipa\0"
- } else {
- ".llvmbc\0"
- };
+ let section = bitcode_section_name(cgcx);
llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
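A recurring detail in the write.rs changes is `args_cstr_buff`: the compiler invocation (the executable path followed by `sess.expanded_args`) is flattened into one buffer of consecutive NUL-terminated strings before being handed to `LLVMRustCreateTargetMachine`. A hedged sketch of that encoding, paired with an illustrative decoder (the real consumer is C++ code inside LLVM, so `decode_args` is purely for demonstration):

    // Encoder matching the construction above: each string is appended,
    // followed by a single NUL byte.
    fn encode_args(exe: &str, args: &[&str]) -> Vec<u8> {
        let mut buf = Vec::new();
        buf.extend_from_slice(exe.as_bytes());
        buf.push(0);
        for arg in args {
            buf.extend_from_slice(arg.as_bytes());
            buf.push(0);
        }
        buf
    }

    // Hypothetical decoder for the sketch. Splitting on NUL yields a trailing
    // empty segment, which the filter drops.
    fn decode_args(buf: &[u8]) -> Vec<String> {
        buf.split(|&b| b == 0)
            .filter(|s| !s.is_empty())
            .map(|s| String::from_utf8_lossy(s).into_owned())
            .collect()
    }

    fn main() {
        let buf = encode_args("rustc", &["--edition", "2021"]);
        assert_eq!(decode_args(&buf), ["rustc", "--edition", "2021"]);
    }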
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 36c098218..5254c3f9c 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -95,7 +95,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
unsafe {
llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
- let is_generic = instance.args.non_erasable_generics().next().is_some();
+ let is_generic =
+ instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
if is_generic {
// This is a monomorphization. Its expected visibility depends
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 24fd5bbf8..b4b2ab1e1 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -10,6 +10,7 @@ use crate::value::Value;
use cstr::cstr;
use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
+use rustc_codegen_ssa::errors as ssa_errors;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::base_n;
use rustc_data_structures::fx::FxHashMap;
@@ -159,9 +160,9 @@ pub unsafe fn create_module<'ll>(
// Ensure the data-layout values hardcoded remain the defaults.
if sess.target.is_builtin {
+ // tm is disposed by its drop impl
let tm = crate::back::write::create_informational_target_machine(tcx.sess);
- llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
- llvm::LLVMRustDisposeTargetMachine(tm);
+ llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, &tm);
let llvm_data_layout = llvm::LLVMGetDataLayoutStr(llmod);
let llvm_data_layout = str::from_utf8(CStr::from_ptr(llvm_data_layout).to_bytes())
@@ -1000,7 +1001,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
self.sess().emit_fatal(Spanned { span, node: err.into_diagnostic() })
} else {
- span_bug!(span, "failed to get layout for `{ty}`: {err:?}")
+ self.tcx.sess.emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err })
}
}
}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
index 7a82d05ce..763186a58 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -1,4 +1,4 @@
-use rustc_middle::mir::coverage::{CounterId, MappedExpressionIndex};
+use rustc_middle::mir::coverage::{CounterId, ExpressionId, Operand};
/// Must match the layout of `LLVMRustCounterKind`.
#[derive(Copy, Clone, Debug)]
@@ -30,11 +30,8 @@ pub struct Counter {
}
impl Counter {
- /// Constructs a new `Counter` of kind `Zero`. For this `CounterKind`, the
- /// `id` is not used.
- pub fn zero() -> Self {
- Self { kind: CounterKind::Zero, id: 0 }
- }
+ /// A `Counter` of kind `Zero`. For this counter kind, the `id` is not used.
+ pub(crate) const ZERO: Self = Self { kind: CounterKind::Zero, id: 0 };
/// Constructs a new `Counter` of kind `CounterValueReference`.
pub fn counter_value_reference(counter_id: CounterId) -> Self {
@@ -42,20 +39,16 @@ impl Counter {
}
/// Constructs a new `Counter` of kind `Expression`.
- pub fn expression(mapped_expression_index: MappedExpressionIndex) -> Self {
- Self { kind: CounterKind::Expression, id: mapped_expression_index.into() }
- }
-
- /// Returns true if the `Counter` kind is `Zero`.
- pub fn is_zero(&self) -> bool {
- matches!(self.kind, CounterKind::Zero)
+ pub(crate) fn expression(expression_id: ExpressionId) -> Self {
+ Self { kind: CounterKind::Expression, id: expression_id.as_u32() }
}
- /// An explicitly-named function to get the ID value, making it more obvious
- /// that the stored value is now 0-based.
- pub fn zero_based_id(&self) -> u32 {
- debug_assert!(!self.is_zero(), "`id` is undefined for CounterKind::Zero");
- self.id
+ pub(crate) fn from_operand(operand: Operand) -> Self {
+ match operand {
+ Operand::Zero => Self::ZERO,
+ Operand::Counter(id) => Self::counter_value_reference(id),
+ Operand::Expression(id) => Self::expression(id),
+ }
}
}
@@ -81,6 +74,11 @@ pub struct CounterExpression {
}
impl CounterExpression {
+ /// The dummy expression `(0 - 0)` has a representation of all zeroes,
+ /// making it marginally more efficient to initialize than `(0 + 0)`.
+ pub(crate) const DUMMY: Self =
+ Self { lhs: Counter::ZERO, kind: ExprKind::Subtract, rhs: Counter::ZERO };
+
pub fn new(lhs: Counter, kind: ExprKind, rhs: Counter) -> Self {
Self { kind, lhs, rhs }
}
@@ -172,7 +170,7 @@ impl CounterMappingRegion {
) -> Self {
Self {
counter,
- false_counter: Counter::zero(),
+ false_counter: Counter::ZERO,
file_id,
expanded_file_id: 0,
start_line,
@@ -220,8 +218,8 @@ impl CounterMappingRegion {
end_col: u32,
) -> Self {
Self {
- counter: Counter::zero(),
- false_counter: Counter::zero(),
+ counter: Counter::ZERO,
+ false_counter: Counter::ZERO,
file_id,
expanded_file_id,
start_line,
@@ -243,8 +241,8 @@ impl CounterMappingRegion {
end_col: u32,
) -> Self {
Self {
- counter: Counter::zero(),
- false_counter: Counter::zero(),
+ counter: Counter::ZERO,
+ false_counter: Counter::ZERO,
file_id,
expanded_file_id: 0,
start_line,
@@ -268,7 +266,7 @@ impl CounterMappingRegion {
) -> Self {
Self {
counter,
- false_counter: Counter::zero(),
+ false_counter: Counter::ZERO,
file_id,
expanded_file_id: 0,
start_line,
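The ffi.rs diff replaces the `Counter::zero()` constructor with an associated `ZERO` constant and funnels all operand conversion through `Counter::from_operand`. A self-contained sketch of that mapping, with plain `u32` ids standing in for the index newtypes from `rustc_middle::mir::coverage`:

    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    enum CounterKind { Zero, CounterValueReference, Expression }

    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    struct Counter { kind: CounterKind, id: u32 }

    enum Operand { Zero, Counter(u32), Expression(u32) }

    impl Counter {
        // For kind Zero the id is unused, matching the ZERO const in the diff.
        const ZERO: Self = Self { kind: CounterKind::Zero, id: 0 };

        fn from_operand(operand: Operand) -> Self {
            match operand {
                Operand::Zero => Self::ZERO,
                Operand::Counter(id) => Self { kind: CounterKind::CounterValueReference, id },
                Operand::Expression(id) => Self { kind: CounterKind::Expression, id },
            }
        }
    }

    fn main() {
        assert_eq!(Counter::from_operand(Operand::Zero), Counter::ZERO);
        assert_eq!(
            Counter::from_operand(Operand::Counter(7)),
            Counter { kind: CounterKind::CounterValueReference, id: 7 }
        );
        assert_eq!(
            Counter::from_operand(Operand::Expression(3)),
            Counter { kind: CounterKind::Expression, id: 3 }
        );
    }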
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
index f1e68af25..e83110dca 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -1,10 +1,8 @@
use crate::coverageinfo::ffi::{Counter, CounterExpression, ExprKind};
-use rustc_index::{IndexSlice, IndexVec};
-use rustc_middle::bug;
-use rustc_middle::mir::coverage::{
- CodeRegion, CounterId, ExpressionId, MappedExpressionIndex, Op, Operand,
-};
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_index::IndexVec;
+use rustc_middle::mir::coverage::{CodeRegion, CounterId, ExpressionId, Op, Operand};
use rustc_middle::ty::Instance;
use rustc_middle::ty::TyCtxt;
@@ -128,6 +126,58 @@ impl<'tcx> FunctionCoverage<'tcx> {
self.unreachable_regions.push(region)
}
+ /// Perform some simplifications to make the final coverage mappings
+ /// slightly smaller.
+ ///
+ /// This method mainly exists to preserve the simplifications that were
+ /// already being performed by the Rust-side expression renumbering, so that
+ /// the resulting coverage mappings don't get worse.
+ pub(crate) fn simplify_expressions(&mut self) {
+ // The set of expressions that either were optimized out entirely, or
+ // have zero as both of their operands, and will therefore always have
+ // a value of zero. Other expressions that refer to these as operands
+ // can have those operands replaced with `Operand::Zero`.
+ let mut zero_expressions = FxIndexSet::default();
+
+ // For each expression, perform simplifications based on lower-numbered
+ // expressions, and then update the set of always-zero expressions if
+ // necessary.
+ // (By construction, expressions can only refer to other expressions
+ // that have lower IDs, so one simplification pass is sufficient.)
+ for (id, maybe_expression) in self.expressions.iter_enumerated_mut() {
+ let Some(expression) = maybe_expression else {
+ // If an expression is missing, it must have been optimized away,
+ // so any operand that refers to it can be replaced with zero.
+ zero_expressions.insert(id);
+ continue;
+ };
+
+ // If an operand refers to an expression that is always zero, then
+ // that operand can be replaced with `Operand::Zero`.
+ let maybe_set_operand_to_zero = |operand: &mut Operand| match &*operand {
+ Operand::Expression(id) if zero_expressions.contains(id) => {
+ *operand = Operand::Zero;
+ }
+ _ => (),
+ };
+ maybe_set_operand_to_zero(&mut expression.lhs);
+ maybe_set_operand_to_zero(&mut expression.rhs);
+
+ // Coverage counter values cannot be negative, so if an expression
+ // involves subtraction from zero, assume that its RHS must also be zero.
+ // (Do this after simplifications that could set the LHS to zero.)
+ if let Expression { lhs: Operand::Zero, op: Op::Subtract, .. } = expression {
+ expression.rhs = Operand::Zero;
+ }
+
+ // After the above simplifications, if both operands are zero, then
+ // we know that this expression is always zero too.
+ if let Expression { lhs: Operand::Zero, rhs: Operand::Zero, .. } = expression {
+ zero_expressions.insert(id);
+ }
+ }
+ }
+
/// Return the source hash, generated from the HIR node structure, and used to indicate whether
/// or not the source code structure changed between different compilations.
pub fn source_hash(&self) -> u64 {
@@ -146,8 +196,14 @@ impl<'tcx> FunctionCoverage<'tcx> {
self.instance
);
+ let counter_expressions = self.counter_expressions();
+ // Expression IDs are indices into `self.expressions`, and on the LLVM
+ // side they will be treated as indices into `counter_expressions`, so
+ // the two vectors should correspond 1:1.
+ assert_eq!(self.expressions.len(), counter_expressions.len());
+
let counter_regions = self.counter_regions();
- let (counter_expressions, expression_regions) = self.expressions_with_regions();
+ let expression_regions = self.expression_regions();
let unreachable_regions = self.unreachable_regions();
let counter_regions =
@@ -163,149 +219,53 @@ impl<'tcx> FunctionCoverage<'tcx> {
})
}
- fn expressions_with_regions(
- &self,
- ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
- let mut counter_expressions = Vec::with_capacity(self.expressions.len());
- let mut expression_regions = Vec::with_capacity(self.expressions.len());
- let mut new_indexes = IndexVec::from_elem_n(None, self.expressions.len());
+ /// Convert this function's coverage expression data into a form that can be
+ /// passed through FFI to LLVM.
+ fn counter_expressions(&self) -> Vec<CounterExpression> {
+ // We know that LLVM will optimize out any unused expressions before
+ // producing the final coverage map, so there's no need to do the same
+ // thing on the Rust side unless we're confident we can do much better.
+ // (See `CounterExpressionsMinimizer` in `CoverageMappingWriter.cpp`.)
- // This closure converts any `Expression` operand (`lhs` or `rhs` of the `Op::Add` or
- // `Op::Subtract` operation) into its native `llvm::coverage::Counter::CounterKind` type
- // and value.
- //
- // Expressions will be returned from this function in a sequential vector (array) of
- // `CounterExpression`, so the expression IDs must be mapped from their original,
- // potentially sparse set of indexes.
- //
- // An `Expression` as an operand will have already been encountered as an `Expression` with
- // operands, so its new_index will already have been generated (as a 1-up index value).
- // (If an `Expression` as an operand does not have a corresponding new_index, it was
- // probably optimized out, after the expression was injected into the MIR, so it will
- // get a `CounterKind::Zero` instead.)
- //
- // In other words, an `Expression`s at any given index can include other expressions as
- // operands, but expression operands can only come from the subset of expressions having
- // `expression_index`s lower than the referencing `Expression`. Therefore, it is
- // reasonable to look up the new index of an expression operand while the `new_indexes`
- // vector is only complete up to the current `ExpressionIndex`.
- type NewIndexes = IndexSlice<ExpressionId, Option<MappedExpressionIndex>>;
- let id_to_counter = |new_indexes: &NewIndexes, operand: Operand| match operand {
- Operand::Zero => Some(Counter::zero()),
- Operand::Counter(id) => Some(Counter::counter_value_reference(id)),
- Operand::Expression(id) => {
- self.expressions
- .get(id)
- .expect("expression id is out of range")
- .as_ref()
- // If an expression was optimized out, assume it would have produced a count
- // of zero. This ensures that expressions dependent on optimized-out
- // expressions are still valid.
- .map_or(Some(Counter::zero()), |_| new_indexes[id].map(Counter::expression))
- }
- };
-
- for (original_index, expression) in
- self.expressions.iter_enumerated().filter_map(|(original_index, entry)| {
- // Option::map() will return None to filter out missing expressions. This may happen
- // if, for example, a MIR-instrumented expression is removed during an optimization.
- entry.as_ref().map(|expression| (original_index, expression))
- })
- {
- let optional_region = &expression.region;
- let Expression { lhs, op, rhs, .. } = *expression;
-
- if let Some(Some((lhs_counter, mut rhs_counter))) = id_to_counter(&new_indexes, lhs)
- .map(|lhs_counter| {
- id_to_counter(&new_indexes, rhs).map(|rhs_counter| (lhs_counter, rhs_counter))
- })
- {
- if lhs_counter.is_zero() && op.is_subtract() {
- // The left side of a subtraction was probably optimized out. As an example,
- // a branch condition might be evaluated as a constant expression, and the
- // branch could be removed, dropping unused counters in the process.
- //
- // Since counters are unsigned, we must assume the result of the expression
- // can be no more and no less than zero. An expression known to evaluate to zero
- // does not need to be added to the coverage map.
- //
- // Coverage test `loops_branches.rs` includes multiple variations of branches
- // based on constant conditional (literal `true` or `false`), and demonstrates
- // that the expected counts are still correct.
- debug!(
- "Expression subtracts from zero (assume unreachable): \
- original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
- original_index, lhs, op, rhs, optional_region,
- );
- rhs_counter = Counter::zero();
+ self.expressions
+ .iter()
+ .map(|expression| match expression {
+ None => {
+ // This expression ID was allocated, but we never saw the
+ // actual expression, so it must have been optimized out.
+ // Replace it with a dummy expression, and let LLVM take
+ // care of omitting it from the expression list.
+ CounterExpression::DUMMY
}
- debug_assert!(
- lhs_counter.is_zero()
- // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
- || ((lhs_counter.zero_based_id() as usize)
- <= usize::max(self.counters.len(), self.expressions.len())),
- "lhs id={} > both counters.len()={} and expressions.len()={}
- ({:?} {:?} {:?})",
- lhs_counter.zero_based_id(),
- self.counters.len(),
- self.expressions.len(),
- lhs_counter,
- op,
- rhs_counter,
- );
-
- debug_assert!(
- rhs_counter.is_zero()
- // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
- || ((rhs_counter.zero_based_id() as usize)
- <= usize::max(self.counters.len(), self.expressions.len())),
- "rhs id={} > both counters.len()={} and expressions.len()={}
- ({:?} {:?} {:?})",
- rhs_counter.zero_based_id(),
- self.counters.len(),
- self.expressions.len(),
- lhs_counter,
- op,
- rhs_counter,
- );
-
- // Both operands exist. `Expression` operands exist in `self.expressions` and have
- // been assigned a `new_index`.
- let mapped_expression_index =
- MappedExpressionIndex::from(counter_expressions.len());
- let expression = CounterExpression::new(
- lhs_counter,
- match op {
- Op::Add => ExprKind::Add,
- Op::Subtract => ExprKind::Subtract,
- },
- rhs_counter,
- );
- debug!(
- "Adding expression {:?} = {:?}, region: {:?}",
- mapped_expression_index, expression, optional_region
- );
- counter_expressions.push(expression);
- new_indexes[original_index] = Some(mapped_expression_index);
- if let Some(region) = optional_region {
- expression_regions.push((Counter::expression(mapped_expression_index), region));
+ &Some(Expression { lhs, op, rhs, .. }) => {
+ // Convert the operands and operator as normal.
+ CounterExpression::new(
+ Counter::from_operand(lhs),
+ match op {
+ Op::Add => ExprKind::Add,
+ Op::Subtract => ExprKind::Subtract,
+ },
+ Counter::from_operand(rhs),
+ )
}
- } else {
- bug!(
- "expression has one or more missing operands \
- original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
- original_index,
- lhs,
- op,
- rhs,
- optional_region,
- );
- }
- }
- (counter_expressions, expression_regions.into_iter())
+ })
+ .collect::<Vec<_>>()
+ }
+
+ fn expression_regions(&self) -> Vec<(Counter, &CodeRegion)> {
+ // Find all of the expression IDs that weren't optimized out AND have
+ // an attached code region, and return the corresponding mapping as a
+ // counter/region pair.
+ self.expressions
+ .iter_enumerated()
+ .filter_map(|(id, expression)| {
+ let code_region = expression.as_ref()?.region.as_ref()?;
+ Some((Counter::expression(id), code_region))
+ })
+ .collect::<Vec<_>>()
}
fn unreachable_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
- self.unreachable_regions.iter().map(|region| (Counter::zero(), region))
+ self.unreachable_regions.iter().map(|region| (Counter::ZERO, region))
}
}
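The new `simplify_expressions` above relies on one structural invariant: an expression may only refer to lower-numbered expressions, so a single forward pass suffices to propagate "always zero" facts. A condensed sketch of that pass on a simplified expression table (`HashSet` and `usize` ids stand in for `FxIndexSet` and `ExpressionId`):

    use std::collections::HashSet;

    #[derive(Clone, Copy)]
    enum Operand { Zero, Counter(u32), Expression(usize) }

    #[derive(Clone, Copy)]
    enum Op { Add, Subtract }

    struct Expression { lhs: Operand, op: Op, rhs: Operand }

    fn zero_expressions(exprs: &mut [Option<Expression>]) -> HashSet<usize> {
        let mut zero = HashSet::new();
        for id in 0..exprs.len() {
            let Some(expr) = exprs[id].as_mut() else {
                zero.insert(id); // optimized out, so treat it as zero
                continue;
            };
            // Replace operands that refer to known-zero expressions.
            for operand in [&mut expr.lhs, &mut expr.rhs] {
                if let Operand::Expression(op_id) = *operand {
                    if zero.contains(&op_id) {
                        *operand = Operand::Zero;
                    }
                }
            }
            // Counters are unsigned: (0 - rhs) can only be valid if rhs is 0 too.
            if let (Operand::Zero, Op::Subtract) = (expr.lhs, expr.op) {
                expr.rhs = Operand::Zero;
            }
            // If both operands are now zero, the expression is always zero.
            if let (Operand::Zero, Operand::Zero) = (expr.lhs, expr.rhs) {
                zero.insert(id);
            }
        }
        zero
    }

    fn main() {
        let mut exprs = vec![
            None, // id 0: optimized out
            Some(Expression { lhs: Operand::Zero, op: Op::Subtract, rhs: Operand::Counter(1) }),
            Some(Expression { lhs: Operand::Expression(0), op: Op::Add, rhs: Operand::Expression(1) }),
        ];
        let zero = zero_expressions(&mut exprs);
        assert!(zero.contains(&2)); // both of its operands were rewritten to Zero
    }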
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 97a99e510..d4e775256 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -1,13 +1,14 @@
use crate::common::CodegenCx;
use crate::coverageinfo;
-use crate::coverageinfo::ffi::{Counter, CounterExpression, CounterMappingRegion};
+use crate::coverageinfo::ffi::CounterMappingRegion;
+use crate::coverageinfo::map_data::FunctionCoverage;
use crate::llvm;
use rustc_codegen_ssa::traits::ConstMethods;
use rustc_data_structures::fx::FxIndexSet;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
-use rustc_llvm::RustString;
+use rustc_index::IndexVec;
use rustc_middle::bug;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::coverage::CodeRegion;
@@ -55,21 +56,21 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
return;
}
- let mut mapgen = CoverageMapGenerator::new(tcx);
+ let mut global_file_table = GlobalFileTable::new(tcx);
// Encode coverage mappings and generate function records
let mut function_data = Vec::new();
- for (instance, function_coverage) in function_coverage_map {
+ for (instance, mut function_coverage) in function_coverage_map {
debug!("Generate function coverage for {}, {:?}", cx.codegen_unit.name(), instance);
+ function_coverage.simplify_expressions();
+ let function_coverage = function_coverage;
+
let mangled_function_name = tcx.symbol_name(instance).name;
let source_hash = function_coverage.source_hash();
let is_used = function_coverage.is_used();
- let (expressions, counter_regions) =
- function_coverage.get_expressions_and_counter_regions();
- let coverage_mapping_buffer = llvm::build_byte_buffer(|coverage_mapping_buffer| {
- mapgen.write_coverage_mapping(expressions, counter_regions, coverage_mapping_buffer);
- });
+ let coverage_mapping_buffer =
+ encode_mappings_for_function(&mut global_file_table, &function_coverage);
if coverage_mapping_buffer.is_empty() {
if function_coverage.is_used() {
@@ -87,19 +88,14 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
}
// Encode all filenames referenced by counters/expressions in this module
- let filenames_buffer = llvm::build_byte_buffer(|filenames_buffer| {
- coverageinfo::write_filenames_section_to_buffer(
- mapgen.filenames.iter().map(Symbol::as_str),
- filenames_buffer,
- );
- });
+ let filenames_buffer = global_file_table.into_filenames_buffer();
let filenames_size = filenames_buffer.len();
let filenames_val = cx.const_bytes(&filenames_buffer);
let filenames_ref = coverageinfo::hash_bytes(&filenames_buffer);
// Generate the LLVM IR representation of the coverage map and store it in a well-known global
- let cov_data_val = mapgen.generate_coverage_map(cx, version, filenames_size, filenames_val);
+ let cov_data_val = generate_coverage_map(cx, version, filenames_size, filenames_val);
let covfun_section_name = coverageinfo::covfun_section_name(cx);
for (mangled_function_name, source_hash, is_used, coverage_mapping_buffer) in function_data {
@@ -118,13 +114,13 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
}
-struct CoverageMapGenerator {
- filenames: FxIndexSet<Symbol>,
+struct GlobalFileTable {
+ global_file_table: FxIndexSet<Symbol>,
}
-impl CoverageMapGenerator {
+impl GlobalFileTable {
fn new(tcx: TyCtxt<'_>) -> Self {
- let mut filenames = FxIndexSet::default();
+ let mut global_file_table = FxIndexSet::default();
// LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
// requires setting the first filename to the compilation directory.
// Since rustc generates coverage maps with relative paths, the
@@ -133,94 +129,114 @@ impl CoverageMapGenerator {
let working_dir = Symbol::intern(
&tcx.sess.opts.working_dir.remapped_path_if_available().to_string_lossy(),
);
- filenames.insert(working_dir);
- Self { filenames }
+ global_file_table.insert(working_dir);
+ Self { global_file_table }
}
- /// Using the `expressions` and `counter_regions` collected for the current function, generate
- /// the `mapping_regions` and `virtual_file_mapping`, and capture any new filenames. Then use
- /// LLVM APIs to encode the `virtual_file_mapping`, `expressions`, and `mapping_regions` into
- /// the given `coverage_mapping` byte buffer, compliant with the LLVM Coverage Mapping format.
- fn write_coverage_mapping<'a>(
- &mut self,
- expressions: Vec<CounterExpression>,
- counter_regions: impl Iterator<Item = (Counter, &'a CodeRegion)>,
- coverage_mapping_buffer: &RustString,
- ) {
- let mut counter_regions = counter_regions.collect::<Vec<_>>();
- if counter_regions.is_empty() {
- return;
- }
+ fn global_file_id_for_file_name(&mut self, file_name: Symbol) -> u32 {
+ let (global_file_id, _) = self.global_file_table.insert_full(file_name);
+ global_file_id as u32
+ }
- let mut virtual_file_mapping = Vec::new();
- let mut mapping_regions = Vec::new();
- let mut current_file_name = None;
- let mut current_file_id = 0;
-
- // Convert the list of (Counter, CodeRegion) pairs to an array of `CounterMappingRegion`, sorted
- // by filename and position. Capture any new files to compute the `CounterMappingRegion`s
- // `file_id` (indexing files referenced by the current function), and construct the
- // function-specific `virtual_file_mapping` from `file_id` to its index in the module's
- // `filenames` array.
- counter_regions.sort_unstable_by_key(|(_counter, region)| *region);
- for (counter, region) in counter_regions {
- let CodeRegion { file_name, start_line, start_col, end_line, end_col } = *region;
- let same_file = current_file_name.is_some_and(|p| p == file_name);
- if !same_file {
- if current_file_name.is_some() {
- current_file_id += 1;
- }
- current_file_name = Some(file_name);
- debug!(" file_id: {} = '{:?}'", current_file_id, file_name);
- let (filenames_index, _) = self.filenames.insert_full(file_name);
- virtual_file_mapping.push(filenames_index as u32);
- }
- debug!("Adding counter {:?} to map for {:?}", counter, region);
+ fn into_filenames_buffer(self) -> Vec<u8> {
+ // This method takes `self` so that the caller can't accidentally
+ // modify the original file table after encoding it into a buffer.
+
+ llvm::build_byte_buffer(|buffer| {
+ coverageinfo::write_filenames_section_to_buffer(
+ self.global_file_table.iter().map(Symbol::as_str),
+ buffer,
+ );
+ })
+ }
+}
+
+/// Using the expressions and counter regions collected for a single function,
+/// generate the variable-sized payload of its corresponding `__llvm_covfun`
+/// entry. The payload is returned as a vector of bytes.
+///
+/// Newly-encountered filenames will be added to the global file table.
+fn encode_mappings_for_function(
+ global_file_table: &mut GlobalFileTable,
+ function_coverage: &FunctionCoverage<'_>,
+) -> Vec<u8> {
+ let (expressions, counter_regions) = function_coverage.get_expressions_and_counter_regions();
+
+ let mut counter_regions = counter_regions.collect::<Vec<_>>();
+ if counter_regions.is_empty() {
+ return Vec::new();
+ }
+
+ let mut virtual_file_mapping = IndexVec::<u32, u32>::new();
+ let mut mapping_regions = Vec::with_capacity(counter_regions.len());
+
+ // Sort the list of (counter, region) mapping pairs by region, so that they
+ // can be grouped by filename. Prepare file IDs for each filename, and
+ // prepare the mapping data so that we can pass it through FFI to LLVM.
+ counter_regions.sort_by_key(|(_counter, region)| *region);
+ for counter_regions_for_file in
+ counter_regions.group_by(|(_, a), (_, b)| a.file_name == b.file_name)
+ {
+ // Look up (or allocate) the global file ID for this filename.
+ let file_name = counter_regions_for_file[0].1.file_name;
+ let global_file_id = global_file_table.global_file_id_for_file_name(file_name);
+
+ // Associate that global file ID with a local file ID for this function.
+ let local_file_id: u32 = virtual_file_mapping.push(global_file_id);
+ debug!(" file id: local {local_file_id} => global {global_file_id} = '{file_name:?}'");
+
+ // For each counter/region pair in this function+file, convert it to a
+ // form suitable for FFI.
+ for &(counter, region) in counter_regions_for_file {
+ let CodeRegion { file_name: _, start_line, start_col, end_line, end_col } = *region;
+
+ debug!("Adding counter {counter:?} to map for {region:?}");
mapping_regions.push(CounterMappingRegion::code_region(
counter,
- current_file_id,
+ local_file_id,
start_line,
start_col,
end_line,
end_col,
));
}
+ }
- // Encode and append the current function's coverage mapping data
+ // Encode the function's coverage mappings into a buffer.
+ llvm::build_byte_buffer(|buffer| {
coverageinfo::write_mapping_to_buffer(
- virtual_file_mapping,
+ virtual_file_mapping.raw,
expressions,
mapping_regions,
- coverage_mapping_buffer,
+ buffer,
);
- }
+ })
+}
- /// Construct coverage map header and the array of function records, and combine them into the
- /// coverage map. Save the coverage map data into the LLVM IR as a static global using a
- /// specific, well-known section and name.
- fn generate_coverage_map<'ll>(
- self,
- cx: &CodegenCx<'ll, '_>,
- version: u32,
- filenames_size: usize,
- filenames_val: &'ll llvm::Value,
- ) -> &'ll llvm::Value {
- debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
-
- // Create the coverage data header (Note, fields 0 and 2 are now always zero,
- // as of `llvm::coverage::CovMapVersion::Version4`.)
- let zero_was_n_records_val = cx.const_u32(0);
- let filenames_size_val = cx.const_u32(filenames_size as u32);
- let zero_was_coverage_size_val = cx.const_u32(0);
- let version_val = cx.const_u32(version);
- let cov_data_header_val = cx.const_struct(
- &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
- /*packed=*/ false,
- );
+/// Construct coverage map header and the array of function records, and combine them into the
+/// coverage map. Save the coverage map data into the LLVM IR as a static global using a
+/// specific, well-known section and name.
+fn generate_coverage_map<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ version: u32,
+ filenames_size: usize,
+ filenames_val: &'ll llvm::Value,
+) -> &'ll llvm::Value {
+ debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
+
+ // Create the coverage data header (Note, fields 0 and 2 are now always zero,
+ // as of `llvm::coverage::CovMapVersion::Version4`.)
+ let zero_was_n_records_val = cx.const_u32(0);
+ let filenames_size_val = cx.const_u32(filenames_size as u32);
+ let zero_was_coverage_size_val = cx.const_u32(0);
+ let version_val = cx.const_u32(version);
+ let cov_data_header_val = cx.const_struct(
+ &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
+ /*packed=*/ false,
+ );
- // Create the complete LLVM coverage data value to add to the LLVM IR
- cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
- }
+ // Create the complete LLVM coverage data value to add to the LLVM IR
+ cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
}
/// Construct a function record and combine it with the function's coverage mapping data.
@@ -317,10 +333,10 @@ fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
{
let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
- // If a function is marked `#[no_coverage]`, then skip generating a
+ // If a function is marked `#[coverage(off)]`, then skip generating a
// dead code stub for it.
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
- debug!("skipping unused fn marked #[no_coverage]: {:?}", non_codegenned_def_id);
+ debug!("skipping unused fn marked #[coverage(off)]: {:?}", non_codegenned_def_id);
continue;
}
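`encode_mappings_for_function` above groups the sorted (counter, region) pairs by file name with `slice::group_by` (unstable at the time) and numbers each group in order of first appearance to build `virtual_file_mapping`. A stable-Rust sketch of the same grouping, with string file names and `u32` counters standing in for the real types:

    // Groups consecutive pairs that share a file name; the input must be
    // pre-sorted by file name, matching the sort_by_key in the diff above.
    fn group_and_number<'a>(pairs: &[(u32, &'a str)]) -> Vec<(u32, &'a str, Vec<u32>)> {
        let mut out: Vec<(u32, &'a str, Vec<u32>)> = Vec::new();
        let mut i = 0;
        while i < pairs.len() {
            let file = pairs[i].1;
            let mut counters = Vec::new();
            while i < pairs.len() && pairs[i].1 == file {
                counters.push(pairs[i].0);
                i += 1;
            }
            // The local file id is the group's position, like the index
            // returned by virtual_file_mapping.push() above.
            let local_file_id = out.len() as u32;
            out.push((local_file_id, file, counters));
        }
        out
    }

    fn main() {
        let pairs = [(1, "a.rs"), (2, "a.rs"), (3, "b.rs")];
        assert_eq!(
            group_and_number(&pairs),
            vec![(0, "a.rs", vec![1, 2]), (1, "b.rs", vec![3])]
        );
    }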
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index 621fd36b2..c70cb670e 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -16,7 +16,7 @@ use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_llvm::RustString;
use rustc_middle::bug;
-use rustc_middle::mir::coverage::{CodeRegion, CounterId, CoverageKind, ExpressionId, Op, Operand};
+use rustc_middle::mir::coverage::{CounterId, CoverageKind};
use rustc_middle::mir::Coverage;
use rustc_middle::ty;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
@@ -104,144 +104,67 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) {
let bx = self;
+ let Some(coverage_context) = bx.coverage_context() else { return };
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ let func_coverage = coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(bx.tcx(), instance));
+
let Coverage { kind, code_region } = coverage.clone();
match kind {
CoverageKind::Counter { function_source_hash, id } => {
- if bx.set_function_source_hash(instance, function_source_hash) {
- // If `set_function_source_hash()` returned true, the coverage map is enabled,
- // so continue adding the counter.
- if let Some(code_region) = code_region {
- // Note: Some counters do not have code regions, but may still be referenced
- // from expressions. In that case, don't add the counter to the coverage map,
- // but do inject the counter intrinsic.
- bx.add_coverage_counter(instance, id, code_region);
- }
-
- let coverageinfo = bx.tcx().coverageinfo(instance.def);
-
- let fn_name = bx.get_pgo_func_name_var(instance);
- let hash = bx.const_u64(function_source_hash);
- let num_counters = bx.const_u32(coverageinfo.num_counters);
- let index = bx.const_u32(id.as_u32());
+ debug!(
+ "ensuring function source hash is set for instance={:?}; function_source_hash={}",
+ instance, function_source_hash,
+ );
+ func_coverage.set_function_source_hash(function_source_hash);
+
+ if let Some(code_region) = code_region {
+ // Note: Some counters do not have code regions, but may still be referenced
+ // from expressions. In that case, don't add the counter to the coverage map,
+ // but do inject the counter intrinsic.
debug!(
- "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
- fn_name, hash, num_counters, index,
+ "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
+ instance, id, code_region,
);
- bx.instrprof_increment(fn_name, hash, num_counters, index);
+ func_coverage.add_counter(id, code_region);
}
+ // We need to explicitly drop the `RefMut` before calling into `instrprof_increment`,
+ // as that needs an exclusive borrow.
+ drop(coverage_map);
+
+ let coverageinfo = bx.tcx().coverageinfo(instance.def);
+
+ let fn_name = bx.get_pgo_func_name_var(instance);
+ let hash = bx.const_u64(function_source_hash);
+ let num_counters = bx.const_u32(coverageinfo.num_counters);
+ let index = bx.const_u32(id.as_u32());
+ debug!(
+ "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+ fn_name, hash, num_counters, index,
+ );
+ bx.instrprof_increment(fn_name, hash, num_counters, index);
}
CoverageKind::Expression { id, lhs, op, rhs } => {
- bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region);
+ debug!(
+ "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; region: {:?}",
+ instance, id, lhs, op, rhs, code_region,
+ );
+ func_coverage.add_counter_expression(id, lhs, op, rhs, code_region);
}
CoverageKind::Unreachable => {
- bx.add_coverage_unreachable(
- instance,
- code_region.expect("unreachable regions always have code regions"),
+ let code_region =
+ code_region.expect("unreachable regions always have code regions");
+ debug!(
+ "adding unreachable code to coverage_map: instance={:?}, at {:?}",
+ instance, code_region,
);
+ func_coverage.add_unreachable_region(code_region);
}
}
}
}
-// These methods used to be part of trait `CoverageInfoBuilderMethods`, but
-// after moving most coverage code out of SSA they are now just ordinary methods.
-impl<'tcx> Builder<'_, '_, 'tcx> {
- /// Returns true if the function source hash was added to the coverage map (even if it had
- /// already been added, for this instance). Returns false *only* if `-C instrument-coverage` is
- /// not enabled (a coverage map is not being generated).
- fn set_function_source_hash(
- &mut self,
- instance: Instance<'tcx>,
- function_source_hash: u64,
- ) -> bool {
- if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "ensuring function source hash is set for instance={:?}; function_source_hash={}",
- instance, function_source_hash,
- );
- let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
- coverage_map
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .set_function_source_hash(function_source_hash);
- true
- } else {
- false
- }
- }
-
- /// Returns true if the counter was added to the coverage map; false if `-C instrument-coverage`
- /// is not enabled (a coverage map is not being generated).
- fn add_coverage_counter(
- &mut self,
- instance: Instance<'tcx>,
- id: CounterId,
- region: CodeRegion,
- ) -> bool {
- if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
- instance, id, region,
- );
- let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
- coverage_map
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_counter(id, region);
- true
- } else {
- false
- }
- }
-
- /// Returns true if the expression was added to the coverage map; false if
- /// `-C instrument-coverage` is not enabled (a coverage map is not being generated).
- fn add_coverage_counter_expression(
- &mut self,
- instance: Instance<'tcx>,
- id: ExpressionId,
- lhs: Operand,
- op: Op,
- rhs: Operand,
- region: Option<CodeRegion>,
- ) -> bool {
- if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; \
- region: {:?}",
- instance, id, lhs, op, rhs, region,
- );
- let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
- coverage_map
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_counter_expression(id, lhs, op, rhs, region);
- true
- } else {
- false
- }
- }
-
- /// Returns true if the region was added to the coverage map; false if `-C instrument-coverage`
- /// is not enabled (a coverage map is not being generated).
- fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool {
- if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "adding unreachable code to coverage_map: instance={:?}, at {:?}",
- instance, region,
- );
- let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
- coverage_map
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_unreachable_region(region);
- true
- } else {
- false
- }
- }
-}
-
fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<'tcx> {
let tcx = cx.tcx;
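
The refactor above folds four per-kind helper methods into a single borrow of
`function_coverage_map`, which is then dropped explicitly before the intrinsic call. A minimal
sketch of that borrow discipline, using stand-in types rather than the compiler's:

    use std::cell::RefCell;
    use std::collections::HashMap;

    struct FunctionCoverage {
        source_hash: u64,
    }

    fn set_hash(map: &RefCell<HashMap<u64, FunctionCoverage>>, instance: u64, hash: u64) {
        let mut borrow = map.borrow_mut();
        borrow
            .entry(instance)
            .or_insert_with(|| FunctionCoverage { source_hash: 0 })
            .source_hash = hash;
        // Release the RefMut before doing anything that needs another exclusive
        // borrow, the analogue of calling `instrprof_increment` above.
        drop(borrow);
    }

    fn main() {
        let map = RefCell::new(HashMap::new());
        set_hash(&map, 1, 0xDEAD_BEEF);
        assert_eq!(map.borrow()[&1].source_hash, 0xDEAD_BEEF);
    }
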
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index d174a3593..aff764f02 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -20,7 +20,7 @@ pub fn compute_mir_scopes<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
instance: Instance<'tcx>,
mir: &Body<'tcx>,
- debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+ debug_context: &mut FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>,
) {
// Find all scopes with variables defined in them.
let variables = if cx.sess().opts.debuginfo == DebugInfo::Full {
@@ -51,7 +51,7 @@ fn make_mir_scope<'ll, 'tcx>(
instance: Instance<'tcx>,
mir: &Body<'tcx>,
variables: &Option<BitSet<SourceScope>>,
- debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+ debug_context: &mut FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>,
instantiated: &mut BitSet<SourceScope>,
scope: SourceScope,
) {
@@ -68,7 +68,7 @@ fn make_mir_scope<'ll, 'tcx>(
let file = cx.sess().source_map().lookup_source_file(mir.span.lo());
debug_context.scopes[scope] = DebugScope {
file_start_pos: file.start_pos,
- file_end_pos: file.end_pos,
+ file_end_pos: file.end_position(),
..debug_context.scopes[scope]
};
instantiated.insert(scope);
@@ -86,27 +86,31 @@ fn make_mir_scope<'ll, 'tcx>(
let loc = cx.lookup_debug_loc(scope_data.span.lo());
let file_metadata = file_metadata(cx, &loc.file);
- let dbg_scope = match scope_data.inlined {
+ let parent_dbg_scope = match scope_data.inlined {
Some((callee, _)) => {
// FIXME(eddyb) this would be `self.monomorphize(&callee)`
// if this is moved to `rustc_codegen_ssa::mir::debuginfo`.
- let callee = cx.tcx.subst_and_normalize_erasing_regions(
+ let callee = cx.tcx.instantiate_and_normalize_erasing_regions(
instance.args,
ty::ParamEnv::reveal_all(),
ty::EarlyBinder::bind(callee),
);
- let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
- cx.dbg_scope_fn(callee, callee_fn_abi, None)
+ debug_context.inlined_function_scopes.entry(callee).or_insert_with(|| {
+ let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
+ cx.dbg_scope_fn(callee, callee_fn_abi, None)
+ })
}
- None => unsafe {
- llvm::LLVMRustDIBuilderCreateLexicalBlock(
- DIB(cx),
- parent_scope.dbg_scope,
- file_metadata,
- loc.line,
- loc.col,
- )
- },
+ None => parent_scope.dbg_scope,
+ };
+
+ let dbg_scope = unsafe {
+ llvm::LLVMRustDIBuilderCreateLexicalBlock(
+ DIB(cx),
+ parent_dbg_scope,
+ file_metadata,
+ loc.line,
+ loc.col,
+ )
};
let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
@@ -120,7 +124,7 @@ fn make_mir_scope<'ll, 'tcx>(
dbg_scope,
inlined_at: inlined_at.or(parent_scope.inlined_at),
file_start_pos: loc.file.start_pos,
- file_end_pos: loc.file.end_pos,
+ file_end_pos: loc.file.end_position(),
};
instantiated.insert(scope);
}
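
Two things change in this file: every scope now gets its own lexical block under its parent
scope, and the debug scope for an inlined callee is built once and cached in
`inlined_function_scopes`. The caching pattern, sketched with stand-in types:

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct DIScope(u32); // stand-in for `&'ll DIScope`

    fn callee_scope(
        cache: &mut HashMap<u64, DIScope>,
        callee: u64,
        build: impl FnOnce() -> DIScope,
    ) -> DIScope {
        // `or_insert_with` invokes `build` only on the first lookup, so each
        // inlined callee gets exactly one debug scope.
        *cache.entry(callee).or_insert_with(build)
    }

    fn main() {
        let mut cache = HashMap::new();
        let s1 = callee_scope(&mut cache, 7, || DIScope(42));
        let s2 = callee_scope(&mut cache, 7, || DIScope(99)); // closure not called
        assert_eq!(s1, s2);
    }
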
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index f8cbcbd5e..ed9387616 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -445,9 +445,9 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D
ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => {
build_pointer_or_reference_di_node(cx, t, pointee_type, unique_type_id)
}
- // Box<T, A> may have a non-ZST allocator A. In that case, we
+ // Box<T, A> may have a non-1-ZST allocator A. In that case, we
// cannot treat Box<T, A> as just an owned alias of `*mut T`.
- ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
+ ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_1zst() => {
build_pointer_or_reference_di_node(cx, t, t.boxed_ty(), unique_type_id)
}
ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id),
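
The switch from `is_zst()` to `is_1zst()` matters because a zero-sized allocator can still have
alignment greater than 1 and so affect the layout of `Box<T, A>`. A small, self-contained
illustration of the distinction:

    struct Unit; // zero-sized with alignment 1: a "1-ZST"

    #[repr(align(4))]
    struct Aligned; // zero-sized, but alignment 4: a ZST that is *not* a 1-ZST

    fn main() {
        assert_eq!(std::mem::size_of::<Unit>(), 0);
        assert_eq!(std::mem::align_of::<Unit>(), 1);
        assert_eq!(std::mem::size_of::<Aligned>(), 0);
        assert_eq!(std::mem::align_of::<Aligned>(), 4);
    }
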
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 40714a0af..30cc9ea9b 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -263,11 +263,11 @@ impl CodegenCx<'_, '_> {
pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
Ok(SourceFileAndLine { sf: file, line }) => {
- let line_pos = file.lines(|lines| lines[line]);
+ let line_pos = file.lines()[line];
// Use 1-based indexing.
let line = (line + 1) as u32;
- let col = (pos - line_pos).to_u32() + 1;
+ let col = (file.relative_position(pos) - line_pos).to_u32() + 1;
(file, line, col)
}
@@ -292,7 +292,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
llfn: &'ll Value,
mir: &mir::Body<'tcx>,
- ) -> Option<FunctionDebugContext<&'ll DIScope, &'ll DILocation>> {
+ ) -> Option<FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>> {
if self.sess().opts.debuginfo == DebugInfo::None {
return None;
}
@@ -304,8 +304,10 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
file_start_pos: BytePos(0),
file_end_pos: BytePos(0),
};
- let mut fn_debug_context =
- FunctionDebugContext { scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes) };
+ let mut fn_debug_context = FunctionDebugContext {
+ scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes),
+ inlined_function_scopes: Default::default(),
+ };
// Fill in all the scopes, with the information from the MIR body.
compute_mir_scopes(self, instance, mir, &mut fn_debug_context);
@@ -347,6 +349,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
type_names::push_generic_params(
tcx,
tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args),
+ enclosing_fn_def_id,
&mut name,
);
@@ -526,7 +529,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
if let Some(impl_def_id) = cx.tcx.impl_of_method(instance.def_id()) {
// If the method does *not* belong to a trait, proceed
if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
- let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
+ let impl_self_ty = cx.tcx.instantiate_and_normalize_erasing_regions(
instance.args,
ty::ParamEnv::reveal_all(),
cx.tcx.type_of(impl_def_id),
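
The `lookup_debug_loc` hunk above switches the column computation to a file-relative position.
The underlying 1-based line/column arithmetic, sketched with plain byte offsets in place of
`BytePos` and `SourceFile`:

    fn line_col(line_starts: &[u32], relative_pos: u32) -> (u32, u32) {
        // Index of the last line start at or before `relative_pos`.
        let line = match line_starts.binary_search(&relative_pos) {
            Ok(i) => i,
            Err(i) => i - 1,
        };
        let col = relative_pos - line_starts[line];
        (line as u32 + 1, col + 1) // debug info uses 1-based indexing
    }

    fn main() {
        // "ab\ncd": lines start at offsets 0 and 3; offset 4 is line 2, column 2.
        assert_eq!(line_col(&[0, 3], 4), (2, 2));
    }
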
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index fced6d504..665d19579 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -139,6 +139,10 @@ pub(crate) struct LtoDisallowed;
pub(crate) struct LtoDylib;
#[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_proc_macro)]
+pub(crate) struct LtoProcMacro;
+
+#[derive(Diagnostic)]
#[diag(codegen_llvm_lto_bitcode_from_rlib)]
pub(crate) struct LtoBitcodeFromRlib {
pub llvm_err: String,
@@ -226,3 +230,9 @@ pub(crate) struct WriteBytecode<'a> {
pub(crate) struct CopyBitcode {
pub err: std::io::Error,
}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_unknown_debuginfo_compression)]
+pub struct UnknownCompression {
+ pub algorithm: &'static str,
+}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index a9b06030e..a97b803fc 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -15,7 +15,7 @@ use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_hir as hir;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
-use rustc_middle::ty::{self, Ty};
+use rustc_middle::ty::{self, GenericArgsRef, Ty};
use rustc_middle::{bug, span_bug};
use rustc_span::{sym, symbol::kw, Span, Symbol};
use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
@@ -165,7 +165,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
sym::volatile_load | sym::unaligned_volatile_load => {
let tp_ty = fn_args.type_at(0);
let ptr = args[0].immediate();
- let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+ let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
let llty = ty.llvm_type(self);
self.volatile_load(llty, ptr)
} else {
@@ -376,7 +376,9 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
}
_ if name.as_str().starts_with("simd_") => {
- match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+ match generic_simd_intrinsic(
+ self, name, callee_ty, fn_args, args, ret_ty, llret_ty, span,
+ ) {
Ok(llval) => llval,
Err(()) => return,
}
@@ -386,7 +388,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
};
if !fn_abi.ret.is_ignore() {
- if let PassMode::Cast(_, _) = &fn_abi.ret.mode {
+ if let PassMode::Cast { .. } = &fn_abi.ret.mode {
self.store(llval, result.llval, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
@@ -911,6 +913,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
bx: &mut Builder<'_, 'll, 'tcx>,
name: Symbol,
callee_ty: Ty<'tcx>,
+ fn_args: GenericArgsRef<'tcx>,
args: &[OperandRef<'tcx, &'ll Value>],
ret_ty: Ty<'tcx>,
llret_ty: &'ll Type,
@@ -1030,6 +1033,56 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
));
}
+ if name == sym::simd_shuffle_generic {
+ let idx = fn_args[2]
+ .expect_const()
+ .eval(tcx, ty::ParamEnv::reveal_all(), Some(span))
+ .unwrap()
+ .unwrap_branch();
+ let n = idx.len() as u64;
+
+ require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ out_len == n,
+ InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
+ );
+ require!(
+ in_elem == out_ty,
+ InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
+ );
+
+ let total_len = in_len * 2;
+
+ let indices: Option<Vec<_>> = idx
+ .iter()
+ .enumerate()
+ .map(|(arg_idx, val)| {
+ let idx = val.unwrap_leaf().try_to_i32().unwrap();
+ if idx >= i32::try_from(total_len).unwrap() {
+ bx.sess().emit_err(InvalidMonomorphization::ShuffleIndexOutOfBounds {
+ span,
+ name,
+ arg_idx: arg_idx as u64,
+ total_len: total_len.into(),
+ });
+ None
+ } else {
+ Some(bx.const_i32(idx))
+ }
+ })
+ .collect();
+ let Some(indices) = indices else {
+ return Ok(bx.const_null(llret_ty));
+ };
+
+ return Ok(bx.shuffle_vector(
+ args[0].immediate(),
+ args[1].immediate(),
+ bx.const_vector(&indices),
+ ));
+ }
+
if name == sym::simd_shuffle {
// Make sure this is actually an array, since typeck only checks the length-suffixed
// version of this intrinsic.
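
`simd_shuffle_generic` reads its indices from a const generic argument (`fn_args[2]`) rather
than a value operand, and checks each index against twice the input length before emitting a
`shufflevector`. Its semantics can be modeled in plain Rust as selecting from the concatenation
of the two inputs:

    // Indices below N select from `a`; indices in N..2*N select from `b`.
    // Out-of-range indices are rejected at monomorphization time in the compiler.
    fn shuffle<const N: usize, const M: usize>(
        a: [i32; N],
        b: [i32; N],
        idx: [usize; M],
    ) -> [i32; M] {
        idx.map(|i| if i < N { a[i] } else { b[i - N] })
    }

    fn main() {
        assert_eq!(shuffle([1, 2], [3, 4], [0, 3, 2]), [1, 4, 3]);
    }
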
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index d283299ac..9c5edd6bd 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -10,6 +10,7 @@
#![feature(iter_intersperse)]
#![feature(let_chains)]
#![feature(never_type)]
+#![feature(slice_group_by)]
#![feature(impl_trait_in_assoc_type)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
@@ -21,6 +22,7 @@ extern crate rustc_macros;
#[macro_use]
extern crate tracing;
+use back::owned_target_machine::OwnedTargetMachine;
use back::write::{create_informational_target_machine, create_target_machine};
use errors::ParseTargetMachineConfig;
@@ -38,8 +40,8 @@ use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, FatalError, Handler, Subd
use rustc_fluent_macro::fluent_messages;
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::query::Providers;
use rustc_middle::ty::TyCtxt;
+use rustc_middle::util::Providers;
use rustc_session::config::{OptLevel, OutputFilenames, PrintKind, PrintRequest};
use rustc_session::Session;
use rustc_span::symbol::Symbol;
@@ -47,10 +49,12 @@ use rustc_span::symbol::Symbol;
use std::any::Any;
use std::ffi::CStr;
use std::io::Write;
+use std::mem::ManuallyDrop;
mod back {
pub mod archive;
pub mod lto;
+ pub mod owned_target_machine;
mod profiling;
pub mod write;
}
@@ -161,7 +165,7 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
impl WriteBackendMethods for LlvmCodegenBackend {
type Module = ModuleLlvm;
type ModuleBuffer = back::lto::ModuleBuffer;
- type TargetMachine = &'static mut llvm::TargetMachine;
+ type TargetMachine = OwnedTargetMachine;
type TargetMachineError = crate::errors::LlvmError<'static>;
type ThinData = back::lto::ThinData;
type ThinBuffer = back::lto::ThinBuffer;
@@ -400,7 +404,10 @@ impl CodegenBackend for LlvmCodegenBackend {
pub struct ModuleLlvm {
llcx: &'static mut llvm::Context,
llmod_raw: *const llvm::Module,
- tm: &'static mut llvm::TargetMachine,
+
+ // This field is `ManuallyDrop` because it is important that the `TargetMachine`
+    // is disposed prior to the `Context` being disposed; otherwise UAFs can occur.
+ tm: ManuallyDrop<OwnedTargetMachine>,
}
unsafe impl Send for ModuleLlvm {}
@@ -411,7 +418,11 @@ impl ModuleLlvm {
unsafe {
let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
- ModuleLlvm { llmod_raw, llcx, tm: create_target_machine(tcx, mod_name) }
+ ModuleLlvm {
+ llmod_raw,
+ llcx,
+ tm: ManuallyDrop::new(create_target_machine(tcx, mod_name)),
+ }
}
}
@@ -419,7 +430,11 @@ impl ModuleLlvm {
unsafe {
let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
- ModuleLlvm { llmod_raw, llcx, tm: create_informational_target_machine(tcx.sess) }
+ ModuleLlvm {
+ llmod_raw,
+ llcx,
+ tm: ManuallyDrop::new(create_informational_target_machine(tcx.sess)),
+ }
}
}
@@ -440,7 +455,7 @@ impl ModuleLlvm {
}
};
- Ok(ModuleLlvm { llmod_raw, llcx, tm })
+ Ok(ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) })
}
}
@@ -452,7 +467,7 @@ impl ModuleLlvm {
impl Drop for ModuleLlvm {
fn drop(&mut self) {
unsafe {
- llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
+ ManuallyDrop::drop(&mut self.tm);
llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
}
}
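
The `ManuallyDrop` wrapper exists purely to pin down drop order: the target machine must be
disposed before the LLVM context it was created from. A minimal sketch of the pattern, with
stand-in types:

    use std::mem::ManuallyDrop;

    struct Context;
    struct TargetMachine; // imagine this holds pointers into a Context

    struct Module {
        tm: ManuallyDrop<TargetMachine>,
        llcx: Context,
    }

    impl Drop for Module {
        fn drop(&mut self) {
            // SAFETY: `tm` is dropped exactly once, and before `llcx`, which
            // the normal field-drop order releases after this body runs.
            unsafe { ManuallyDrop::drop(&mut self.tm) };
        }
    }

    fn main() {
        let _m = Module { tm: ManuallyDrop::new(TargetMachine), llcx: Context };
        // dropping `_m` runs Drop::drop above, then releases `llcx`
    }
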
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 84157d1e2..a038b3af0 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -83,12 +83,17 @@ pub enum LLVMModFlagBehavior {
// Consts for the LLVM CallConv type, pre-cast to usize.
/// LLVM CallingConv::ID. Should we wrap this?
+///
+/// See <https://github.com/llvm/llvm-project/blob/main/llvm/include/llvm/IR/CallingConv.h>
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(C)]
pub enum CallConv {
CCallConv = 0,
FastCallConv = 8,
ColdCallConv = 9,
+ PreserveMost = 14,
+ PreserveAll = 15,
+ Tail = 18,
X86StdcallCallConv = 64,
X86FastcallCallConv = 65,
ArmAapcsCallConv = 67,
@@ -2107,6 +2112,8 @@ extern "C" {
);
pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char;
+
+    // This function makes copies of the pointed-to data, so the data's lifetime may end after this function returns.
pub fn LLVMRustCreateTargetMachine(
Triple: *const c_char,
CPU: *const c_char,
@@ -2126,9 +2133,14 @@ extern "C" {
RelaxELFRelocations: bool,
UseInitArray: bool,
SplitDwarfFile: *const c_char,
+ OutputObjFile: *const c_char,
+ DebugInfoCompression: *const c_char,
ForceEmulatedTls: bool,
- ) -> Option<&'static mut TargetMachine>;
- pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
+ ArgsCstrBuff: *const c_char,
+ ArgsCstrBuffLen: usize,
+ ) -> *mut TargetMachine;
+
+ pub fn LLVMRustDisposeTargetMachine(T: *mut TargetMachine);
pub fn LLVMRustAddLibraryInfo<'a>(
PM: &PassManager<'a>,
M: &'a Module,
@@ -2314,6 +2326,12 @@ extern "C" {
len: usize,
out_len: &mut usize,
) -> *const u8;
+ pub fn LLVMRustGetSliceFromObjectDataByName(
+ data: *const u8,
+ len: usize,
+ name: *const u8,
+ out_len: &mut usize,
+ ) -> *const u8;
pub fn LLVMRustLinkerNew(M: &Module) -> &mut Linker<'_>;
pub fn LLVMRustLinkerAdd(
@@ -2352,6 +2370,10 @@ extern "C" {
pub fn LLVMRustIsBitcode(ptr: *const u8, len: usize) -> bool;
+ pub fn LLVMRustLLVMHasZlibCompressionForDebugSymbols() -> bool;
+
+ pub fn LLVMRustLLVMHasZstdCompressionForDebugSymbols() -> bool;
+
pub fn LLVMRustGetSymbols(
buf_ptr: *const u8,
buf_len: usize,
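
Returning `*mut TargetMachine` instead of `Option<&'static mut TargetMachine>` moves
null-checking and ownership to the Rust side, which is what `OwnedTargetMachine` packages up.
A sketch of that wrapper shape; `tm_create` and `tm_dispose` are stand-ins for the `LLVMRust*`
entry points:

    use std::ptr::NonNull;

    enum RawTargetMachine {} // opaque FFI type

    unsafe fn tm_create() -> *mut RawTargetMachine {
        std::ptr::null_mut() // placeholder for the real constructor
    }
    unsafe fn tm_dispose(_tm: *mut RawTargetMachine) {}

    struct OwnedTargetMachine(NonNull<RawTargetMachine>);

    impl OwnedTargetMachine {
        fn new() -> Option<Self> {
            // `NonNull::new` maps the nullable raw pointer to an Option.
            NonNull::new(unsafe { tm_create() }).map(Self)
        }
    }

    impl Drop for OwnedTargetMachine {
        fn drop(&mut self) {
            unsafe { tm_dispose(self.0.as_ptr()) }
        }
    }

    fn main() {
        // With the placeholder constructor this is always None; with the real
        // FFI it would be Some on success.
        assert!(OwnedTargetMachine::new().is_none());
    }
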
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index a76c9c9b7..7c8ef67ff 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -303,7 +303,7 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
// check that all features in a given smallvec are enabled
for llvm_feature in to_llvm_features(sess, feature) {
let cstr = SmallCStr::new(llvm_feature);
- if !unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) } {
+ if !unsafe { llvm::LLVMRustHasFeature(&target_machine, cstr.as_ptr()) } {
return false;
}
}
@@ -422,14 +422,14 @@ pub(crate) fn print(req: &PrintRequest, mut out: &mut dyn PrintBackendInfo, sess
}
unsafe {
llvm::LLVMRustPrintTargetCPUs(
- tm,
+ &tm,
cpu_cstring.as_ptr(),
callback,
&mut out as *mut &mut dyn PrintBackendInfo as *mut c_void,
);
}
}
- PrintKind::TargetFeatures => print_target_features(out, sess, tm),
+ PrintKind::TargetFeatures => print_target_features(out, sess, &tm),
_ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
}
}
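
The call sites change from `tm` to `&tm` because the target machine is now an owned wrapper
rather than a bare `&'static mut` reference. Assuming a `Deref` impl shaped like the sketch
below (stand-in types), the coercion to the borrowed FFI handle is automatic:

    use std::ops::Deref;

    struct TargetMachine;
    struct OwnedTargetMachine(Box<TargetMachine>);

    impl Deref for OwnedTargetMachine {
        type Target = TargetMachine;
        fn deref(&self) -> &TargetMachine {
            &self.0
        }
    }

    fn has_feature(_tm: &TargetMachine) -> bool {
        true // placeholder for the FFI query
    }

    fn main() {
        let tm = OwnedTargetMachine(Box::new(TargetMachine));
        assert!(has_feature(&tm)); // `&OwnedTargetMachine` coerces to `&TargetMachine`
    }
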
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 831645579..dcc62d314 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -3,7 +3,7 @@ use crate::context::TypeLowering;
use crate::type_::Type;
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
-use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, Ty, TypeVisitableExt};
use rustc_target::abi::HasDataLayout;
@@ -215,20 +215,16 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
/// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment.
fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
+ // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+ // In other words, this should generally not look at the type at all, but only at the
+ // layout.
if let Abi::Scalar(scalar) = self.abi {
// Use a different cache for scalars because pointers to DSTs
// can be either fat or thin (data pointers of fat pointers).
if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
return llty;
}
- let llty = match *self.ty.kind() {
- ty::Ref(..) | ty::RawPtr(_) => cx.type_ptr(),
- ty::Adt(def, _) if def.is_box() => cx.type_ptr(),
- ty::FnPtr(sig) => {
- cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
- }
- _ => self.scalar_llvm_type_at(cx, scalar),
- };
+ let llty = self.scalar_llvm_type_at(cx, scalar);
cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
return llty;
}
@@ -303,27 +299,9 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
index: usize,
immediate: bool,
) -> &'a Type {
- // HACK(eddyb) special-case fat pointers until LLVM removes
- // pointee types, to avoid bitcasting every `OperandRef::deref`.
- match *self.ty.kind() {
- ty::Ref(..) | ty::RawPtr(_) => {
- return self.field(cx, index).llvm_type(cx);
- }
- // only wide pointer boxes are handled as pointers
- // thin pointer boxes with scalar allocators are handled by the general logic below
- ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
- let ptr_ty = Ty::new_mut_ptr(cx.tcx, self.ty.boxed_ty());
- return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
- }
- // `dyn* Trait` has the same ABI as `*mut dyn Trait`
- ty::Dynamic(bounds, region, ty::DynStar) => {
- let ptr_ty =
- Ty::new_mut_ptr(cx.tcx, Ty::new_dynamic(cx.tcx, bounds, region, ty::Dyn));
- return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
- }
- _ => {}
- }
-
+ // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+ // In other words, this should generally not look at the type at all, but only at the
+ // layout.
let Abi::ScalarPair(a, b) = self.abi else {
bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
};