Diffstat (limited to 'compiler/rustc_codegen_llvm')
30 files changed, 1246 insertions, 1014 deletions
diff --git a/compiler/rustc_codegen_llvm/Cargo.toml b/compiler/rustc_codegen_llvm/Cargo.toml
index f9a5463ef..0ad39c240 100644
--- a/compiler/rustc_codegen_llvm/Cargo.toml
+++ b/compiler/rustc_codegen_llvm/Cargo.toml
@@ -5,14 +5,13 @@ edition = "2021"
 
 [lib]
 test = false
-doctest = false
 
 [dependencies]
 bitflags = "1.0"
 cstr = "0.2"
 libc = "0.2"
-libloading = "0.7.1"
 measureme = "10.0.0"
+object = { version = "0.29.0", default-features = false, features = ["std", "read_core", "archive", "coff", "elf", "macho", "pe"] }
 tracing = "0.1"
 rustc_middle = { path = "../rustc_middle" }
 rustc-demangle = "0.1.21"
@@ -34,3 +33,4 @@ rustc_target = { path = "../rustc_target" }
 smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 rustc_ast = { path = "../rustc_ast" }
 rustc_span = { path = "../rustc_span" }
+tempfile = "3.2.0"
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 9eb3574e7..d478efc86 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -19,6 +19,7 @@ use rustc_target::abi::call::ArgAbi;
 pub use rustc_target::abi::call::*;
 use rustc_target::abi::{self, HasDataLayout, Int};
 pub use rustc_target::spec::abi::Abi;
+use rustc_target::spec::SanitizerSet;
 
 use libc::c_uint;
 use smallvec::SmallVec;
@@ -90,6 +91,13 @@ fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'
         if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
             attrs.push(llvm::AttributeKind::NoAlias.create_attr(cx.llcx));
         }
+    } else if cx.tcx.sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::MEMORY) {
+        // If we're not optimising, *but* memory sanitizer is on, emit noundef, since it affects
+        // memory sanitizer's behavior.
+
+        if regular.contains(ArgAttribute::NoUndef) {
+            attrs.push(llvm::AttributeKind::NoUndef.create_attr(cx.llcx));
+        }
     }
 
     attrs
@@ -213,7 +221,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
         } else if self.is_unsized_indirect() {
             bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
-        } else if let PassMode::Cast(cast) = self.mode {
+        } else if let PassMode::Cast(cast, _) = &self.mode {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
@@ -283,7 +291,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             }
             PassMode::Direct(_)
             | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
-            | PassMode::Cast(_) => {
+            | PassMode::Cast(..) => {
                 let next_arg = next();
                 self.store(bx, next_arg, dst);
             }
@@ -325,20 +333,18 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
     fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
         // Ignore "extra" args from the call site for C variadic functions.
         // Only the "fixed" args are part of the LLVM function signature.
-        let args = if self.c_variadic { &self.args[..self.fixed_count] } else { &self.args };
+        let args =
+            if self.c_variadic { &self.args[..self.fixed_count as usize] } else { &self.args };
 
-        let args_capacity: usize = args.iter().map(|arg|
-            if arg.pad.is_some() { 1 } else { 0 } +
-            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
-        ).sum();
+        // This capacity calculation is approximate.
         let mut llargument_tys = Vec::with_capacity(
-            if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 } + args_capacity,
+            self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 },
         );
 
-        let llreturn_ty = match self.ret.mode {
+        let llreturn_ty = match &self.ret.mode {
             PassMode::Ignore => cx.type_void(),
             PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
-            PassMode::Cast(cast) => cast.llvm_type(cx),
+            PassMode::Cast(cast, _) => cast.llvm_type(cx),
             PassMode::Indirect { .. } => {
                 llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                 cx.type_void()
@@ -346,12 +352,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         };
 
         for arg in args {
-            // add padding
-            if let Some(ty) = arg.pad {
-                llargument_tys.push(ty.llvm_type(cx));
-            }
-
-            let llarg_ty = match arg.mode {
+            let llarg_ty = match &arg.mode {
                 PassMode::Ignore => continue,
                 PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                 PassMode::Pair(..) => {
@@ -366,7 +367,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                     continue;
                 }
-                PassMode::Cast(cast) => cast.llvm_type(cx),
+                PassMode::Cast(cast, pad_i32) => {
+                    // add padding
+                    if *pad_i32 {
+                        llargument_tys.push(Reg::i32().llvm_type(cx));
+                    }
+                    cast.llvm_type(cx)
+                }
                 PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
                     cx.type_ptr_to(arg.memory_ty(cx))
                 }
@@ -426,46 +433,46 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             i += 1;
             i - 1
         };
-        match self.ret.mode {
-            PassMode::Direct(ref attrs) => {
+        match &self.ret.mode {
+            PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
             }
-            PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
+            PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
                 assert!(!on_stack);
                 let i = apply(attrs);
                 let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
                 attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
             }
-            PassMode::Cast(cast) => {
+            PassMode::Cast(cast, _) => {
                 cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
             }
             _ => {}
         }
-        for arg in &self.args {
-            if arg.pad.is_some() {
-                apply(&ArgAttributes::new());
-            }
-            match arg.mode {
+        for arg in self.args.iter() {
+            match &arg.mode {
                 PassMode::Ignore => {}
-                PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
+                PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
                     let i = apply(attrs);
                     let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
                     attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
                 }
-                PassMode::Direct(ref attrs)
-                | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
+                PassMode::Direct(attrs)
+                | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
                     apply(attrs);
                 }
-                PassMode::Indirect { ref attrs, extra_attrs: Some(ref extra_attrs), on_stack } => {
+                PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
                     assert!(!on_stack);
                     apply(attrs);
                     apply(extra_attrs);
                 }
-                PassMode::Pair(ref a, ref b) => {
+                PassMode::Pair(a, b) => {
                     apply(a);
                     apply(b);
                 }
-                PassMode::Cast(cast) => {
+                PassMode::Cast(cast, pad_i32) => {
+                    if *pad_i32 {
+                        apply(&ArgAttributes::new());
+                    }
                     apply(&cast.attrs);
                 }
             }
@@ -488,17 +495,17 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             i += 1;
             i - 1
         };
-        match self.ret.mode {
-            PassMode::Direct(ref attrs) => {
+        match &self.ret.mode {
+            PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
             }
-            PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
+            PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
                 assert!(!on_stack);
                 let i = apply(bx.cx, attrs);
                 let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
                 attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
             }
-            PassMode::Cast(cast) => {
+            PassMode::Cast(cast, _) => {
                 cast.attrs.apply_attrs_to_callsite(
                     llvm::AttributePlace::ReturnValue,
                     &bx.cx,
@@ -517,13 +524,10 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 }
             }
         }
-        for arg in &self.args {
-            if arg.pad.is_some() {
-                apply(bx.cx, &ArgAttributes::new());
-            }
-            match arg.mode {
+        for arg in self.args.iter() {
+            match &arg.mode {
                 PassMode::Ignore => {}
-                PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
+                PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
                     let i = apply(bx.cx, attrs);
                     let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
                     attributes::apply_to_callsite(
@@ -532,23 +536,22 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                         &[byval],
                     );
                 }
-                PassMode::Direct(ref attrs)
-                | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
+                PassMode::Direct(attrs)
+                | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
                     apply(bx.cx, attrs);
                 }
-                PassMode::Indirect {
-                    ref attrs,
-                    extra_attrs: Some(ref extra_attrs),
-                    on_stack: _,
-                } => {
+                PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack: _ } => {
                    apply(bx.cx, attrs);
                    apply(bx.cx, extra_attrs);
                }
-                PassMode::Pair(ref a, ref b) => {
+                PassMode::Pair(a, b) => {
                     apply(bx.cx, a);
                     apply(bx.cx, b);
                 }
-                PassMode::Cast(cast) => {
+                PassMode::Cast(cast, pad_i32) => {
+                    if *pad_i32 {
+                        apply(bx.cx, &ArgAttributes::new());
+                    }
                     apply(bx.cx, &cast.attrs);
                 }
             }
@@ -589,10 +592,6 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
 }
 
 impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
-    fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
-        fn_abi.apply_attrs_callsite(self, callsite)
-    }
-
     fn get_param(&mut self, index: usize) -> Self::Value {
         llvm::get_param(self.llfn(), index as c_uint)
     }
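The abi.rs change above folds the old per-argument `pad` field into the second element of `PassMode::Cast`, so padding is only ever emitted for cast-mode arguments. A minimal, self-contained sketch of how argument lowering treats that flag; the enum and type names here are simplified stand-ins, not the real rustc definitions:

```rust
// Simplified mirror of the new PassMode shape: the bool is the former
// `pad` field, now meaning "insert a padding i32 before this argument".
enum PassMode {
    Ignore,
    Direct(&'static str),
    Cast { cast: &'static str, pad_i32: bool },
}

fn lower_args(args: &[PassMode]) -> Vec<&'static str> {
    let mut llargument_tys = Vec::with_capacity(args.len());
    for arg in args {
        let llarg_ty = match *arg {
            PassMode::Ignore => continue,
            PassMode::Direct(ty) => ty,
            PassMode::Cast { cast, pad_i32 } => {
                // add padding (previously driven by the separate `pad` field)
                if pad_i32 {
                    llargument_tys.push("i32");
                }
                cast
            }
        };
        llargument_tys.push(llarg_ty);
    }
    llargument_tys
}

fn main() {
    let args = [PassMode::Cast { cast: "{ i64, i64 }", pad_i32: true }, PassMode::Direct("i8")];
    assert_eq!(lower_args(&args), ["i32", "{ i64, i64 }", "i8"]);
}
```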
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index a53946995..017513721 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -3,7 +3,6 @@ use crate::builder::Builder;
 use crate::common::Funclet;
 use crate::context::CodegenCx;
 use crate::llvm;
-use crate::llvm_util;
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
@@ -20,7 +19,6 @@ use rustc_target::asm::*;
 
 use libc::{c_char, c_uint};
 use smallvec::SmallVec;
-use tracing::debug;
 
 impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
     fn codegen_inline_asm(
@@ -419,13 +417,6 @@ pub(crate) fn inline_asm_call<'ll>(
         let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
         debug!("constraint verification result: {:?}", constraints_ok);
         if constraints_ok {
-            if unwind && llvm_util::get_version() < (13, 0, 0) {
-                bx.cx.sess().span_fatal(
-                    line_spans[0],
-                    "unwinding from inline assembly is only supported on llvm >= 13.",
-                );
-            }
-
             let v = llvm::LLVMRustInlineAsm(
                 fty,
                 asm.as_ptr().cast(),
@@ -439,9 +430,9 @@ pub(crate) fn inline_asm_call<'ll>(
             );
 
             let call = if let Some((dest, catch, funclet)) = dest_catch_funclet {
-                bx.invoke(fty, v, inputs, dest, catch, funclet)
+                bx.invoke(fty, None, v, inputs, dest, catch, funclet)
             } else {
-                bx.call(fty, v, inputs, None)
+                bx.call(fty, None, v, inputs, None)
             };
 
             // Store mark in a metadata node so we can map LLVM errors
@@ -560,6 +551,8 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) ->
                 format!("{{{}}}", reg.name())
             }
         }
+        // The constraints can be retrieved from
+        // https://llvm.org/docs/LangRef.html#supported-constraint-code-list
        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
             InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
             InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
@@ -633,6 +626,8 @@ fn modifier_to_llvm(
     reg: InlineAsmRegClass,
     modifier: Option<char>,
 ) -> Option<char> {
+    // The modifiers can be retrieved from
+    // https://llvm.org/docs/LangRef.html#asm-template-argument-modifiers
     match reg {
         InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
         InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index aabbe8ac2..eff2436d4 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -35,6 +35,10 @@ pub fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attrib
 /// Get LLVM attribute for the provided inline heuristic.
 #[inline]
 fn inline_attr<'ll>(cx: &CodegenCx<'ll, '_>, inline: InlineAttr) -> Option<&'ll Attribute> {
+    if !cx.tcx.sess.opts.unstable_opts.inline_llvm {
+        // disable LLVM inlining
+        return Some(AttributeKind::NoInline.create_attr(cx.llcx));
+    }
     match inline {
         InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)),
         InlineAttr::Always => Some(AttributeKind::AlwaysInline.create_attr(cx.llcx)),
@@ -386,7 +390,8 @@ pub fn from_fn_attrs<'ll, 'tcx>(
     ) {
         let span = cx
             .tcx
-            .get_attr(instance.def_id(), sym::target_feature)
+            .get_attrs(instance.def_id(), sym::target_feature)
+            .next()
             .map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span);
         let msg = format!(
             "the target features {} must all be either enabled or disabled together",
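The attributes.rs hunk adds an early exit: when LLVM inlining is disabled via the new unstable option, every function gets `noinline` before the usual `InlineAttr` mapping is consulted. A hedged, dependency-free sketch of that decision order (names and the string attributes are illustrative, not rustc's types):

```rust
#[derive(Clone, Copy)]
enum InlineAttr {
    None,
    Hint,
    Always,
    Never,
}

// Mirrors the order of checks in the diff: the "no LLVM inlining" escape
// hatch wins over any per-function inline hint.
fn inline_attr(inline_llvm: bool, inline: InlineAttr) -> Option<&'static str> {
    if !inline_llvm {
        // disable LLVM inlining
        return Some("noinline");
    }
    match inline {
        InlineAttr::Hint => Some("inlinehint"),
        InlineAttr::Always => Some("alwaysinline"),
        InlineAttr::Never => Some("noinline"),
        InlineAttr::None => None,
    }
}

fn main() {
    assert_eq!(inline_attr(false, InlineAttr::Always), Some("noinline"));
    assert_eq!(inline_attr(true, InlineAttr::Hint), Some("inlinehint"));
}
```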
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index 27039cda2..082665bba 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -2,16 +2,21 @@
 
 use std::env;
 use std::ffi::{CStr, CString, OsString};
-use std::io;
+use std::fs;
+use std::io::{self, Write};
 use std::mem;
 use std::path::{Path, PathBuf};
 use std::ptr;
 use std::str;
 
+use object::read::macho::FatArch;
+
+use crate::common;
 use crate::llvm::archive_ro::{ArchiveRO, Child};
 use crate::llvm::{self, ArchiveKind, LLVMMachineType, LLVMRustCOFFShortExport};
 use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder};
-use rustc_session::cstore::{DllCallingConvention, DllImport};
+use rustc_data_structures::memmap::Mmap;
+use rustc_session::cstore::DllImport;
 use rustc_session::Session;
 
 /// Helper for adding many files to an archive.
@@ -52,13 +57,70 @@ fn llvm_machine_type(cpu: &str) -> LLVMMachineType {
     }
 }
 
+fn try_filter_fat_archs(
+    archs: object::read::Result<&[impl FatArch]>,
+    target_arch: object::Architecture,
+    archive_path: &Path,
+    archive_map_data: &[u8],
+) -> io::Result<Option<PathBuf>> {
+    let archs = archs.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
+
+    let desired = match archs.iter().filter(|a| a.architecture() == target_arch).next() {
+        Some(a) => a,
+        None => return Ok(None),
+    };
+
+    let (mut new_f, extracted_path) = tempfile::Builder::new()
+        .suffix(archive_path.file_name().unwrap())
+        .tempfile()?
+        .keep()
+        .unwrap();
+
+    new_f.write_all(
+        desired.data(archive_map_data).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?,
+    )?;
+
+    Ok(Some(extracted_path))
+}
+
+fn try_extract_macho_fat_archive(
+    sess: &Session,
+    archive_path: &Path,
+) -> io::Result<Option<PathBuf>> {
+    let archive_map = unsafe { Mmap::map(fs::File::open(&archive_path)?)? };
+    let target_arch = match sess.target.arch.as_ref() {
+        "aarch64" => object::Architecture::Aarch64,
+        "x86_64" => object::Architecture::X86_64,
+        _ => return Ok(None),
+    };
+
+    match object::macho::FatHeader::parse(&*archive_map) {
+        Ok(h) if h.magic.get(object::endian::BigEndian) == object::macho::FAT_MAGIC => {
+            let archs = object::macho::FatHeader::parse_arch32(&*archive_map);
+            try_filter_fat_archs(archs, target_arch, archive_path, &*archive_map)
+        }
+        Ok(h) if h.magic.get(object::endian::BigEndian) == object::macho::FAT_MAGIC_64 => {
+            let archs = object::macho::FatHeader::parse_arch64(&*archive_map);
+            try_filter_fat_archs(archs, target_arch, archive_path, &*archive_map)
+        }
+        // Not a FatHeader at all, just return None.
+        _ => Ok(None),
+    }
+}
+
 impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> {
     fn add_archive(
         &mut self,
         archive: &Path,
         skip: Box<dyn FnMut(&str) -> bool + 'static>,
     ) -> io::Result<()> {
-        let archive_ro = match ArchiveRO::open(archive) {
+        let mut archive = archive.to_path_buf();
+        if self.sess.target.llvm_target.contains("-apple-macosx") {
+            if let Some(new_archive) = try_extract_macho_fat_archive(&self.sess, &archive)? {
+                archive = new_archive
+            }
+        }
+        let archive_ro = match ArchiveRO::open(&archive) {
             Ok(ar) => ar,
             Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)),
         };
@@ -66,7 +128,7 @@ impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> {
             return Ok(());
         }
         self.additions.push(Addition::Archive {
-            path: archive.to_path_buf(),
+            path: archive,
             archive: archive_ro,
             skip: Box::new(skip),
         });
@@ -103,29 +165,28 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
         lib_name: &str,
         dll_imports: &[DllImport],
         tmpdir: &Path,
+        is_direct_dependency: bool,
     ) -> PathBuf {
+        let name_suffix = if is_direct_dependency { "_imports" } else { "_imports_indirect" };
         let output_path = {
             let mut output_path: PathBuf = tmpdir.to_path_buf();
-            output_path.push(format!("{}_imports", lib_name));
+            output_path.push(format!("{}{}", lib_name, name_suffix));
             output_path.with_extension("lib")
         };
 
         let target = &sess.target;
-        let mingw_gnu_toolchain = target.vendor == "pc"
-            && target.os == "windows"
-            && target.env == "gnu"
-            && target.abi.is_empty();
+        let mingw_gnu_toolchain = common::is_mingw_gnu_toolchain(target);
 
         let import_name_and_ordinal_vector: Vec<(String, Option<u16>)> = dll_imports
             .iter()
             .map(|import: &DllImport| {
                 if sess.target.arch == "x86" {
                     (
-                        LlvmArchiveBuilder::i686_decorated_name(import, mingw_gnu_toolchain),
-                        import.ordinal,
+                        common::i686_decorated_name(import, mingw_gnu_toolchain, false),
+                        import.ordinal(),
                     )
                 } else {
-                    (import.name.to_string(), import.ordinal)
+                    (import.name.to_string(), import.ordinal())
                 }
             })
             .collect();
@@ -136,7 +197,8 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
             // that loaded but crashed with an AV upon calling one of the imported
             // functions. Therefore, use binutils to create the import library instead,
             // by writing a .DEF file to the temp dir and calling binutils's dlltool.
-            let def_file_path = tmpdir.join(format!("{}_imports", lib_name)).with_extension("def");
+            let def_file_path =
+                tmpdir.join(format!("{}{}", lib_name, name_suffix)).with_extension("def");
 
             let def_file_content = format!(
                 "EXPORTS\n{}",
@@ -159,6 +221,9 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
                 }
             };
 
+            // --no-leading-underscore: For the `import_name_type` feature to work, we need to be
+            // able to control the *exact* spelling of each of the symbols that are being imported:
+            // hence we don't want `dlltool` adding leading underscores automatically.
             let dlltool = find_binutils_dlltool(sess);
             let result = std::process::Command::new(dlltool)
                 .args([
@@ -168,6 +233,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
                     lib_name,
                     "-l",
                     output_path.to_str().unwrap(),
+                    "--no-leading-underscore",
                 ])
                 .output();
 
@@ -188,10 +254,10 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
 
             let output_path_z = rustc_fs_util::path_to_c_string(&output_path);
 
-            tracing::trace!("invoking LLVMRustWriteImportLibrary");
-            tracing::trace!("  dll_name {:#?}", dll_name_z);
-            tracing::trace!("  output_path {}", output_path.display());
-            tracing::trace!(
+            trace!("invoking LLVMRustWriteImportLibrary");
+            trace!("  dll_name {:#?}", dll_name_z);
+            trace!("  output_path {}", output_path.display());
+            trace!(
                 "  import names: {}",
                 dll_imports
                     .iter()
@@ -322,22 +388,6 @@ impl<'a> LlvmArchiveBuilder<'a> {
             ret
         }
     }
-
-    fn i686_decorated_name(import: &DllImport, mingw: bool) -> String {
-        let name = import.name;
-        let prefix = if mingw { "" } else { "_" };
-
-        match import.calling_convention {
-            DllCallingConvention::C => format!("{}{}", prefix, name),
-            DllCallingConvention::Stdcall(arg_list_size) => {
-                format!("{}{}@{}", prefix, name, arg_list_size)
-            }
-            DllCallingConvention::Fastcall(arg_list_size) => format!("@{}@{}", name, arg_list_size),
-            DllCallingConvention::Vectorcall(arg_list_size) => {
-                format!("{}@@{}", name, arg_list_size)
-            }
-        }
-    }
 }
 
 fn string_to_io_error(s: String) -> io::Error {
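The new `try_extract_macho_fat_archive` path peels the slice matching the current target out of a universal ("fat") Mach-O archive before handing it to the LLVM archive reader. As a rough illustration of the file format involved, here is a hand-rolled parse of the 32-bit `FAT_MAGIC` layout; this is a sketch only, since the real code delegates all of this to the `object` crate:

```rust
// Big-endian fat header: magic, nfat_arch, then nfat_arch entries of
// (cputype, cpusubtype, offset, size, align), all u32 in the 32-bit variant.
const FAT_MAGIC: u32 = 0xcafe_babe;

fn be_u32(b: &[u8], at: usize) -> Option<u32> {
    Some(u32::from_be_bytes(b.get(at..at + 4)?.try_into().ok()?))
}

/// Returns the (offset, size) of the slice for `want_cputype`, if present.
fn find_arch(data: &[u8], want_cputype: u32) -> Option<(usize, usize)> {
    if be_u32(data, 0)? != FAT_MAGIC {
        return None; // not a fat archive; the caller uses the file as-is
    }
    let nfat = be_u32(data, 4)? as usize;
    for i in 0..nfat {
        let entry = 8 + i * 20;
        if be_u32(data, entry)? == want_cputype {
            let offset = be_u32(data, entry + 8)? as usize;
            let size = be_u32(data, entry + 12)? as usize;
            return Some((offset, size));
        }
    }
    None
}

fn main() {
    // 0x0100000c is CPU_TYPE_ARM64; a real caller would mmap the .a file first.
    let not_fat = [0u8; 16];
    assert_eq!(find_arch(&not_fat, 0x0100_000c), None);
}
```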
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 3731c6bcf..a49cc7f8d 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -1,15 +1,14 @@
-use crate::back::write::{
-    self, save_temp_bitcode, to_llvm_opt_settings, with_llvm_pmb, DiagnosticHandlers,
-};
-use crate::llvm::archive_ro::ArchiveRO;
-use crate::llvm::{self, build_string, False, True};
-use crate::{llvm_util, LlvmCodegenBackend, ModuleLlvm};
+use crate::back::write::{self, save_temp_bitcode, DiagnosticHandlers};
+use crate::llvm::{self, build_string};
+use crate::{LlvmCodegenBackend, ModuleLlvm};
+use object::read::archive::ArchiveFile;
 use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
 use rustc_codegen_ssa::back::symbol_export;
 use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, TargetMachineFactoryConfig};
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
 use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::memmap::Mmap;
 use rustc_errors::{FatalError, Handler};
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_middle::bug;
@@ -17,7 +16,6 @@ use rustc_middle::dep_graph::WorkProduct;
 use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
 use rustc_session::cgu_reuse_tracker::CguReuse;
 use rustc_session::config::{self, CrateType, Lto};
-use tracing::{debug, info};
 
 use std::ffi::{CStr, CString};
 use std::fs::File;
@@ -34,8 +32,8 @@ pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
 
 pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
     match crate_type {
-        CrateType::Executable | CrateType::Staticlib | CrateType::Cdylib => true,
-        CrateType::Dylib | CrateType::Rlib | CrateType::ProcMacro => false,
+        CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true,
+        CrateType::Rlib | CrateType::ProcMacro => false,
     }
 }
 
@@ -75,17 +73,6 @@ fn prepare_lto(
     // with either fat or thin LTO
     let mut upstream_modules = Vec::new();
     if cgcx.lto != Lto::ThinLocal {
-        if cgcx.opts.cg.prefer_dynamic {
-            diag_handler
-                .struct_err("cannot prefer dynamic linking when performing LTO")
-                .note(
-                    "only 'staticlib', 'bin', and 'cdylib' outputs are \
-                    supported with LTO",
-                )
-                .emit();
-            return Err(FatalError);
-        }
-
         // Make sure we actually can run LTO
         for crate_type in cgcx.crate_types.iter() {
             if !crate_type_allows_lto(*crate_type) {
@@ -94,9 +81,25 @@ fn prepare_lto(
                     static library outputs",
                 );
                 return Err(e);
+            } else if *crate_type == CrateType::Dylib {
+                if !cgcx.opts.unstable_opts.dylib_lto {
+                    return Err(diag_handler
+                        .fatal("lto cannot be used for `dylib` crate type without `-Zdylib-lto`"));
+                }
             }
         }
 
+        if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto {
+            diag_handler
+                .struct_err("cannot prefer dynamic linking when performing LTO")
+                .note(
+                    "only 'staticlib', 'bin', and 'cdylib' outputs are \
+                    supported with LTO",
+                )
+                .emit();
+            return Err(FatalError);
+        }
+
         for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
             let exported_symbols =
                 cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
@@ -107,14 +110,24 @@ fn prepare_lto(
                     .extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
             }
 
-            let archive = ArchiveRO::open(path).expect("wanted an rlib");
+            let archive_data = unsafe {
+                Mmap::map(std::fs::File::open(&path).expect("couldn't open rlib"))
+                    .expect("couldn't map rlib")
+            };
+            let archive = ArchiveFile::parse(&*archive_data).expect("wanted an rlib");
             let obj_files = archive
-                .iter()
-                .filter_map(|child| child.ok().and_then(|c| c.name().map(|name| (name, c))))
+                .members()
+                .filter_map(|child| {
+                    child.ok().and_then(|c| {
+                        std::str::from_utf8(c.name()).ok().map(|name| (name.trim(), c))
+                    })
+                })
                 .filter(|&(name, _)| looks_like_rust_object_file(name));
             for (name, child) in obj_files {
                 info!("adding bitcode from {}", name);
-                match get_bitcode_slice_from_object_data(child.data()) {
+                match get_bitcode_slice_from_object_data(
+                    child.data(&*archive_data).expect("corrupt rlib"),
+                ) {
                     Ok(data) => {
                         let module = SerializedModule::FromRlib(data.to_vec());
                         upstream_modules.push((module, CString::new(name).unwrap()));
@@ -565,7 +578,7 @@ pub(crate) fn run_pass_manager(
     module: &mut ModuleCodegen<ModuleLlvm>,
     thin: bool,
 ) -> Result<(), FatalError> {
-    let _timer = cgcx.prof.extra_verbose_generic_activity("LLVM_lto_optimize", &*module.name);
+    let _timer = cgcx.prof.verbose_generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
     let config = cgcx.config(module.kind);
 
     // Now we have one massive module inside of llmod. Time to run the
@@ -587,61 +600,9 @@ pub(crate) fn run_pass_manager(
                 1,
             );
         }
-        if llvm_util::should_use_new_llvm_pass_manager(
-            &config.new_llvm_pass_manager,
-            &cgcx.target_arch,
-        ) {
-            let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
-            let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
-            write::optimize_with_new_llvm_pass_manager(
-                cgcx,
-                diag_handler,
-                module,
-                config,
-                opt_level,
-                opt_stage,
-            )?;
-            debug!("lto done");
-            return Ok(());
-        }
-
-        let pm = llvm::LLVMCreatePassManager();
-        llvm::LLVMAddAnalysisPasses(module.module_llvm.tm, pm);
-
-        if config.verify_llvm_ir {
-            let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
-            llvm::LLVMRustAddPass(pm, pass.unwrap());
-        }
-
-        let opt_level = config
-            .opt_level
-            .map(|x| to_llvm_opt_settings(x).0)
-            .unwrap_or(llvm::CodeGenOptLevel::None);
-        with_llvm_pmb(module.module_llvm.llmod(), config, opt_level, false, &mut |b| {
-            if thin {
-                llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm);
-            } else {
-                llvm::LLVMRustPassManagerBuilderPopulateLTOPassManager(
-                    b, pm, /* Internalize = */ False, /* RunInliner = */ True,
-                );
-            }
-        });
-
-        // We always generate bitcode through ThinLTOBuffers,
-        // which do not support anonymous globals
-        if config.bitcode_needed() {
-            let pass = llvm::LLVMRustFindAndCreatePass("name-anon-globals\0".as_ptr().cast());
-            llvm::LLVMRustAddPass(pm, pass.unwrap());
-        }
-
-        if config.verify_llvm_ir {
-            let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
-            llvm::LLVMRustAddPass(pm, pass.unwrap());
-        }
-
-        llvm::LLVMRunPassManager(pm, module.module_llvm.llmod());
-
-        llvm::LLVMDisposePassManager(pm);
+        let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
+        let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
+        write::llvm_optimize(cgcx, diag_handler, module, config, opt_level, opt_stage)?;
     }
     debug!("lto done");
     Ok(())
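`prepare_lto` now memory-maps each upstream rlib and walks its members with `object::read::archive::ArchiveFile`, keeping only members that look like Rust object files before pulling bitcode out of them. A minimal sketch of the same filtering shape over plain types; the archive walking is stubbed out here, and the name test is a simplified stand-in for rustc's `looks_like_rust_object_file`:

```rust
// Stand-in for an archive member: (raw name bytes, payload). In the real
// code this comes from ArchiveFile::members() over an Mmap of the rlib.
type Member<'a> = (&'a [u8], &'a [u8]);

fn looks_like_rust_object_file(name: &str) -> bool {
    // rustc names codegen-unit objects like `crate.cgu-name.rcgu.o`.
    name.ends_with(".rcgu.o")
}

fn bitcode_candidates<'a>(members: &'a [Member<'a>]) -> Vec<(&'a str, &'a [u8])> {
    members
        .iter()
        .filter_map(|&(raw_name, data)| {
            // Member names arrive as bytes and may carry padding; trim them.
            std::str::from_utf8(raw_name).ok().map(|n| (n.trim(), data))
        })
        .filter(|&(name, _)| looks_like_rust_object_file(name))
        .collect()
}

fn main() {
    let members: Vec<Member<'_>> = vec![
        (b"lib.rmeta".as_slice(), b"".as_slice()),
        (b"foo.crate0.rcgu.o".as_slice(), b"\x01\x02".as_slice()),
    ];
    let picked = bitcode_candidates(&members);
    assert_eq!(picked.len(), 1);
    assert_eq!(picked[0].0, "foo.crate0.rcgu.o");
}
```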
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 534d32e8a..11053a8f6 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -5,7 +5,7 @@ use crate::back::profiling::{
 use crate::base;
 use crate::common;
 use crate::consts;
-use crate::llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic};
+use crate::llvm::{self, DiagnosticInfo, PassManager};
 use crate::llvm_util;
 use crate::type_::Type;
 use crate::LlvmCodegenBackend;
@@ -21,14 +21,12 @@ use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_errors::{FatalError, Handler, Level};
 use rustc_fs_util::{link_or_copy, path_to_c_string};
-use rustc_middle::bug;
 use rustc_middle::ty::TyCtxt;
 use rustc_session::config::{self, Lto, OutputType, Passes, SplitDwarfKind, SwitchWithOptPath};
 use rustc_session::Session;
 use rustc_span::symbol::sym;
 use rustc_span::InnerSpan;
 use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo};
-use tracing::debug;
 
 use libc::{c_char, c_int, c_uint, c_void, size_t};
 use std::ffi::CString;
@@ -304,7 +302,6 @@ impl<'a> DiagnosticHandlers<'a> {
                 remark_passes.as_ptr(),
                 remark_passes.len(),
             );
-            llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
             DiagnosticHandlers { data, llcx, old_handler }
         }
     }
@@ -312,9 +309,7 @@ impl<'a> DiagnosticHandlers<'a> {
 
 impl<'a> Drop for DiagnosticHandlers<'a> {
     fn drop(&mut self) {
-        use std::ptr::null_mut;
         unsafe {
-            llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
             llvm::LLVMRustContextSetDiagnosticHandler(self.llcx, self.old_handler);
             drop(Box::from_raw(self.data));
         }
@@ -342,16 +337,6 @@ fn report_inline_asm(
     cgcx.diag_emitter.inline_asm_error(cookie as u32, msg, level, source);
 }
 
-unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, user: *const c_void, cookie: c_uint) {
-    if user.is_null() {
-        return;
-    }
-    let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
-
-    let smdiag = llvm::diagnostic::SrcMgrDiagnostic::unpack(diag);
-    report_inline_asm(cgcx, smdiag.message, smdiag.level, cookie, smdiag.source);
-}
-
 unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
     if user.is_null() {
         return;
@@ -423,7 +408,15 @@ fn get_pgo_sample_use_path(config: &ModuleConfig) -> Option<CString> {
         .map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
 }
 
-pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
+fn get_instr_profile_output_path(config: &ModuleConfig) -> Option<CString> {
+    if config.instrument_coverage {
+        Some(CString::new("default_%m_%p.profraw").unwrap())
+    } else {
+        None
+    }
+}
+
+pub(crate) unsafe fn llvm_optimize(
     cgcx: &CodegenContext<LlvmCodegenBackend>,
     diag_handler: &Handler,
     module: &ModuleCodegen<ModuleLlvm>,
@@ -438,6 +431,7 @@ pub(crate) unsafe fn llvm_optimize(
     let pgo_use_path = get_pgo_use_path(config);
     let pgo_sample_use_path = get_pgo_sample_use_path(config);
     let is_lto = opt_stage == llvm::OptStage::ThinLTO || opt_stage == llvm::OptStage::FatLTO;
+    let instr_profile_output_path = get_instr_profile_output_path(config);
     // Sanitizer instrumentation is only inserted during the pre-link optimization stage.
     let sanitizer_options = if !is_lto {
         Some(llvm::SanitizerOptions {
@@ -470,7 +464,7 @@ pub(crate) unsafe fn llvm_optimize(
     // FIXME: NewPM doesn't provide a facility to pass custom InlineParams.
     // We would have to add upstream support for this first, before we can support
     // config.inline_threshold and our more aggressive default thresholds.
-    let result = llvm::LLVMRustOptimizeWithNewPassManager(
+    let result = llvm::LLVMRustOptimize(
         module.module_llvm.llmod(),
         &*module.module_llvm.tm,
         to_pass_builder_opt_level(opt_level),
@@ -488,6 +482,7 @@ pub(crate) unsafe fn llvm_optimize(
         pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
         pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
         config.instrument_coverage,
+        instr_profile_output_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
        config.instrument_gcov,
         pgo_sample_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
         config.debug_info_for_profiling,
@@ -513,18 +508,11 @@ pub(crate) unsafe fn optimize(
     let llmod = module.module_llvm.llmod();
     let llcx = &*module.module_llvm.llcx;
-    let tm = &*module.module_llvm.tm;
     let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
 
     let module_name = module.name.clone();
     let module_name = Some(&module_name[..]);
 
-    if let Some(false) = config.new_llvm_pass_manager && llvm_util::get_version() >= (15, 0, 0) {
-        diag_handler.warn(
-            "ignoring `-Z new-llvm-pass-manager=no`, which is no longer supported with LLVM 15",
-        );
-    }
-
     if config.emit_no_opt_bc {
         let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
         let out = path_to_c_string(&out);
@@ -532,191 +520,24 @@ pub(crate) unsafe fn optimize(
     }
 
     if let Some(opt_level) = config.opt_level {
-        if llvm_util::should_use_new_llvm_pass_manager(
-            &config.new_llvm_pass_manager,
-            &cgcx.target_arch,
-        ) {
-            let opt_stage = match cgcx.lto {
-                Lto::Fat => llvm::OptStage::PreLinkFatLTO,
-                Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
-                _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
-                _ => llvm::OptStage::PreLinkNoLTO,
-            };
-            return optimize_with_new_llvm_pass_manager(
-                cgcx,
-                diag_handler,
-                module,
-                config,
-                opt_level,
-                opt_stage,
-            );
-        }
-
-        if cgcx.prof.llvm_recording_enabled() {
-            diag_handler
-                .warn("`-Z self-profile-events = llvm` requires `-Z new-llvm-pass-manager`");
-        }
-
-        // Create the two optimizing pass managers. These mirror what clang
-        // does, and are by populated by LLVM's default PassManagerBuilder.
-        // Each manager has a different set of passes, but they also share
-        // some common passes.
-        let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
-        let mpm = llvm::LLVMCreatePassManager();
-
-        {
-            let find_pass = |pass_name: &str| {
-                let pass_name = SmallCStr::new(pass_name);
-                llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr())
-            };
-
-            if config.verify_llvm_ir {
-                // Verification should run as the very first pass.
-                llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap());
-            }
-
-            let mut extra_passes = Vec::new();
-            let mut have_name_anon_globals_pass = false;
-
-            for pass_name in &config.passes {
-                if pass_name == "lint" {
-                    // Linting should also be performed early, directly on the generated IR.
-                    llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
-                    continue;
-                }
-
-                if let Some(pass) = find_pass(pass_name) {
-                    extra_passes.push(pass);
-                } else {
-                    diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name));
-                }
-
-                if pass_name == "name-anon-globals" {
-                    have_name_anon_globals_pass = true;
-                }
-            }
-
-            // Instrumentation must be inserted before optimization,
-            // otherwise LLVM may optimize some functions away which
-            // breaks llvm-cov.
-            //
-            // This mirrors what Clang does in lib/CodeGen/BackendUtil.cpp.
-            if config.instrument_gcov {
-                llvm::LLVMRustAddPass(mpm, find_pass("insert-gcov-profiling").unwrap());
-            }
-            if config.instrument_coverage {
-                llvm::LLVMRustAddPass(mpm, find_pass("instrprof").unwrap());
-            }
-            if config.debug_info_for_profiling {
-                llvm::LLVMRustAddPass(mpm, find_pass("add-discriminators").unwrap());
-            }
-
-            add_sanitizer_passes(config, &mut extra_passes);
-
-            // Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
-            // to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
-            // we'll get errors in LLVM.
-            let using_thin_buffers = config.bitcode_needed();
-            if !config.no_prepopulate_passes {
-                llvm::LLVMAddAnalysisPasses(tm, fpm);
-                llvm::LLVMAddAnalysisPasses(tm, mpm);
-                let opt_level = to_llvm_opt_settings(opt_level).0;
-                let prepare_for_thin_lto = cgcx.lto == Lto::Thin
-                    || cgcx.lto == Lto::ThinLocal
-                    || (cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
-                with_llvm_pmb(llmod, config, opt_level, prepare_for_thin_lto, &mut |b| {
-                    llvm::LLVMRustAddLastExtensionPasses(
-                        b,
-                        extra_passes.as_ptr(),
-                        extra_passes.len() as size_t,
-                    );
-                    llvm::LLVMRustPassManagerBuilderPopulateFunctionPassManager(b, fpm);
-                    llvm::LLVMRustPassManagerBuilderPopulateModulePassManager(b, mpm);
-                });
-
-                have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
-                if using_thin_buffers && !prepare_for_thin_lto {
-                    llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
-                    have_name_anon_globals_pass = true;
-                }
-            } else {
-                // If we don't use the standard pipeline, directly populate the MPM
-                // with the extra passes.
-                for pass in extra_passes {
-                    llvm::LLVMRustAddPass(mpm, pass);
-                }
-            }
-
-            if using_thin_buffers && !have_name_anon_globals_pass {
-                // As described above, this will probably cause an error in LLVM
-                if config.no_prepopulate_passes {
-                    diag_handler.err(
-                        "The current compilation is going to use thin LTO buffers \
-                        without running LLVM's NameAnonGlobals pass. \
-                        This will likely cause errors in LLVM. Consider adding \
-                        -C passes=name-anon-globals to the compiler command line.",
-                    );
-                } else {
-                    bug!(
-                        "We are using thin LTO buffers without running the NameAnonGlobals pass. \
-                        This will likely cause errors in LLVM and should never happen."
-                    );
-                }
-            }
-        }
-
-        diag_handler.abort_if_errors();
-
-        // Finally, run the actual optimization passes
-        {
-            let _timer = cgcx.prof.extra_verbose_generic_activity(
-                "LLVM_module_optimize_function_passes",
-                &*module.name,
-            );
-            llvm::LLVMRustRunFunctionPassManager(fpm, llmod);
-        }
-        {
-            let _timer = cgcx.prof.extra_verbose_generic_activity(
-                "LLVM_module_optimize_module_passes",
-                &*module.name,
-            );
-            llvm::LLVMRunPassManager(mpm, llmod);
-        }
-
-        // Deallocate managers that we're now done with
-        llvm::LLVMDisposePassManager(fpm);
-        llvm::LLVMDisposePassManager(mpm);
+        let opt_stage = match cgcx.lto {
+            Lto::Fat => llvm::OptStage::PreLinkFatLTO,
+            Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
+            _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
+            _ => llvm::OptStage::PreLinkNoLTO,
+        };
+        return llvm_optimize(cgcx, diag_handler, module, config, opt_level, opt_stage);
     }
     Ok(())
 }
 
-unsafe fn add_sanitizer_passes(config: &ModuleConfig, passes: &mut Vec<&'static mut llvm::Pass>) {
-    if config.sanitizer.contains(SanitizerSet::ADDRESS) {
-        let recover = config.sanitizer_recover.contains(SanitizerSet::ADDRESS);
-        passes.push(llvm::LLVMRustCreateAddressSanitizerFunctionPass(recover));
-        passes.push(llvm::LLVMRustCreateModuleAddressSanitizerPass(recover));
-    }
-    if config.sanitizer.contains(SanitizerSet::MEMORY) {
-        let track_origins = config.sanitizer_memory_track_origins as c_int;
-        let recover = config.sanitizer_recover.contains(SanitizerSet::MEMORY);
-        passes.push(llvm::LLVMRustCreateMemorySanitizerPass(track_origins, recover));
-    }
-    if config.sanitizer.contains(SanitizerSet::THREAD) {
-        passes.push(llvm::LLVMRustCreateThreadSanitizerPass());
-    }
-    if config.sanitizer.contains(SanitizerSet::HWADDRESS) {
-        let recover = config.sanitizer_recover.contains(SanitizerSet::HWADDRESS);
-        passes.push(llvm::LLVMRustCreateHWAddressSanitizerPass(recover));
-    }
-}
-
 pub(crate) fn link(
     cgcx: &CodegenContext<LlvmCodegenBackend>,
     diag_handler: &Handler,
     mut modules: Vec<ModuleCodegen<ModuleLlvm>>,
 ) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
     use super::lto::{Linker, ModuleBuffer};
-    // Sort the modules by name to ensure to ensure deterministic behavior.
+    // Sort the modules by name to ensure deterministic behavior.
     modules.sort_by(|a, b| a.name.cmp(&b.name));
     let (first, elements) =
         modules.split_first().expect("Bug! modules must contain at least one module.");
@@ -1076,72 +897,6 @@ unsafe fn embed_bitcode(
     }
 }
 
-pub unsafe fn with_llvm_pmb(
-    llmod: &llvm::Module,
-    config: &ModuleConfig,
-    opt_level: llvm::CodeGenOptLevel,
-    prepare_for_thin_lto: bool,
-    f: &mut dyn FnMut(&llvm::PassManagerBuilder),
-) {
-    use std::ptr;
-
-    // Create the PassManagerBuilder for LLVM. We configure it with
-    // reasonable defaults and prepare it to actually populate the pass
-    // manager.
-    let builder = llvm::LLVMRustPassManagerBuilderCreate();
-    let opt_size = config.opt_size.map_or(llvm::CodeGenOptSizeNone, |x| to_llvm_opt_settings(x).1);
-    let inline_threshold = config.inline_threshold;
-    let pgo_gen_path = get_pgo_gen_path(config);
-    let pgo_use_path = get_pgo_use_path(config);
-    let pgo_sample_use_path = get_pgo_sample_use_path(config);
-
-    llvm::LLVMRustConfigurePassManagerBuilder(
-        builder,
-        opt_level,
-        config.merge_functions,
-        config.vectorize_slp,
-        config.vectorize_loop,
-        prepare_for_thin_lto,
-        pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
-        pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
-        pgo_sample_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
-        opt_size as c_int,
-    );
-
-    llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins);
-
-    // Here we match what clang does (kinda). For O0 we only inline
-    // always-inline functions (but don't add lifetime intrinsics), at O1 we
-    // inline with lifetime intrinsics, and O2+ we add an inliner with a
-    // thresholds copied from clang.
-    match (opt_level, opt_size, inline_threshold) {
-        (.., Some(t)) => {
-            llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, t);
-        }
-        (llvm::CodeGenOptLevel::Aggressive, ..) => {
-            llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 275);
-        }
-        (_, llvm::CodeGenOptSizeDefault, _) => {
-            llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 75);
-        }
-        (_, llvm::CodeGenOptSizeAggressive, _) => {
-            llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 25);
-        }
-        (llvm::CodeGenOptLevel::None, ..) => {
-            llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
-        }
-        (llvm::CodeGenOptLevel::Less, ..) => {
-            llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
-        }
-        (llvm::CodeGenOptLevel::Default, ..) => {
-            llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 225);
-        }
-    }
-
-    f(builder);
-    llvm::LLVMRustPassManagerBuilderDispose(builder);
-}
-
 // Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
 // This is required to satisfy `dllimport` references to static data in .rlibs
 // when using MSVC linker. We do this only for data, as linker can fix up
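With the legacy pass-manager plumbing deleted, `optimize` now just derives a pre-link `OptStage` from the LTO mode and delegates to `llvm_optimize`. The mapping is small enough to check in isolation; the enums below are local stand-ins for the rustc and LLVM wrapper types:

```rust
#[derive(Clone, Copy)]
enum Lto {
    No,
    Thin,
    ThinLocal,
    Fat,
}

#[derive(Debug, PartialEq)]
enum OptStage {
    PreLinkNoLTO,
    PreLinkThinLTO,
    PreLinkFatLTO,
}

fn pre_link_stage(lto: Lto, linker_plugin_lto: bool) -> OptStage {
    match lto {
        Lto::Fat => OptStage::PreLinkFatLTO,
        Lto::Thin | Lto::ThinLocal => OptStage::PreLinkThinLTO,
        // Linker-plugin LTO behaves like ThinLTO before the link step.
        _ if linker_plugin_lto => OptStage::PreLinkThinLTO,
        _ => OptStage::PreLinkNoLTO,
    }
}

fn main() {
    assert_eq!(pre_link_stage(Lto::No, true), OptStage::PreLinkThinLTO);
    assert_eq!(pre_link_stage(Lto::Fat, false), OptStage::PreLinkFatLTO);
}
```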
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index 86f92dc02..5b2bbdb4b 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -19,6 +19,8 @@ use crate::context::CodegenCx;
 use crate::llvm;
 use crate::value::Value;
 
+use cstr::cstr;
+
 use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
 use rustc_codegen_ssa::mono_item::MonoItemExt;
 use rustc_codegen_ssa::traits::*;
@@ -107,11 +109,14 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
         }
 
         // Create the llvm.used and llvm.compiler.used variables.
-        if !cx.used_statics().borrow().is_empty() {
-            cx.create_used_variable()
+        if !cx.used_statics.borrow().is_empty() {
+            cx.create_used_variable_impl(cstr!("llvm.used"), &*cx.used_statics.borrow());
         }
-        if !cx.compiler_used_statics().borrow().is_empty() {
-            cx.create_compiler_used_variable()
+        if !cx.compiler_used_statics.borrow().is_empty() {
+            cx.create_used_variable_impl(
+                cstr!("llvm.compiler.used"),
+                &*cx.compiler_used_statics.borrow(),
+            );
         }
 
         // Run replace-all-uses-with for statics that need it. This must
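base.rs now funnels both `llvm.used` and `llvm.compiler.used` through a single helper, `create_used_variable_impl`, parameterized by the global's name. A schematic of the call pattern with stub types; the real helper builds an array of pointer constants and places it in the `llvm.metadata` section:

```rust
use std::cell::RefCell;

// Stub context: just records which "used" globals were emitted.
struct Cx {
    used_statics: RefCell<Vec<&'static str>>,
    compiler_used_statics: RefCell<Vec<&'static str>>,
    emitted: RefCell<Vec<(String, usize)>>,
}

impl Cx {
    // Analogue of create_used_variable_impl(name, values).
    fn create_used_variable_impl(&self, name: &str, values: &[&'static str]) {
        self.emitted.borrow_mut().push((name.to_string(), values.len()));
    }
}

fn main() {
    let cx = Cx {
        used_statics: RefCell::new(vec!["FOO"]),
        compiler_used_statics: RefCell::new(vec![]),
        emitted: RefCell::new(vec![]),
    };
    // Same shape as the diff: emit each list only if it is non-empty.
    if !cx.used_statics.borrow().is_empty() {
        cx.create_used_variable_impl("llvm.used", &cx.used_statics.borrow());
    }
    if !cx.compiler_used_statics.borrow().is_empty() {
        cx.create_used_variable_impl("llvm.compiler.used", &cx.compiler_used_statics.borrow());
    }
    assert_eq!(cx.emitted.borrow().len(), 1);
}
```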
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index d3096c73a..fca43a0d8 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -1,15 +1,14 @@
+use crate::abi::FnAbiLlvmExt;
 use crate::attributes;
 use crate::common::Funclet;
 use crate::context::CodegenCx;
-use crate::llvm::{self, BasicBlock, False};
-use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope};
-use crate::llvm_util;
+use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock};
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
 use cstr::cstr;
 use libc::{c_char, c_uint};
-use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind};
+use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, SynchronizationScope, TypeKind};
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
@@ -28,7 +27,6 @@ use std::ffi::CStr;
 use std::iter;
 use std::ops::Deref;
 use std::ptr;
-use tracing::{debug, instrument};
 
 // All Builders must have an llfn associated with them
 #[must_use]
@@ -217,6 +215,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn invoke(
         &mut self,
         llty: &'ll Type,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
         llfn: &'ll Value,
         args: &[&'ll Value],
         then: &'ll BasicBlock,
@@ -229,7 +228,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let bundle = funclet.map(|funclet| funclet.bundle());
         let bundle = bundle.as_ref().map(|b| &*b.raw);
 
-        unsafe {
+        let invoke = unsafe {
             llvm::LLVMRustBuildInvoke(
                 self.llbuilder,
                 llty,
@@ -241,7 +240,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 bundle,
                 UNNAMED,
             )
+        };
+        if let Some(fn_abi) = fn_abi {
+            fn_abi.apply_attrs_callsite(self, invoke);
         }
+        invoke
     }
 
     fn unreachable(&mut self) {
@@ -408,20 +411,17 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
         let mut bx = Builder::with_cx(self.cx);
         bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
-        bx.dynamic_alloca(ty, align)
-    }
-
-    fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
         unsafe {
-            let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
+            let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
             llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
             alloca
         }
     }
 
-    fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value {
+    fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value {
         unsafe {
-            let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
+            let alloca =
+                llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED);
             llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
             alloca
         }
@@ -726,11 +726,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
     }
 
-    fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
+    fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
         self.fptoint_sat(false, val, dest_ty)
     }
 
-    fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
+    fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
         self.fptoint_sat(true, val, dest_ty)
     }
 
@@ -1038,35 +1038,23 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         dst: &'ll Value,
         cmp: &'ll Value,
         src: &'ll Value,
-        mut order: rustc_codegen_ssa::common::AtomicOrdering,
+        order: rustc_codegen_ssa::common::AtomicOrdering,
         failure_order: rustc_codegen_ssa::common::AtomicOrdering,
         weak: bool,
     ) -> &'ll Value {
         let weak = if weak { llvm::True } else { llvm::False };
-        if llvm_util::get_version() < (13, 0, 0) {
-            use rustc_codegen_ssa::common::AtomicOrdering::*;
-            // Older llvm has the pre-C++17 restriction on
-            // success and failure memory ordering,
-            // requiring the former to be at least as strong as the latter.
-            // So, for llvm 12, we upgrade the success ordering to a stronger
-            // one if necessary.
-            match (order, failure_order) {
-                (Relaxed, Acquire) => order = Acquire,
-                (Release, Acquire) => order = AcquireRelease,
-                (_, SequentiallyConsistent) => order = SequentiallyConsistent,
-                _ => {}
-            }
-        }
         unsafe {
-            llvm::LLVMRustBuildAtomicCmpXchg(
+            let value = llvm::LLVMBuildAtomicCmpXchg(
                 self.llbuilder,
                 dst,
                 cmp,
                 src,
                 AtomicOrdering::from_generic(order),
                 AtomicOrdering::from_generic(failure_order),
-                weak,
-            )
+                llvm::False, // SingleThreaded
+            );
+            llvm::LLVMSetWeak(value, weak);
+            value
         }
     }
     fn atomic_rmw(
@@ -1083,7 +1071,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 dst,
                 src,
                 AtomicOrdering::from_generic(order),
-                False,
+                llvm::False, // SingleThreaded
             )
         }
     }
@@ -1091,13 +1079,18 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn atomic_fence(
         &mut self,
         order: rustc_codegen_ssa::common::AtomicOrdering,
-        scope: rustc_codegen_ssa::common::SynchronizationScope,
+        scope: SynchronizationScope,
     ) {
+        let single_threaded = match scope {
+            SynchronizationScope::SingleThread => llvm::True,
+            SynchronizationScope::CrossThread => llvm::False,
+        };
         unsafe {
-            llvm::LLVMRustBuildAtomicFence(
+            llvm::LLVMBuildFence(
                 self.llbuilder,
                 AtomicOrdering::from_generic(order),
-                SynchronizationScope::from_generic(scope),
+                single_threaded,
+                UNNAMED,
             );
         }
     }
@@ -1155,6 +1148,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn call(
         &mut self,
         llty: &'ll Type,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
         llfn: &'ll Value,
         args: &[&'ll Value],
         funclet: Option<&Funclet<'ll>>,
@@ -1165,7 +1159,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let bundle = funclet.map(|funclet| funclet.bundle());
         let bundle = bundle.as_ref().map(|b| &*b.raw);
 
-        unsafe {
+        let call = unsafe {
             llvm::LLVMRustBuildCall(
                 self.llbuilder,
                 llty,
@@ -1174,7 +1168,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 args.len() as c_uint,
                 bundle,
             )
+        };
+        if let Some(fn_abi) = fn_abi {
+            fn_abi.apply_attrs_callsite(self, call);
         }
+        call
     }
 
     fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
@@ -1407,7 +1405,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
     pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
         let (ty, f) = self.cx.get_intrinsic(intrinsic);
-        self.call(ty, f, args, None)
+        self.call(ty, None, f, args, None)
     }
 
     fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
@@ -1444,51 +1442,32 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn fptoint_sat_broken_in_llvm(&self) -> bool {
-        match self.tcx.sess.target.arch.as_ref() {
-            // FIXME - https://bugs.llvm.org/show_bug.cgi?id=50083
-            "riscv64" => llvm_util::get_version() < (13, 0, 0),
-            _ => false,
-        }
-    }
-
-    fn fptoint_sat(
-        &mut self,
-        signed: bool,
-        val: &'ll Value,
-        dest_ty: &'ll Type,
-    ) -> Option<&'ll Value> {
-        if !self.fptoint_sat_broken_in_llvm() {
-            let src_ty = self.cx.val_ty(val);
-            let (float_ty, int_ty, vector_length) = if self.cx.type_kind(src_ty) == TypeKind::Vector
-            {
-                assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
-                (
-                    self.cx.element_type(src_ty),
-                    self.cx.element_type(dest_ty),
-                    Some(self.cx.vector_length(src_ty)),
-                )
-            } else {
-                (src_ty, dest_ty, None)
-            };
-            let float_width = self.cx.float_width(float_ty);
-            let int_width = self.cx.int_width(int_ty);
-
-            let instr = if signed { "fptosi" } else { "fptoui" };
-            let name = if let Some(vector_length) = vector_length {
-                format!(
-                    "llvm.{}.sat.v{}i{}.v{}f{}",
-                    instr, vector_length, int_width, vector_length, float_width
-                )
-            } else {
-                format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
-            };
-            let f =
-                self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
-            Some(self.call(self.type_func(&[src_ty], dest_ty), f, &[val], None))
+    fn fptoint_sat(&mut self, signed: bool, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        let src_ty = self.cx.val_ty(val);
+        let (float_ty, int_ty, vector_length) = if self.cx.type_kind(src_ty) == TypeKind::Vector {
+            assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
+            (
+                self.cx.element_type(src_ty),
+                self.cx.element_type(dest_ty),
+                Some(self.cx.vector_length(src_ty)),
+            )
         } else {
-            None
-        }
+            (src_ty, dest_ty, None)
+        };
+        let float_width = self.cx.float_width(float_ty);
+        let int_width = self.cx.int_width(int_ty);
+
+        let instr = if signed { "fptosi" } else { "fptoui" };
+        let name = if let Some(vector_length) = vector_length {
+            format!(
+                "llvm.{}.sat.v{}i{}.v{}f{}",
+                instr, vector_length, int_width, vector_length, float_width
+            )
+        } else {
+            format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
+        };
+        let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
+        self.call(self.type_func(&[src_ty], dest_ty), None, f, &[val], None)
     }
 
     pub(crate) fn landing_pad(
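The rewritten `fptoint_sat` unconditionally builds an `llvm.fptosi.sat.*` / `llvm.fptoui.sat.*` intrinsic name from the destination integer width and the source float width, plus a vector length when applicable. The name construction from the diff is easy to check in isolation:

```rust
// Reproduces the intrinsic-name format strings from the diff.
fn sat_intrinsic_name(
    signed: bool,
    int_width: u32,
    float_width: u32,
    vector_length: Option<u32>,
) -> String {
    let instr = if signed { "fptosi" } else { "fptoui" };
    match vector_length {
        Some(len) => {
            format!("llvm.{}.sat.v{}i{}.v{}f{}", instr, len, int_width, len, float_width)
        }
        None => format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width),
    }
}

fn main() {
    assert_eq!(sat_intrinsic_name(true, 32, 64, None), "llvm.fptosi.sat.i32.f64");
    assert_eq!(sat_intrinsic_name(false, 16, 32, Some(4)), "llvm.fptoui.sat.v4i16.v4f32");
}
```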
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 72155d874..6f0d1b7ce 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -6,11 +6,11 @@
 
 use crate::abi::FnAbiLlvmExt;
 use crate::attributes;
+use crate::common;
 use crate::context::CodegenCx;
 use crate::llvm;
 use crate::value::Value;
 use rustc_codegen_ssa::traits::*;
-use tracing::debug;
 
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
 use rustc_middle::ty::{self, Instance, TypeVisitable};
@@ -79,13 +79,18 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
             llfn
         }
     } else {
-        let llfn = cx.declare_fn(sym, fn_abi);
+        let instance_def_id = instance.def_id();
+        let llfn = if tcx.sess.target.arch == "x86" &&
+            let Some(dllimport) = common::get_dllimport(tcx, instance_def_id, sym)
+        {
+            cx.declare_fn(&common::i686_decorated_name(&dllimport, common::is_mingw_gnu_toolchain(&tcx.sess.target), true), fn_abi)
+        } else {
+            cx.declare_fn(sym, fn_abi)
+        };
         debug!("get_fn: not casting pointer!");
 
         attributes::from_fn_attrs(cx, llfn, instance);
 
-        let instance_def_id = instance.def_id();
-
         // Apply an appropriate linkage/visibility value to our item that we
         // just declared.
         //
@@ -174,7 +179,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
         // MinGW: For backward compatibility we rely on the linker to decide whether it
         // should use dllimport for functions.
         if cx.use_dll_storage_attrs
-            && tcx.is_dllimport_foreign_item(instance_def_id)
+            && let Some(library) = tcx.native_library(instance_def_id)
+            && library.kind.is_dllimport()
             && !matches!(tcx.sess.target.env.as_ref(), "gnu" | "uclibc")
         {
             llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index fb4da9a5f..acee9134f 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -10,13 +10,17 @@ use crate::value::Value;
 
 use rustc_ast::Mutability;
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
+use rustc_hir::def_id::DefId;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::cstore::{DllCallingConvention, DllImport, PeImportNameType};
 use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer, Size};
+use rustc_target::spec::Target;
 
 use libc::{c_char, c_uint};
-use tracing::debug;
+use std::fmt::Write;
 
 /*
 * A note on nomenclature of linking: "extern", "foreign", and "upcall".
@@ -211,7 +215,11 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     }
 
     fn const_to_opt_uint(&self, v: &'ll Value) -> Option<u64> {
-        try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) })
+        try_as_const_integral(v).and_then(|v| unsafe {
+            let mut i = 0u64;
+            let success = llvm::LLVMRustConstIntGetZExtValue(v, &mut i);
+            success.then_some(i)
+        })
     }
 
     fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> {
@@ -222,10 +230,6 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         })
     }
 
-    fn zst_to_backend(&self, _llty: &'ll Type) -> &'ll Value {
-        self.const_undef(self.type_ix(0))
-    }
-
     fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
         let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
         match cv {
@@ -357,3 +361,74 @@ fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
 fn try_as_const_integral(v: &Value) -> Option<&ConstantInt> {
     unsafe { llvm::LLVMIsAConstantInt(v) }
 }
+
+pub(crate) fn get_dllimport<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    id: DefId,
+    name: &str,
+) -> Option<&'tcx DllImport> {
+    tcx.native_library(id)
+        .map(|lib| lib.dll_imports.iter().find(|di| di.name.as_str() == name))
+        .flatten()
+}
+
+pub(crate) fn is_mingw_gnu_toolchain(target: &Target) -> bool {
+    target.vendor == "pc" && target.os == "windows" && target.env == "gnu" && target.abi.is_empty()
+}
+
+pub(crate) fn i686_decorated_name(
+    dll_import: &DllImport,
+    mingw: bool,
+    disable_name_mangling: bool,
+) -> String {
+    let name = dll_import.name.as_str();
+
+    let (add_prefix, add_suffix) = match dll_import.import_name_type {
+        Some(PeImportNameType::NoPrefix) => (false, true),
+        Some(PeImportNameType::Undecorated) => (false, false),
+        _ => (true, true),
+    };
+
+    // Worst case: +1 for disable name mangling, +1 for prefix, +4 for suffix (@@__).
+    let mut decorated_name = String::with_capacity(name.len() + 6);
+
+    if disable_name_mangling {
+        // LLVM uses a binary 1 ('\x01') prefix to a name to indicate that mangling needs to be disabled.
+        decorated_name.push('\x01');
+    }
+
+    let prefix = if add_prefix && dll_import.is_fn {
+        match dll_import.calling_convention {
+            DllCallingConvention::C | DllCallingConvention::Vectorcall(_) => None,
+            DllCallingConvention::Stdcall(_) => (!mingw
+                || dll_import.import_name_type == Some(PeImportNameType::Decorated))
+            .then_some('_'),
+            DllCallingConvention::Fastcall(_) => Some('@'),
+        }
+    } else if !dll_import.is_fn && !mingw {
+        // For static variables, prefix with '_' on MSVC.
+        Some('_')
+    } else {
+        None
+    };
+    if let Some(prefix) = prefix {
+        decorated_name.push(prefix);
+    }
+
+    decorated_name.push_str(name);
+
+    if add_suffix && dll_import.is_fn {
+        match dll_import.calling_convention {
+            DllCallingConvention::C => {}
+            DllCallingConvention::Stdcall(arg_list_size)
+            | DllCallingConvention::Fastcall(arg_list_size) => {
+                write!(&mut decorated_name, "@{}", arg_list_size).unwrap();
+            }
+            DllCallingConvention::Vectorcall(arg_list_size) => {
+                write!(&mut decorated_name, "@@{}", arg_list_size).unwrap();
+            }
+        }
+    }
+
+    decorated_name
+}
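The relocated `i686_decorated_name` keeps the classic x86 symbol decorations (which the removed `LlvmArchiveBuilder::i686_decorated_name` implemented wholesale) while adding `PeImportNameType` handling and an optional `\x01` "don't mangle" marker for LLVM. A small restatement of just the classic MSVC-style scheme, so the suffix shapes are easy to see; this deliberately omits the new prefix/suffix toggles:

```rust
// Classic 32-bit Windows decorations, per calling convention.
enum CallConv {
    C,
    Stdcall(usize),
    Fastcall(usize),
    Vectorcall(usize),
}

fn msvc_decorated(name: &str, cc: &CallConv) -> String {
    match cc {
        CallConv::C => format!("_{}", name),
        CallConv::Stdcall(n) => format!("_{}@{}", name, n),
        CallConv::Fastcall(n) => format!("@{}@{}", name, n),
        CallConv::Vectorcall(n) => format!("{}@@{}", name, n),
    }
}

fn main() {
    assert_eq!(msvc_decorated("f", &CallConv::Stdcall(8)), "_f@8");
    assert_eq!(msvc_decorated("f", &CallConv::Fastcall(8)), "@f@8");
    assert_eq!(msvc_decorated("f", &CallConv::Vectorcall(8)), "f@@8");
}
```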
append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, next_offset..offset); @@ -93,7 +92,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation< dl.endian, // This `inspect` is okay since it is within the bounds of the allocation, it doesn't // affect interpreter execution (we inspect the result after interpreter execution), - // and we properly interpret the relocation as a relocation pointer offset. + // and we properly interpret the provenance as a relocation pointer offset. alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)), ) .expect("const_alloc_to_llvm: could not read relocation pointer") @@ -121,7 +120,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation< } if alloc.len() >= next_offset { let range = next_offset..alloc.len(); - // This `inspect` is okay since we have check that it is after all relocations, it is + // This `inspect` is okay since we have check that it is after all provenance, it is // within the bounds of the allocation, and it doesn't affect interpreter execution (we // inspect the result after interpreter execution). append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, range); @@ -160,7 +159,7 @@ fn check_and_apply_linkage<'ll, 'tcx>( attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str, - span_def_id: DefId, + def_id: DefId, ) -> &'ll Value { let llty = cx.layout_of(ty).llvm_type(cx); if let Some(linkage) = attrs.linkage { @@ -175,7 +174,7 @@ fn check_and_apply_linkage<'ll, 'tcx>( cx.layout_of(mt.ty).llvm_type(cx) } else { cx.sess().span_fatal( - cx.tcx.def_span(span_def_id), + cx.tcx.def_span(def_id), "must have type `*const T` or `*mut T` due to `#[linkage]` attribute", ) }; @@ -194,7 +193,7 @@ fn check_and_apply_linkage<'ll, 'tcx>( real_name.push_str(sym); let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| { cx.sess().span_fatal( - cx.tcx.def_span(span_def_id), + cx.tcx.def_span(def_id), &format!("symbol `{}` is already defined", &sym), ) }); @@ -202,6 +201,10 @@ fn check_and_apply_linkage<'ll, 'tcx>( llvm::LLVMSetInitializer(g2, g1); g2 } + } else if cx.tcx.sess.target.arch == "x86" && + let Some(dllimport) = common::get_dllimport(cx.tcx, def_id, sym) + { + cx.declare_global(&common::i686_decorated_name(&dllimport, common::is_mingw_gnu_toolchain(&cx.tcx.sess.target), true), llty) } else { // Generate an external declaration. // FIXME(nagisa): investigate whether it can be changed into define_global @@ -329,7 +332,10 @@ impl<'ll> CodegenCx<'ll, '_> { } } - if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) { + if self.use_dll_storage_attrs + && let Some(library) = self.tcx.native_library(def_id) + && library.kind.is_dllimport() + { // For foreign (native) libs we know the exact storage type to use. unsafe { llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); @@ -475,7 +481,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { // // We could remove this hack whenever we decide to drop macOS 10.10 support. if self.tcx.sess.target.is_like_osx { - // The `inspect` method is okay here because we checked relocations, and + // The `inspect` method is okay here because we checked for provenance, and // because we are doing this access to inspect the final interpreter state // (not as part of the interpreter execution). // @@ -483,7 +489,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { // happens to be zero. 
Instead, we should only check the value of defined bytes // and set all undefined bytes to zero if this allocation is headed for the // BSS. - let all_bytes_are_zero = alloc.relocations().is_empty() + let all_bytes_are_zero = alloc.provenance().is_empty() && alloc .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()) .iter() @@ -507,9 +513,9 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { section.as_str().as_ptr().cast(), section.as_str().len() as c_uint, ); - assert!(alloc.relocations().is_empty()); + assert!(alloc.provenance().is_empty()); - // The `inspect` method is okay here because we checked relocations, and + // The `inspect` method is okay here because we checked for provenance, and // because we are doing this access to inspect the final interpreter state (not // as part of the interpreter execution). let bytes = @@ -549,7 +555,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { // `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage // on other targets, in particular MachO targets have *their* static constructor // lists broken if `llvm.compiler.used` is emitted rather than llvm.used. However, - // that check happens when assigning the `CodegenFnAttrFlags` in `rustc_typeck`, + // that check happens when assigning the `CodegenFnAttrFlags` in `rustc_hir_analysis`, // so we don't need to take care of it here. self.add_compiler_used_global(g); } diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 5857b83f6..79ddfd884 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -142,17 +142,6 @@ pub unsafe fn create_module<'ll>( let mut target_data_layout = sess.target.data_layout.to_string(); let llvm_version = llvm_util::get_version(); - if llvm_version < (13, 0, 0) { - if sess.target.arch == "powerpc64" { - target_data_layout = target_data_layout.replace("-S128", ""); - } - if sess.target.arch == "wasm32" { - target_data_layout = "e-m:e-p:32:32-i64:64-n32:64-S128".to_string(); - } - if sess.target.arch == "wasm64" { - target_data_layout = "e-m:e-p:64:64-i64:64-n32:64-S128".to_string(); - } - } if llvm_version < (14, 0, 0) { if sess.target.llvm_target == "i686-pc-windows-msvc" || sess.target.llvm_target == "i586-pc-windows-msvc" @@ -165,6 +154,11 @@ pub unsafe fn create_module<'ll>( target_data_layout = target_data_layout.replace("-p10:8:8-p20:8:8", ""); } } + if llvm_version < (16, 0, 0) { + if sess.target.arch == "s390x" { + target_data_layout = target_data_layout.replace("-v128:64", ""); + } + } // Ensure the data-layout values hardcoded remain the defaults. 
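    // (Only built-in targets are verified below; custom JSON targets have no bundled
    // default to compare against, so their data layout is passed through as-is.)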
if sess.target.is_builtin { @@ -464,7 +458,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { self.coverage_cx.as_ref() } - fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) { + pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) { let section = cstr!("llvm.metadata"); let array = self.const_array(self.type_ptr_to(self.type_i8()), values); @@ -562,14 +556,6 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { self.codegen_unit } - fn used_statics(&self) -> &RefCell<Vec<&'ll Value>> { - &self.used_statics - } - - fn compiler_used_statics(&self) -> &RefCell<Vec<&'ll Value>> { - &self.compiler_used_statics - } - fn set_frame_pointer_type(&self, llfn: &'ll Value) { if let Some(attr) = attributes::frame_pointer_type_attr(self) { attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]); @@ -583,17 +569,6 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs); } - fn create_used_variable(&self) { - self.create_used_variable_impl(cstr!("llvm.used"), &*self.used_statics.borrow()); - } - - fn create_compiler_used_variable(&self) { - self.create_used_variable_impl( - cstr!("llvm.compiler.used"), - &*self.compiler_used_statics.borrow(), - ); - } - fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> { if self.get_declared_value("main").is_none() { Some(self.declare_cfn("main", llvm::UnnamedAddr::Global, fn_type)) @@ -897,6 +872,9 @@ impl<'ll> CodegenCx<'ll, '_> { ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void); ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void); } + + ifn!("llvm.ptrmask", fn(i8p, t_isize) -> i8p); + None } diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs index 58f391692..433f04320 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs @@ -16,8 +16,6 @@ use rustc_middle::ty::TyCtxt; use std::ffi::CString; -use tracing::debug; - /// Generates and exports the Coverage Map. /// /// Rust Coverage Map generation supports LLVM Coverage Mapping Format versions @@ -131,7 +129,7 @@ impl CoverageMapGenerator { // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5) // requires setting the first filename to the compilation directory. // Since rustc generates coverage maps with relative paths, the - // compilation directory can be combined with the the relative paths + // compilation directory can be combined with the relative paths // to get absolute paths, if needed. 
let working_dir = tcx .sess diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs index 98ba38356..964a632b6 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs @@ -28,7 +28,6 @@ use std::cell::RefCell; use std::ffi::CString; use std::iter; -use tracing::debug; pub mod mapgen; diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index bd84100e0..163ccd946 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -42,7 +42,6 @@ use rustc_span::{self, FileNameDisplayPreference, SourceFile}; use rustc_symbol_mangling::typeid_for_trait_ref; use rustc_target::abi::{Align, Size}; use smallvec::smallvec; -use tracing::debug; use libc::{c_char, c_longlong, c_uint}; use std::borrow::Cow; @@ -51,7 +50,6 @@ use std::hash::{Hash, Hasher}; use std::iter; use std::path::{Path, PathBuf}; use std::ptr; -use tracing::instrument; impl PartialEq for llvm::Metadata { fn eq(&self, other: &Self) -> bool { @@ -114,6 +112,7 @@ macro_rules! return_if_di_node_created_in_meantime { } /// Extract size and alignment from a TyAndLayout. +#[inline] fn size_and_align_of<'tcx>(ty_and_layout: TyAndLayout<'tcx>) -> (Size, Align) { (ty_and_layout.size, ty_and_layout.align.abi) } @@ -1499,24 +1498,18 @@ fn vcall_visibility_metadata<'ll, 'tcx>( // If there is not LTO and the visibility in public, we have to assume that the vtable can // be seen from anywhere. With multiple CGUs, the vtable is quasi-public. (Lto::No | Lto::ThinLocal, Visibility::Public, _) - | (Lto::No, Visibility::Restricted(_) | Visibility::Invisible, false) => { - VCallVisibility::Public - } + | (Lto::No, Visibility::Restricted(_), false) => VCallVisibility::Public, // With LTO and a quasi-public visibility, the usages of the functions of the vtable are // all known by the `LinkageUnit`. // FIXME: LLVM only supports this optimization for `Lto::Fat` currently. Once it also // supports `Lto::Thin` the `VCallVisibility` may have to be adjusted for those. (Lto::Fat | Lto::Thin, Visibility::Public, _) - | ( - Lto::ThinLocal | Lto::Thin | Lto::Fat, - Visibility::Restricted(_) | Visibility::Invisible, - false, - ) => VCallVisibility::LinkageUnit, + | (Lto::ThinLocal | Lto::Thin | Lto::Fat, Visibility::Restricted(_), false) => { + VCallVisibility::LinkageUnit + } // If there is only one CGU, private vtables can only be seen by that CGU/translation unit // and therefore we know of all usages of functions in the vtable. 
-        (_, Visibility::Restricted(_) | Visibility::Invisible, true) => {
-            VCallVisibility::TranslationUnit
-        }
+        (_, Visibility::Restricted(_), true) => VCallVisibility::TranslationUnit,
    };

    let trait_ref_typeid = typeid_for_trait_ref(cx.tcx, trait_ref);
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
index d6e2c8ccd..129e336c7 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -1,19 +1,21 @@
 use std::borrow::Cow;

 use libc::c_uint;
-use rustc_codegen_ssa::debuginfo::{
-    type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo,
+use rustc_codegen_ssa::{
+    debuginfo::{type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo},
+    traits::ConstMethods,
 };
+
+use rustc_index::vec::IndexVec;
 use rustc_middle::{
     bug,
     ty::{
         self,
         layout::{LayoutOf, TyAndLayout},
-        util::Discr,
-        AdtDef, GeneratorSubsts,
+        AdtDef, GeneratorSubsts, Ty,
     },
 };
-use rustc_target::abi::{Size, TagEncoding, VariantIdx, Variants};
+use rustc_target::abi::{Align, Endian, Size, TagEncoding, VariantIdx, Variants};
 use smallvec::smallvec;

 use crate::{
@@ -21,9 +23,9 @@ use crate::{
     debuginfo::{
         metadata::{
             build_field_di_node, closure_saved_names_of_captured_variables,
-            enums::tag_base_type,
-            file_metadata, generator_layout_and_saved_local_names, size_and_align_of,
-            type_map::{self, UniqueTypeId},
+            enums::{tag_base_type, DiscrResult},
+            file_metadata, generator_layout_and_saved_local_names, size_and_align_of, type_di_node,
+            type_map::{self, Stub, UniqueTypeId},
             unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS,
             NO_SCOPE_METADATA, UNKNOWN_LINE_NUMBER,
         },
@@ -35,59 +37,161 @@ use crate::{
     },
 };

-/// In CPP-like mode, we generate a union of structs for each variant and an
-/// explicit discriminant field roughly equivalent to the following C/C++ code:
+// The names of the associated constants in each variant wrapper struct.
+// These have to match up with the names being used in `intrinsic.natvis`.
+const ASSOC_CONST_DISCR_NAME: &str = "NAME";
+const ASSOC_CONST_DISCR_EXACT: &str = "DISCR_EXACT";
+const ASSOC_CONST_DISCR_BEGIN: &str = "DISCR_BEGIN";
+const ASSOC_CONST_DISCR_END: &str = "DISCR_END";
+
+const ASSOC_CONST_DISCR128_EXACT_LO: &str = "DISCR128_EXACT_LO";
+const ASSOC_CONST_DISCR128_EXACT_HI: &str = "DISCR128_EXACT_HI";
+const ASSOC_CONST_DISCR128_BEGIN_LO: &str = "DISCR128_BEGIN_LO";
+const ASSOC_CONST_DISCR128_BEGIN_HI: &str = "DISCR128_BEGIN_HI";
+const ASSOC_CONST_DISCR128_END_LO: &str = "DISCR128_END_LO";
+const ASSOC_CONST_DISCR128_END_HI: &str = "DISCR128_END_HI";
+
+// The name of the tag field in the top-level union
+const TAG_FIELD_NAME: &str = "tag";
+const TAG_FIELD_NAME_128_LO: &str = "tag128_lo";
+const TAG_FIELD_NAME_128_HI: &str = "tag128_hi";
+
+// We assign a "virtual" discriminant value to the sole variant of
+// a single-variant enum.
+const SINGLE_VARIANT_VIRTUAL_DISR: u64 = 0;
+
+/// In CPP-like mode, we generate a union with a field for each variant and an
+/// explicit tag field. The field of each variant has a struct type
+/// that encodes the discriminant of the variant and its data layout.
+/// The union also has a nested enumeration type that is only used for encoding
+/// variant names in an efficient way. Its enumerator values do _not_ correspond
+/// to the enum's discriminant values.
+/// It's roughly equivalent to the following C/C++ code: /// /// ```c -/// union enum$<{fully-qualified-name}> { -/// struct {variant 0 name} { -/// <variant 0 fields> +/// union enum2$<{fully-qualified-name}> { +/// struct Variant0 { +/// struct {name-of-variant-0} { +/// <variant 0 fields> +/// } value; +/// +/// static VariantNames NAME = {name-of-variant-0}; +/// static int_type DISCR_EXACT = {discriminant-of-variant-0}; /// } variant0; +/// /// <other variant structs> -/// {name} discriminant; +/// +/// int_type tag; +/// +/// enum VariantNames { +/// <name-of-variant-0> = 0, // The numeric values are variant index, +/// <name-of-variant-1> = 1, // not discriminant values. +/// <name-of-variant-2> = 2, +/// ... +/// } /// } /// ``` /// -/// As you can see, the type name is wrapped `enum$`. This way we can have a -/// single NatVis rule for handling all enums. +/// As you can see, the type name is wrapped in `enum2$<_>`. This way we can +/// have a single NatVis rule for handling all enums. The `2` in `enum2$<_>` +/// is an encoding version tag, so that debuggers can decide to decode this +/// differently than the previous `enum$<_>` encoding emitted by earlier +/// compiler versions. /// -/// At the LLVM IR level this looks like +/// Niche-tag enums have one special variant, usually called the +/// "untagged variant". This variant has a field that +/// doubles as the tag of the enum. The variant is active when the value of +/// that field is within a pre-defined range. Therefore the variant struct +/// has a `DISCR_BEGIN` and `DISCR_END` field instead of `DISCR_EXACT` in +/// that case. Both `DISCR_BEGIN` and `DISCR_END` are inclusive bounds. +/// Note that these ranges can wrap around, so that `DISCR_END < DISCR_BEGIN`. /// -/// ```txt -/// DW_TAG_union_type (top-level type for enum) -/// DW_TAG_member (member for variant 1) -/// DW_TAG_member (member for variant 2) -/// DW_TAG_member (member for variant 3) -/// DW_TAG_structure_type (type of variant 1) -/// DW_TAG_structure_type (type of variant 2) -/// DW_TAG_structure_type (type of variant 3) -/// DW_TAG_enumeration_type (type of tag) -/// ``` +/// Single-variant enums don't actually have a tag field. In this case we +/// emit a static tag field (that always has the value 0) so we can use the +/// same representation (and NatVis). /// -/// The above encoding applies for enums with a direct tag. For niche-tag we have to do things -/// differently in order to allow a NatVis visualizer to extract all the information needed: -/// We generate a union of two fields, one for the dataful variant -/// and one that just points to the discriminant (which is some field within the dataful variant). -/// We also create a DW_TAG_enumeration_type DIE that contains tag values for the non-dataful -/// variants and make the discriminant field that type. We then use NatVis to render the enum type -/// correctly in Windbg/VS. This will generate debuginfo roughly equivalent to the following C: +/// For niche-layout enums it's possible to have a 128-bit tag. NatVis, VS, and +/// WinDbg (the main targets for CPP-like debuginfo at the moment) don't support +/// 128-bit integers, so all values involved get split into two 64-bit fields. +/// Instead of the `tag` field, we generate two fields `tag128_lo` and `tag128_hi`, +/// Instead of `DISCR_EXACT`, we generate `DISCR128_EXACT_LO` and `DISCR128_EXACT_HI`, +/// and so on. 
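+/// For example, a 128-bit tag value of `(1 << 64) | 2` is stored as
+/// `tag128_hi == 1` and `tag128_lo == 2`, and is reassembled by the debugger
+/// as `(tag128_hi as u128) << 64 | tag128_lo as u128`.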
 ///
-/// ```c
-/// union enum$<{name}, {min niche}, {max niche}, {dataful variant name}> {
-///     struct <dataful variant name> {
-///         <fields in dataful variant>
-///     } dataful_variant;
-///     enum Discriminant$ {
-///         <non-dataful variants>
-///     } discriminant;
+///
+///
+/// The following pseudocode shows how to decode an enum value in a debugger:
+///
+/// ```text
+///
+/// fn find_active_variant(enum_value) -> (VariantName, VariantValue) {
+///     let is_128_bit = enum_value.has_field("tag128_lo");
+///
+///     if !is_128_bit {
+///         // Note: `tag` can be a static field for enums with only one
+///         // inhabited variant.
+///         let tag = enum_value.field("tag").value;
+///
+///         // For each variant, check if it is a match. Only one of them will match,
+///         // so if we find it we can return it immediately.
+///         for variant_field in enum_value.fields().filter(|f| f.name.starts_with("variant")) {
+///             if variant_field.has_field("DISCR_EXACT") {
+///                 // This variant corresponds to a single tag value
+///                 if variant_field.field("DISCR_EXACT").value == tag {
+///                     return (variant_field.field("NAME"), variant_field.value);
+///                 }
+///             } else {
+///                 // This is a range variant
+///                 let begin = variant_field.field("DISCR_BEGIN");
+///                 let end = variant_field.field("DISCR_END");
+///
+///                 if is_in_range(tag, begin, end) {
+///                     return (variant_field.field("NAME"), variant_field.value);
+///                 }
+///             }
+///         }
+///     } else {
+///         // Basically the same as with smaller tags, we just have to
+///         // stitch the values together.
+///         let tag: u128 = (enum_value.field("tag128_lo").value as u128) |
+///                         (enum_value.field("tag128_hi").value as u128 << 64);
+///
+///         for variant_field in enum_value.fields().filter(|f| f.name.starts_with("variant")) {
+///             if variant_field.has_field("DISCR128_EXACT_LO") {
+///                 let discr_exact = (variant_field.field("DISCR128_EXACT_LO").value as u128) |
+///                                   (variant_field.field("DISCR128_EXACT_HI").value as u128 << 64);
+///
+///                 // This variant corresponds to a single tag value
+///                 if discr_exact == tag {
+///                     return (variant_field.field("NAME"), variant_field.value);
+///                 }
+///             } else {
+///                 // This is a range variant
+///                 let begin = (variant_field.field("DISCR128_BEGIN_LO").value as u128) |
+///                             (variant_field.field("DISCR128_BEGIN_HI").value as u128 << 64);
+///                 let end = (variant_field.field("DISCR128_END_LO").value as u128) |
+///                           (variant_field.field("DISCR128_END_HI").value as u128 << 64);
+///
+///                 if is_in_range(tag, begin, end) {
+///                     return (variant_field.field("NAME"), variant_field.value);
+///                 }
+///             }
+///         }
+///     }
+///
+///     // We should have found an active variant at this point.
+///     unreachable!();
 /// }
-/// ```
 ///
-/// The NatVis in `intrinsic.natvis` matches on the type name `enum$<*, *, *, *>`
-/// and evaluates `this.discriminant`. If the value is between the min niche and max
-/// niche, then the enum is in the dataful variant and `this.dataful_variant` is
-/// rendered. Otherwise, the enum is in one of the non-dataful variants. In that
-/// case, we just need to render the name of the `this.discriminant` enum.
+/// // Check if a value is within the given range +/// // (where the range might wrap around the value space) +/// fn is_in_range(value, start, end) -> bool { +/// if start < end { +/// value >= start && value <= end +/// } else { +/// value >= start || value <= end +/// } +/// } +/// +/// ``` pub(super) fn build_enum_type_di_node<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, unique_type_id: UniqueTypeId<'tcx>, @@ -135,27 +239,28 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>( ref variants, tag_field, .. - } => build_union_fields_for_direct_tag_enum( + } => build_union_fields_for_enum( cx, enum_adt_def, enum_type_and_layout, enum_type_di_node, - &mut variants.indices(), + variants.indices(), tag_field, + None, ), Variants::Multiple { - tag_encoding: TagEncoding::Niche { dataful_variant, .. }, + tag_encoding: TagEncoding::Niche { untagged_variant, .. }, ref variants, tag_field, .. - } => build_union_fields_for_niche_tag_enum( + } => build_union_fields_for_enum( cx, enum_adt_def, enum_type_and_layout, enum_type_di_node, - dataful_variant, - &mut variants.indices(), + variants.indices(), tag_field, + Some(untagged_variant), ), } }, @@ -217,137 +322,344 @@ fn build_single_variant_union_fields<'ll, 'tcx>( let variant_layout = enum_type_and_layout.for_variant(cx, variant_index); let variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node( cx, - enum_type_and_layout.ty, + enum_type_and_layout, enum_type_di_node, variant_index, enum_adt_def.variant(variant_index), variant_layout, ); - // NOTE: The field name of the union is the same as the variant name, not "variant0". - let variant_name = enum_adt_def.variant(variant_index).name.as_str(); + let tag_base_type = cx.tcx.types.u32; + let tag_base_type_di_node = type_di_node(cx, tag_base_type); + let tag_base_type_align = cx.align_of(tag_base_type); + + let variant_names_type_di_node = build_variant_names_type_di_node( + cx, + enum_type_di_node, + std::iter::once(( + variant_index, + Cow::from(enum_adt_def.variant(variant_index).name.as_str()), + )), + ); - smallvec![build_field_di_node( + let variant_struct_type_wrapper_di_node = build_variant_struct_wrapper_type_di_node( cx, + enum_type_and_layout, enum_type_di_node, - variant_name, - // NOTE: We use the size and align of the entire type, not from variant_layout - // since the later is sometimes smaller (if it has fewer fields). - size_and_align_of(enum_type_and_layout), - Size::ZERO, - DIFlags::FlagZero, + variant_index, + None, variant_struct_type_di_node, - )] + variant_names_type_di_node, + tag_base_type_di_node, + tag_base_type, + DiscrResult::NoDiscriminant, + ); + + smallvec![ + build_field_di_node( + cx, + enum_type_di_node, + &variant_union_field_name(variant_index), + // NOTE: We use the size and align of the entire type, not from variant_layout + // since the later is sometimes smaller (if it has fewer fields). 
+ size_and_align_of(enum_type_and_layout), + Size::ZERO, + DIFlags::FlagZero, + variant_struct_type_wrapper_di_node, + ), + unsafe { + llvm::LLVMRustDIBuilderCreateStaticMemberType( + DIB(cx), + enum_type_di_node, + TAG_FIELD_NAME.as_ptr().cast(), + TAG_FIELD_NAME.len(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER, + variant_names_type_di_node, + DIFlags::FlagZero, + Some(cx.const_u64(SINGLE_VARIANT_VIRTUAL_DISR)), + tag_base_type_align.bits() as u32, + ) + } + ] } -fn build_union_fields_for_direct_tag_enum<'ll, 'tcx>( +fn build_union_fields_for_enum<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, enum_adt_def: AdtDef<'tcx>, enum_type_and_layout: TyAndLayout<'tcx>, enum_type_di_node: &'ll DIType, - variant_indices: &mut dyn Iterator<Item = VariantIdx>, + variant_indices: impl Iterator<Item = VariantIdx> + Clone, tag_field: usize, + untagged_variant_index: Option<VariantIdx>, ) -> SmallVec<&'ll DIType> { + let tag_base_type = super::tag_base_type(cx, enum_type_and_layout); + + let variant_names_type_di_node = build_variant_names_type_di_node( + cx, + enum_type_di_node, + variant_indices.clone().map(|variant_index| { + let variant_name = Cow::from(enum_adt_def.variant(variant_index).name.as_str()); + (variant_index, variant_name) + }), + ); + let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_indices .map(|variant_index| { let variant_layout = enum_type_and_layout.for_variant(cx, variant_index); + let variant_def = enum_adt_def.variant(variant_index); + + let variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node( + cx, + enum_type_and_layout, + enum_type_di_node, + variant_index, + variant_def, + variant_layout, + ); + VariantFieldInfo { variant_index, - variant_struct_type_di_node: super::build_enum_variant_struct_type_di_node( - cx, - enum_type_and_layout.ty, - enum_type_di_node, - variant_index, - enum_adt_def.variant(variant_index), - variant_layout, - ), + variant_struct_type_di_node, source_info: None, + discr: super::compute_discriminant_value(cx, enum_type_and_layout, variant_index), } }) .collect(); - let discr_type_name = cx.tcx.item_name(enum_adt_def.did()); - let tag_base_type = super::tag_base_type(cx, enum_type_and_layout); - let discr_type_di_node = super::build_enumeration_type_di_node( - cx, - discr_type_name.as_str(), - tag_base_type, - &mut enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| { - (discr, Cow::from(enum_adt_def.variant(variant_index).name.as_str())) - }), - enum_type_di_node, - ); - build_union_fields_for_direct_tag_enum_or_generator( cx, enum_type_and_layout, enum_type_di_node, &variant_field_infos, - discr_type_di_node, + variant_names_type_di_node, + tag_base_type, tag_field, + untagged_variant_index, ) } -fn build_union_fields_for_niche_tag_enum<'ll, 'tcx>( +// The base type of the VariantNames DW_AT_enumeration_type is always the same. +// It has nothing to do with the tag of the enum and just has to be big enough +// to hold all variant names. +fn variant_names_enum_base_type<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> Ty<'tcx> { + cx.tcx.types.u32 +} + +/// This function builds a DW_AT_enumeration_type that contains an entry for +/// each variant. Note that this has nothing to do with the discriminant. The +/// numeric value of each enumerator corresponds to the variant index. The +/// type is only used for efficiently encoding the name of each variant in +/// debuginfo. 
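+///
+/// For example, for `Option<T>` this enumeration contains `None = 0` and
+/// `Some = 1` (the variant indices), regardless of which tag values the two
+/// variants are actually encoded with.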
+fn build_variant_names_type_di_node<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, - enum_adt_def: AdtDef<'tcx>, - enum_type_and_layout: TyAndLayout<'tcx>, - enum_type_di_node: &'ll DIType, - dataful_variant_index: VariantIdx, - variant_indices: &mut dyn Iterator<Item = VariantIdx>, - tag_field: usize, -) -> SmallVec<&'ll DIType> { - let dataful_variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node( + containing_scope: &'ll DIType, + variants: impl Iterator<Item = (VariantIdx, Cow<'tcx, str>)>, +) -> &'ll DIType { + // Create an enumerator for each variant. + super::build_enumeration_type_di_node( cx, - enum_type_and_layout.ty, - enum_type_di_node, - dataful_variant_index, - &enum_adt_def.variant(dataful_variant_index), - enum_type_and_layout.for_variant(cx, dataful_variant_index), - ); + "VariantNames", + variant_names_enum_base_type(cx), + variants.map(|(variant_index, variant_name)| (variant_name, variant_index.as_u32() as u64)), + containing_scope, + ) +} - let tag_base_type = super::tag_base_type(cx, enum_type_and_layout); - // Create an DW_TAG_enumerator for each variant except the dataful one. - let discr_type_di_node = super::build_enumeration_type_di_node( +fn build_variant_struct_wrapper_type_di_node<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + enum_or_generator_type_and_layout: TyAndLayout<'tcx>, + enum_or_generator_type_di_node: &'ll DIType, + variant_index: VariantIdx, + untagged_variant_index: Option<VariantIdx>, + variant_struct_type_di_node: &'ll DIType, + variant_names_type_di_node: &'ll DIType, + tag_base_type_di_node: &'ll DIType, + tag_base_type: Ty<'tcx>, + discr: DiscrResult, +) -> &'ll DIType { + type_map::build_type_with_children( cx, - "Discriminant$", - tag_base_type, - &mut variant_indices.filter_map(|variant_index| { - if let Some(discr_val) = - super::compute_discriminant_value(cx, enum_type_and_layout, variant_index) - { - let discr = Discr { val: discr_val as u128, ty: tag_base_type }; - let variant_name = Cow::from(enum_adt_def.variant(variant_index).name.as_str()); - Some((discr, variant_name)) - } else { - debug_assert_eq!(variant_index, dataful_variant_index); - None - } - }), - enum_type_di_node, - ); - - smallvec![ - build_field_di_node( - cx, - enum_type_di_node, - "dataful_variant", - size_and_align_of(enum_type_and_layout), - Size::ZERO, - DIFlags::FlagZero, - dataful_variant_struct_type_di_node, - ), - build_field_di_node( + type_map::stub( cx, - enum_type_di_node, - "discriminant", - cx.size_and_align_of(tag_base_type), - enum_type_and_layout.fields.offset(tag_field), + Stub::Struct, + UniqueTypeId::for_enum_variant_struct_type_wrapper( + cx.tcx, + enum_or_generator_type_and_layout.ty, + variant_index, + ), + &variant_struct_wrapper_type_name(variant_index), + // NOTE: We use size and align of enum_type, not from variant_layout: + size_and_align_of(enum_or_generator_type_and_layout), + Some(enum_or_generator_type_di_node), DIFlags::FlagZero, - discr_type_di_node, ), - ] + |cx, wrapper_struct_type_di_node| { + enum DiscrKind { + Exact(u64), + Exact128(u128), + Range(u64, u64), + Range128(u128, u128), + } + + let (tag_base_type_size, tag_base_type_align) = cx.size_and_align_of(tag_base_type); + let is_128_bits = tag_base_type_size.bits() > 64; + + let discr = match discr { + DiscrResult::NoDiscriminant => DiscrKind::Exact(SINGLE_VARIANT_VIRTUAL_DISR), + DiscrResult::Value(discr_val) => { + if is_128_bits { + DiscrKind::Exact128(discr_val) + } else { + debug_assert_eq!(discr_val, discr_val as u64 as u128); + DiscrKind::Exact(discr_val as 
u64) + } + } + DiscrResult::Range(min, max) => { + assert_eq!(Some(variant_index), untagged_variant_index); + if is_128_bits { + DiscrKind::Range128(min, max) + } else { + debug_assert_eq!(min, min as u64 as u128); + debug_assert_eq!(max, max as u64 as u128); + DiscrKind::Range(min as u64, max as u64) + } + } + }; + + let mut fields = SmallVec::new(); + + // We always have a field for the value + fields.push(build_field_di_node( + cx, + wrapper_struct_type_di_node, + "value", + size_and_align_of(enum_or_generator_type_and_layout), + Size::ZERO, + DIFlags::FlagZero, + variant_struct_type_di_node, + )); + + let build_assoc_const = + |name: &str, type_di_node: &'ll DIType, value: u64, align: Align| unsafe { + llvm::LLVMRustDIBuilderCreateStaticMemberType( + DIB(cx), + wrapper_struct_type_di_node, + name.as_ptr().cast(), + name.len(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER, + type_di_node, + DIFlags::FlagZero, + Some(cx.const_u64(value)), + align.bits() as u32, + ) + }; + + // We also always have an associated constant for the discriminant value + // of the variant. + fields.push(build_assoc_const( + ASSOC_CONST_DISCR_NAME, + variant_names_type_di_node, + variant_index.as_u32() as u64, + cx.align_of(variant_names_enum_base_type(cx)), + )); + + // Emit the discriminant value (or range) corresponding to the variant. + match discr { + DiscrKind::Exact(discr_val) => { + fields.push(build_assoc_const( + ASSOC_CONST_DISCR_EXACT, + tag_base_type_di_node, + discr_val, + tag_base_type_align, + )); + } + DiscrKind::Exact128(discr_val) => { + let align = cx.align_of(cx.tcx.types.u64); + let type_di_node = type_di_node(cx, cx.tcx.types.u64); + let Split128 { hi, lo } = split_128(discr_val); + + fields.push(build_assoc_const( + ASSOC_CONST_DISCR128_EXACT_LO, + type_di_node, + lo, + align, + )); + + fields.push(build_assoc_const( + ASSOC_CONST_DISCR128_EXACT_HI, + type_di_node, + hi, + align, + )); + } + DiscrKind::Range(begin, end) => { + fields.push(build_assoc_const( + ASSOC_CONST_DISCR_BEGIN, + tag_base_type_di_node, + begin, + tag_base_type_align, + )); + + fields.push(build_assoc_const( + ASSOC_CONST_DISCR_END, + tag_base_type_di_node, + end, + tag_base_type_align, + )); + } + DiscrKind::Range128(begin, end) => { + let align = cx.align_of(cx.tcx.types.u64); + let type_di_node = type_di_node(cx, cx.tcx.types.u64); + let Split128 { hi: begin_hi, lo: begin_lo } = split_128(begin); + let Split128 { hi: end_hi, lo: end_lo } = split_128(end); + + fields.push(build_assoc_const( + ASSOC_CONST_DISCR128_BEGIN_HI, + type_di_node, + begin_hi, + align, + )); + + fields.push(build_assoc_const( + ASSOC_CONST_DISCR128_BEGIN_LO, + type_di_node, + begin_lo, + align, + )); + + fields.push(build_assoc_const( + ASSOC_CONST_DISCR128_END_HI, + type_di_node, + end_hi, + align, + )); + + fields.push(build_assoc_const( + ASSOC_CONST_DISCR128_END_LO, + type_di_node, + end_lo, + align, + )); + } + } + + fields + }, + NO_GENERICS, + ) + .di_node +} + +struct Split128 { + hi: u64, + lo: u64, +} + +fn split_128(value: u128) -> Split128 { + Split128 { hi: (value >> 64) as u64, lo: value as u64 } } fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>( @@ -369,6 +681,29 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>( let common_upvar_names = closure_saved_names_of_captured_variables(cx.tcx, generator_def_id); let variant_range = generator_substs.variant_range(generator_def_id, cx.tcx); + let variant_count = (variant_range.start.as_u32()..variant_range.end.as_u32()).len(); + + let tag_base_type = 
tag_base_type(cx, generator_type_and_layout); + + let variant_names_type_di_node = build_variant_names_type_di_node( + cx, + generator_type_di_node, + variant_range + .clone() + .map(|variant_index| (variant_index, GeneratorSubsts::variant_name(variant_index))), + ); + + let discriminants: IndexVec<VariantIdx, DiscrResult> = { + let discriminants_iter = generator_substs.discriminants(generator_def_id, cx.tcx); + let mut discriminants: IndexVec<VariantIdx, DiscrResult> = + IndexVec::with_capacity(variant_count); + for (variant_index, discr) in discriminants_iter { + // Assert that the index in the IndexMap matches up with the given VariantIdx. + assert_eq!(variant_index, discriminants.next_index()); + discriminants.push(DiscrResult::Value(discr.val)); + } + discriminants + }; // Build the type node for each field. let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_range @@ -391,29 +726,24 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>( None }; - VariantFieldInfo { variant_index, variant_struct_type_di_node, source_info } + VariantFieldInfo { + variant_index, + variant_struct_type_di_node, + source_info, + discr: discriminants[variant_index], + } }) .collect(); - let tag_base_type = tag_base_type(cx, generator_type_and_layout); - let discr_type_name = "Discriminant$"; - let discr_type_di_node = super::build_enumeration_type_di_node( - cx, - discr_type_name, - tag_base_type, - &mut generator_substs - .discriminants(generator_def_id, cx.tcx) - .map(|(variant_index, discr)| (discr, GeneratorSubsts::variant_name(variant_index))), - generator_type_di_node, - ); - build_union_fields_for_direct_tag_enum_or_generator( cx, generator_type_and_layout, generator_type_di_node, &variant_field_infos[..], - discr_type_di_node, + variant_names_type_di_node, + tag_base_type, tag_field, + None, ) } @@ -425,8 +755,11 @@ fn build_union_fields_for_direct_tag_enum_or_generator<'ll, 'tcx>( enum_type_di_node: &'ll DIType, variant_field_infos: &[VariantFieldInfo<'ll>], discr_type_di_node: &'ll DIType, + tag_base_type: Ty<'tcx>, tag_field: usize, + untagged_variant_index: Option<VariantIdx>, ) -> SmallVec<&'ll DIType> { + let tag_base_type_di_node = type_di_node(cx, tag_base_type); let mut unions_fields = SmallVec::with_capacity(variant_field_infos.len() + 1); // We create a field in the union for each variant ... @@ -438,6 +771,19 @@ fn build_union_fields_for_direct_tag_enum_or_generator<'ll, 'tcx>( let field_name = variant_union_field_name(variant_member_info.variant_index); let (size, align) = size_and_align_of(enum_type_and_layout); + let variant_struct_type_wrapper = build_variant_struct_wrapper_type_di_node( + cx, + enum_type_and_layout, + enum_type_di_node, + variant_member_info.variant_index, + untagged_variant_index, + variant_member_info.variant_struct_type_di_node, + discr_type_di_node, + tag_base_type_di_node, + tag_base_type, + variant_member_info.discr, + ); + // We use LLVMRustDIBuilderCreateMemberType() member type directly because // the build_field_di_node() function does not support specifying a source location, // which is something that we don't do anywhere else. 
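The 128-bit paths above all funnel through the `split_128` helper added earlier in this file. A minimal, self-contained sketch (illustrative only, not part of the patch) of the split and of how a consumer such as the NatVis visualizer reassembles the halves:

```rust
// Mirrors the `Split128` helper from cpp_like.rs: a 128-bit tag is exposed to
// the debugger as two 64-bit halves.
struct Split128 {
    hi: u64,
    lo: u64,
}

fn split_128(value: u128) -> Split128 {
    Split128 { hi: (value >> 64) as u64, lo: value as u64 }
}

fn main() {
    let tag: u128 = (1u128 << 64) | 2;
    let Split128 { hi, lo } = split_128(tag);
    assert_eq!((hi, lo), (1, 2));

    // A debugger stitches the halves back together, mirroring the pseudocode
    // in the module documentation above; the split must round-trip.
    let reassembled = ((hi as u128) << 64) | (lo as u128);
    assert_eq!(reassembled, tag);
}
```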
@@ -456,7 +802,7 @@ fn build_union_fields_for_direct_tag_enum_or_generator<'ll, 'tcx>( // Union fields are always at offset zero Size::ZERO.bits(), DIFlags::FlagZero, - variant_member_info.variant_struct_type_di_node, + variant_struct_type_wrapper, ) } })); @@ -466,16 +812,53 @@ fn build_union_fields_for_direct_tag_enum_or_generator<'ll, 'tcx>( cx.size_and_align_of(super::tag_base_type(cx, enum_type_and_layout)) ); - // ... and a field for the discriminant. - unions_fields.push(build_field_di_node( - cx, - enum_type_di_node, - "discriminant", - cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty), - enum_type_and_layout.fields.offset(tag_field), - DIFlags::FlagZero, - discr_type_di_node, - )); + // ... and a field for the tag. If the tag is 128 bits wide, this will actually + // be two 64-bit fields. + let is_128_bits = cx.size_of(tag_base_type).bits() > 64; + + if is_128_bits { + let type_di_node = type_di_node(cx, cx.tcx.types.u64); + let size_and_align = cx.size_and_align_of(cx.tcx.types.u64); + + let (lo_offset, hi_offset) = match cx.tcx.data_layout.endian { + Endian::Little => (0, 8), + Endian::Big => (8, 0), + }; + + let tag_field_offset = enum_type_and_layout.fields.offset(tag_field).bytes(); + let lo_offset = Size::from_bytes(tag_field_offset + lo_offset); + let hi_offset = Size::from_bytes(tag_field_offset + hi_offset); + + unions_fields.push(build_field_di_node( + cx, + enum_type_di_node, + TAG_FIELD_NAME_128_LO, + size_and_align, + lo_offset, + DIFlags::FlagZero, + type_di_node, + )); + + unions_fields.push(build_field_di_node( + cx, + enum_type_di_node, + TAG_FIELD_NAME_128_HI, + size_and_align, + hi_offset, + DIFlags::FlagZero, + type_di_node, + )); + } else { + unions_fields.push(build_field_di_node( + cx, + enum_type_di_node, + TAG_FIELD_NAME, + cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty), + enum_type_and_layout.fields.offset(tag_field), + DIFlags::FlagZero, + tag_base_type_di_node, + )); + } unions_fields } @@ -485,6 +868,7 @@ struct VariantFieldInfo<'ll> { variant_index: VariantIdx, variant_struct_type_di_node: &'ll DIType, source_info: Option<(&'ll DIFile, c_uint)>, + discr: DiscrResult, } fn variant_union_field_name(variant_index: VariantIdx) -> Cow<'static, str> { @@ -512,3 +896,29 @@ fn variant_union_field_name(variant_index: VariantIdx) -> Cow<'static, str> { .map(|&s| Cow::from(s)) .unwrap_or_else(|| format!("variant{}", variant_index.as_usize()).into()) } + +fn variant_struct_wrapper_type_name(variant_index: VariantIdx) -> Cow<'static, str> { + const PRE_ALLOCATED: [&str; 16] = [ + "Variant0", + "Variant1", + "Variant2", + "Variant3", + "Variant4", + "Variant5", + "Variant6", + "Variant7", + "Variant8", + "Variant9", + "Variant10", + "Variant11", + "Variant12", + "Variant13", + "Variant14", + "Variant15", + ]; + + PRE_ALLOCATED + .get(variant_index.as_usize()) + .map(|&s| Cow::from(s)) + .unwrap_or_else(|| format!("Variant{}", variant_index.as_usize()).into()) +} diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs index 73e01d045..14044d0f9 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs @@ -10,7 +10,6 @@ use rustc_middle::{ ty::{ self, layout::{IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout}, - util::Discr, AdtDef, GeneratorSubsts, Ty, VariantDef, }, }; @@ -90,8 +89,11 @@ fn build_c_style_enum_di_node<'ll, 'tcx>( cx, 
&compute_debuginfo_type_name(cx.tcx, enum_type_and_layout.ty, false), tag_base_type(cx, enum_type_and_layout), - &mut enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| { - (discr, Cow::from(enum_adt_def.variant(variant_index).name.as_str())) + enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| { + let name = Cow::from(enum_adt_def.variant(variant_index).name.as_str()); + // Is there anything we can do to support 128-bit C-Style enums? + let value = discr.val as u64; + (name, value) }), containing_scope, ), @@ -152,7 +154,7 @@ fn build_enumeration_type_di_node<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, type_name: &str, base_type: Ty<'tcx>, - variants: &mut dyn Iterator<Item = (Discr<'tcx>, Cow<'tcx, str>)>, + enumerators: impl Iterator<Item = (Cow<'tcx, str>, u64)>, containing_scope: &'ll DIType, ) -> &'ll DIType { let is_unsigned = match base_type.kind() { @@ -161,18 +163,15 @@ fn build_enumeration_type_di_node<'ll, 'tcx>( _ => bug!("build_enumeration_type_di_node() called with non-integer tag type."), }; - let enumerator_di_nodes: SmallVec<Option<&'ll DIType>> = variants - .map(|(discr, variant_name)| { - unsafe { - Some(llvm::LLVMRustDIBuilderCreateEnumerator( - DIB(cx), - variant_name.as_ptr().cast(), - variant_name.len(), - // FIXME: what if enumeration has i128 discriminant? - discr.val as i64, - is_unsigned, - )) - } + let enumerator_di_nodes: SmallVec<Option<&'ll DIType>> = enumerators + .map(|(name, value)| unsafe { + Some(llvm::LLVMRustDIBuilderCreateEnumerator( + DIB(cx), + name.as_ptr().cast(), + name.len(), + value as i64, + is_unsigned, + )) }) .collect(); @@ -247,23 +246,27 @@ fn build_enumeration_type_di_node<'ll, 'tcx>( /// and a DW_TAG_member for each field (but not the discriminant). fn build_enum_variant_struct_type_di_node<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, - enum_type: Ty<'tcx>, + enum_type_and_layout: TyAndLayout<'tcx>, enum_type_di_node: &'ll DIType, variant_index: VariantIdx, variant_def: &VariantDef, variant_layout: TyAndLayout<'tcx>, ) -> &'ll DIType { - debug_assert_eq!(variant_layout.ty, enum_type); + debug_assert_eq!(variant_layout.ty, enum_type_and_layout.ty); type_map::build_type_with_children( cx, type_map::stub( cx, Stub::Struct, - UniqueTypeId::for_enum_variant_struct_type(cx.tcx, enum_type, variant_index), + UniqueTypeId::for_enum_variant_struct_type( + cx.tcx, + enum_type_and_layout.ty, + variant_index, + ), variant_def.name.as_str(), // NOTE: We use size and align of enum_type, not from variant_layout: - cx.size_and_align_of(enum_type), + size_and_align_of(enum_type_and_layout), Some(enum_type_di_node), DIFlags::FlagZero, ), @@ -290,9 +293,9 @@ fn build_enum_variant_struct_type_di_node<'ll, 'tcx>( type_di_node(cx, field_layout.ty), ) }) - .collect() + .collect::<SmallVec<_>>() }, - |cx| build_generic_type_param_di_nodes(cx, enum_type), + |cx| build_generic_type_param_di_nodes(cx, enum_type_and_layout.ty), ) .di_node } @@ -398,39 +401,60 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>( .di_node } +#[derive(Copy, Clone)] +enum DiscrResult { + NoDiscriminant, + Value(u128), + Range(u128, u128), +} + +impl DiscrResult { + fn opt_single_val(&self) -> Option<u128> { + if let Self::Value(d) = *self { Some(d) } else { None } + } +} + /// Returns the discriminant value corresponding to the variant index. 
 ///
-/// Will return `None` if there is less than two variants (because then the enum won't have)
-/// a tag, and if this is the dataful variant of a niche-layout enum (because then there is no
-/// single discriminant value).
+/// Will return `DiscrResult::NoDiscriminant` if there are fewer than two variants (because then
+/// the enum won't have a tag), and `DiscrResult::Range` if this is the untagged variant of a
+/// niche-layout enum (because then there is no single discriminant value).
 fn compute_discriminant_value<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
     enum_type_and_layout: TyAndLayout<'tcx>,
     variant_index: VariantIdx,
-) -> Option<u64> {
+) -> DiscrResult {
     match enum_type_and_layout.layout.variants() {
-        &Variants::Single { .. } => None,
-        &Variants::Multiple { tag_encoding: TagEncoding::Direct, .. } => Some(
-            enum_type_and_layout.ty.discriminant_for_variant(cx.tcx, variant_index).unwrap().val
-                as u64,
+        &Variants::Single { .. } => DiscrResult::NoDiscriminant,
+        &Variants::Multiple { tag_encoding: TagEncoding::Direct, .. } => DiscrResult::Value(
+            enum_type_and_layout.ty.discriminant_for_variant(cx.tcx, variant_index).unwrap().val,
         ),
         &Variants::Multiple {
-            tag_encoding: TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant },
+            tag_encoding: TagEncoding::Niche { ref niche_variants, niche_start, untagged_variant },
             tag,
             ..
         } => {
-            if variant_index == dataful_variant {
-                None
+            if variant_index == untagged_variant {
+                let valid_range = enum_type_and_layout
+                    .for_variant(cx, variant_index)
+                    .largest_niche
+                    .as_ref()
+                    .unwrap()
+                    .valid_range;
+
+                let min = valid_range.start.min(valid_range.end);
+                let min = tag.size(cx).truncate(min);
+
+                let max = valid_range.start.max(valid_range.end);
+                let max = tag.size(cx).truncate(max);
+
+                DiscrResult::Range(min, max)
             } else {
                 let value = (variant_index.as_u32() as u128)
                     .wrapping_sub(niche_variants.start().as_u32() as u128)
                     .wrapping_add(niche_start);
                 let value = tag.size(cx).truncate(value);
-                // NOTE(eddyb) do *NOT* remove this assert, until
-                // we pass the full 128-bit value to LLVM, otherwise
-                // truncation will be silent and remain undetected.
-                assert_eq!(value as u64 as u128, value);
-                Some(value as u64)
+                DiscrResult::Value(value)
             }
         }
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
index f1935e0ec..becbccc43 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -88,7 +88,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
             variant_name: Cow::from(enum_adt_def.variant(variant_index).name.as_str()),
             variant_struct_type_di_node: super::build_enum_variant_struct_type_di_node(
                 cx,
-                enum_type,
+                enum_type_and_layout,
                 enum_type_di_node,
                 variant_index,
                 enum_adt_def.variant(variant_index),
@@ -378,7 +378,7 @@ fn build_discr_member_di_node<'ll, 'tcx>(
 ///
 /// The DW_AT_discr_value is optional, and is omitted if
 /// - This is the only variant of a univariant enum (i.e. there is no discriminant)
-/// - This is the "dataful" variant of a niche-layout enum
+/// - This is the "untagged" variant of a niche-layout enum
 ///   (where only the other variants are identified by a single value)
 ///
 /// There is only ever a single member, the type of which is a struct that describes the
@@ -413,7 +413,13 @@ fn build_enum_variant_member_di_node<'ll, 'tcx>(
             enum_type_and_layout.size.bits(),
             enum_type_and_layout.align.abi.bits() as u32,
             Size::ZERO.bits(),
-            discr_value.map(|v| cx.const_u64(v)),
+            discr_value.opt_single_val().map(|value| {
+                // NOTE(eddyb) do *NOT* remove this assert, until
+                // we pass the full 128-bit value to LLVM, otherwise
+                // truncation will be silent and remain undetected.
+                assert_eq!(value as u64 as u128, value);
+                cx.const_u64(value as u64)
+            }),
             DIFlags::FlagZero,
             variant_member_info.variant_struct_type_di_node,
         )
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
index ce2f419c4..e30622cbd 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
@@ -47,6 +47,8 @@ pub(super) enum UniqueTypeId<'tcx> {
     VariantPart(Ty<'tcx>, private::HiddenZst),
     /// The ID for the artificial struct type describing a single enum variant.
     VariantStructType(Ty<'tcx>, VariantIdx, private::HiddenZst),
+    /// The ID for the additional wrapper struct type describing an enum variant in CPP-like mode.
+    VariantStructTypeCppLikeWrapper(Ty<'tcx>, VariantIdx, private::HiddenZst),
     /// The ID of the artificial type we create for VTables.
     VTableTy(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>, private::HiddenZst),
 }
@@ -71,6 +73,15 @@ impl<'tcx> UniqueTypeId<'tcx> {
         UniqueTypeId::VariantStructType(enum_ty, variant_idx, private::HiddenZst)
     }

+    pub fn for_enum_variant_struct_type_wrapper(
+        tcx: TyCtxt<'tcx>,
+        enum_ty: Ty<'tcx>,
+        variant_idx: VariantIdx,
+    ) -> Self {
+        debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+        UniqueTypeId::VariantStructTypeCppLikeWrapper(enum_ty, variant_idx, private::HiddenZst)
+    }
+
     pub fn for_vtable_ty(
         tcx: TyCtxt<'tcx>,
         self_type: Ty<'tcx>,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index cf591295b..b23fe3fc9 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -39,7 +39,6 @@ use smallvec::SmallVec;
 use std::cell::OnceCell;
 use std::cell::RefCell;
 use std::iter;
-use tracing::debug;

 mod create_scope_map;
 pub mod gdb;
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
index 8f2436739..a40cfc8b2 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -6,7 +6,6 @@ use super::CodegenUnitDebugContext;
 use rustc_hir::def_id::DefId;
 use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
 use rustc_middle::ty::{self, DefIdTree, Ty};
-use tracing::trace;

 use crate::common::CodegenCx;
 use crate::llvm;
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index fa0ecd18f..f79ef1172 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -22,7 +22,6 @@ use rustc_codegen_ssa::traits::TypeMembershipMethods;
 use rustc_middle::ty::Ty;
 use
rustc_symbol_mangling::typeid::typeid_for_fnabi; use smallvec::SmallVec; -use tracing::debug; /// Declare a function. /// @@ -33,6 +32,7 @@ fn declare_raw_fn<'ll>( name: &str, callconv: llvm::CallConv, unnamed: llvm::UnnamedAddr, + visibility: llvm::Visibility, ty: &'ll Type, ) -> &'ll Value { debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty); @@ -42,6 +42,7 @@ fn declare_raw_fn<'ll>( llvm::SetFunctionCallConv(llfn, callconv); llvm::SetUnnamedAddress(llfn, unnamed); + llvm::set_visibility(llfn, visibility); let mut attrs = SmallVec::<[_; 4]>::new(); @@ -79,7 +80,14 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { unnamed: llvm::UnnamedAddr, fn_type: &'ll Type, ) -> &'ll Value { - declare_raw_fn(self, name, llvm::CCallConv, unnamed, fn_type) + // Declare C ABI functions with the visibility used by C by default. + let visibility = if self.tcx.sess.target.default_hidden_visibility { + llvm::Visibility::Hidden + } else { + llvm::Visibility::Default + }; + + declare_raw_fn(self, name, llvm::CCallConv, unnamed, visibility, fn_type) } /// Declare a Rust function. @@ -96,6 +104,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { name, fn_abi.llvm_cconv(), llvm::UnnamedAddr::Global, + llvm::Visibility::Default, fn_abi.llvm_type(self), ); fn_abi.apply_attrs_llfn(self, llfn); diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 9f3647492..825011941 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -71,6 +71,7 @@ fn get_simple_intrinsic<'ll>( sym::nearbyintf64 => "llvm.nearbyint.f64", sym::roundf32 => "llvm.round.f32", sym::roundf64 => "llvm.round.f64", + sym::ptr_mask => "llvm.ptrmask", _ => return None, }; Some(cx.get_intrinsic(llvm_name)) @@ -107,6 +108,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { let (simple_ty, simple_fn) = simple.unwrap(); self.call( simple_ty, + None, simple_fn, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None, @@ -161,7 +163,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { sym::volatile_load | sym::unaligned_volatile_load => { let tp_ty = substs.type_at(0); let ptr = args[0].immediate(); - let load = if let PassMode::Cast(ty) = fn_abi.ret.mode { + let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { let llty = ty.llvm_type(self); let ptr = self.pointercast(ptr, self.type_ptr_to(llty)); self.volatile_load(llty, ptr) @@ -374,7 +376,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { }; if !fn_abi.ret.is_ignore() { - if let PassMode::Cast(ty) = fn_abi.ret.mode { + if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { let ptr_llty = self.type_ptr_to(ty.llvm_type(self)); let ptr = self.pointercast(result.llval, ptr_llty); self.store(llval, ptr, result.align); @@ -434,7 +436,7 @@ fn try_intrinsic<'ll>( ) { if bx.sess().panic_strategy() == PanicStrategy::Abort { let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); - bx.call(try_func_ty, try_func, &[data], None); + bx.call(try_func_ty, None, try_func, &[data], None); // Return 0 unconditionally from the intrinsic call; // we can never unwind. 
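        // (A return value of 1 would instead signal a caught panic, as in the
        // catch paths of the landing-pad based functions below.)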
let ret_align = bx.tcx().data_layout.i32_align.abi; @@ -533,7 +535,7 @@ fn codegen_msvc_try<'ll>( let ptr_align = bx.tcx().data_layout.pointer_align.abi; let slot = bx.alloca(bx.type_i8p(), ptr_align); let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); - bx.invoke(try_func_ty, try_func, &[data], normal, catchswitch, None); + bx.invoke(try_func_ty, None, try_func, &[data], normal, catchswitch, None); bx.switch_to_block(normal); bx.ret(bx.const_i32(0)); @@ -577,7 +579,7 @@ fn codegen_msvc_try<'ll>( let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]); let ptr = bx.load(bx.type_i8p(), slot, ptr_align); let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); - bx.call(catch_ty, catch_func, &[data, ptr], Some(&funclet)); + bx.call(catch_ty, None, catch_func, &[data, ptr], Some(&funclet)); bx.catch_ret(&funclet, caught); // The flag value of 64 indicates a "catch-all". @@ -585,7 +587,7 @@ fn codegen_msvc_try<'ll>( let flags = bx.const_i32(64); let null = bx.const_null(bx.type_i8p()); let funclet = bx.catch_pad(cs, &[null, flags, null]); - bx.call(catch_ty, catch_func, &[data, null], Some(&funclet)); + bx.call(catch_ty, None, catch_func, &[data, null], Some(&funclet)); bx.catch_ret(&funclet, caught); bx.switch_to_block(caught); @@ -594,7 +596,7 @@ fn codegen_msvc_try<'ll>( // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). - let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None); + let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None); let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -637,7 +639,7 @@ fn codegen_gnu_try<'ll>( let data = llvm::get_param(bx.llfn(), 1); let catch_func = llvm::get_param(bx.llfn(), 2); let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); - bx.invoke(try_func_ty, try_func, &[data], then, catch, None); + bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None); bx.switch_to_block(then); bx.ret(bx.const_i32(0)); @@ -655,13 +657,13 @@ fn codegen_gnu_try<'ll>( bx.add_clause(vals, tydesc); let ptr = bx.extract_value(vals, 0); let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); - bx.call(catch_ty, catch_func, &[data, ptr], None); + bx.call(catch_ty, None, catch_func, &[data, ptr], None); bx.ret(bx.const_i32(1)); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). 
@@ -701,7 +703,7 @@ fn codegen_emcc_try<'ll>(
         let data = llvm::get_param(bx.llfn(), 1);
         let catch_func = llvm::get_param(bx.llfn(), 2);
         let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
-        bx.invoke(try_func_ty, try_func, &[data], then, catch, None);
+        bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None);
 
         bx.switch_to_block(then);
         bx.ret(bx.const_i32(0));
@@ -740,13 +742,13 @@
         let catch_data = bx.bitcast(catch_data, bx.type_i8p());
 
         let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
-        bx.call(catch_ty, catch_func, &[data, catch_data], None);
+        bx.call(catch_ty, None, catch_func, &[data, catch_data], None);
         bx.ret(bx.const_i32(1));
     });
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
+    let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
     let i32_align = bx.tcx().data_layout.i32_align.abi;
     bx.store(ret, dest, i32_align);
 }
@@ -1216,8 +1218,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         };
         let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
         let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
-        let c =
-            bx.call(fn_ty, f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
+        let c = bx.call(
+            fn_ty,
+            None,
+            f,
+            &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
+            None,
+        );
         Ok(c)
     }
 
@@ -1416,8 +1423,13 @@
             llvm_elem_vec_ty,
         );
         let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
-        let v =
-            bx.call(fn_ty, f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
+        let v = bx.call(
+            fn_ty,
+            None,
+            f,
+            &[args[1].immediate(), alignment, mask, args[0].immediate()],
+            None,
+        );
         return Ok(v);
     }
 
@@ -1542,8 +1554,13 @@
         let fn_ty =
             bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
         let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
-        let v =
-            bx.call(fn_ty, f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
+        let v = bx.call(
+            fn_ty,
+            None,
+            f,
+            &[args[0].immediate(), args[1].immediate(), alignment, mask],
+            None,
+        );
         return Ok(v);
     }
 
@@ -1704,6 +1721,97 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
     bitwise_red!(simd_reduce_all: vector_reduce_and, true);
     bitwise_red!(simd_reduce_any: vector_reduce_or, true);
 
+    if name == sym::simd_cast_ptr {
+        require_simd!(ret_ty, "return");
+        let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+        require!(
+            in_len == out_len,
+            "expected return type with length {} (same as input type `{}`), \
+             found `{}` with length {}",
+            in_len,
+            in_ty,
+            ret_ty,
+            out_len
+        );
+
+        match in_elem.kind() {
+            ty::RawPtr(p) => {
+                let (metadata, check_sized) = p.ty.ptr_metadata_ty(bx.tcx, |ty| {
+                    bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
+                });
+                assert!(!check_sized); // we are in codegen, so we shouldn't see these types
+                require!(metadata.is_unit(), "cannot cast fat pointer `{}`", in_elem)
+            }
+            _ => return_error!("expected pointer, got `{}`", in_elem),
+        }
+        match out_elem.kind() {
+            ty::RawPtr(p) => {
+                let (metadata, check_sized) = p.ty.ptr_metadata_ty(bx.tcx, |ty| {
+                    bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
+                });
+                assert!(!check_sized); // we are in codegen, so we shouldn't see these types
+                require!(metadata.is_unit(), "cannot cast to fat pointer `{}`", out_elem)
+            }
+            _ => return_error!("expected pointer, got `{}`", out_elem),
+        }
+
+        if in_elem == out_elem {
+            return Ok(args[0].immediate());
+        } else {
+            return Ok(bx.pointercast(args[0].immediate(), llret_ty));
+        }
+    }
+
+    if name == sym::simd_expose_addr {
+        require_simd!(ret_ty, "return");
+        let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+        require!(
+            in_len == out_len,
+            "expected return type with length {} (same as input type `{}`), \
+             found `{}` with length {}",
+            in_len,
+            in_ty,
+            ret_ty,
+            out_len
+        );
+
+        match in_elem.kind() {
+            ty::RawPtr(_) => {}
+            _ => return_error!("expected pointer, got `{}`", in_elem),
+        }
+        match out_elem.kind() {
+            ty::Uint(ty::UintTy::Usize) => {}
+            _ => return_error!("expected `usize`, got `{}`", out_elem),
+        }
+
+        return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
+    }
+
+    if name == sym::simd_from_exposed_addr {
+        require_simd!(ret_ty, "return");
+        let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+        require!(
+            in_len == out_len,
+            "expected return type with length {} (same as input type `{}`), \
+             found `{}` with length {}",
+            in_len,
+            in_ty,
+            ret_ty,
+            out_len
+        );
+
+        match in_elem.kind() {
+            ty::Uint(ty::UintTy::Usize) => {}
+            _ => return_error!("expected `usize`, got `{}`", in_elem),
+        }
+        match out_elem.kind() {
+            ty::RawPtr(_) => {}
+            _ => return_error!("expected pointer, got `{}`", out_elem),
+        }
+
+        return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
+    }
+
     if name == sym::simd_cast || name == sym::simd_as {
         require_simd!(ret_ty, "return");
         let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
@@ -1900,7 +2008,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
 
         let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
         let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
-        let v = bx.call(fn_ty, f, &[lhs, rhs], None);
+        let v = bx.call(fn_ty, None, f, &[lhs, rhs], None);
         return Ok(v);
     }
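// [Editor's example] Per-lane semantics of the three SIMD pointer intrinsics
// added above, restated on plain arrays (hedged illustration: the real
// intrinsics operate lane-wise on SIMD vectors, and these helper names are
// invented):
fn simd_cast_ptr_sketch<T, U>(v: [*const T; 4]) -> [*const U; 4] {
    // pointercast per lane; the checks above reject fat (unsized) pointees.
    v.map(|p| p as *const U)
}
fn simd_expose_addr_sketch<T>(v: [*const T; 4]) -> [usize; 4] {
    v.map(|p| p as usize) // ptrtoint per lane
}
fn simd_from_exposed_addr_sketch<T>(v: [usize; 4]) -> [*const T; 4] {
    v.map(|a| a as *const T) // inttoptr per lane
}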
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 636d689a3..89c7e51d0 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -7,7 +7,6 @@
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
 #![feature(hash_raw_entry)]
 #![feature(let_chains)]
-#![feature(let_else)]
 #![feature(extern_types)]
 #![feature(once_cell)]
 #![feature(iter_intersperse)]
@@ -16,6 +15,8 @@
 
 #[macro_use]
 extern crate rustc_macros;
+#[macro_use]
+extern crate tracing;
 
 use back::write::{create_informational_target_machine, create_target_machine};
 
@@ -130,12 +131,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
     ) -> TargetMachineFactoryFn<Self> {
         back::write::target_machine_factory(sess, optlvl, target_features)
     }
-    fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
-        llvm_util::target_cpu(sess)
-    }
-    fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str> {
-        llvm_util::tune_cpu(sess)
-    }
 
     fn spawn_thread<F, T>(time_trace: bool, f: F) -> std::thread::JoinHandle<T>
     where
@@ -169,7 +164,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
 impl WriteBackendMethods for LlvmCodegenBackend {
     type Module = ModuleLlvm;
     type ModuleBuffer = back::lto::ModuleBuffer;
-    type Context = llvm::Context;
     type TargetMachine = &'static mut llvm::TargetMachine;
     type ThinData = back::lto::ThinData;
     type ThinBuffer = back::lto::ThinBuffer;
diff --git a/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
index 64db4f746..7d9489702 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
@@ -83,17 +83,6 @@ impl<'a> Child<'a> {
             }
         }
     }
-
-    pub fn data(&self) -> &'a [u8] {
-        unsafe {
-            let mut data_len = 0;
-            let data_ptr = super::LLVMRustArchiveChildData(self.raw, &mut data_len);
-            if data_ptr.is_null() {
-                panic!("failed to read data from archive child");
-            }
-            slice::from_raw_parts(data_ptr as *const u8, data_len as usize)
-        }
-    }
 }
 
 impl<'a> Drop for Child<'a> {
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 3139f93bf..42cb694c0 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -400,27 +400,6 @@ impl AtomicOrdering {
     }
 }
 
-/// LLVMRustSynchronizationScope
-#[derive(Copy, Clone)]
-#[repr(C)]
-pub enum SynchronizationScope {
-    SingleThread,
-    CrossThread,
-}
-
-impl SynchronizationScope {
-    pub fn from_generic(sc: rustc_codegen_ssa::common::SynchronizationScope) -> Self {
-        match sc {
-            rustc_codegen_ssa::common::SynchronizationScope::SingleThread => {
-                SynchronizationScope::SingleThread
-            }
-            rustc_codegen_ssa::common::SynchronizationScope::CrossThread => {
-                SynchronizationScope::CrossThread
-            }
-        }
-    }
-}
-
 /// LLVMRustFileType
 #[derive(Copy, Clone)]
 #[repr(C)]
@@ -1096,7 +1075,7 @@ extern "C" {
     pub fn LLVMConstInt(IntTy: &Type, N: c_ulonglong, SignExtend: Bool) -> &Value;
     pub fn LLVMConstIntOfArbitraryPrecision(IntTy: &Type, Wn: c_uint, Ws: *const u64) -> &Value;
     pub fn LLVMConstReal(RealTy: &Type, N: f64) -> &Value;
-    pub fn LLVMConstIntGetZExtValue(ConstantVal: &ConstantInt) -> c_ulonglong;
+    pub fn LLVMRustConstIntGetZExtValue(ConstantVal: &ConstantInt, Value: &mut u64) -> bool;
     pub fn LLVMRustConstInt128Get(
         ConstantVal: &ConstantInt,
         SExt: bool,
@@ -1782,16 +1761,18 @@ extern "C" {
         Order: AtomicOrdering,
     ) -> &'a Value;
 
-    pub fn LLVMRustBuildAtomicCmpXchg<'a>(
+    pub fn LLVMBuildAtomicCmpXchg<'a>(
         B: &Builder<'a>,
         LHS: &'a Value,
         CMP: &'a Value,
         RHS: &'a Value,
         Order: AtomicOrdering,
         FailureOrder: AtomicOrdering,
-        Weak: Bool,
+        SingleThreaded: Bool,
     ) -> &'a Value;
 
+    pub fn LLVMSetWeak(CmpXchgInst: &Value, IsWeak: Bool);
+
     pub fn LLVMBuildAtomicRMW<'a>(
         B: &Builder<'a>,
         Op: AtomicRmwBinOp,
         LHS: &'a Value,
         RHS: &'a Value,
         Order: AtomicOrdering,
         SingleThreaded: Bool,
     ) -> &'a Value;
 
@@ -1801,27 +1782,19 @@
-    pub fn LLVMRustBuildAtomicFence(
-        B: &Builder<'_>,
+    pub fn LLVMBuildFence<'a>(
+        B: &Builder<'a>,
         Order: AtomicOrdering,
-        Scope: SynchronizationScope,
-    );
+        SingleThreaded: Bool,
+        Name: *const c_char,
+    ) -> &'a Value;
 
     /// Writes a module to the specified path. Returns 0 on success.
     pub fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int;
 
-    /// Creates a pass manager.
+    /// Creates a legacy pass manager -- only used for final codegen.
     pub fn LLVMCreatePassManager<'a>() -> &'a mut PassManager<'a>;
 
-    /// Creates a function-by-function pass manager
-    pub fn LLVMCreateFunctionPassManagerForModule(M: &Module) -> &mut PassManager<'_>;
-
-    /// Disposes a pass manager.
-    pub fn LLVMDisposePassManager<'a>(PM: &'a mut PassManager<'a>);
-
-    /// Runs a pass manager on a module.
-    pub fn LLVMRunPassManager<'a>(PM: &PassManager<'a>, M: &'a Module) -> Bool;
-
     pub fn LLVMInitializePasses();
 
     pub fn LLVMTimeTraceProfilerInitialize();
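// [Editor's example] With the C-API binding above, "weak" is no longer a
// constructor argument: the cmpxchg instruction is built first, then flagged
// via LLVMSetWeak. A hedged sketch of how a builder wrapper would sequence
// the two calls (this function signature is invented for illustration; the
// actual builder change is not shown in this diff):
unsafe fn atomic_cmpxchg_sketch<'a>(
    llbuilder: &llvm::Builder<'a>,
    dst: &'a llvm::Value,
    cmp: &'a llvm::Value,
    src: &'a llvm::Value,
    order: llvm::AtomicOrdering,
    failure_order: llvm::AtomicOrdering,
    weak: bool,
) -> &'a llvm::Value {
    // Build the instruction with a system-wide syncscope (False = not
    // single-threaded), matching the new SingleThreaded parameter.
    let value =
        llvm::LLVMBuildAtomicCmpXchg(llbuilder, dst, cmp, src, order, failure_order, llvm::False);
    if weak {
        // Weakness is now toggled on the finished instruction.
        llvm::LLVMSetWeak(value, llvm::True);
    }
    value
}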
@@ -1832,32 +1805,6 @@
 
     pub fn LLVMAddAnalysisPasses<'a>(T: &'a TargetMachine, PM: &PassManager<'a>);
 
-    pub fn LLVMRustPassManagerBuilderCreate() -> &'static mut PassManagerBuilder;
-    pub fn LLVMRustPassManagerBuilderDispose(PMB: &'static mut PassManagerBuilder);
-    pub fn LLVMRustPassManagerBuilderUseInlinerWithThreshold(
-        PMB: &PassManagerBuilder,
-        threshold: c_uint,
-    );
-    pub fn LLVMRustPassManagerBuilderPopulateModulePassManager(
-        PMB: &PassManagerBuilder,
-        PM: &PassManager<'_>,
-    );
-
-    pub fn LLVMRustPassManagerBuilderPopulateFunctionPassManager(
-        PMB: &PassManagerBuilder,
-        PM: &PassManager<'_>,
-    );
-    pub fn LLVMRustPassManagerBuilderPopulateLTOPassManager(
-        PMB: &PassManagerBuilder,
-        PM: &PassManager<'_>,
-        Internalize: Bool,
-        RunInliner: Bool,
-    );
-    pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
-        PMB: &PassManagerBuilder,
-        PM: &PassManager<'_>,
-    );
-
     pub fn LLVMGetHostCPUFeatures() -> *mut c_char;
 
     pub fn LLVMDisposeMessage(message: *mut c_char);
@@ -2079,6 +2026,19 @@
         Ty: &'a DIType,
     ) -> &'a DIType;
 
+    pub fn LLVMRustDIBuilderCreateStaticMemberType<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: &'a DIDescriptor,
+        Name: *const c_char,
+        NameLen: size_t,
+        File: &'a DIFile,
+        LineNo: c_uint,
+        Ty: &'a DIType,
+        Flags: DIFlags,
+        val: Option<&'a Value>,
+        AlignInBits: u32,
+    ) -> &'a DIDerivedType;
+
     pub fn LLVMRustDIBuilderCreateLexicalBlock<'a>(
         Builder: &DIBuilder<'a>,
         Scope: &'a DIScope,
@@ -2249,22 +2209,6 @@
 
     pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>;
 
-    pub fn LLVMRustFindAndCreatePass(Pass: *const c_char) -> Option<&'static mut Pass>;
-    pub fn LLVMRustCreateAddressSanitizerFunctionPass(Recover: bool) -> &'static mut Pass;
-    pub fn LLVMRustCreateModuleAddressSanitizerPass(Recover: bool) -> &'static mut Pass;
-    pub fn LLVMRustCreateMemorySanitizerPass(
-        TrackOrigins: c_int,
-        Recover: bool,
-    ) -> &'static mut Pass;
-    pub fn LLVMRustCreateThreadSanitizerPass() -> &'static mut Pass;
-    pub fn LLVMRustCreateHWAddressSanitizerPass(Recover: bool) -> &'static mut Pass;
-    pub fn LLVMRustAddPass(PM: &PassManager<'_>, Pass: &'static mut Pass);
-    pub fn LLVMRustAddLastExtensionPasses(
-        PMB: &PassManagerBuilder,
-        Passes: *const &'static mut Pass,
-        NumPasses: size_t,
-    );
-
     pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool;
 
     pub fn LLVMRustPrintTargetCPUs(T: &TargetMachine);
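// [Editor's example] Earlier in this file's diff, LLVMConstIntGetZExtValue was
// replaced by a fallible LLVMRustConstIntGetZExtValue that reports via its
// bool return whether the constant fits in a u64. A hedged sketch of a safe
// wrapper over the new shape (helper name invented):
fn const_int_zext_value(cv: &llvm::ConstantInt) -> Option<u64> {
    let mut value = 0u64;
    // Presumably returns false when the constant is wider than 64 bits.
    let ok = unsafe { llvm::LLVMRustConstIntGetZExtValue(cv, &mut value) };
    ok.then_some(value)
}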
@@ -2298,29 +2242,11 @@
         SplitDwarfFile: *const c_char,
     ) -> Option<&'static mut TargetMachine>;
     pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
-    pub fn LLVMRustAddBuilderLibraryInfo<'a>(
-        PMB: &'a PassManagerBuilder,
-        M: &'a Module,
-        DisableSimplifyLibCalls: bool,
-    );
-    pub fn LLVMRustConfigurePassManagerBuilder(
-        PMB: &PassManagerBuilder,
-        OptLevel: CodeGenOptLevel,
-        MergeFunctions: bool,
-        SLPVectorize: bool,
-        LoopVectorize: bool,
-        PrepareForThinLTO: bool,
-        PGOGenPath: *const c_char,
-        PGOUsePath: *const c_char,
-        PGOSampleUsePath: *const c_char,
-        SizeLevel: c_int,
-    );
     pub fn LLVMRustAddLibraryInfo<'a>(
         PM: &PassManager<'a>,
         M: &'a Module,
         DisableSimplifyLibCalls: bool,
     );
-    pub fn LLVMRustRunFunctionPassManager<'a>(PM: &PassManager<'a>, M: &'a Module);
     pub fn LLVMRustWriteOutputFile<'a>(
         T: &'a TargetMachine,
         PM: &PassManager<'a>,
         M: &'a Module,
         Output: *const c_char,
         DwoOutput: *const c_char,
         FileType: FileType,
     ) -> LLVMRustResult;
-    pub fn LLVMRustOptimizeWithNewPassManager<'a>(
+    pub fn LLVMRustOptimize<'a>(
         M: &'a Module,
         TM: &'a TargetMachine,
         OptLevel: PassBuilderOptLevel,
@@ -2347,6 +2273,7 @@
         PGOGenPath: *const c_char,
         PGOUsePath: *const c_char,
         InstrumentCoverage: bool,
+        InstrProfileOutput: *const c_char,
         InstrumentGCOV: bool,
         PGOSampleUsePath: *const c_char,
         DebugInfoForProfiling: bool,
@@ -2366,7 +2293,6 @@
     pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char);
     pub fn LLVMRustPrintPasses();
     pub fn LLVMRustSetNormalizedTarget(M: &Module, triple: *const c_char);
-    pub fn LLVMRustAddAlwaysInlinePass(P: &PassManagerBuilder, AddLifetimes: bool);
     pub fn LLVMRustRunRestrictionPass(M: &Module, syms: *const *const c_char, len: size_t);
 
     pub fn LLVMRustOpenArchive(path: *const c_char) -> Option<&'static mut Archive>;
@@ -2375,7 +2301,6 @@
         AIR: &ArchiveIterator<'a>,
     ) -> Option<&'a mut ArchiveChild<'a>>;
     pub fn LLVMRustArchiveChildName(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char;
-    pub fn LLVMRustArchiveChildData(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char;
     pub fn LLVMRustArchiveChildFree<'a>(ACR: &'a mut ArchiveChild<'a>);
    pub fn LLVMRustArchiveIteratorFree<'a>(AIR: &'a mut ArchiveIterator<'a>);
     pub fn LLVMRustDestroyArchive(AR: &'static mut Archive);
@@ -2410,12 +2335,6 @@
         cookie_out: &mut c_uint,
     ) -> &'a SMDiagnostic;
 
-    pub fn LLVMRustSetInlineAsmDiagnosticHandler(
-        C: &Context,
-        H: InlineAsmDiagHandlerTy,
-        CX: *mut c_void,
-    );
-
     #[allow(improper_ctypes)]
     pub fn LLVMRustUnpackSMDiagnostic(
         d: &SMDiagnostic,
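// [Editor's example] Companion to the fence binding above: with
// LLVMRustBuildAtomicFence and its SynchronizationScope enum gone, fences are
// emitted through the C API with a bool syncscope. A hedged sketch of a
// builder-side call (the empty-name constant is assumed to be the crate's
// usual UNNAMED; this helper is invented for illustration):
unsafe fn fence_sketch<'a>(
    llbuilder: &llvm::Builder<'a>,
    order: llvm::AtomicOrdering,
    single_threaded: bool,
) -> &'a llvm::Value {
    llvm::LLVMBuildFence(
        llbuilder,
        order,
        if single_threaded { llvm::True } else { llvm::False },
        UNNAMED, // assumed empty instruction name, as elsewhere in the crate
    )
}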
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index a0a640473..2fd58567c 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -1,7 +1,6 @@
 use crate::back::write::create_informational_target_machine;
-use crate::{llvm, llvm_util};
+use crate::llvm;
 use libc::c_int;
-use libloading::Library;
 use rustc_codegen_ssa::target_features::{
     supported_target_features, tied_target_features, RUSTC_SPECIFIC_FEATURES,
 };
@@ -15,9 +14,7 @@ use rustc_span::symbol::Symbol;
 use rustc_target::spec::{MergeFunctions, PanicStrategy};
 use smallvec::{smallvec, SmallVec};
 use std::ffi::{CStr, CString};
-use tracing::debug;
 
-use std::mem;
 use std::path::Path;
 use std::ptr;
 use std::slice;
@@ -92,16 +89,6 @@ unsafe fn configure_llvm(sess: &Session) {
         add("-generate-arange-section", false);
     }
 
-    // Disable the machine outliner by default in LLVM versions 11 and LLVM
-    // version 12, where it leads to miscompilation.
-    //
-    // Ref:
-    // - https://github.com/rust-lang/rust/issues/85351
-    // - https://reviews.llvm.org/D103167
-    if llvm_util::get_version() < (13, 0, 0) {
-        add("-enable-machine-outliner=never", false);
-    }
-
     match sess.opts.unstable_opts.merge_functions.unwrap_or(sess.target.merge_functions) {
         MergeFunctions::Disabled | MergeFunctions::Trampolines => {}
         MergeFunctions::Aliases => {
@@ -131,22 +118,6 @@ unsafe fn configure_llvm(sess: &Session) {
 
     llvm::LLVMInitializePasses();
 
-    // Use the legacy plugin registration if we don't use the new pass manager
-    if !should_use_new_llvm_pass_manager(
-        &sess.opts.unstable_opts.new_llvm_pass_manager,
-        &sess.target.arch,
-    ) {
-        // Register LLVM plugins by loading them into the compiler process.
-        for plugin in &sess.opts.unstable_opts.llvm_plugins {
-            let lib = Library::new(plugin).unwrap_or_else(|e| bug!("couldn't load plugin: {}", e));
-            debug!("LLVM plugin loaded successfully {:?} ({})", lib, plugin);
-
-            // Intentionally leak the dynamic library. We can't ever unload it
-            // since the library can make things that will live arbitrarily long.
-            mem::forget(lib);
-        }
-    }
-
     rustc_llvm::initialize_available_targets();
 
     llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr());
@@ -165,6 +136,10 @@ pub fn time_trace_profiler_finish(file_name: &Path) {
 //
 // To find a list of LLVM's names, check llvm-project/llvm/include/llvm/Support/*TargetParser.def
 // where the * matches the architecture's name
+//
+// For targets not present in the above location, see llvm-project/llvm/lib/Target/{ARCH}/*.td
+// where `{ARCH}` is the architecture name. Look for instances of `SubtargetFeature`.
+//
 // Beware to not use the llvm github project for this, but check the git submodule
 // found in src/llvm-project
 // Though note that Rust can also be build with an external precompiled version of LLVM
@@ -440,6 +415,8 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str
                 .features
                 .split(',')
                 .filter(|v| !v.is_empty() && backend_feature_name(v).is_some())
+                // Drop +atomics-32 feature introduced in LLVM 15.
+                .filter(|v| *v != "+atomics-32" || get_version() >= (15, 0, 0))
                 .map(String::from),
         );
 
@@ -544,19 +521,3 @@ pub fn tune_cpu(sess: &Session) -> Option<&str> {
     let name = sess.opts.unstable_opts.tune_cpu.as_ref()?;
     Some(handle_native(name))
 }
-
-pub(crate) fn should_use_new_llvm_pass_manager(user_opt: &Option<bool>, target_arch: &str) -> bool {
-    // The new pass manager is enabled by default for LLVM >= 13.
-    // This matches Clang, which also enables it since Clang 13.
-
-    // Since LLVM 15, the legacy pass manager is no longer supported.
-    if llvm_util::get_version() >= (15, 0, 0) {
-        return true;
-    }
-
-    // There are some perf issues with the new pass manager when targeting
-    // s390x with LLVM 13, so enable the new pass manager only with LLVM 14.
-    // See https://github.com/rust-lang/rust/issues/89609.
-    let min_version = if target_arch == "s390x" { 14 } else { 13 };
-    user_opt.unwrap_or_else(|| llvm_util::get_version() >= (min_version, 0, 0))
-}
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
index 6e9428485..1eceb7f5c 100644
--- a/compiler/rustc_codegen_llvm/src/mono_item.rs
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -11,7 +11,6 @@ use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
 use rustc_middle::ty::{self, Instance, TypeVisitable};
 use rustc_session::config::CrateType;
 use rustc_target::spec::RelocModel;
-use tracing::debug;
 
 impl<'tcx> PreDefineMethods<'tcx> for CodegenCx<'_, 'tcx> {
     fn predefine_static(
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 9f0e6c80b..dc1165835 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -11,7 +11,6 @@ use rustc_target::abi::{Abi, AddressSpace, Align, FieldsShape};
 use rustc_target::abi::{Int, Pointer, F32, F64};
 use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants};
 use smallvec::{smallvec, SmallVec};
-use tracing::debug;
 
 use std::fmt::Write;
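// [Editor's example] The llvm_util.rs hunk above forwards "+atomics-32" to
// LLVM only when the running LLVM is new enough to know it. The gating
// predicate, restated as a standalone sketch (function name invented):
fn keep_target_feature(feature: &str, llvm_version: (u32, u32, u32)) -> bool {
    // `+atomics-32` only exists as a SubtargetFeature from LLVM 15 onward;
    // every other feature string passes through unchanged.
    feature != "+atomics-32" || llvm_version >= (15, 0, 0)
}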