author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:11:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:13:23 +0000
commit     20431706a863f92cb37dc512fef6e48d192aaf2c (patch)
tree       2867f13f5fd5437ba628c67d7f87309ccadcd286 /compiler/rustc_codegen_llvm/src
parent     Releasing progress-linux version 1.65.0+dfsg1-2~progress7.99u1. (diff)
Merging upstream version 1.66.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs                 |   4
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs                 |   8
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/archive.rs        |  67
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs            |  99
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs          | 261
-rw-r--r--  compiler/rustc_codegen_llvm/src/base.rs                |  13
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs             |  56
-rw-r--r--  compiler/rustc_codegen_llvm/src/callee.rs              |   3
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs              |   7
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs             |  26
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs |   2
-rw-r--r--  compiler/rustc_codegen_llvm/src/declare.rs             |  12
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs           |  52
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs                 |   8
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs            | 110
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs           |  36
16 files changed, 220 insertions, 544 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 26f5225f6..d478efc86 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -592,10 +592,6 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { - fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) { - fn_abi.apply_attrs_callsite(self, callsite) - } - fn get_param(&mut self, index: usize) -> Self::Value { llvm::get_param(self.llfn(), index as c_uint) } diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index 5202ac697..017513721 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -430,9 +430,9 @@ pub(crate) fn inline_asm_call<'ll>( ); let call = if let Some((dest, catch, funclet)) = dest_catch_funclet { - bx.invoke(fty, v, inputs, dest, catch, funclet) + bx.invoke(fty, None, v, inputs, dest, catch, funclet) } else { - bx.call(fty, v, inputs, None) + bx.call(fty, None, v, inputs, None) }; // Store mark in a metadata node so we can map LLVM errors @@ -551,6 +551,8 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> format!("{{{}}}", reg.name()) } } + // The constraints can be retrieved from + // https://llvm.org/docs/LangRef.html#supported-constraint-code-list InlineAsmRegOrRegClass::RegClass(reg) => match reg { InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r", InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w", @@ -624,6 +626,8 @@ fn modifier_to_llvm( reg: InlineAsmRegClass, modifier: Option<char>, ) -> Option<char> { + // The modifiers can be retrieved from + // https://llvm.org/docs/LangRef.html#asm-template-argument-modifiers match reg { InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier, InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs index ed96355a0..082665bba 100644 --- a/compiler/rustc_codegen_llvm/src/back/archive.rs +++ b/compiler/rustc_codegen_llvm/src/back/archive.rs @@ -2,16 +2,20 @@ use std::env; use std::ffi::{CStr, CString, OsString}; -use std::io; +use std::fs; +use std::io::{self, Write}; use std::mem; use std::path::{Path, PathBuf}; use std::ptr; use std::str; +use object::read::macho::FatArch; + use crate::common; use crate::llvm::archive_ro::{ArchiveRO, Child}; use crate::llvm::{self, ArchiveKind, LLVMMachineType, LLVMRustCOFFShortExport}; use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder}; +use rustc_data_structures::memmap::Mmap; use rustc_session::cstore::DllImport; use rustc_session::Session; @@ -53,13 +57,70 @@ fn llvm_machine_type(cpu: &str) -> LLVMMachineType { } } +fn try_filter_fat_archs( + archs: object::read::Result<&[impl FatArch]>, + target_arch: object::Architecture, + archive_path: &Path, + archive_map_data: &[u8], +) -> io::Result<Option<PathBuf>> { + let archs = archs.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + + let desired = match archs.iter().filter(|a| a.architecture() == target_arch).next() { + Some(a) => a, + None => return Ok(None), + }; + + let (mut new_f, extracted_path) = tempfile::Builder::new() + .suffix(archive_path.file_name().unwrap()) + .tempfile()? 
+ .keep() + .unwrap(); + + new_f.write_all( + desired.data(archive_map_data).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?, + )?; + + Ok(Some(extracted_path)) +} + +fn try_extract_macho_fat_archive( + sess: &Session, + archive_path: &Path, +) -> io::Result<Option<PathBuf>> { + let archive_map = unsafe { Mmap::map(fs::File::open(&archive_path)?)? }; + let target_arch = match sess.target.arch.as_ref() { + "aarch64" => object::Architecture::Aarch64, + "x86_64" => object::Architecture::X86_64, + _ => return Ok(None), + }; + + match object::macho::FatHeader::parse(&*archive_map) { + Ok(h) if h.magic.get(object::endian::BigEndian) == object::macho::FAT_MAGIC => { + let archs = object::macho::FatHeader::parse_arch32(&*archive_map); + try_filter_fat_archs(archs, target_arch, archive_path, &*archive_map) + } + Ok(h) if h.magic.get(object::endian::BigEndian) == object::macho::FAT_MAGIC_64 => { + let archs = object::macho::FatHeader::parse_arch64(&*archive_map); + try_filter_fat_archs(archs, target_arch, archive_path, &*archive_map) + } + // Not a FatHeader at all, just return None. + _ => Ok(None), + } +} + impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> { fn add_archive( &mut self, archive: &Path, skip: Box<dyn FnMut(&str) -> bool + 'static>, ) -> io::Result<()> { - let archive_ro = match ArchiveRO::open(archive) { + let mut archive = archive.to_path_buf(); + if self.sess.target.llvm_target.contains("-apple-macosx") { + if let Some(new_archive) = try_extract_macho_fat_archive(&self.sess, &archive)? { + archive = new_archive + } + } + let archive_ro = match ArchiveRO::open(&archive) { Ok(ar) => ar, Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), }; @@ -67,7 +128,7 @@ impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> { return Ok(()); } self.additions.push(Addition::Archive { - path: archive.to_path_buf(), + path: archive, archive: archive_ro, skip: Box::new(skip), }); diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index a89df00e2..a49cc7f8d 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -1,8 +1,6 @@ -use crate::back::write::{ - self, save_temp_bitcode, to_llvm_opt_settings, with_llvm_pmb, DiagnosticHandlers, -}; -use crate::llvm::{self, build_string, False, True}; -use crate::{llvm_util, LlvmCodegenBackend, ModuleLlvm}; +use crate::back::write::{self, save_temp_bitcode, DiagnosticHandlers}; +use crate::llvm::{self, build_string}; +use crate::{LlvmCodegenBackend, ModuleLlvm}; use object::read::archive::ArchiveFile; use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared}; use rustc_codegen_ssa::back::symbol_export; @@ -34,8 +32,8 @@ pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin"; pub fn crate_type_allows_lto(crate_type: CrateType) -> bool { match crate_type { - CrateType::Executable | CrateType::Staticlib | CrateType::Cdylib => true, - CrateType::Dylib | CrateType::Rlib | CrateType::ProcMacro => false, + CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true, + CrateType::Rlib | CrateType::ProcMacro => false, } } @@ -75,17 +73,6 @@ fn prepare_lto( // with either fat or thin LTO let mut upstream_modules = Vec::new(); if cgcx.lto != Lto::ThinLocal { - if cgcx.opts.cg.prefer_dynamic { - diag_handler - .struct_err("cannot prefer dynamic linking when performing LTO") - .note( - "only 'staticlib', 'bin', and 'cdylib' outputs are \ - supported with LTO", 
- ) - .emit(); - return Err(FatalError); - } - // Make sure we actually can run LTO for crate_type in cgcx.crate_types.iter() { if !crate_type_allows_lto(*crate_type) { @@ -94,9 +81,25 @@ fn prepare_lto( static library outputs", ); return Err(e); + } else if *crate_type == CrateType::Dylib { + if !cgcx.opts.unstable_opts.dylib_lto { + return Err(diag_handler + .fatal("lto cannot be used for `dylib` crate type without `-Zdylib-lto`")); + } } } + if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto { + diag_handler + .struct_err("cannot prefer dynamic linking when performing LTO") + .note( + "only 'staticlib', 'bin', and 'cdylib' outputs are \ + supported with LTO", + ) + .emit(); + return Err(FatalError); + } + for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() { let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO"); @@ -575,7 +578,7 @@ pub(crate) fn run_pass_manager( module: &mut ModuleCodegen<ModuleLlvm>, thin: bool, ) -> Result<(), FatalError> { - let _timer = cgcx.prof.extra_verbose_generic_activity("LLVM_lto_optimize", &*module.name); + let _timer = cgcx.prof.verbose_generic_activity_with_arg("LLVM_lto_optimize", &*module.name); let config = cgcx.config(module.kind); // Now we have one massive module inside of llmod. Time to run the @@ -597,61 +600,9 @@ pub(crate) fn run_pass_manager( 1, ); } - if llvm_util::should_use_new_llvm_pass_manager( - &config.new_llvm_pass_manager, - &cgcx.target_arch, - ) { - let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO }; - let opt_level = config.opt_level.unwrap_or(config::OptLevel::No); - write::optimize_with_new_llvm_pass_manager( - cgcx, - diag_handler, - module, - config, - opt_level, - opt_stage, - )?; - debug!("lto done"); - return Ok(()); - } - - let pm = llvm::LLVMCreatePassManager(); - llvm::LLVMAddAnalysisPasses(module.module_llvm.tm, pm); - - if config.verify_llvm_ir { - let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast()); - llvm::LLVMRustAddPass(pm, pass.unwrap()); - } - - let opt_level = config - .opt_level - .map(|x| to_llvm_opt_settings(x).0) - .unwrap_or(llvm::CodeGenOptLevel::None); - with_llvm_pmb(module.module_llvm.llmod(), config, opt_level, false, &mut |b| { - if thin { - llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm); - } else { - llvm::LLVMRustPassManagerBuilderPopulateLTOPassManager( - b, pm, /* Internalize = */ False, /* RunInliner = */ True, - ); - } - }); - - // We always generate bitcode through ThinLTOBuffers, - // which do not support anonymous globals - if config.bitcode_needed() { - let pass = llvm::LLVMRustFindAndCreatePass("name-anon-globals\0".as_ptr().cast()); - llvm::LLVMRustAddPass(pm, pass.unwrap()); - } - - if config.verify_llvm_ir { - let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast()); - llvm::LLVMRustAddPass(pm, pass.unwrap()); - } - - llvm::LLVMRunPassManager(pm, module.module_llvm.llmod()); - - llvm::LLVMDisposePassManager(pm); + let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO }; + let opt_level = config.opt_level.unwrap_or(config::OptLevel::No); + write::llvm_optimize(cgcx, diag_handler, module, config, opt_level, opt_stage)?; } debug!("lto done"); Ok(()) diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index a695df840..11053a8f6 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -21,7 +21,6 @@ use 
rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::small_c_str::SmallCStr; use rustc_errors::{FatalError, Handler, Level}; use rustc_fs_util::{link_or_copy, path_to_c_string}; -use rustc_middle::bug; use rustc_middle::ty::TyCtxt; use rustc_session::config::{self, Lto, OutputType, Passes, SplitDwarfKind, SwitchWithOptPath}; use rustc_session::Session; @@ -417,7 +416,7 @@ fn get_instr_profile_output_path(config: &ModuleConfig) -> Option<CString> { } } -pub(crate) unsafe fn optimize_with_new_llvm_pass_manager( +pub(crate) unsafe fn llvm_optimize( cgcx: &CodegenContext<LlvmCodegenBackend>, diag_handler: &Handler, module: &ModuleCodegen<ModuleLlvm>, @@ -465,7 +464,7 @@ pub(crate) unsafe fn optimize_with_new_llvm_pass_manager( // FIXME: NewPM doesn't provide a facility to pass custom InlineParams. // We would have to add upstream support for this first, before we can support // config.inline_threshold and our more aggressive default thresholds. - let result = llvm::LLVMRustOptimizeWithNewPassManager( + let result = llvm::LLVMRustOptimize( module.module_llvm.llmod(), &*module.module_llvm.tm, to_pass_builder_opt_level(opt_level), @@ -509,18 +508,11 @@ pub(crate) unsafe fn optimize( let llmod = module.module_llvm.llmod(); let llcx = &*module.module_llvm.llcx; - let tm = &*module.module_llvm.tm; let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx); let module_name = module.name.clone(); let module_name = Some(&module_name[..]); - if let Some(false) = config.new_llvm_pass_manager && llvm_util::get_version() >= (15, 0, 0) { - diag_handler.warn( - "ignoring `-Z new-llvm-pass-manager=no`, which is no longer supported with LLVM 15", - ); - } - if config.emit_no_opt_bc { let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name); let out = path_to_c_string(&out); @@ -528,191 +520,24 @@ pub(crate) unsafe fn optimize( } if let Some(opt_level) = config.opt_level { - if llvm_util::should_use_new_llvm_pass_manager( - &config.new_llvm_pass_manager, - &cgcx.target_arch, - ) { - let opt_stage = match cgcx.lto { - Lto::Fat => llvm::OptStage::PreLinkFatLTO, - Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO, - _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO, - _ => llvm::OptStage::PreLinkNoLTO, - }; - return optimize_with_new_llvm_pass_manager( - cgcx, - diag_handler, - module, - config, - opt_level, - opt_stage, - ); - } - - if cgcx.prof.llvm_recording_enabled() { - diag_handler - .warn("`-Z self-profile-events = llvm` requires `-Z new-llvm-pass-manager`"); - } - - // Create the two optimizing pass managers. These mirror what clang - // does, and are by populated by LLVM's default PassManagerBuilder. - // Each manager has a different set of passes, but they also share - // some common passes. - let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod); - let mpm = llvm::LLVMCreatePassManager(); - - { - let find_pass = |pass_name: &str| { - let pass_name = SmallCStr::new(pass_name); - llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr()) - }; - - if config.verify_llvm_ir { - // Verification should run as the very first pass. - llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap()); - } - - let mut extra_passes = Vec::new(); - let mut have_name_anon_globals_pass = false; - - for pass_name in &config.passes { - if pass_name == "lint" { - // Linting should also be performed early, directly on the generated IR. 
- llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap()); - continue; - } - - if let Some(pass) = find_pass(pass_name) { - extra_passes.push(pass); - } else { - diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name)); - } - - if pass_name == "name-anon-globals" { - have_name_anon_globals_pass = true; - } - } - - // Instrumentation must be inserted before optimization, - // otherwise LLVM may optimize some functions away which - // breaks llvm-cov. - // - // This mirrors what Clang does in lib/CodeGen/BackendUtil.cpp. - if config.instrument_gcov { - llvm::LLVMRustAddPass(mpm, find_pass("insert-gcov-profiling").unwrap()); - } - if config.instrument_coverage { - llvm::LLVMRustAddPass(mpm, find_pass("instrprof").unwrap()); - } - if config.debug_info_for_profiling { - llvm::LLVMRustAddPass(mpm, find_pass("add-discriminators").unwrap()); - } - - add_sanitizer_passes(config, &mut extra_passes); - - // Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need - // to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise - // we'll get errors in LLVM. - let using_thin_buffers = config.bitcode_needed(); - if !config.no_prepopulate_passes { - llvm::LLVMAddAnalysisPasses(tm, fpm); - llvm::LLVMAddAnalysisPasses(tm, mpm); - let opt_level = to_llvm_opt_settings(opt_level).0; - let prepare_for_thin_lto = cgcx.lto == Lto::Thin - || cgcx.lto == Lto::ThinLocal - || (cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled()); - with_llvm_pmb(llmod, config, opt_level, prepare_for_thin_lto, &mut |b| { - llvm::LLVMRustAddLastExtensionPasses( - b, - extra_passes.as_ptr(), - extra_passes.len() as size_t, - ); - llvm::LLVMRustPassManagerBuilderPopulateFunctionPassManager(b, fpm); - llvm::LLVMRustPassManagerBuilderPopulateModulePassManager(b, mpm); - }); - - have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto; - if using_thin_buffers && !prepare_for_thin_lto { - llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap()); - have_name_anon_globals_pass = true; - } - } else { - // If we don't use the standard pipeline, directly populate the MPM - // with the extra passes. - for pass in extra_passes { - llvm::LLVMRustAddPass(mpm, pass); - } - } - - if using_thin_buffers && !have_name_anon_globals_pass { - // As described above, this will probably cause an error in LLVM - if config.no_prepopulate_passes { - diag_handler.err( - "The current compilation is going to use thin LTO buffers \ - without running LLVM's NameAnonGlobals pass. \ - This will likely cause errors in LLVM. Consider adding \ - -C passes=name-anon-globals to the compiler command line.", - ); - } else { - bug!( - "We are using thin LTO buffers without running the NameAnonGlobals pass. \ - This will likely cause errors in LLVM and should never happen." 
- ); - } - } - } - - diag_handler.abort_if_errors(); - - // Finally, run the actual optimization passes - { - let _timer = cgcx.prof.extra_verbose_generic_activity( - "LLVM_module_optimize_function_passes", - &*module.name, - ); - llvm::LLVMRustRunFunctionPassManager(fpm, llmod); - } - { - let _timer = cgcx.prof.extra_verbose_generic_activity( - "LLVM_module_optimize_module_passes", - &*module.name, - ); - llvm::LLVMRunPassManager(mpm, llmod); - } - - // Deallocate managers that we're now done with - llvm::LLVMDisposePassManager(fpm); - llvm::LLVMDisposePassManager(mpm); + let opt_stage = match cgcx.lto { + Lto::Fat => llvm::OptStage::PreLinkFatLTO, + Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO, + _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO, + _ => llvm::OptStage::PreLinkNoLTO, + }; + return llvm_optimize(cgcx, diag_handler, module, config, opt_level, opt_stage); } Ok(()) } -unsafe fn add_sanitizer_passes(config: &ModuleConfig, passes: &mut Vec<&'static mut llvm::Pass>) { - if config.sanitizer.contains(SanitizerSet::ADDRESS) { - let recover = config.sanitizer_recover.contains(SanitizerSet::ADDRESS); - passes.push(llvm::LLVMRustCreateAddressSanitizerFunctionPass(recover)); - passes.push(llvm::LLVMRustCreateModuleAddressSanitizerPass(recover)); - } - if config.sanitizer.contains(SanitizerSet::MEMORY) { - let track_origins = config.sanitizer_memory_track_origins as c_int; - let recover = config.sanitizer_recover.contains(SanitizerSet::MEMORY); - passes.push(llvm::LLVMRustCreateMemorySanitizerPass(track_origins, recover)); - } - if config.sanitizer.contains(SanitizerSet::THREAD) { - passes.push(llvm::LLVMRustCreateThreadSanitizerPass()); - } - if config.sanitizer.contains(SanitizerSet::HWADDRESS) { - let recover = config.sanitizer_recover.contains(SanitizerSet::HWADDRESS); - passes.push(llvm::LLVMRustCreateHWAddressSanitizerPass(recover)); - } -} - pub(crate) fn link( cgcx: &CodegenContext<LlvmCodegenBackend>, diag_handler: &Handler, mut modules: Vec<ModuleCodegen<ModuleLlvm>>, ) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> { use super::lto::{Linker, ModuleBuffer}; - // Sort the modules by name to ensure to ensure deterministic behavior. + // Sort the modules by name to ensure deterministic behavior. modules.sort_by(|a, b| a.name.cmp(&b.name)); let (first, elements) = modules.split_first().expect("Bug! modules must contain at least one module."); @@ -1072,72 +897,6 @@ unsafe fn embed_bitcode( } } -pub unsafe fn with_llvm_pmb( - llmod: &llvm::Module, - config: &ModuleConfig, - opt_level: llvm::CodeGenOptLevel, - prepare_for_thin_lto: bool, - f: &mut dyn FnMut(&llvm::PassManagerBuilder), -) { - use std::ptr; - - // Create the PassManagerBuilder for LLVM. We configure it with - // reasonable defaults and prepare it to actually populate the pass - // manager. 
- let builder = llvm::LLVMRustPassManagerBuilderCreate(); - let opt_size = config.opt_size.map_or(llvm::CodeGenOptSizeNone, |x| to_llvm_opt_settings(x).1); - let inline_threshold = config.inline_threshold; - let pgo_gen_path = get_pgo_gen_path(config); - let pgo_use_path = get_pgo_use_path(config); - let pgo_sample_use_path = get_pgo_sample_use_path(config); - - llvm::LLVMRustConfigurePassManagerBuilder( - builder, - opt_level, - config.merge_functions, - config.vectorize_slp, - config.vectorize_loop, - prepare_for_thin_lto, - pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()), - pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()), - pgo_sample_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()), - opt_size as c_int, - ); - - llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins); - - // Here we match what clang does (kinda). For O0 we only inline - // always-inline functions (but don't add lifetime intrinsics), at O1 we - // inline with lifetime intrinsics, and O2+ we add an inliner with a - // thresholds copied from clang. - match (opt_level, opt_size, inline_threshold) { - (.., Some(t)) => { - llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, t); - } - (llvm::CodeGenOptLevel::Aggressive, ..) => { - llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 275); - } - (_, llvm::CodeGenOptSizeDefault, _) => { - llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 75); - } - (_, llvm::CodeGenOptSizeAggressive, _) => { - llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 25); - } - (llvm::CodeGenOptLevel::None, ..) => { - llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers); - } - (llvm::CodeGenOptLevel::Less, ..) => { - llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers); - } - (llvm::CodeGenOptLevel::Default, ..) => { - llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 225); - } - } - - f(builder); - llvm::LLVMRustPassManagerBuilderDispose(builder); -} - // Create a `__imp_<symbol> = &symbol` global for every public static `symbol`. // This is required to satisfy `dllimport` references to static data in .rlibs // when using MSVC linker. We do this only for data, as linker can fix up diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs index 86f92dc02..5b2bbdb4b 100644 --- a/compiler/rustc_codegen_llvm/src/base.rs +++ b/compiler/rustc_codegen_llvm/src/base.rs @@ -19,6 +19,8 @@ use crate::context::CodegenCx; use crate::llvm; use crate::value::Value; +use cstr::cstr; + use rustc_codegen_ssa::base::maybe_create_entry_wrapper; use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_codegen_ssa::traits::*; @@ -107,11 +109,14 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen } // Create the llvm.used and llvm.compiler.used variables. - if !cx.used_statics().borrow().is_empty() { - cx.create_used_variable() + if !cx.used_statics.borrow().is_empty() { + cx.create_used_variable_impl(cstr!("llvm.used"), &*cx.used_statics.borrow()); } - if !cx.compiler_used_statics().borrow().is_empty() { - cx.create_compiler_used_variable() + if !cx.compiler_used_statics.borrow().is_empty() { + cx.create_used_variable_impl( + cstr!("llvm.compiler.used"), + &*cx.compiler_used_statics.borrow(), + ); } // Run replace-all-uses-with for statics that need it. 
This must diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 63b63c6a1..fca43a0d8 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -1,14 +1,14 @@ +use crate::abi::FnAbiLlvmExt; use crate::attributes; use crate::common::Funclet; use crate::context::CodegenCx; -use crate::llvm::{self, BasicBlock, False}; -use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope}; +use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock}; use crate::type_::Type; use crate::type_of::LayoutLlvmExt; use crate::value::Value; use cstr::cstr; use libc::{c_char, c_uint}; -use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind}; +use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, SynchronizationScope, TypeKind}; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::*; @@ -215,6 +215,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn invoke( &mut self, llty: &'ll Type, + fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, llfn: &'ll Value, args: &[&'ll Value], then: &'ll BasicBlock, @@ -227,7 +228,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let bundle = funclet.map(|funclet| funclet.bundle()); let bundle = bundle.as_ref().map(|b| &*b.raw); - unsafe { + let invoke = unsafe { llvm::LLVMRustBuildInvoke( self.llbuilder, llty, @@ -239,7 +240,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { bundle, UNNAMED, ) + }; + if let Some(fn_abi) = fn_abi { + fn_abi.apply_attrs_callsite(self, invoke); } + invoke } fn unreachable(&mut self) { @@ -406,20 +411,17 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value { let mut bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); - bx.dynamic_alloca(ty, align) - } - - fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value { unsafe { - let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED); + let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED); llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); alloca } } - fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value { + fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value { unsafe { - let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED); + let alloca = + llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED); llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); alloca } @@ -1042,15 +1044,17 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { ) -> &'ll Value { let weak = if weak { llvm::True } else { llvm::False }; unsafe { - llvm::LLVMRustBuildAtomicCmpXchg( + let value = llvm::LLVMBuildAtomicCmpXchg( self.llbuilder, dst, cmp, src, AtomicOrdering::from_generic(order), AtomicOrdering::from_generic(failure_order), - weak, - ) + llvm::False, // SingleThreaded + ); + llvm::LLVMSetWeak(value, weak); + value } } fn atomic_rmw( @@ -1067,7 +1071,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { dst, src, AtomicOrdering::from_generic(order), - False, + llvm::False, // SingleThreaded ) } } @@ -1075,13 +1079,18 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn 
atomic_fence( &mut self, order: rustc_codegen_ssa::common::AtomicOrdering, - scope: rustc_codegen_ssa::common::SynchronizationScope, + scope: SynchronizationScope, ) { + let single_threaded = match scope { + SynchronizationScope::SingleThread => llvm::True, + SynchronizationScope::CrossThread => llvm::False, + }; unsafe { - llvm::LLVMRustBuildAtomicFence( + llvm::LLVMBuildFence( self.llbuilder, AtomicOrdering::from_generic(order), - SynchronizationScope::from_generic(scope), + single_threaded, + UNNAMED, ); } } @@ -1139,6 +1148,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn call( &mut self, llty: &'ll Type, + fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, llfn: &'ll Value, args: &[&'ll Value], funclet: Option<&Funclet<'ll>>, @@ -1149,7 +1159,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let bundle = funclet.map(|funclet| funclet.bundle()); let bundle = bundle.as_ref().map(|b| &*b.raw); - unsafe { + let call = unsafe { llvm::LLVMRustBuildCall( self.llbuilder, llty, @@ -1158,7 +1168,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { args.len() as c_uint, bundle, ) + }; + if let Some(fn_abi) = fn_abi { + fn_abi.apply_attrs_callsite(self, call); } + call } fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { @@ -1391,7 +1405,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value { let (ty, f) = self.cx.get_intrinsic(intrinsic); - self.call(ty, f, args, None) + self.call(ty, None, f, args, None) } fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) { @@ -1453,7 +1467,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width) }; let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty)); - self.call(self.type_func(&[src_ty], dest_ty), f, &[val], None) + self.call(self.type_func(&[src_ty], dest_ty), None, f, &[val], None) } pub(crate) fn landing_pad( diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs index b83c1e8f0..6f0d1b7ce 100644 --- a/compiler/rustc_codegen_llvm/src/callee.rs +++ b/compiler/rustc_codegen_llvm/src/callee.rs @@ -179,7 +179,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> // MinGW: For backward compatibility we rely on the linker to decide whether it // should use dllimport for functions. if cx.use_dll_storage_attrs - && tcx.is_dllimport_foreign_item(instance_def_id) + && let Some(library) = tcx.native_library(instance_def_id) + && library.kind.is_dllimport() && !matches!(tcx.sess.target.env.as_ref(), "gnu" | "uclibc") { llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport); diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index a559f7f3d..dd3c43ba5 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -332,7 +332,10 @@ impl<'ll> CodegenCx<'ll, '_> { } } - if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) { + if self.use_dll_storage_attrs + && let Some(library) = self.tcx.native_library(def_id) + && library.kind.is_dllimport() + { // For foreign (native) libs we know the exact storage type to use. 
unsafe { llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); @@ -552,7 +555,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { // `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage // on other targets, in particular MachO targets have *their* static constructor // lists broken if `llvm.compiler.used` is emitted rather than llvm.used. However, - // that check happens when assigning the `CodegenFnAttrFlags` in `rustc_typeck`, + // that check happens when assigning the `CodegenFnAttrFlags` in `rustc_hir_analysis`, // so we don't need to take care of it here. self.add_compiler_used_global(g); } diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 67ffc7cb9..79ddfd884 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -154,6 +154,11 @@ pub unsafe fn create_module<'ll>( target_data_layout = target_data_layout.replace("-p10:8:8-p20:8:8", ""); } } + if llvm_version < (16, 0, 0) { + if sess.target.arch == "s390x" { + target_data_layout = target_data_layout.replace("-v128:64", ""); + } + } // Ensure the data-layout values hardcoded remain the defaults. if sess.target.is_builtin { @@ -453,7 +458,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { self.coverage_cx.as_ref() } - fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) { + pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) { let section = cstr!("llvm.metadata"); let array = self.const_array(self.type_ptr_to(self.type_i8()), values); @@ -551,14 +556,6 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { self.codegen_unit } - fn used_statics(&self) -> &RefCell<Vec<&'ll Value>> { - &self.used_statics - } - - fn compiler_used_statics(&self) -> &RefCell<Vec<&'ll Value>> { - &self.compiler_used_statics - } - fn set_frame_pointer_type(&self, llfn: &'ll Value) { if let Some(attr) = attributes::frame_pointer_type_attr(self) { attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]); @@ -572,17 +569,6 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs); } - fn create_used_variable(&self) { - self.create_used_variable_impl(cstr!("llvm.used"), &*self.used_statics.borrow()); - } - - fn create_compiler_used_variable(&self) { - self.create_used_variable_impl( - cstr!("llvm.compiler.used"), - &*self.compiler_used_statics.borrow(), - ); - } - fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> { if self.get_declared_value("main").is_none() { Some(self.declare_cfn("main", llvm::UnnamedAddr::Global, fn_type)) diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs index 0d1df6fb1..433f04320 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs @@ -129,7 +129,7 @@ impl CoverageMapGenerator { // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5) // requires setting the first filename to the compilation directory. // Since rustc generates coverage maps with relative paths, the - // compilation directory can be combined with the the relative paths + // compilation directory can be combined with the relative paths // to get absolute paths, if needed. 
let working_dir = tcx .sess diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index 0f663a267..f79ef1172 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -32,6 +32,7 @@ fn declare_raw_fn<'ll>( name: &str, callconv: llvm::CallConv, unnamed: llvm::UnnamedAddr, + visibility: llvm::Visibility, ty: &'ll Type, ) -> &'ll Value { debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty); @@ -41,6 +42,7 @@ fn declare_raw_fn<'ll>( llvm::SetFunctionCallConv(llfn, callconv); llvm::SetUnnamedAddress(llfn, unnamed); + llvm::set_visibility(llfn, visibility); let mut attrs = SmallVec::<[_; 4]>::new(); @@ -78,7 +80,14 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { unnamed: llvm::UnnamedAddr, fn_type: &'ll Type, ) -> &'ll Value { - declare_raw_fn(self, name, llvm::CCallConv, unnamed, fn_type) + // Declare C ABI functions with the visibility used by C by default. + let visibility = if self.tcx.sess.target.default_hidden_visibility { + llvm::Visibility::Hidden + } else { + llvm::Visibility::Default + }; + + declare_raw_fn(self, name, llvm::CCallConv, unnamed, visibility, fn_type) } /// Declare a Rust function. @@ -95,6 +104,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { name, fn_abi.llvm_cconv(), llvm::UnnamedAddr::Global, + llvm::Visibility::Default, fn_abi.llvm_type(self), ); fn_abi.apply_attrs_llfn(self, llfn); diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index a640de42a..825011941 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -108,6 +108,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { let (simple_ty, simple_fn) = simple.unwrap(); self.call( simple_ty, + None, simple_fn, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None, @@ -435,7 +436,7 @@ fn try_intrinsic<'ll>( ) { if bx.sess().panic_strategy() == PanicStrategy::Abort { let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); - bx.call(try_func_ty, try_func, &[data], None); + bx.call(try_func_ty, None, try_func, &[data], None); // Return 0 unconditionally from the intrinsic call; // we can never unwind. let ret_align = bx.tcx().data_layout.i32_align.abi; @@ -534,7 +535,7 @@ fn codegen_msvc_try<'ll>( let ptr_align = bx.tcx().data_layout.pointer_align.abi; let slot = bx.alloca(bx.type_i8p(), ptr_align); let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); - bx.invoke(try_func_ty, try_func, &[data], normal, catchswitch, None); + bx.invoke(try_func_ty, None, try_func, &[data], normal, catchswitch, None); bx.switch_to_block(normal); bx.ret(bx.const_i32(0)); @@ -578,7 +579,7 @@ fn codegen_msvc_try<'ll>( let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]); let ptr = bx.load(bx.type_i8p(), slot, ptr_align); let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); - bx.call(catch_ty, catch_func, &[data, ptr], Some(&funclet)); + bx.call(catch_ty, None, catch_func, &[data, ptr], Some(&funclet)); bx.catch_ret(&funclet, caught); // The flag value of 64 indicates a "catch-all". 
@@ -586,7 +587,7 @@ fn codegen_msvc_try<'ll>( let flags = bx.const_i32(64); let null = bx.const_null(bx.type_i8p()); let funclet = bx.catch_pad(cs, &[null, flags, null]); - bx.call(catch_ty, catch_func, &[data, null], Some(&funclet)); + bx.call(catch_ty, None, catch_func, &[data, null], Some(&funclet)); bx.catch_ret(&funclet, caught); bx.switch_to_block(caught); @@ -595,7 +596,7 @@ fn codegen_msvc_try<'ll>( // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). - let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None); + let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None); let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -638,7 +639,7 @@ fn codegen_gnu_try<'ll>( let data = llvm::get_param(bx.llfn(), 1); let catch_func = llvm::get_param(bx.llfn(), 2); let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); - bx.invoke(try_func_ty, try_func, &[data], then, catch, None); + bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None); bx.switch_to_block(then); bx.ret(bx.const_i32(0)); @@ -656,13 +657,13 @@ fn codegen_gnu_try<'ll>( bx.add_clause(vals, tydesc); let ptr = bx.extract_value(vals, 0); let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); - bx.call(catch_ty, catch_func, &[data, ptr], None); + bx.call(catch_ty, None, catch_func, &[data, ptr], None); bx.ret(bx.const_i32(1)); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). - let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None); + let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None); let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -702,7 +703,7 @@ fn codegen_emcc_try<'ll>( let data = llvm::get_param(bx.llfn(), 1); let catch_func = llvm::get_param(bx.llfn(), 2); let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); - bx.invoke(try_func_ty, try_func, &[data], then, catch, None); + bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None); bx.switch_to_block(then); bx.ret(bx.const_i32(0)); @@ -741,13 +742,13 @@ fn codegen_emcc_try<'ll>( let catch_data = bx.bitcast(catch_data, bx.type_i8p()); let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); - bx.call(catch_ty, catch_func, &[data, catch_data], None); + bx.call(catch_ty, None, catch_func, &[data, catch_data], None); bx.ret(bx.const_i32(1)); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). 
- let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None); + let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None); let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -1217,8 +1218,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>( }; let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str); let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty); - let c = - bx.call(fn_ty, f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None); + let c = bx.call( + fn_ty, + None, + f, + &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), + None, + ); Ok(c) } @@ -1417,8 +1423,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>( llvm_elem_vec_ty, ); let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); - let v = - bx.call(fn_ty, f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None); + let v = bx.call( + fn_ty, + None, + f, + &[args[1].immediate(), alignment, mask, args[0].immediate()], + None, + ); return Ok(v); } @@ -1543,8 +1554,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let fn_ty = bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t); let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); - let v = - bx.call(fn_ty, f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None); + let v = bx.call( + fn_ty, + None, + f, + &[args[0].immediate(), args[1].immediate(), alignment, mask], + None, + ); return Ok(v); } @@ -1992,7 +2008,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty); let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); - let v = bx.call(fn_ty, f, &[lhs, rhs], None); + let v = bx.call(fn_ty, None, f, &[lhs, rhs], None); return Ok(v); } diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index 42c65e04e..89c7e51d0 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -7,7 +7,6 @@ #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![feature(hash_raw_entry)] #![feature(let_chains)] -#![cfg_attr(bootstrap, feature(let_else))] #![feature(extern_types)] #![feature(once_cell)] #![feature(iter_intersperse)] @@ -132,12 +131,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend { ) -> TargetMachineFactoryFn<Self> { back::write::target_machine_factory(sess, optlvl, target_features) } - fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str { - llvm_util::target_cpu(sess) - } - fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str> { - llvm_util::tune_cpu(sess) - } fn spawn_thread<F, T>(time_trace: bool, f: F) -> std::thread::JoinHandle<T> where @@ -171,7 +164,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend { impl WriteBackendMethods for LlvmCodegenBackend { type Module = ModuleLlvm; type ModuleBuffer = back::lto::ModuleBuffer; - type Context = llvm::Context; type TargetMachine = &'static mut llvm::TargetMachine; type ThinData = back::lto::ThinData; type ThinBuffer = back::lto::ThinBuffer; diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index ce27dc5a5..42cb694c0 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -400,27 +400,6 @@ impl AtomicOrdering { } } -/// LLVMRustSynchronizationScope -#[derive(Copy, Clone)] -#[repr(C)] -pub enum SynchronizationScope { - SingleThread, - CrossThread, -} - 
-impl SynchronizationScope { - pub fn from_generic(sc: rustc_codegen_ssa::common::SynchronizationScope) -> Self { - match sc { - rustc_codegen_ssa::common::SynchronizationScope::SingleThread => { - SynchronizationScope::SingleThread - } - rustc_codegen_ssa::common::SynchronizationScope::CrossThread => { - SynchronizationScope::CrossThread - } - } - } -} - /// LLVMRustFileType #[derive(Copy, Clone)] #[repr(C)] @@ -1782,16 +1761,18 @@ extern "C" { Order: AtomicOrdering, ) -> &'a Value; - pub fn LLVMRustBuildAtomicCmpXchg<'a>( + pub fn LLVMBuildAtomicCmpXchg<'a>( B: &Builder<'a>, LHS: &'a Value, CMP: &'a Value, RHS: &'a Value, Order: AtomicOrdering, FailureOrder: AtomicOrdering, - Weak: Bool, + SingleThreaded: Bool, ) -> &'a Value; + pub fn LLVMSetWeak(CmpXchgInst: &Value, IsWeak: Bool); + pub fn LLVMBuildAtomicRMW<'a>( B: &Builder<'a>, Op: AtomicRmwBinOp, @@ -1801,27 +1782,19 @@ extern "C" { SingleThreaded: Bool, ) -> &'a Value; - pub fn LLVMRustBuildAtomicFence( - B: &Builder<'_>, + pub fn LLVMBuildFence<'a>( + B: &Builder<'a>, Order: AtomicOrdering, - Scope: SynchronizationScope, - ); + SingleThreaded: Bool, + Name: *const c_char, + ) -> &'a Value; /// Writes a module to the specified path. Returns 0 on success. pub fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int; - /// Creates a pass manager. + /// Creates a legacy pass manager -- only used for final codegen. pub fn LLVMCreatePassManager<'a>() -> &'a mut PassManager<'a>; - /// Creates a function-by-function pass manager - pub fn LLVMCreateFunctionPassManagerForModule(M: &Module) -> &mut PassManager<'_>; - - /// Disposes a pass manager. - pub fn LLVMDisposePassManager<'a>(PM: &'a mut PassManager<'a>); - - /// Runs a pass manager on a module. - pub fn LLVMRunPassManager<'a>(PM: &PassManager<'a>, M: &'a Module) -> Bool; - pub fn LLVMInitializePasses(); pub fn LLVMTimeTraceProfilerInitialize(); @@ -1832,32 +1805,6 @@ extern "C" { pub fn LLVMAddAnalysisPasses<'a>(T: &'a TargetMachine, PM: &PassManager<'a>); - pub fn LLVMRustPassManagerBuilderCreate() -> &'static mut PassManagerBuilder; - pub fn LLVMRustPassManagerBuilderDispose(PMB: &'static mut PassManagerBuilder); - pub fn LLVMRustPassManagerBuilderUseInlinerWithThreshold( - PMB: &PassManagerBuilder, - threshold: c_uint, - ); - pub fn LLVMRustPassManagerBuilderPopulateModulePassManager( - PMB: &PassManagerBuilder, - PM: &PassManager<'_>, - ); - - pub fn LLVMRustPassManagerBuilderPopulateFunctionPassManager( - PMB: &PassManagerBuilder, - PM: &PassManager<'_>, - ); - pub fn LLVMRustPassManagerBuilderPopulateLTOPassManager( - PMB: &PassManagerBuilder, - PM: &PassManager<'_>, - Internalize: Bool, - RunInliner: Bool, - ); - pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager( - PMB: &PassManagerBuilder, - PM: &PassManager<'_>, - ); - pub fn LLVMGetHostCPUFeatures() -> *mut c_char; pub fn LLVMDisposeMessage(message: *mut c_char); @@ -2262,22 +2209,6 @@ extern "C" { pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>; - pub fn LLVMRustFindAndCreatePass(Pass: *const c_char) -> Option<&'static mut Pass>; - pub fn LLVMRustCreateAddressSanitizerFunctionPass(Recover: bool) -> &'static mut Pass; - pub fn LLVMRustCreateModuleAddressSanitizerPass(Recover: bool) -> &'static mut Pass; - pub fn LLVMRustCreateMemorySanitizerPass( - TrackOrigins: c_int, - Recover: bool, - ) -> &'static mut Pass; - pub fn LLVMRustCreateThreadSanitizerPass() -> &'static mut Pass; - pub fn LLVMRustCreateHWAddressSanitizerPass(Recover: bool) -> &'static mut Pass; - pub fn 
LLVMRustAddPass(PM: &PassManager<'_>, Pass: &'static mut Pass); - pub fn LLVMRustAddLastExtensionPasses( - PMB: &PassManagerBuilder, - Passes: *const &'static mut Pass, - NumPasses: size_t, - ); - pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool; pub fn LLVMRustPrintTargetCPUs(T: &TargetMachine); @@ -2311,29 +2242,11 @@ extern "C" { SplitDwarfFile: *const c_char, ) -> Option<&'static mut TargetMachine>; pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine); - pub fn LLVMRustAddBuilderLibraryInfo<'a>( - PMB: &'a PassManagerBuilder, - M: &'a Module, - DisableSimplifyLibCalls: bool, - ); - pub fn LLVMRustConfigurePassManagerBuilder( - PMB: &PassManagerBuilder, - OptLevel: CodeGenOptLevel, - MergeFunctions: bool, - SLPVectorize: bool, - LoopVectorize: bool, - PrepareForThinLTO: bool, - PGOGenPath: *const c_char, - PGOUsePath: *const c_char, - PGOSampleUsePath: *const c_char, - SizeLevel: c_int, - ); pub fn LLVMRustAddLibraryInfo<'a>( PM: &PassManager<'a>, M: &'a Module, DisableSimplifyLibCalls: bool, ); - pub fn LLVMRustRunFunctionPassManager<'a>(PM: &PassManager<'a>, M: &'a Module); pub fn LLVMRustWriteOutputFile<'a>( T: &'a TargetMachine, PM: &PassManager<'a>, @@ -2342,7 +2255,7 @@ extern "C" { DwoOutput: *const c_char, FileType: FileType, ) -> LLVMRustResult; - pub fn LLVMRustOptimizeWithNewPassManager<'a>( + pub fn LLVMRustOptimize<'a>( M: &'a Module, TM: &'a TargetMachine, OptLevel: PassBuilderOptLevel, @@ -2380,7 +2293,6 @@ extern "C" { pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char); pub fn LLVMRustPrintPasses(); pub fn LLVMRustSetNormalizedTarget(M: &Module, triple: *const c_char); - pub fn LLVMRustAddAlwaysInlinePass(P: &PassManagerBuilder, AddLifetimes: bool); pub fn LLVMRustRunRestrictionPass(M: &Module, syms: *const *const c_char, len: size_t); pub fn LLVMRustOpenArchive(path: *const c_char) -> Option<&'static mut Archive>; diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index 60707a1c3..2fd58567c 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -1,7 +1,6 @@ use crate::back::write::create_informational_target_machine; -use crate::{llvm, llvm_util}; +use crate::llvm; use libc::c_int; -use libloading::Library; use rustc_codegen_ssa::target_features::{ supported_target_features, tied_target_features, RUSTC_SPECIFIC_FEATURES, }; @@ -16,7 +15,6 @@ use rustc_target::spec::{MergeFunctions, PanicStrategy}; use smallvec::{smallvec, SmallVec}; use std::ffi::{CStr, CString}; -use std::mem; use std::path::Path; use std::ptr; use std::slice; @@ -120,22 +118,6 @@ unsafe fn configure_llvm(sess: &Session) { llvm::LLVMInitializePasses(); - // Use the legacy plugin registration if we don't use the new pass manager - if !should_use_new_llvm_pass_manager( - &sess.opts.unstable_opts.new_llvm_pass_manager, - &sess.target.arch, - ) { - // Register LLVM plugins by loading them into the compiler process. - for plugin in &sess.opts.unstable_opts.llvm_plugins { - let lib = Library::new(plugin).unwrap_or_else(|e| bug!("couldn't load plugin: {}", e)); - debug!("LLVM plugin loaded successfully {:?} ({})", lib, plugin); - - // Intentionally leak the dynamic library. We can't ever unload it - // since the library can make things that will live arbitrarily long. 
- mem::forget(lib); - } - } - rustc_llvm::initialize_available_targets(); llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr()); @@ -539,19 +521,3 @@ pub fn tune_cpu(sess: &Session) -> Option<&str> { let name = sess.opts.unstable_opts.tune_cpu.as_ref()?; Some(handle_native(name)) } - -pub(crate) fn should_use_new_llvm_pass_manager(user_opt: &Option<bool>, target_arch: &str) -> bool { - // The new pass manager is enabled by default for LLVM >= 13. - // This matches Clang, which also enables it since Clang 13. - - // Since LLVM 15, the legacy pass manager is no longer supported. - if llvm_util::get_version() >= (15, 0, 0) { - return true; - } - - // There are some perf issues with the new pass manager when targeting - // s390x with LLVM 13, so enable the new pass manager only with LLVM 14. - // See https://github.com/rust-lang/rust/issues/89609. - let min_version = if target_arch == "s390x" { 14 } else { 13 }; - user_opt.unwrap_or_else(|| llvm_util::get_version() >= (min_version, 0, 0)) -} |