Diffstat (limited to 'compiler/rustc_codegen_llvm/src/back')
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/archive.rs     383
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs         936
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/profiling.rs    58
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs      1212
4 files changed, 2589 insertions, 0 deletions
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
new file mode 100644
index 000000000..27039cda2
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -0,0 +1,383 @@
+//! A helper class for dealing with static archives
+
+use std::env;
+use std::ffi::{CStr, CString, OsString};
+use std::io;
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::ptr;
+use std::str;
+
+use crate::llvm::archive_ro::{ArchiveRO, Child};
+use crate::llvm::{self, ArchiveKind, LLVMMachineType, LLVMRustCOFFShortExport};
+use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder};
+use rustc_session::cstore::{DllCallingConvention, DllImport};
+use rustc_session::Session;
+
+/// Helper for adding many files to an archive.
+#[must_use = "must call build() to finish building the archive"]
+pub struct LlvmArchiveBuilder<'a> {
+ sess: &'a Session,
+ additions: Vec<Addition>,
+}
+
+enum Addition {
+ File { path: PathBuf, name_in_archive: String },
+ Archive { path: PathBuf, archive: ArchiveRO, skip: Box<dyn FnMut(&str) -> bool> },
+}
+
+impl Addition {
+ fn path(&self) -> &Path {
+ match self {
+ Addition::File { path, .. } | Addition::Archive { path, .. } => path,
+ }
+ }
+}
+
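+/// Archive members whose name contains "SYMDEF" (e.g. `__.SYMDEF` in BSD-style
+/// archives) hold the archive's symbol table rather than object code, so we
+/// skip them when copying members out of an existing archive.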
+fn is_relevant_child(c: &Child<'_>) -> bool {
+ match c.name() {
+ Some(name) => !name.contains("SYMDEF"),
+ None => false,
+ }
+}
+
+/// Map machine type strings to values of LLVM's MachineTypes enum.
+fn llvm_machine_type(cpu: &str) -> LLVMMachineType {
+ match cpu {
+ "x86_64" => LLVMMachineType::AMD64,
+ "x86" => LLVMMachineType::I386,
+ "aarch64" => LLVMMachineType::ARM64,
+ "arm" => LLVMMachineType::ARM,
+ _ => panic!("unsupported cpu type {}", cpu),
+ }
+}
+
+impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> {
+ fn add_archive(
+ &mut self,
+ archive: &Path,
+ skip: Box<dyn FnMut(&str) -> bool + 'static>,
+ ) -> io::Result<()> {
+ let archive_ro = match ArchiveRO::open(archive) {
+ Ok(ar) => ar,
+ Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)),
+ };
+ if self.additions.iter().any(|ar| ar.path() == archive) {
+ return Ok(());
+ }
+ self.additions.push(Addition::Archive {
+ path: archive.to_path_buf(),
+ archive: archive_ro,
+ skip: Box::new(skip),
+ });
+ Ok(())
+ }
+
+ /// Adds an arbitrary file to this archive
+ fn add_file(&mut self, file: &Path) {
+ let name = file.file_name().unwrap().to_str().unwrap();
+ self.additions
+ .push(Addition::File { path: file.to_path_buf(), name_in_archive: name.to_owned() });
+ }
+
+ /// Combine the provided files, rlibs, and native libraries into a single
+ /// `Archive`.
+ fn build(mut self: Box<Self>, output: &Path) -> bool {
+ match self.build_with_llvm(output) {
+ Ok(any_members) => any_members,
+ Err(e) => self.sess.fatal(&format!("failed to build archive: {}", e)),
+ }
+ }
+}
+
+pub struct LlvmArchiveBuilderBuilder;
+
+impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
+ fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a> {
+ Box::new(LlvmArchiveBuilder { sess, additions: Vec::new() })
+ }
+
+ fn create_dll_import_lib(
+ &self,
+ sess: &Session,
+ lib_name: &str,
+ dll_imports: &[DllImport],
+ tmpdir: &Path,
+ ) -> PathBuf {
+ let output_path = {
+ let mut output_path: PathBuf = tmpdir.to_path_buf();
+ output_path.push(format!("{}_imports", lib_name));
+ output_path.with_extension("lib")
+ };
+
+ let target = &sess.target;
+ let mingw_gnu_toolchain = target.vendor == "pc"
+ && target.os == "windows"
+ && target.env == "gnu"
+ && target.abi.is_empty();
+
+ let import_name_and_ordinal_vector: Vec<(String, Option<u16>)> = dll_imports
+ .iter()
+ .map(|import: &DllImport| {
+ if sess.target.arch == "x86" {
+ (
+ LlvmArchiveBuilder::i686_decorated_name(import, mingw_gnu_toolchain),
+ import.ordinal,
+ )
+ } else {
+ (import.name.to_string(), import.ordinal)
+ }
+ })
+ .collect();
+
+ if mingw_gnu_toolchain {
+ // The binutils linker used on -windows-gnu targets cannot read the import
+ // libraries generated by LLVM: in our attempts, the linker produced an .EXE
+            // that loaded but crashed with an access violation (AV) upon calling
+            // one of the imported functions. Therefore, use binutils to create the
+            // import library instead, by writing a .DEF file to the temp dir and
+            // calling binutils's dlltool.
+ let def_file_path = tmpdir.join(format!("{}_imports", lib_name)).with_extension("def");
+
+ let def_file_content = format!(
+ "EXPORTS\n{}",
+ import_name_and_ordinal_vector
+ .into_iter()
+ .map(|(name, ordinal)| {
+ match ordinal {
+ Some(n) => format!("{} @{} NONAME", name, n),
+ None => name,
+ }
+ })
+ .collect::<Vec<String>>()
+ .join("\n")
+ );
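+        // For illustration: with hypothetical imports `foo` at ordinal 1 and
+        // `bar` with no ordinal, the .DEF file built above reads:
+        //
+        //     EXPORTS
+        //     foo @1 NONAME
+        //     bar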
+
+ match std::fs::write(&def_file_path, def_file_content) {
+ Ok(_) => {}
+ Err(e) => {
+ sess.fatal(&format!("Error writing .DEF file: {}", e));
+ }
+ };
+
+ let dlltool = find_binutils_dlltool(sess);
+ let result = std::process::Command::new(dlltool)
+ .args([
+ "-d",
+ def_file_path.to_str().unwrap(),
+ "-D",
+ lib_name,
+ "-l",
+ output_path.to_str().unwrap(),
+ ])
+ .output();
+
+ match result {
+ Err(e) => {
+ sess.fatal(&format!("Error calling dlltool: {}", e));
+ }
+ Ok(output) if !output.status.success() => sess.fatal(&format!(
+ "Dlltool could not create import library: {}\n{}",
+ String::from_utf8_lossy(&output.stdout),
+ String::from_utf8_lossy(&output.stderr)
+ )),
+ _ => {}
+ }
+ } else {
+ // we've checked for \0 characters in the library name already
+ let dll_name_z = CString::new(lib_name).unwrap();
+
+ let output_path_z = rustc_fs_util::path_to_c_string(&output_path);
+
+ tracing::trace!("invoking LLVMRustWriteImportLibrary");
+ tracing::trace!(" dll_name {:#?}", dll_name_z);
+ tracing::trace!(" output_path {}", output_path.display());
+ tracing::trace!(
+ " import names: {}",
+ dll_imports
+ .iter()
+ .map(|import| import.name.to_string())
+ .collect::<Vec<_>>()
+ .join(", "),
+ );
+
+ // All import names are Rust identifiers and therefore cannot contain \0 characters.
+ // FIXME: when support for #[link_name] is implemented, ensure that the import names
+ // still don't contain any \0 characters. Also need to check that the names don't
+ // contain substrings like " @" or "NONAME" that are keywords or otherwise reserved
+ // in definition files.
+ let cstring_import_name_and_ordinal_vector: Vec<(CString, Option<u16>)> =
+ import_name_and_ordinal_vector
+ .into_iter()
+ .map(|(name, ordinal)| (CString::new(name).unwrap(), ordinal))
+ .collect();
+
+ let ffi_exports: Vec<LLVMRustCOFFShortExport> = cstring_import_name_and_ordinal_vector
+ .iter()
+ .map(|(name_z, ordinal)| LLVMRustCOFFShortExport::new(name_z.as_ptr(), *ordinal))
+ .collect();
+ let result = unsafe {
+ crate::llvm::LLVMRustWriteImportLibrary(
+ dll_name_z.as_ptr(),
+ output_path_z.as_ptr(),
+ ffi_exports.as_ptr(),
+ ffi_exports.len(),
+ llvm_machine_type(&sess.target.arch) as u16,
+ !sess.target.is_like_msvc,
+ )
+ };
+
+ if result == crate::llvm::LLVMRustResult::Failure {
+ sess.fatal(&format!(
+ "Error creating import library for {}: {}",
+ lib_name,
+ llvm::last_error().unwrap_or("unknown LLVM error".to_string())
+ ));
+ }
+ };
+
+ output_path
+ }
+}
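+
+// A minimal usage sketch of the two types above (hypothetical paths; assumes a
+// valid `sess: &Session` and that the inputs exist):
+//
+//     let mut builder = LlvmArchiveBuilderBuilder.new_archive_builder(sess);
+//     builder.add_file(Path::new("foo.o"));
+//     builder.add_archive(Path::new("libbar.a"), Box::new(|_| false))?;
+//     let has_members = builder.build(Path::new("libbaz.a"));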
+
+impl<'a> LlvmArchiveBuilder<'a> {
+ fn build_with_llvm(&mut self, output: &Path) -> io::Result<bool> {
+ let kind = &*self.sess.target.archive_format;
+ let kind = kind.parse::<ArchiveKind>().map_err(|_| kind).unwrap_or_else(|kind| {
+ self.sess.fatal(&format!("Don't know how to build archive of type: {}", kind))
+ });
+
+ let mut additions = mem::take(&mut self.additions);
+ let mut strings = Vec::new();
+ let mut members = Vec::new();
+
+ let dst = CString::new(output.to_str().unwrap())?;
+
+ unsafe {
+ for addition in &mut additions {
+ match addition {
+ Addition::File { path, name_in_archive } => {
+ let path = CString::new(path.to_str().unwrap())?;
+ let name = CString::new(name_in_archive.clone())?;
+ members.push(llvm::LLVMRustArchiveMemberNew(
+ path.as_ptr(),
+ name.as_ptr(),
+ None,
+ ));
+ strings.push(path);
+ strings.push(name);
+ }
+ Addition::Archive { archive, skip, .. } => {
+ for child in archive.iter() {
+ let child = child.map_err(string_to_io_error)?;
+ if !is_relevant_child(&child) {
+ continue;
+ }
+ let child_name = child.name().unwrap();
+ if skip(child_name) {
+ continue;
+ }
+
+ // It appears that LLVM's archive writer is a little
+ // buggy if the name we pass down isn't just the
+ // filename component, so chop that off here and
+ // pass it in.
+ //
+ // See LLVM bug 25877 for more info.
+ let child_name =
+ Path::new(child_name).file_name().unwrap().to_str().unwrap();
+ let name = CString::new(child_name)?;
+ let m = llvm::LLVMRustArchiveMemberNew(
+ ptr::null(),
+ name.as_ptr(),
+ Some(child.raw),
+ );
+ members.push(m);
+ strings.push(name);
+ }
+ }
+ }
+ }
+
+ let r = llvm::LLVMRustWriteArchive(
+ dst.as_ptr(),
+ members.len() as libc::size_t,
+ members.as_ptr() as *const &_,
+ true,
+ kind,
+ );
+ let ret = if r.into_result().is_err() {
+ let err = llvm::LLVMRustGetLastError();
+ let msg = if err.is_null() {
+ "failed to write archive".into()
+ } else {
+ String::from_utf8_lossy(CStr::from_ptr(err).to_bytes())
+ };
+ Err(io::Error::new(io::ErrorKind::Other, msg))
+ } else {
+ Ok(!members.is_empty())
+ };
+ for member in members {
+ llvm::LLVMRustArchiveMemberFree(member);
+ }
+ ret
+ }
+ }
+
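+    /// For example, a hypothetical import `f` with an 8-byte argument list
+    /// decorates to `_f` (C), `_f@8` (stdcall), `@f@8` (fastcall), or `f@@8`
+    /// (vectorcall); under the mingw GNU toolchain the leading underscore of
+    /// the C and stdcall forms is omitted.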
+ fn i686_decorated_name(import: &DllImport, mingw: bool) -> String {
+ let name = import.name;
+ let prefix = if mingw { "" } else { "_" };
+
+ match import.calling_convention {
+ DllCallingConvention::C => format!("{}{}", prefix, name),
+ DllCallingConvention::Stdcall(arg_list_size) => {
+ format!("{}{}@{}", prefix, name, arg_list_size)
+ }
+ DllCallingConvention::Fastcall(arg_list_size) => format!("@{}@{}", name, arg_list_size),
+ DllCallingConvention::Vectorcall(arg_list_size) => {
+ format!("{}@@{}", name, arg_list_size)
+ }
+ }
+ }
+}
+
+fn string_to_io_error(s: String) -> io::Error {
+ io::Error::new(io::ErrorKind::Other, format!("bad archive: {}", s))
+}
+
+fn find_binutils_dlltool(sess: &Session) -> OsString {
+ assert!(sess.target.options.is_like_windows && !sess.target.options.is_like_msvc);
+ if let Some(dlltool_path) = &sess.opts.unstable_opts.dlltool {
+ return dlltool_path.clone().into_os_string();
+ }
+
+ let mut tool_name: OsString = if sess.host.arch != sess.target.arch {
+ // We are cross-compiling, so we need the tool with the prefix matching our target
+ if sess.target.arch == "x86" {
+ "i686-w64-mingw32-dlltool"
+ } else {
+ "x86_64-w64-mingw32-dlltool"
+ }
+ } else {
+ // We are not cross-compiling, so we just want `dlltool`
+ "dlltool"
+ }
+ .into();
+
+ if sess.host.options.is_like_windows {
+ // If we're compiling on Windows, add the .exe suffix
+ tool_name.push(".exe");
+ }
+
+ // NOTE: it's not clear how useful it is to explicitly search PATH.
+ for dir in env::split_paths(&env::var_os("PATH").unwrap_or_default()) {
+ let full_path = dir.join(&tool_name);
+ if full_path.is_file() {
+ return full_path.into_os_string();
+ }
+ }
+
+ // The user didn't specify the location of the dlltool binary, and we weren't able
+ // to find the appropriate one on the PATH. Just return the name of the tool
+ // and let the invocation fail with a hopefully useful error message.
+ tool_name
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
new file mode 100644
index 000000000..3731c6bcf
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -0,0 +1,936 @@
+use crate::back::write::{
+ self, save_temp_bitcode, to_llvm_opt_settings, with_llvm_pmb, DiagnosticHandlers,
+};
+use crate::llvm::archive_ro::ArchiveRO;
+use crate::llvm::{self, build_string, False, True};
+use crate::{llvm_util, LlvmCodegenBackend, ModuleLlvm};
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
+use rustc_codegen_ssa::back::symbol_export;
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, TargetMachineFactoryConfig};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{FatalError, Handler};
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::bug;
+use rustc_middle::dep_graph::WorkProduct;
+use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{self, CrateType, Lto};
+use tracing::{debug, info};
+
+use std::ffi::{CStr, CString};
+use std::fs::File;
+use std::io;
+use std::iter;
+use std::path::Path;
+use std::ptr;
+use std::slice;
+use std::sync::Arc;
+
+/// We keep track of the computed LTO cache keys from the previous
+/// session to determine which CGUs we can reuse.
+pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
+
+pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
+ match crate_type {
+ CrateType::Executable | CrateType::Staticlib | CrateType::Cdylib => true,
+ CrateType::Dylib | CrateType::Rlib | CrateType::ProcMacro => false,
+ }
+}
+
+fn prepare_lto(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+) -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError> {
+ let export_threshold = match cgcx.lto {
+ // We're just doing LTO for our one crate
+ Lto::ThinLocal => SymbolExportLevel::Rust,
+
+ // We're doing LTO for the entire crate graph
+ Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&cgcx.crate_types),
+
+ Lto::No => panic!("didn't request LTO but we're doing LTO"),
+ };
+
+ let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| {
+ if info.level.is_below_threshold(export_threshold) || info.used {
+ Some(CString::new(name.as_str()).unwrap())
+ } else {
+ None
+ }
+ };
+ let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+ let mut symbols_below_threshold = {
+ let _timer = cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
+ exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<CString>>()
+ };
+ info!("{} symbols to preserve in this crate", symbols_below_threshold.len());
+
+ // If we're performing LTO for the entire crate graph, then for each of our
+ // upstream dependencies, find the corresponding rlib and load the bitcode
+ // from the archive.
+ //
+ // We save off all the bytecode and LLVM module ids for later processing
+ // with either fat or thin LTO
+ let mut upstream_modules = Vec::new();
+ if cgcx.lto != Lto::ThinLocal {
+ if cgcx.opts.cg.prefer_dynamic {
+ diag_handler
+ .struct_err("cannot prefer dynamic linking when performing LTO")
+ .note(
+ "only 'staticlib', 'bin', and 'cdylib' outputs are \
+ supported with LTO",
+ )
+ .emit();
+ return Err(FatalError);
+ }
+
+ // Make sure we actually can run LTO
+ for crate_type in cgcx.crate_types.iter() {
+ if !crate_type_allows_lto(*crate_type) {
+ let e = diag_handler.fatal(
+ "lto can only be run for executables, cdylibs and \
+ static library outputs",
+ );
+ return Err(e);
+ }
+ }
+
+ for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
+ let exported_symbols =
+ cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+ {
+ let _timer =
+ cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
+ symbols_below_threshold
+ .extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
+ }
+
+ let archive = ArchiveRO::open(path).expect("wanted an rlib");
+ let obj_files = archive
+ .iter()
+ .filter_map(|child| child.ok().and_then(|c| c.name().map(|name| (name, c))))
+ .filter(|&(name, _)| looks_like_rust_object_file(name));
+ for (name, child) in obj_files {
+ info!("adding bitcode from {}", name);
+ match get_bitcode_slice_from_object_data(child.data()) {
+ Ok(data) => {
+ let module = SerializedModule::FromRlib(data.to_vec());
+ upstream_modules.push((module, CString::new(name).unwrap()));
+ }
+ Err(msg) => return Err(diag_handler.fatal(&msg)),
+ }
+ }
+ }
+ }
+
+ Ok((symbols_below_threshold, upstream_modules))
+}
+
+fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], String> {
+ let mut len = 0;
+ let data =
+ unsafe { llvm::LLVMRustGetBitcodeSliceFromObjectData(obj.as_ptr(), obj.len(), &mut len) };
+ if !data.is_null() {
+ assert!(len != 0);
+ let bc = unsafe { slice::from_raw_parts(data, len) };
+
+ // `bc` must be a sub-slice of `obj`.
+ assert!(obj.as_ptr() <= bc.as_ptr());
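+        // Compare one-past-the-end pointers by taking empty sub-slices at the
+        // end of each slice, which avoids forming an out-of-bounds reference.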
+ assert!(bc[bc.len()..bc.len()].as_ptr() <= obj[obj.len()..obj.len()].as_ptr());
+
+ Ok(bc)
+ } else {
+ assert!(len == 0);
+ let msg = llvm::last_error().unwrap_or_else(|| "unknown LLVM error".to_string());
+ Err(format!("failed to get bitcode from object file for LTO ({})", msg))
+ }
+}
+
+/// Performs fat LTO by merging all modules into a single one and returning it
+/// for further optimization.
+pub(crate) fn run_fat(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+ let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?;
+ let symbols_below_threshold =
+ symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
+ fat_lto(
+ cgcx,
+ &diag_handler,
+ modules,
+ cached_modules,
+ upstream_modules,
+ &symbols_below_threshold,
+ )
+}
+
+/// Performs thin LTO by performing necessary global analysis and returning two
+/// lists, one of the modules that need optimization and another for modules that
+/// can simply be copied over from the incr. comp. cache.
+pub(crate) fn run_thin(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ modules: Vec<(String, ThinBuffer)>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+ let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?;
+ let symbols_below_threshold =
+ symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
+ if cgcx.opts.cg.linker_plugin_lto.enabled() {
+ unreachable!(
+ "We should never reach this case if the LTO step \
+ is deferred to the linker"
+ );
+ }
+ thin_lto(
+ cgcx,
+ &diag_handler,
+ modules,
+ upstream_modules,
+ cached_modules,
+ &symbols_below_threshold,
+ )
+}
+
+pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBuffer) {
+ let name = module.name.clone();
+ let buffer = ThinBuffer::new(module.module_llvm.llmod(), true);
+ (name, buffer)
+}
+
+fn fat_lto(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+ mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+ symbols_below_threshold: &[*const libc::c_char],
+) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
+ let _timer = cgcx.prof.generic_activity("LLVM_fat_lto_build_monolithic_module");
+ info!("going for a fat lto");
+
+ // Sort out all our lists of incoming modules into two lists.
+ //
+    // * `serialized_modules` (also an argument to this function) contains all
+ // modules that are serialized in-memory.
+ // * `in_memory` contains modules which are already parsed and in-memory,
+ // such as from multi-CGU builds.
+ //
+ // All of `cached_modules` (cached from previous incremental builds) can
+    // immediately go onto the `serialized_modules` list and then we can
+ // split the `modules` array into these two lists.
+ let mut in_memory = Vec::new();
+ serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| {
+ info!("pushing cached module {:?}", wp.cgu_name);
+ (buffer, CString::new(wp.cgu_name).unwrap())
+ }));
+ for module in modules {
+ match module {
+ FatLTOInput::InMemory(m) => in_memory.push(m),
+ FatLTOInput::Serialized { name, buffer } => {
+ info!("pushing serialized module {:?}", name);
+ let buffer = SerializedModule::Local(buffer);
+ serialized_modules.push((buffer, CString::new(name).unwrap()));
+ }
+ }
+ }
+
+ // Find the "costliest" module and merge everything into that codegen unit.
+ // All the other modules will be serialized and reparsed into the new
+ // context, so this hopefully avoids serializing and parsing the largest
+ // codegen unit.
+ //
+ // Additionally use a regular module as the base here to ensure that various
+ // file copy operations in the backend work correctly. The only other kind
+ // of module here should be an allocator one, and if your crate is smaller
+ // than the allocator module then the size doesn't really matter anyway.
+ let costliest_module = in_memory
+ .iter()
+ .enumerate()
+ .filter(|&(_, module)| module.kind == ModuleKind::Regular)
+ .map(|(i, module)| {
+ let cost = unsafe { llvm::LLVMRustModuleCost(module.module_llvm.llmod()) };
+ (cost, i)
+ })
+ .max();
+
+ // If we found a costliest module, we're good to go. Otherwise all our
+    // inputs were serialized, which could happen if, for example,
+ // all our inputs were incrementally reread from the cache and we're just
+ // re-executing the LTO passes. If that's the case deserialize the first
+ // module and create a linker with it.
+ let module: ModuleCodegen<ModuleLlvm> = match costliest_module {
+ Some((_cost, i)) => in_memory.remove(i),
+ None => {
+ assert!(!serialized_modules.is_empty(), "must have at least one serialized module");
+ let (buffer, name) = serialized_modules.remove(0);
+ info!("no in-memory regular modules to choose from, parsing {:?}", name);
+ ModuleCodegen {
+ module_llvm: ModuleLlvm::parse(cgcx, &name, buffer.data(), diag_handler)?,
+ name: name.into_string().unwrap(),
+ kind: ModuleKind::Regular,
+ }
+ }
+ };
+ let mut serialized_bitcode = Vec::new();
+ {
+ let (llcx, llmod) = {
+ let llvm = &module.module_llvm;
+ (&llvm.llcx, llvm.llmod())
+ };
+ info!("using {:?} as a base module", module.name);
+
+ // The linking steps below may produce errors and diagnostics within LLVM
+ // which we'd like to handle and print, so set up our diagnostic handlers
+ // (which get unregistered when they go out of scope below).
+ let _handler = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+ // For all other modules we codegened we'll need to link them into our own
+ // bitcode. All modules were codegened in their own LLVM context, however,
+ // and we want to move everything to the same LLVM context. Currently the
+        // way we know of to do that is to serialize them to a string and then parse
+ // them later. Not great but hey, that's why it's "fat" LTO, right?
+ for module in in_memory {
+ let buffer = ModuleBuffer::new(module.module_llvm.llmod());
+ let llmod_id = CString::new(&module.name[..]).unwrap();
+ serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
+ }
+ // Sort the modules to ensure we produce deterministic results.
+ serialized_modules.sort_by(|module1, module2| module1.1.cmp(&module2.1));
+
+ // For all serialized bitcode files we parse them and link them in as we did
+        // above; this is all mostly handled in C++. Like above, though, we don't
+        // know much about the memory management here so we err on the side of being
+        // safe and persist everything with the original module.
+ let mut linker = Linker::new(llmod);
+ for (bc_decoded, name) in serialized_modules {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg_recorder("LLVM_fat_lto_link_module", |recorder| {
+ recorder.record_arg(format!("{:?}", name))
+ });
+ info!("linking {:?}", name);
+ let data = bc_decoded.data();
+ linker.add(data).map_err(|()| {
+ let msg = format!("failed to load bitcode of module {:?}", name);
+ write::llvm_err(diag_handler, &msg)
+ })?;
+ serialized_bitcode.push(bc_decoded);
+ }
+ drop(linker);
+ save_temp_bitcode(cgcx, &module, "lto.input");
+
+ // Internalize everything below threshold to help strip out more modules and such.
+ unsafe {
+ let ptr = symbols_below_threshold.as_ptr();
+ llvm::LLVMRustRunRestrictionPass(
+ llmod,
+ ptr as *const *const libc::c_char,
+ symbols_below_threshold.len() as libc::size_t,
+ );
+ save_temp_bitcode(cgcx, &module, "lto.after-restriction");
+ }
+ }
+
+ Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: serialized_bitcode })
+}
+
+pub(crate) struct Linker<'a>(&'a mut llvm::Linker<'a>);
+
+impl<'a> Linker<'a> {
+ pub(crate) fn new(llmod: &'a llvm::Module) -> Self {
+ unsafe { Linker(llvm::LLVMRustLinkerNew(llmod)) }
+ }
+
+ pub(crate) fn add(&mut self, bytecode: &[u8]) -> Result<(), ()> {
+ unsafe {
+ if llvm::LLVMRustLinkerAdd(
+ self.0,
+ bytecode.as_ptr() as *const libc::c_char,
+ bytecode.len(),
+ ) {
+ Ok(())
+ } else {
+ Err(())
+ }
+ }
+ }
+}
+
+impl Drop for Linker<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustLinkerFree(&mut *(self.0 as *mut _));
+ }
+ }
+}
+
+/// Prepare "thin" LTO to get run on these modules.
+///
+/// The general structure of ThinLTO is quite different from the structure of
+/// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into
+/// one giant LLVM module, and then we run more optimization passes over this
+/// big module after internalizing most symbols. Thin LTO, on the other hand,
+/// avoids this large bottleneck through more targeted optimization.
+///
+/// At a high level Thin LTO looks like:
+///
+/// 1. Prepare a "summary" of each LLVM module in question which describes
+/// the values inside, cost of the values, etc.
+/// 2. Merge the summaries of all modules in question into one "index"
+/// 3. Perform some global analysis on this index
+/// 4. For each module, use the index and analysis calculated previously to
+/// perform local transformations on the module, for example inlining
+/// small functions from other modules.
+/// 5. Run thin-specific optimization passes over each module, and then code
+/// generate everything at the end.
+///
+/// The summary for each module is intended to be quite cheap, and the global
+/// index is relatively cheap to create as well. As a result, the goal of
+/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more
+/// situations. For example one cheap optimization is that we can parallelize
+/// all codegen modules, easily making use of all the cores on a machine.
+///
+/// With all that in mind, the function here is designed specifically to
+/// calculate just the *index* for ThinLTO. This index will then be shared amongst
+/// all of the `LtoModuleCodegen` units returned below and destroyed once
+/// they all go out of scope.
+fn thin_lto(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ modules: Vec<(String, ThinBuffer)>,
+ serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+ symbols_below_threshold: &[*const libc::c_char],
+) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
+ let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis");
+ unsafe {
+ info!("going for that thin, thin LTO");
+
+ let green_modules: FxHashMap<_, _> =
+ cached_modules.iter().map(|&(_, ref wp)| (wp.cgu_name.clone(), wp.clone())).collect();
+
+ let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
+ let mut thin_buffers = Vec::with_capacity(modules.len());
+ let mut module_names = Vec::with_capacity(full_scope_len);
+ let mut thin_modules = Vec::with_capacity(full_scope_len);
+
+ for (i, (name, buffer)) in modules.into_iter().enumerate() {
+ info!("local module: {} - {}", i, name);
+ let cname = CString::new(name.clone()).unwrap();
+ thin_modules.push(llvm::ThinLTOModule {
+ identifier: cname.as_ptr(),
+ data: buffer.data().as_ptr(),
+ len: buffer.data().len(),
+ });
+ thin_buffers.push(buffer);
+ module_names.push(cname);
+ }
+
+ // FIXME: All upstream crates are deserialized internally in the
+ // function below to extract their summary and modules. Note that
+ // unlike the loop above we *must* decode and/or read something
+ // here as these are all just serialized files on disk. An
+ // improvement, however, to make here would be to store the
+ // module summary separately from the actual module itself. Right
+        // now this is stored in one large bitcode file, and the entire
+ // file is deflate-compressed. We could try to bypass some of the
+ // decompression by storing the index uncompressed and only
+ // lazily decompressing the bytecode if necessary.
+ //
+ // Note that truly taking advantage of this optimization will
+ // likely be further down the road. We'd have to implement
+ // incremental ThinLTO first where we could actually avoid
+        // looking at upstream modules entirely sometimes (their contents,
+        // that is; we must always unconditionally look at the index).
+ let mut serialized = Vec::with_capacity(serialized_modules.len() + cached_modules.len());
+
+ let cached_modules =
+ cached_modules.into_iter().map(|(sm, wp)| (sm, CString::new(wp.cgu_name).unwrap()));
+
+ for (module, name) in serialized_modules.into_iter().chain(cached_modules) {
+ info!("upstream or cached module {:?}", name);
+ thin_modules.push(llvm::ThinLTOModule {
+ identifier: name.as_ptr(),
+ data: module.data().as_ptr(),
+ len: module.data().len(),
+ });
+ serialized.push(module);
+ module_names.push(name);
+ }
+
+ // Sanity check
+ assert_eq!(thin_modules.len(), module_names.len());
+
+ // Delegate to the C++ bindings to create some data here. Once this is a
+ // tried-and-true interface we may wish to try to upstream some of this
+        // to LLVM itself; right now we reimplement a lot of what they do
+ // upstream...
+ let data = llvm::LLVMRustCreateThinLTOData(
+ thin_modules.as_ptr(),
+ thin_modules.len() as u32,
+ symbols_below_threshold.as_ptr(),
+ symbols_below_threshold.len() as u32,
+ )
+ .ok_or_else(|| write::llvm_err(diag_handler, "failed to prepare thin LTO context"))?;
+
+ let data = ThinData(data);
+
+ info!("thin LTO data created");
+
+ let (key_map_path, prev_key_map, curr_key_map) = if let Some(ref incr_comp_session_dir) =
+ cgcx.incr_comp_session_dir
+ {
+ let path = incr_comp_session_dir.join(THIN_LTO_KEYS_INCR_COMP_FILE_NAME);
+ // If the previous file was deleted, or we get an IO error
+ // reading the file, then we'll just use `None` as the
+ // prev_key_map, which will force the code to be recompiled.
+ let prev =
+ if path.exists() { ThinLTOKeysMap::load_from_file(&path).ok() } else { None };
+ let curr = ThinLTOKeysMap::from_thin_lto_modules(&data, &thin_modules, &module_names);
+ (Some(path), prev, curr)
+ } else {
+ // If we don't compile incrementally, we don't need to load the
+ // import data from LLVM.
+ assert!(green_modules.is_empty());
+ let curr = ThinLTOKeysMap::default();
+ (None, None, curr)
+ };
+ info!("thin LTO cache key map loaded");
+ info!("prev_key_map: {:#?}", prev_key_map);
+ info!("curr_key_map: {:#?}", curr_key_map);
+
+ // Throw our data in an `Arc` as we'll be sharing it across threads. We
+ // also put all memory referenced by the C++ data (buffers, ids, etc)
+ // into the arc as well. After this we'll create a thin module
+ // codegen per module in this data.
+ let shared = Arc::new(ThinShared {
+ data,
+ thin_buffers,
+ serialized_modules: serialized,
+ module_names,
+ });
+
+ let mut copy_jobs = vec![];
+ let mut opt_jobs = vec![];
+
+        info!("checking which modules can be re-used and which have to be re-optimized.");
+ for (module_index, module_name) in shared.module_names.iter().enumerate() {
+ let module_name = module_name_to_str(module_name);
+ if let (Some(prev_key_map), true) =
+ (prev_key_map.as_ref(), green_modules.contains_key(module_name))
+ {
+ assert!(cgcx.incr_comp_session_dir.is_some());
+
+ // If a module exists in both the current and the previous session,
+ // and has the same LTO cache key in both sessions, then we can re-use it
+ if prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name) {
+ let work_product = green_modules[module_name].clone();
+ copy_jobs.push(work_product);
+ info!(" - {}: re-used", module_name);
+ assert!(cgcx.incr_comp_session_dir.is_some());
+ cgcx.cgu_reuse_tracker.set_actual_reuse(module_name, CguReuse::PostLto);
+ continue;
+ }
+ }
+
+ info!(" - {}: re-compiled", module_name);
+ opt_jobs.push(LtoModuleCodegen::Thin(ThinModule {
+ shared: shared.clone(),
+ idx: module_index,
+ }));
+ }
+
+ // Save the current ThinLTO import information for the next compilation
+ // session, overwriting the previous serialized data (if any).
+ if let Some(path) = key_map_path {
+ if let Err(err) = curr_key_map.save_to_file(&path) {
+ let msg = format!("Error while writing ThinLTO key data: {}", err);
+ return Err(write::llvm_err(diag_handler, &msg));
+ }
+ }
+
+ Ok((opt_jobs, copy_jobs))
+ }
+}
+
+pub(crate) fn run_pass_manager(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ module: &mut ModuleCodegen<ModuleLlvm>,
+ thin: bool,
+) -> Result<(), FatalError> {
+ let _timer = cgcx.prof.extra_verbose_generic_activity("LLVM_lto_optimize", &*module.name);
+ let config = cgcx.config(module.kind);
+
+ // Now we have one massive module inside of llmod. Time to run the
+ // LTO-specific optimization passes that LLVM provides.
+ //
+ // This code is based off the code found in llvm's LTO code generator:
+ // llvm/lib/LTO/LTOCodeGenerator.cpp
+ debug!("running the pass manager");
+ unsafe {
+ if !llvm::LLVMRustHasModuleFlag(
+ module.module_llvm.llmod(),
+ "LTOPostLink".as_ptr().cast(),
+ 11,
+ ) {
+ llvm::LLVMRustAddModuleFlag(
+ module.module_llvm.llmod(),
+ llvm::LLVMModFlagBehavior::Error,
+ "LTOPostLink\0".as_ptr().cast(),
+ 1,
+ );
+ }
+ if llvm_util::should_use_new_llvm_pass_manager(
+ &config.new_llvm_pass_manager,
+ &cgcx.target_arch,
+ ) {
+ let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
+ let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
+ write::optimize_with_new_llvm_pass_manager(
+ cgcx,
+ diag_handler,
+ module,
+ config,
+ opt_level,
+ opt_stage,
+ )?;
+ debug!("lto done");
+ return Ok(());
+ }
+
+ let pm = llvm::LLVMCreatePassManager();
+ llvm::LLVMAddAnalysisPasses(module.module_llvm.tm, pm);
+
+ if config.verify_llvm_ir {
+ let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
+ llvm::LLVMRustAddPass(pm, pass.unwrap());
+ }
+
+ let opt_level = config
+ .opt_level
+ .map(|x| to_llvm_opt_settings(x).0)
+ .unwrap_or(llvm::CodeGenOptLevel::None);
+ with_llvm_pmb(module.module_llvm.llmod(), config, opt_level, false, &mut |b| {
+ if thin {
+ llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm);
+ } else {
+ llvm::LLVMRustPassManagerBuilderPopulateLTOPassManager(
+ b, pm, /* Internalize = */ False, /* RunInliner = */ True,
+ );
+ }
+ });
+
+ // We always generate bitcode through ThinLTOBuffers,
+ // which do not support anonymous globals
+ if config.bitcode_needed() {
+ let pass = llvm::LLVMRustFindAndCreatePass("name-anon-globals\0".as_ptr().cast());
+ llvm::LLVMRustAddPass(pm, pass.unwrap());
+ }
+
+ if config.verify_llvm_ir {
+ let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
+ llvm::LLVMRustAddPass(pm, pass.unwrap());
+ }
+
+ llvm::LLVMRunPassManager(pm, module.module_llvm.llmod());
+
+ llvm::LLVMDisposePassManager(pm);
+ }
+ debug!("lto done");
+ Ok(())
+}
+
+pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer);
+
+unsafe impl Send for ModuleBuffer {}
+unsafe impl Sync for ModuleBuffer {}
+
+impl ModuleBuffer {
+ pub fn new(m: &llvm::Module) -> ModuleBuffer {
+ ModuleBuffer(unsafe { llvm::LLVMRustModuleBufferCreate(m) })
+ }
+}
+
+impl ModuleBufferMethods for ModuleBuffer {
+ fn data(&self) -> &[u8] {
+ unsafe {
+ let ptr = llvm::LLVMRustModuleBufferPtr(self.0);
+ let len = llvm::LLVMRustModuleBufferLen(self.0);
+ slice::from_raw_parts(ptr, len)
+ }
+ }
+}
+
+impl Drop for ModuleBuffer {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustModuleBufferFree(&mut *(self.0 as *mut _));
+ }
+ }
+}
+
+pub struct ThinData(&'static mut llvm::ThinLTOData);
+
+unsafe impl Send for ThinData {}
+unsafe impl Sync for ThinData {}
+
+impl Drop for ThinData {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustFreeThinLTOData(&mut *(self.0 as *mut _));
+ }
+ }
+}
+
+pub struct ThinBuffer(&'static mut llvm::ThinLTOBuffer);
+
+unsafe impl Send for ThinBuffer {}
+unsafe impl Sync for ThinBuffer {}
+
+impl ThinBuffer {
+ pub fn new(m: &llvm::Module, is_thin: bool) -> ThinBuffer {
+ unsafe {
+ let buffer = llvm::LLVMRustThinLTOBufferCreate(m, is_thin);
+ ThinBuffer(buffer)
+ }
+ }
+}
+
+impl ThinBufferMethods for ThinBuffer {
+ fn data(&self) -> &[u8] {
+ unsafe {
+ let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _;
+ let len = llvm::LLVMRustThinLTOBufferLen(self.0);
+ slice::from_raw_parts(ptr, len)
+ }
+ }
+}
+
+impl Drop for ThinBuffer {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustThinLTOBufferFree(&mut *(self.0 as *mut _));
+ }
+ }
+}
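+
+// `ModuleBuffer`, `ThinData`, and `ThinBuffer` all follow the same RAII
+// pattern: each owns an allocation created on the C++ side of the FFI
+// boundary and releases it in `Drop` via the corresponding `LLVMRust*` free
+// function, so the `&'static mut` reference never outlives its wrapper.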
+
+pub unsafe fn optimize_thin_module(
+ thin_module: ThinModule<LlvmCodegenBackend>,
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+
+ let module_name = &thin_module.shared.module_names[thin_module.idx];
+ let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
+ let tm =
+ (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&diag_handler, &e))?;
+
+ // Right now the implementation we've got only works over serialized
+ // modules, so we create a fresh new LLVM context and parse the module
+    // into that context. One day, however, we may still do this for upstream
+    // crates, but for locally codegened modules we may be able to reuse
+    // their LLVM Context and Module.
+ let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
+ let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &diag_handler)? as *const _;
+ let mut module = ModuleCodegen {
+ module_llvm: ModuleLlvm { llmod_raw, llcx, tm },
+ name: thin_module.name().to_string(),
+ kind: ModuleKind::Regular,
+ };
+ {
+ let target = &*module.module_llvm.tm;
+ let llmod = module.module_llvm.llmod();
+ save_temp_bitcode(cgcx, &module, "thin-lto-input");
+
+        // Before we do much else, find the "main" `DICompileUnit` that we'll be
+ // using below. If we find more than one though then rustc has changed
+ // in a way we're not ready for, so generate an ICE by returning
+ // an error.
+ let mut cu1 = ptr::null_mut();
+ let mut cu2 = ptr::null_mut();
+ llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2);
+ if !cu2.is_null() {
+ let msg = "multiple source DICompileUnits found";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+
+ // Up next comes the per-module local analyses that we do for Thin LTO.
+ // Each of these functions is basically copied from the LLVM
+ // implementation and then tailored to suit this implementation. Ideally
+ // each of these would be supported by upstream LLVM but that's perhaps
+ // a patch for another day!
+ //
+ // You can find some more comments about these functions in the LLVM
+ // bindings we've got (currently `PassWrapper.cpp`)
+ {
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name());
+ if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) {
+ let msg = "failed to prepare thin LTO module";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-rename");
+ }
+
+ {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
+ if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) {
+ let msg = "failed to prepare thin LTO module";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
+ }
+
+ {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
+ if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) {
+ let msg = "failed to prepare thin LTO module";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
+ }
+
+ {
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name());
+ if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) {
+ let msg = "failed to prepare thin LTO module";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
+ }
+
+ // Ok now this is a bit unfortunate. This is also something you won't
+ // find upstream in LLVM's ThinLTO passes! This is a hack for now to
+ // work around bugs in LLVM.
+ //
+        // First discovered in #45511, it was found that as part of ThinLTO
+ // importing passes LLVM will import `DICompileUnit` metadata
+ // information across modules. This means that we'll be working with one
+ // LLVM module that has multiple `DICompileUnit` instances in it (a
+ // bunch of `llvm.dbg.cu` members). Unfortunately there's a number of
+ // bugs in LLVM's backend which generates invalid DWARF in a situation
+ // like this:
+ //
+ // https://bugs.llvm.org/show_bug.cgi?id=35212
+ // https://bugs.llvm.org/show_bug.cgi?id=35562
+ //
+ // While the first bug there is fixed the second ended up causing #46346
+ // which was basically a resurgence of #45511 after LLVM's bug 35212 was
+ // fixed.
+ //
+ // This function below is a huge hack around this problem. The function
+ // below is defined in `PassWrapper.cpp` and will basically "merge"
+ // all `DICompileUnit` instances in a module. Basically it'll take all
+ // the objects, rewrite all pointers of `DISubprogram` to point to the
+ // first `DICompileUnit`, and then delete all the other units.
+ //
+        // This is probably mangling the debug info slightly (but hopefully
+ // not too much) but for now at least gets LLVM to emit valid DWARF (or
+ // so it appears). Hopefully we can remove this once upstream bugs are
+ // fixed in LLVM.
+ {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_thin_lto_patch_debuginfo", thin_module.name());
+ llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1);
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-patch");
+ }
+
+ // Alright now that we've done everything related to the ThinLTO
+ // analysis it's time to run some optimizations! Here we use the same
+ // `run_pass_manager` as the "fat" LTO above except that we tell it to
+ // populate a thin-specific pass manager, which presumably LLVM treats a
+ // little differently.
+ {
+ info!("running thin lto passes over {}", module.name);
+ run_pass_manager(cgcx, &diag_handler, &mut module, true)?;
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
+ }
+ }
+ Ok(module)
+}
+
+/// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys
+#[derive(Debug, Default)]
+pub struct ThinLTOKeysMap {
+ // key = llvm name of importing module, value = LLVM cache key
+ keys: FxHashMap<String, String>,
+}
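+
+// The on-disk format, as written by `save_to_file` below, is one
+// space-separated `<module> <key>` pair per line, e.g. (hypothetical
+// module names and cache keys):
+//
+//     crate_name.cgu-0 9f3ac21e
+//     crate_name.cgu-1 41c70d8b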
+
+impl ThinLTOKeysMap {
+ fn save_to_file(&self, path: &Path) -> io::Result<()> {
+ use std::io::Write;
+ let file = File::create(path)?;
+ let mut writer = io::BufWriter::new(file);
+ for (module, key) in &self.keys {
+ writeln!(writer, "{} {}", module, key)?;
+ }
+ Ok(())
+ }
+
+ fn load_from_file(path: &Path) -> io::Result<Self> {
+ use std::io::BufRead;
+ let mut keys = FxHashMap::default();
+ let file = File::open(path)?;
+ for line in io::BufReader::new(file).lines() {
+ let line = line?;
+ let mut split = line.split(' ');
+ let module = split.next().unwrap();
+ let key = split.next().unwrap();
+ assert_eq!(split.next(), None, "Expected two space-separated values, found {:?}", line);
+ keys.insert(module.to_string(), key.to_string());
+ }
+ Ok(Self { keys })
+ }
+
+ fn from_thin_lto_modules(
+ data: &ThinData,
+ modules: &[llvm::ThinLTOModule],
+ names: &[CString],
+ ) -> Self {
+ let keys = iter::zip(modules, names)
+ .map(|(module, name)| {
+ let key = build_string(|rust_str| unsafe {
+ llvm::LLVMRustComputeLTOCacheKey(rust_str, module.identifier, data.0);
+ })
+ .expect("Invalid ThinLTO module key");
+ (name.clone().into_string().unwrap(), key)
+ })
+ .collect();
+ Self { keys }
+ }
+}
+
+fn module_name_to_str(c_str: &CStr) -> &str {
+ c_str.to_str().unwrap_or_else(|e| {
+ bug!("Encountered non-utf8 LLVM module name `{}`: {}", c_str.to_string_lossy(), e)
+ })
+}
+
+pub fn parse_module<'a>(
+ cx: &'a llvm::Context,
+ name: &CStr,
+ data: &[u8],
+ diag_handler: &Handler,
+) -> Result<&'a llvm::Module, FatalError> {
+ unsafe {
+ llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr()).ok_or_else(
+ || {
+ let msg = "failed to parse bitcode for LTO module";
+ write::llvm_err(diag_handler, msg)
+ },
+ )
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/profiling.rs b/compiler/rustc_codegen_llvm/src/back/profiling.rs
new file mode 100644
index 000000000..2741f7d84
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/profiling.rs
@@ -0,0 +1,58 @@
+use measureme::{event_id::SEPARATOR_BYTE, EventId, StringComponent, StringId};
+use rustc_data_structures::profiling::{SelfProfiler, TimingGuard};
+use std::ffi::{c_void, CStr};
+use std::os::raw::c_char;
+use std::sync::Arc;
+
+fn llvm_args_to_string_id(profiler: &SelfProfiler, pass_name: &str, ir_name: &str) -> EventId {
+ let pass_name = profiler.get_or_alloc_cached_string(pass_name);
+ let mut components = vec![StringComponent::Ref(pass_name)];
+    // handle that the IR name of a LazyCallGraph::SCC is a comma-separated list within parentheses
+ let parentheses: &[_] = &['(', ')'];
+ let trimmed = ir_name.trim_matches(parentheses);
+ for part in trimmed.split(", ") {
+ let demangled_ir_name = rustc_demangle::demangle(part).to_string();
+ let ir_name = profiler.get_or_alloc_cached_string(demangled_ir_name);
+ components.push(StringComponent::Value(SEPARATOR_BYTE));
+ components.push(StringComponent::Ref(ir_name));
+ }
+ EventId::from_label(profiler.alloc_string(components.as_slice()))
+}
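+
+// For example (hypothetical symbols), pass_name = "InlinerPass" and
+// ir_name = "(_ZN3foo3barE, _ZN3foo3bazE)" produce an event label of the pass
+// name followed by each demangled symbol name, separated by SEPARATOR_BYTE.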
+
+pub struct LlvmSelfProfiler<'a> {
+ profiler: Arc<SelfProfiler>,
+ stack: Vec<TimingGuard<'a>>,
+ llvm_pass_event_kind: StringId,
+}
+
+impl<'a> LlvmSelfProfiler<'a> {
+ pub fn new(profiler: Arc<SelfProfiler>) -> Self {
+ let llvm_pass_event_kind = profiler.alloc_string("LLVM Pass");
+ Self { profiler, stack: Vec::default(), llvm_pass_event_kind }
+ }
+
+ fn before_pass_callback(&'a mut self, pass_name: &str, ir_name: &str) {
+ let event_id = llvm_args_to_string_id(&self.profiler, pass_name, ir_name);
+
+ self.stack.push(TimingGuard::start(&self.profiler, self.llvm_pass_event_kind, event_id));
+ }
+ fn after_pass_callback(&mut self) {
+ self.stack.pop();
+ }
+}
+
+pub unsafe extern "C" fn selfprofile_before_pass_callback(
+ llvm_self_profiler: *mut c_void,
+ pass_name: *const c_char,
+ ir_name: *const c_char,
+) {
+ let llvm_self_profiler = &mut *(llvm_self_profiler as *mut LlvmSelfProfiler<'_>);
+ let pass_name = CStr::from_ptr(pass_name).to_str().expect("valid UTF-8");
+ let ir_name = CStr::from_ptr(ir_name).to_str().expect("valid UTF-8");
+ llvm_self_profiler.before_pass_callback(pass_name, ir_name);
+}
+
+pub unsafe extern "C" fn selfprofile_after_pass_callback(llvm_self_profiler: *mut c_void) {
+ let llvm_self_profiler = &mut *(llvm_self_profiler as *mut LlvmSelfProfiler<'_>);
+ llvm_self_profiler.after_pass_callback();
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
new file mode 100644
index 000000000..534d32e8a
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -0,0 +1,1212 @@
+use crate::back::lto::ThinBuffer;
+use crate::back::profiling::{
+ selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler,
+};
+use crate::base;
+use crate::common;
+use crate::consts;
+use crate::llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic};
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::LlvmCodegenBackend;
+use crate::ModuleLlvm;
+use rustc_codegen_ssa::back::link::ensure_removed;
+use rustc_codegen_ssa::back::write::{
+ BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
+ TargetMachineFactoryFn,
+};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_errors::{FatalError, Handler, Level};
+use rustc_fs_util::{link_or_copy, path_to_c_string};
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{self, Lto, OutputType, Passes, SplitDwarfKind, SwitchWithOptPath};
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::InnerSpan;
+use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo};
+use tracing::debug;
+
+use libc::{c_char, c_int, c_uint, c_void, size_t};
+use std::ffi::CString;
+use std::fs;
+use std::io::{self, Write};
+use std::path::{Path, PathBuf};
+use std::slice;
+use std::str;
+use std::sync::Arc;
+
+pub fn llvm_err(handler: &rustc_errors::Handler, msg: &str) -> FatalError {
+ match llvm::last_error() {
+ Some(err) => handler.fatal(&format!("{}: {}", msg, err)),
+ None => handler.fatal(msg),
+ }
+}
+
+pub fn write_output_file<'ll>(
+ handler: &rustc_errors::Handler,
+ target: &'ll llvm::TargetMachine,
+ pm: &llvm::PassManager<'ll>,
+ m: &'ll llvm::Module,
+ output: &Path,
+ dwo_output: Option<&Path>,
+ file_type: llvm::FileType,
+ self_profiler_ref: &SelfProfilerRef,
+) -> Result<(), FatalError> {
+ debug!("write_output_file output={:?} dwo_output={:?}", output, dwo_output);
+ unsafe {
+ let output_c = path_to_c_string(output);
+ let dwo_output_c;
+ let dwo_output_ptr = if let Some(dwo_output) = dwo_output {
+ dwo_output_c = path_to_c_string(dwo_output);
+ dwo_output_c.as_ptr()
+ } else {
+ std::ptr::null()
+ };
+ let result = llvm::LLVMRustWriteOutputFile(
+ target,
+ pm,
+ m,
+ output_c.as_ptr(),
+ dwo_output_ptr,
+ file_type,
+ );
+
+ // Record artifact sizes for self-profiling
+ if result == llvm::LLVMRustResult::Success {
+ let artifact_kind = match file_type {
+ llvm::FileType::ObjectFile => "object_file",
+ llvm::FileType::AssemblyFile => "assembly_file",
+ };
+ record_artifact_size(self_profiler_ref, artifact_kind, output);
+ if let Some(dwo_file) = dwo_output {
+ record_artifact_size(self_profiler_ref, "dwo_file", dwo_file);
+ }
+ }
+
+ result.into_result().map_err(|()| {
+ let msg = format!("could not write output to {}", output.display());
+ llvm_err(handler, &msg)
+ })
+ }
+}
+
+pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm::TargetMachine {
+ let config = TargetMachineFactoryConfig { split_dwarf_file: None };
+ // Can't use query system here quite yet because this function is invoked before the query
+ // system/tcx is set up.
+ let features = llvm_util::global_llvm_features(sess, false);
+ target_machine_factory(sess, config::OptLevel::No, &features)(config)
+ .unwrap_or_else(|err| llvm_err(sess.diagnostic(), &err).raise())
+}
+
+pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut llvm::TargetMachine {
+ let split_dwarf_file = if tcx.sess.target_can_use_split_dwarf() {
+ tcx.output_filenames(()).split_dwarf_path(
+ tcx.sess.split_debuginfo(),
+ tcx.sess.opts.unstable_opts.split_dwarf_kind,
+ Some(mod_name),
+ )
+ } else {
+ None
+ };
+ let config = TargetMachineFactoryConfig { split_dwarf_file };
+ target_machine_factory(
+ &tcx.sess,
+ tcx.backend_optimization_level(()),
+ tcx.global_backend_features(()),
+ )(config)
+ .unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), &err).raise())
+}
+
+pub fn to_llvm_opt_settings(
+ cfg: config::OptLevel,
+) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) {
+ use self::config::OptLevel::*;
+ match cfg {
+ No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone),
+ Less => (llvm::CodeGenOptLevel::Less, llvm::CodeGenOptSizeNone),
+ Default => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeNone),
+ Aggressive => (llvm::CodeGenOptLevel::Aggressive, llvm::CodeGenOptSizeNone),
+ Size => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeDefault),
+ SizeMin => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeAggressive),
+ }
+}
+
+fn to_pass_builder_opt_level(cfg: config::OptLevel) -> llvm::PassBuilderOptLevel {
+ use config::OptLevel::*;
+ match cfg {
+ No => llvm::PassBuilderOptLevel::O0,
+ Less => llvm::PassBuilderOptLevel::O1,
+ Default => llvm::PassBuilderOptLevel::O2,
+ Aggressive => llvm::PassBuilderOptLevel::O3,
+ Size => llvm::PassBuilderOptLevel::Os,
+ SizeMin => llvm::PassBuilderOptLevel::Oz,
+ }
+}
+
+fn to_llvm_relocation_model(relocation_model: RelocModel) -> llvm::RelocModel {
+ match relocation_model {
+ RelocModel::Static => llvm::RelocModel::Static,
+    // LLVM doesn't have a PIE relocation model; it represents PIE as PIC with an extra attribute.
+ RelocModel::Pic | RelocModel::Pie => llvm::RelocModel::PIC,
+ RelocModel::DynamicNoPic => llvm::RelocModel::DynamicNoPic,
+ RelocModel::Ropi => llvm::RelocModel::ROPI,
+ RelocModel::Rwpi => llvm::RelocModel::RWPI,
+ RelocModel::RopiRwpi => llvm::RelocModel::ROPI_RWPI,
+ }
+}
+
+pub(crate) fn to_llvm_code_model(code_model: Option<CodeModel>) -> llvm::CodeModel {
+ match code_model {
+ Some(CodeModel::Tiny) => llvm::CodeModel::Tiny,
+ Some(CodeModel::Small) => llvm::CodeModel::Small,
+ Some(CodeModel::Kernel) => llvm::CodeModel::Kernel,
+ Some(CodeModel::Medium) => llvm::CodeModel::Medium,
+ Some(CodeModel::Large) => llvm::CodeModel::Large,
+ None => llvm::CodeModel::None,
+ }
+}
+
+pub fn target_machine_factory(
+ sess: &Session,
+ optlvl: config::OptLevel,
+ target_features: &[String],
+) -> TargetMachineFactoryFn<LlvmCodegenBackend> {
+ let reloc_model = to_llvm_relocation_model(sess.relocation_model());
+
+ let (opt_level, _) = to_llvm_opt_settings(optlvl);
+ let use_softfp = sess.opts.cg.soft_float;
+
+ let ffunction_sections =
+ sess.opts.unstable_opts.function_sections.unwrap_or(sess.target.function_sections);
+ let fdata_sections = ffunction_sections;
+ let funique_section_names = !sess.opts.unstable_opts.no_unique_section_names;
+
+ let code_model = to_llvm_code_model(sess.code_model());
+
+ let mut singlethread = sess.target.singlethread;
+
+    // On the wasm target, once the `atomics` feature is enabled we're no
+    // longer single-threaded, so we don't want LLVM to lower atomic
+    // operations to their single-threaded equivalents.
+ if singlethread && sess.target.is_like_wasm && sess.target_features.contains(&sym::atomics) {
+ singlethread = false;
+ }
+
+ let triple = SmallCStr::new(&sess.target.llvm_target);
+ let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
+ let features = CString::new(target_features.join(",")).unwrap();
+ let abi = SmallCStr::new(&sess.target.llvm_abiname);
+ let trap_unreachable =
+ sess.opts.unstable_opts.trap_unreachable.unwrap_or(sess.target.trap_unreachable);
+ let emit_stack_size_section = sess.opts.unstable_opts.emit_stack_sizes;
+
+ let asm_comments = sess.asm_comments();
+ let relax_elf_relocations =
+ sess.opts.unstable_opts.relax_elf_relocations.unwrap_or(sess.target.relax_elf_relocations);
+
+ let use_init_array =
+ !sess.opts.unstable_opts.use_ctors_section.unwrap_or(sess.target.use_ctors_section);
+
+ let path_mapping = sess.source_map().path_mapping().clone();
+
+ Arc::new(move |config: TargetMachineFactoryConfig| {
+ let split_dwarf_file =
+ path_mapping.map_prefix(config.split_dwarf_file.unwrap_or_default()).0;
+ let split_dwarf_file = CString::new(split_dwarf_file.to_str().unwrap()).unwrap();
+
+ let tm = unsafe {
+ llvm::LLVMRustCreateTargetMachine(
+ triple.as_ptr(),
+ cpu.as_ptr(),
+ features.as_ptr(),
+ abi.as_ptr(),
+ code_model,
+ reloc_model,
+ opt_level,
+ use_softfp,
+ ffunction_sections,
+ fdata_sections,
+ funique_section_names,
+ trap_unreachable,
+ singlethread,
+ asm_comments,
+ emit_stack_size_section,
+ relax_elf_relocations,
+ use_init_array,
+ split_dwarf_file.as_ptr(),
+ )
+ };
+
+ tm.ok_or_else(|| {
+ format!("Could not create LLVM TargetMachine for triple: {}", triple.to_str().unwrap())
+ })
+ })
+}
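+
+// NOTE (illustrative sketch, not part of the API surface): the returned
+// factory is an `Arc`'d closure that codegen worker threads invoke once per
+// module, roughly:
+//
+//     let factory = target_machine_factory(sess, optlvl, &target_features);
+//     let tm = factory(TargetMachineFactoryConfig { split_dwarf_file })?;
+//
+// `TargetMachineFactoryConfig` is defined in rustc_codegen_ssa; the sketch
+// assumes only its `split_dwarf_file` field, which is what the closure reads.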
+
+pub(crate) fn save_temp_bitcode(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ module: &ModuleCodegen<ModuleLlvm>,
+ name: &str,
+) {
+ if !cgcx.save_temps {
+ return;
+ }
+ unsafe {
+ let ext = format!("{}.bc", name);
+ let cgu = Some(&module.name[..]);
+ let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
+ let cstr = path_to_c_string(&path);
+ let llmod = module.module_llvm.llmod();
+ llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
+ }
+}
+
+pub struct DiagnosticHandlers<'a> {
+ data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
+ llcx: &'a llvm::Context,
+ old_handler: Option<&'a llvm::DiagnosticHandler>,
+}
+
+impl<'a> DiagnosticHandlers<'a> {
+ pub fn new(
+ cgcx: &'a CodegenContext<LlvmCodegenBackend>,
+ handler: &'a Handler,
+ llcx: &'a llvm::Context,
+ ) -> Self {
+ let remark_passes_all: bool;
+ let remark_passes: Vec<CString>;
+ match &cgcx.remark {
+ Passes::All => {
+ remark_passes_all = true;
+ remark_passes = Vec::new();
+ }
+ Passes::Some(passes) => {
+ remark_passes_all = false;
+ remark_passes =
+ passes.iter().map(|name| CString::new(name.as_str()).unwrap()).collect();
+ }
+ };
+ let remark_passes: Vec<*const c_char> =
+ remark_passes.iter().map(|name: &CString| name.as_ptr()).collect();
+ let data = Box::into_raw(Box::new((cgcx, handler)));
+ unsafe {
+ let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
+ llvm::LLVMRustContextConfigureDiagnosticHandler(
+ llcx,
+ diagnostic_handler,
+ data.cast(),
+ remark_passes_all,
+ remark_passes.as_ptr(),
+ remark_passes.len(),
+ );
+ llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
+ DiagnosticHandlers { data, llcx, old_handler }
+ }
+ }
+}
+
+impl<'a> Drop for DiagnosticHandlers<'a> {
+ fn drop(&mut self) {
+ use std::ptr::null_mut;
+ unsafe {
+ llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
+ llvm::LLVMRustContextSetDiagnosticHandler(self.llcx, self.old_handler);
+ drop(Box::from_raw(self.data));
+ }
+ }
+}
+
+fn report_inline_asm(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ msg: String,
+ level: llvm::DiagnosticLevel,
+ mut cookie: c_uint,
+ source: Option<(String, Vec<InnerSpan>)>,
+) {
+ // In LTO builds we may get srcloc values from other crates, which are
+ // invalid since they use a different source map. To be safe we just
+ // suppress these in LTO builds.
+ if matches!(cgcx.lto, Lto::Fat | Lto::Thin) {
+ cookie = 0;
+ }
+ let level = match level {
+ llvm::DiagnosticLevel::Error => Level::Error { lint: false },
+ llvm::DiagnosticLevel::Warning => Level::Warning(None),
+ llvm::DiagnosticLevel::Note | llvm::DiagnosticLevel::Remark => Level::Note,
+ };
+ cgcx.diag_emitter.inline_asm_error(cookie as u32, msg, level, source);
+}
+
+unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, user: *const c_void, cookie: c_uint) {
+ if user.is_null() {
+ return;
+ }
+ let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
+
+ let smdiag = llvm::diagnostic::SrcMgrDiagnostic::unpack(diag);
+ report_inline_asm(cgcx, smdiag.message, smdiag.level, cookie, smdiag.source);
+}
+
+unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
+ if user.is_null() {
+ return;
+ }
+ let (cgcx, diag_handler) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
+
+ match llvm::diagnostic::Diagnostic::unpack(info) {
+ llvm::diagnostic::InlineAsm(inline) => {
+ report_inline_asm(cgcx, inline.message, inline.level, inline.cookie, inline.source);
+ }
+
+ llvm::diagnostic::Optimization(opt) => {
+ let enabled = match cgcx.remark {
+ Passes::All => true,
+ Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
+ };
+
+ if enabled {
+ diag_handler.note_without_error(&format!(
+ "{}:{}:{}: {}: {}",
+ opt.filename, opt.line, opt.column, opt.pass_name, opt.message,
+ ));
+ }
+ }
+ llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
+ let msg = llvm::build_string(|s| {
+ llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
+ })
+ .expect("non-UTF8 diagnostic");
+ diag_handler.warn(&msg);
+ }
+ llvm::diagnostic::Unsupported(diagnostic_ref) => {
+ let msg = llvm::build_string(|s| {
+ llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
+ })
+ .expect("non-UTF8 diagnostic");
+ diag_handler.err(&msg);
+ }
+ llvm::diagnostic::UnknownDiagnostic(..) => {}
+ }
+}
+
+fn get_pgo_gen_path(config: &ModuleConfig) -> Option<CString> {
+ match config.pgo_gen {
+ SwitchWithOptPath::Enabled(ref opt_dir_path) => {
+ let path = if let Some(dir_path) = opt_dir_path {
+ dir_path.join("default_%m.profraw")
+ } else {
+ PathBuf::from("default_%m.profraw")
+ };
+
+ Some(CString::new(format!("{}", path.display())).unwrap())
+ }
+ SwitchWithOptPath::Disabled => None,
+ }
+}
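+
+// NOTE: `%m` in the `default_%m.profraw` pattern above is expanded by LLVM's
+// profiling runtime into a hash of the instrumented binary, so concurrent
+// runs can merge profiles without clobbering each other (producing e.g.
+// `default_<hash>.profraw` in the chosen directory).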
+
+fn get_pgo_use_path(config: &ModuleConfig) -> Option<CString> {
+ config
+ .pgo_use
+ .as_ref()
+ .map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
+}
+
+fn get_pgo_sample_use_path(config: &ModuleConfig) -> Option<CString> {
+ config
+ .pgo_sample_use
+ .as_ref()
+ .map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
+}
+
+pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ module: &ModuleCodegen<ModuleLlvm>,
+ config: &ModuleConfig,
+ opt_level: config::OptLevel,
+ opt_stage: llvm::OptStage,
+) -> Result<(), FatalError> {
+ let unroll_loops =
+ opt_level != config::OptLevel::Size && opt_level != config::OptLevel::SizeMin;
+ let using_thin_buffers = opt_stage == llvm::OptStage::PreLinkThinLTO || config.bitcode_needed();
+ let pgo_gen_path = get_pgo_gen_path(config);
+ let pgo_use_path = get_pgo_use_path(config);
+ let pgo_sample_use_path = get_pgo_sample_use_path(config);
+ let is_lto = opt_stage == llvm::OptStage::ThinLTO || opt_stage == llvm::OptStage::FatLTO;
+ // Sanitizer instrumentation is only inserted during the pre-link optimization stage.
+ let sanitizer_options = if !is_lto {
+ Some(llvm::SanitizerOptions {
+ sanitize_address: config.sanitizer.contains(SanitizerSet::ADDRESS),
+ sanitize_address_recover: config.sanitizer_recover.contains(SanitizerSet::ADDRESS),
+ sanitize_memory: config.sanitizer.contains(SanitizerSet::MEMORY),
+ sanitize_memory_recover: config.sanitizer_recover.contains(SanitizerSet::MEMORY),
+ sanitize_memory_track_origins: config.sanitizer_memory_track_origins as c_int,
+ sanitize_thread: config.sanitizer.contains(SanitizerSet::THREAD),
+ sanitize_hwaddress: config.sanitizer.contains(SanitizerSet::HWADDRESS),
+ sanitize_hwaddress_recover: config.sanitizer_recover.contains(SanitizerSet::HWADDRESS),
+ })
+ } else {
+ None
+ };
+
+ let mut llvm_profiler = if cgcx.prof.llvm_recording_enabled() {
+ Some(LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap()))
+ } else {
+ None
+ };
+
+ let llvm_selfprofiler =
+ llvm_profiler.as_mut().map(|s| s as *mut _ as *mut c_void).unwrap_or(std::ptr::null_mut());
+
+ let extra_passes = if !is_lto { config.passes.join(",") } else { "".to_string() };
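+ // (`extra_passes` above forwards `-C passes=a,b` to LLVM as the textual
+ // pipeline string "a,b"; it is intentionally empty for LTO stages, where
+ // the pipeline is fixed.)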
+
+ let llvm_plugins = config.llvm_plugins.join(",");
+
+ // FIXME: NewPM doesn't provide a facility to pass custom InlineParams.
+ // We would have to add upstream support for this first, before we can support
+ // config.inline_threshold and our more aggressive default thresholds.
+ let result = llvm::LLVMRustOptimizeWithNewPassManager(
+ module.module_llvm.llmod(),
+ &*module.module_llvm.tm,
+ to_pass_builder_opt_level(opt_level),
+ opt_stage,
+ config.no_prepopulate_passes,
+ config.verify_llvm_ir,
+ using_thin_buffers,
+ config.merge_functions,
+ unroll_loops,
+ config.vectorize_slp,
+ config.vectorize_loop,
+ config.no_builtins,
+ config.emit_lifetime_markers,
+ sanitizer_options.as_ref(),
+ pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+ pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+ config.instrument_coverage,
+ config.instrument_gcov,
+ pgo_sample_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+ config.debug_info_for_profiling,
+ llvm_selfprofiler,
+ selfprofile_before_pass_callback,
+ selfprofile_after_pass_callback,
+ extra_passes.as_ptr().cast(),
+ extra_passes.len(),
+ llvm_plugins.as_ptr().cast(),
+ llvm_plugins.len(),
+ );
+ result.into_result().map_err(|()| llvm_err(diag_handler, "failed to run LLVM passes"))
+}
+
+// Unsafe due to LLVM calls.
+pub(crate) unsafe fn optimize(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ module: &ModuleCodegen<ModuleLlvm>,
+ config: &ModuleConfig,
+) -> Result<(), FatalError> {
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name);
+
+ let llmod = module.module_llvm.llmod();
+ let llcx = &*module.module_llvm.llcx;
+ let tm = &*module.module_llvm.tm;
+ let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+ let module_name = module.name.clone();
+ let module_name = Some(&module_name[..]);
+
+ if let Some(false) = config.new_llvm_pass_manager && llvm_util::get_version() >= (15, 0, 0) {
+ diag_handler.warn(
+ "ignoring `-Z new-llvm-pass-manager=no`, which is no longer supported with LLVM 15",
+ );
+ }
+
+ if config.emit_no_opt_bc {
+ let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
+ let out = path_to_c_string(&out);
+ llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
+ }
+
+ if let Some(opt_level) = config.opt_level {
+ if llvm_util::should_use_new_llvm_pass_manager(
+ &config.new_llvm_pass_manager,
+ &cgcx.target_arch,
+ ) {
+ let opt_stage = match cgcx.lto {
+ Lto::Fat => llvm::OptStage::PreLinkFatLTO,
+ Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
+ _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
+ _ => llvm::OptStage::PreLinkNoLTO,
+ };
+ return optimize_with_new_llvm_pass_manager(
+ cgcx,
+ diag_handler,
+ module,
+ config,
+ opt_level,
+ opt_stage,
+ );
+ }
+
+ if cgcx.prof.llvm_recording_enabled() {
+ diag_handler
+ .warn("`-Z self-profile-events = llvm` requires `-Z new-llvm-pass-manager`");
+ }
+
+ // Create the two optimizing pass managers. These mirror what clang
+ // does, and are populated by LLVM's default PassManagerBuilder.
+ // Each manager has a different set of passes, but they also share
+ // some common passes.
+ let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
+ let mpm = llvm::LLVMCreatePassManager();
+
+ {
+ let find_pass = |pass_name: &str| {
+ let pass_name = SmallCStr::new(pass_name);
+ llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr())
+ };
+
+ if config.verify_llvm_ir {
+ // Verification should run as the very first pass.
+ llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap());
+ }
+
+ let mut extra_passes = Vec::new();
+ let mut have_name_anon_globals_pass = false;
+
+ for pass_name in &config.passes {
+ if pass_name == "lint" {
+ // Linting should also be performed early, directly on the generated IR.
+ llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
+ continue;
+ }
+
+ if let Some(pass) = find_pass(pass_name) {
+ extra_passes.push(pass);
+ } else {
+ diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name));
+ }
+
+ if pass_name == "name-anon-globals" {
+ have_name_anon_globals_pass = true;
+ }
+ }
+
+ // Instrumentation must be inserted before optimization;
+ // otherwise LLVM may optimize some functions away, which
+ // breaks llvm-cov.
+ //
+ // This mirrors what Clang does in lib/CodeGen/BackendUtil.cpp.
+ if config.instrument_gcov {
+ llvm::LLVMRustAddPass(mpm, find_pass("insert-gcov-profiling").unwrap());
+ }
+ if config.instrument_coverage {
+ llvm::LLVMRustAddPass(mpm, find_pass("instrprof").unwrap());
+ }
+ if config.debug_info_for_profiling {
+ llvm::LLVMRustAddPass(mpm, find_pass("add-discriminators").unwrap());
+ }
+
+ add_sanitizer_passes(config, &mut extra_passes);
+
+ // Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
+ // to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
+ // we'll get errors in LLVM.
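+ // (NameAnonGlobals gives each anonymous global a stable name, roughly
+ // `anon.<module hash>.<index>`, since the ThinLTO buffer format cannot
+ // represent unnamed globals.)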
+ let using_thin_buffers = config.bitcode_needed();
+ if !config.no_prepopulate_passes {
+ llvm::LLVMAddAnalysisPasses(tm, fpm);
+ llvm::LLVMAddAnalysisPasses(tm, mpm);
+ let opt_level = to_llvm_opt_settings(opt_level).0;
+ let prepare_for_thin_lto = cgcx.lto == Lto::Thin
+ || cgcx.lto == Lto::ThinLocal
+ || (cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
+ with_llvm_pmb(llmod, config, opt_level, prepare_for_thin_lto, &mut |b| {
+ llvm::LLVMRustAddLastExtensionPasses(
+ b,
+ extra_passes.as_ptr(),
+ extra_passes.len() as size_t,
+ );
+ llvm::LLVMRustPassManagerBuilderPopulateFunctionPassManager(b, fpm);
+ llvm::LLVMRustPassManagerBuilderPopulateModulePassManager(b, mpm);
+ });
+
+ have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
+ if using_thin_buffers && !prepare_for_thin_lto {
+ llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
+ have_name_anon_globals_pass = true;
+ }
+ } else {
+ // If we don't use the standard pipeline, directly populate the MPM
+ // with the extra passes.
+ for pass in extra_passes {
+ llvm::LLVMRustAddPass(mpm, pass);
+ }
+ }
+
+ if using_thin_buffers && !have_name_anon_globals_pass {
+ // As described above, this will probably cause an error in LLVM
+ if config.no_prepopulate_passes {
+ diag_handler.err(
+ "The current compilation is going to use thin LTO buffers \
+ without running LLVM's NameAnonGlobals pass. \
+ This will likely cause errors in LLVM. Consider adding \
+ -C passes=name-anon-globals to the compiler command line.",
+ );
+ } else {
+ bug!(
+ "We are using thin LTO buffers without running the NameAnonGlobals pass. \
+ This will likely cause errors in LLVM and should never happen."
+ );
+ }
+ }
+ }
+
+ diag_handler.abort_if_errors();
+
+ // Finally, run the actual optimization passes
+ {
+ let _timer = cgcx.prof.extra_verbose_generic_activity(
+ "LLVM_module_optimize_function_passes",
+ &*module.name,
+ );
+ llvm::LLVMRustRunFunctionPassManager(fpm, llmod);
+ }
+ {
+ let _timer = cgcx.prof.extra_verbose_generic_activity(
+ "LLVM_module_optimize_module_passes",
+ &*module.name,
+ );
+ llvm::LLVMRunPassManager(mpm, llmod);
+ }
+
+ // Deallocate managers that we're now done with
+ llvm::LLVMDisposePassManager(fpm);
+ llvm::LLVMDisposePassManager(mpm);
+ }
+ Ok(())
+}
+
+unsafe fn add_sanitizer_passes(config: &ModuleConfig, passes: &mut Vec<&'static mut llvm::Pass>) {
+ if config.sanitizer.contains(SanitizerSet::ADDRESS) {
+ let recover = config.sanitizer_recover.contains(SanitizerSet::ADDRESS);
+ passes.push(llvm::LLVMRustCreateAddressSanitizerFunctionPass(recover));
+ passes.push(llvm::LLVMRustCreateModuleAddressSanitizerPass(recover));
+ }
+ if config.sanitizer.contains(SanitizerSet::MEMORY) {
+ let track_origins = config.sanitizer_memory_track_origins as c_int;
+ let recover = config.sanitizer_recover.contains(SanitizerSet::MEMORY);
+ passes.push(llvm::LLVMRustCreateMemorySanitizerPass(track_origins, recover));
+ }
+ if config.sanitizer.contains(SanitizerSet::THREAD) {
+ passes.push(llvm::LLVMRustCreateThreadSanitizerPass());
+ }
+ if config.sanitizer.contains(SanitizerSet::HWADDRESS) {
+ let recover = config.sanitizer_recover.contains(SanitizerSet::HWADDRESS);
+ passes.push(llvm::LLVMRustCreateHWAddressSanitizerPass(recover));
+ }
+}
+
+pub(crate) fn link(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ mut modules: Vec<ModuleCodegen<ModuleLlvm>>,
+) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
+ use super::lto::{Linker, ModuleBuffer};
+ // Sort the modules by name to ensure deterministic behavior.
+ modules.sort_by(|a, b| a.name.cmp(&b.name));
+ let (first, elements) =
+ modules.split_first().expect("Bug! modules must contain at least one module.");
+
+ let mut linker = Linker::new(first.module_llvm.llmod());
+ for module in elements {
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_link_module", &*module.name);
+ let buffer = ModuleBuffer::new(module.module_llvm.llmod());
+ linker.add(buffer.data()).map_err(|()| {
+ let msg = format!("failed to serialize module {:?}", module.name);
+ llvm_err(diag_handler, &msg)
+ })?;
+ }
+ drop(linker);
+ Ok(modules.remove(0))
+}
+
+pub(crate) unsafe fn codegen(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ module: ModuleCodegen<ModuleLlvm>,
+ config: &ModuleConfig,
+) -> Result<CompiledModule, FatalError> {
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
+ {
+ let llmod = module.module_llvm.llmod();
+ let llcx = &*module.module_llvm.llcx;
+ let tm = &*module.module_llvm.tm;
+ let module_name = module.name.clone();
+ let module_name = Some(&module_name[..]);
+ let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+ if cgcx.msvc_imps_needed {
+ create_msvc_imps(cgcx, llcx, llmod);
+ }
+
+ // A codegen-specific pass manager is used to generate object
+ // files for an LLVM module.
+ //
+ // Apparently each of these pass managers is a one-shot kind of
+ // thing, so we create a new one for each type of output. The
+ // pass manager passed to the closure must not escape the closure
+ // itself, and each manager should only be used once.
+ unsafe fn with_codegen<'ll, F, R>(
+ tm: &'ll llvm::TargetMachine,
+ llmod: &'ll llvm::Module,
+ no_builtins: bool,
+ f: F,
+ ) -> R
+ where
+ F: FnOnce(&'ll mut PassManager<'ll>) -> R,
+ {
+ let cpm = llvm::LLVMCreatePassManager();
+ llvm::LLVMAddAnalysisPasses(tm, cpm);
+ llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
+ f(cpm)
+ }
+
+ // Two things to note:
+ // - If object files are just LLVM bitcode we write bitcode, copy it to
+ // the .o file, and delete the bitcode if it wasn't otherwise
+ // requested.
+ // - If we don't have the integrated assembler then we need to emit
+ // asm from LLVM and use `gcc` to create the object file.
+
+ let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
+ let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
+
+ if config.bitcode_needed() {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &*module.name);
+ let thin = ThinBuffer::new(llmod, config.emit_thin_lto);
+ let data = thin.data();
+
+ if let Some(bitcode_filename) = bc_out.file_name() {
+ cgcx.prof.artifact_size(
+ "llvm_bitcode",
+ bitcode_filename.to_string_lossy(),
+ data.len() as u64,
+ );
+ }
+
+ if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_emit_bitcode", &*module.name);
+ if let Err(e) = fs::write(&bc_out, data) {
+ let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
+ diag_handler.err(&msg);
+ }
+ }
+
+ if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_embed_bitcode", &*module.name);
+ embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data);
+ }
+ }
+
+ if config.emit_ir {
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_ir", &*module.name);
+ let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
+ let out_c = path_to_c_string(&out);
+
+ extern "C" fn demangle_callback(
+ input_ptr: *const c_char,
+ input_len: size_t,
+ output_ptr: *mut c_char,
+ output_len: size_t,
+ ) -> size_t {
+ let input =
+ unsafe { slice::from_raw_parts(input_ptr as *const u8, input_len as usize) };
+
+ let Ok(input) = str::from_utf8(input) else { return 0 };
+
+ let output = unsafe {
+ slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
+ };
+ let mut cursor = io::Cursor::new(output);
+
+ let Ok(demangled) = rustc_demangle::try_demangle(input) else { return 0 };
+
+ if write!(cursor, "{:#}", demangled).is_err() {
+ // This is possible only if the provided buffer is not big enough.
+ return 0;
+ }
+
+ cursor.position() as size_t
+ }
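+
+ // With the callback above, LLVM prints e.g. `core::ptr::drop_in_place`
+ // instead of the raw mangled symbol `_ZN4core3ptr13drop_in_place17h<hash>E`;
+ // the alternate `{:#}` formatting is what strips the trailing hash.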
+
+ let result = llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback);
+
+ if result == llvm::LLVMRustResult::Success {
+ record_artifact_size(&cgcx.prof, "llvm_ir", &out);
+ }
+
+ result.into_result().map_err(|()| {
+ let msg = format!("failed to write LLVM IR to {}", out.display());
+ llvm_err(diag_handler, &msg)
+ })?;
+ }
+
+ if config.emit_asm {
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module.name);
+ let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
+
+ // We can't use the same module for asm and object code output,
+ // because that triggers various errors like invalid IR or broken
+ // binaries. So we must clone the module to produce the asm output
+ // if we are also producing object code.
+ let llmod = if let EmitObj::ObjectCode(_) = config.emit_obj {
+ llvm::LLVMCloneModule(llmod)
+ } else {
+ llmod
+ };
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ write_output_file(
+ diag_handler,
+ tm,
+ cpm,
+ llmod,
+ &path,
+ None,
+ llvm::FileType::AssemblyFile,
+ &cgcx.prof,
+ )
+ })?;
+ }
+
+ match config.emit_obj {
+ EmitObj::ObjectCode(_) => {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &*module.name);
+
+ let dwo_out = cgcx.output_filenames.temp_path_dwo(module_name);
+ let dwo_out = match (cgcx.split_debuginfo, cgcx.split_dwarf_kind) {
+ // Don't change how DWARF is emitted when disabled.
+ (SplitDebuginfo::Off, _) => None,
+ // Don't provide a DWARF object path if split debuginfo is enabled but this is
+ // a platform that doesn't support Split DWARF.
+ _ if !cgcx.target_can_use_split_dwarf => None,
+ // Don't provide a DWARF object path in single mode; sections will be
+ // written into the object as normal but ignored by the linker.
+ (_, SplitDwarfKind::Single) => None,
+ // Emit (a subset of the) DWARF into a separate dwarf object file in split
+ // mode.
+ (_, SplitDwarfKind::Split) => Some(dwo_out.as_path()),
+ };
+
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ write_output_file(
+ diag_handler,
+ tm,
+ cpm,
+ llmod,
+ &obj_out,
+ dwo_out,
+ llvm::FileType::ObjectFile,
+ &cgcx.prof,
+ )
+ })?;
+ }
+
+ EmitObj::Bitcode => {
+ debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
+ if let Err(e) = link_or_copy(&bc_out, &obj_out) {
+ diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
+ }
+
+ if !config.emit_bc {
+ debug!("removing_bitcode {:?}", bc_out);
+ ensure_removed(diag_handler, &bc_out);
+ }
+ }
+
+ EmitObj::None => {}
+ }
+
+ drop(handlers);
+ }
+
+ Ok(module.into_compiled_module(
+ config.emit_obj != EmitObj::None,
+ cgcx.target_can_use_split_dwarf
+ && cgcx.split_debuginfo != SplitDebuginfo::Off
+ && cgcx.split_dwarf_kind == SplitDwarfKind::Split,
+ config.emit_bc,
+ &cgcx.output_filenames,
+ ))
+}
+
+fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: &[u8]) -> Vec<u8> {
+ let mut asm = format!(".section {},\"{}\"\n", section_name, section_flags).into_bytes();
+ asm.extend_from_slice(b".ascii \"");
+ asm.reserve(data.len());
+ for &byte in data {
+ if byte == b'\\' || byte == b'"' {
+ asm.push(b'\\');
+ asm.push(byte);
+ } else if byte < 0x20 || byte >= 0x80 {
+ // Avoid non-UTF-8 inline assembly. Use octal escape sequences, because
+ // they are fixed-width, while hex escapes would consume any following
+ // characters.
+ asm.push(b'\\');
+ asm.push(b'0' + ((byte >> 6) & 0x7));
+ asm.push(b'0' + ((byte >> 3) & 0x7));
+ asm.push(b'0' + ((byte >> 0) & 0x7));
+ } else {
+ asm.push(byte);
+ }
+ }
+ asm.extend_from_slice(b"\"\n");
+ asm
+}
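+
+// For example (illustrative), bitcode files begin with the magic bytes
+// `b"BC\xc0\xde"`, so `create_section_with_flags_asm(".llvmbc", "e", b"BC\xc0\xde")`
+// returns the assembly:
+//
+//     .section .llvmbc,"e"
+//     .ascii "BC\300\336"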
+
+/// Embed the bitcode of an LLVM module in the LLVM module itself.
+///
+/// This is done primarily for iOS where it appears to be standard to compile C
+/// code at least with `-fembed-bitcode` which creates two sections in the
+/// executable:
+///
+/// * __LLVM,__bitcode
+/// * __LLVM,__cmdline
+///
+/// It appears *both* of these sections are necessary to get the linker to
+/// recognize what's going on. A suitable cmdline value is taken from the
+/// target spec.
+///
+/// Furthermore, debug/O1 builds don't actually embed bitcode but rather just
+/// embed an empty section.
+///
+/// Basically all of this is us attempting to follow in the footsteps of clang
+/// on iOS. See #35968 for lots more info.
+unsafe fn embed_bitcode(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ llcx: &llvm::Context,
+ llmod: &llvm::Module,
+ cmdline: &str,
+ bitcode: &[u8],
+) {
+ // We're adding custom sections to the output object file, but we definitely
+ // do not want these custom sections to make their way into the final linked
+ // executable. The purpose of these custom sections is for tooling
+ // surrounding object files to work with the LLVM IR, if necessary. For
+ // example rustc's own LTO will look for LLVM IR inside of the object file
+ // in these sections by default.
+ //
+ // Handling this differs a bit depending on the object file format
+ // used by the backend, broken down into a few different categories:
+ //
+ // * Mach-O - this is for macOS. Inspecting the source code for the native
+ // linker here shows that the `.llvmbc` and `.llvmcmd` sections are
+ // automatically skipped by the linker. In that case there's nothing extra
+ // that we need to do here.
+ //
+ // * Wasm - the native LLD linker is hard-coded to skip `.llvmbc` and
+ // `.llvmcmd` sections, so there's nothing extra we need to do.
+ //
+ // * COFF - if we don't do anything the linker will by default copy all
+ // these sections to the output artifact, not what we want! To subvert
+ // this we want to flag the sections we inserted here as
+ // `IMAGE_SCN_LNK_REMOVE`.
+ //
+ // * ELF - this is very similar to COFF above. One difference is that these
+ // sections are removed from the output linked artifact when
+ // `--gc-sections` is passed, which we pass by default. If that flag isn't
+ // passed though then these sections will show up in the final output.
+ // Additionally the flag that we need to set here is `SHF_EXCLUDE`.
+ //
+ // Unfortunately, LLVM provides no way to set custom section flags. For ELF
+ // and COFF we emit the sections using module level inline assembly for that
+ // reason (see issue #90326 for historical background).
+ let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
+ || cgcx.opts.target_triple.triple().contains("-darwin")
+ || cgcx.opts.target_triple.triple().contains("-tvos")
+ || cgcx.opts.target_triple.triple().contains("-watchos");
+ if is_apple
+ || cgcx.opts.target_triple.triple().starts_with("wasm")
+ || cgcx.opts.target_triple.triple().starts_with("asmjs")
+ {
+ // We don't need custom section flags here, so we create plain LLVM globals.
+ let llconst = common::bytes_in_context(llcx, bitcode);
+ let llglobal = llvm::LLVMAddGlobal(
+ llmod,
+ common::val_ty(llconst),
+ "rustc.embedded.module\0".as_ptr().cast(),
+ );
+ llvm::LLVMSetInitializer(llglobal, llconst);
+
+ let section = if is_apple { "__LLVM,__bitcode\0" } else { ".llvmbc\0" };
+ llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
+ llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+ llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
+
+ let llconst = common::bytes_in_context(llcx, cmdline.as_bytes());
+ let llglobal = llvm::LLVMAddGlobal(
+ llmod,
+ common::val_ty(llconst),
+ "rustc.embedded.cmdline\0".as_ptr().cast(),
+ );
+ llvm::LLVMSetInitializer(llglobal, llconst);
+ let section = if is_apple { "__LLVM,__cmdline\0" } else { ".llvmcmd\0" };
+ llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
+ llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+ } else {
+ // We need custom section flags, so emit module-level inline assembly.
+ let section_flags = if cgcx.is_pe_coff { "n" } else { "e" };
+ let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode);
+ llvm::LLVMRustAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+ let asm = create_section_with_flags_asm(".llvmcmd", section_flags, cmdline.as_bytes());
+ llvm::LLVMRustAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+ }
+}
+
+pub unsafe fn with_llvm_pmb(
+ llmod: &llvm::Module,
+ config: &ModuleConfig,
+ opt_level: llvm::CodeGenOptLevel,
+ prepare_for_thin_lto: bool,
+ f: &mut dyn FnMut(&llvm::PassManagerBuilder),
+) {
+ use std::ptr;
+
+ // Create the PassManagerBuilder for LLVM. We configure it with
+ // reasonable defaults and prepare it to actually populate the pass
+ // manager.
+ let builder = llvm::LLVMRustPassManagerBuilderCreate();
+ let opt_size = config.opt_size.map_or(llvm::CodeGenOptSizeNone, |x| to_llvm_opt_settings(x).1);
+ let inline_threshold = config.inline_threshold;
+ let pgo_gen_path = get_pgo_gen_path(config);
+ let pgo_use_path = get_pgo_use_path(config);
+ let pgo_sample_use_path = get_pgo_sample_use_path(config);
+
+ llvm::LLVMRustConfigurePassManagerBuilder(
+ builder,
+ opt_level,
+ config.merge_functions,
+ config.vectorize_slp,
+ config.vectorize_loop,
+ prepare_for_thin_lto,
+ pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+ pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+ pgo_sample_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+ opt_size as c_int,
+ );
+
+ llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins);
+
+ // Here we match what clang does (kinda). For O0 we only inline
+ // always-inline functions (but don't add lifetime intrinsics), at O1 we
+ // inline with lifetime intrinsics, and at O2+ we add an inliner with
+ // thresholds copied from clang.
+ match (opt_level, opt_size, inline_threshold) {
+ (.., Some(t)) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, t);
+ }
+ (llvm::CodeGenOptLevel::Aggressive, ..) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 275);
+ }
+ (_, llvm::CodeGenOptSizeDefault, _) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 75);
+ }
+ (_, llvm::CodeGenOptSizeAggressive, _) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 25);
+ }
+ (llvm::CodeGenOptLevel::None, ..) => {
+ llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
+ }
+ (llvm::CodeGenOptLevel::Less, ..) => {
+ llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
+ }
+ (llvm::CodeGenOptLevel::Default, ..) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 225);
+ }
+ }
+
+ f(builder);
+ llvm::LLVMRustPassManagerBuilderDispose(builder);
+}
+
+// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
+// This is required to satisfy `dllimport` references to static data in .rlibs
+// when using the MSVC linker. We do this only for data, as the linker can fix
+// up code references on its own.
+// See #26591, #27438
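+//
+// For example (illustrative), an exported `static FOO: u32` gets a companion
+// global equivalent to `__imp_FOO: *const u32 = &FOO` (with an extra leading
+// underscore, `__imp__FOO`, on x86).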
+fn create_msvc_imps(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ llcx: &llvm::Context,
+ llmod: &llvm::Module,
+) {
+ if !cgcx.msvc_imps_needed {
+ return;
+ }
+ // The x86 ABI seems to require that leading underscores are added to symbol
+ // names, so we need an extra underscore on x86. There's also a leading
+ // '\x01' here which disables LLVM's symbol mangling (i.e., no extra
+ // underscores are added in front).
+ let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };
+
+ unsafe {
+ let i8p_ty = Type::i8p_llcx(llcx);
+ let globals = base::iter_globals(llmod)
+ .filter(|&val| {
+ llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
+ && llvm::LLVMIsDeclaration(val) == 0
+ })
+ .filter_map(|val| {
+ // Exclude some symbols that we know are not Rust symbols.
+ let name = llvm::get_value_name(val);
+ if ignored(name) { None } else { Some((val, name)) }
+ })
+ .map(move |(val, name)| {
+ let mut imp_name = prefix.as_bytes().to_vec();
+ imp_name.extend(name);
+ let imp_name = CString::new(imp_name).unwrap();
+ (imp_name, val)
+ })
+ .collect::<Vec<_>>();
+
+ for (imp_name, val) in globals {
+ let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast());
+ llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
+ llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
+ }
+ }
+
+ // Use this function to exclude certain symbols from `__imp` generation.
+ fn ignored(symbol_name: &[u8]) -> bool {
+ // These are symbols generated by LLVM's profiling instrumentation
+ symbol_name.starts_with(b"__llvm_profile_")
+ }
+}
+
+fn record_artifact_size(
+ self_profiler_ref: &SelfProfilerRef,
+ artifact_kind: &'static str,
+ path: &Path,
+) {
+ // Don't stat the file if we are not going to record its size.
+ if !self_profiler_ref.enabled() {
+ return;
+ }
+
+ if let Some(artifact_name) = path.file_name() {
+ let file_size = std::fs::metadata(path).map(|m| m.len()).unwrap_or(0);
+ self_profiler_ref.artifact_size(artifact_kind, artifact_name.to_string_lossy(), file_size);
+ }
+}